| keyword (stringclasses, 7 values) | repo_name (stringlengths, 8-98) | file_path (stringlengths, 4-244) | file_extension (stringclasses, 29 values) | file_size (int64, 0-84.1M) | line_count (int64, 0-1.6M) | content (stringlengths, 1-84.1M, nullable) | language (stringclasses, 14 values) |
|---|---|---|---|---|---|---|---|
| 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h | .h | 5,539 | 159 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MATRIXBASEEIGENVALUES_H
#define EIGEN_MATRIXBASEEIGENVALUES_H
namespace Eigen {
namespace internal {
template<typename Derived, bool IsComplex>
struct eigenvalues_selector
{
// this is the implementation for the case IsComplex = true
static inline typename MatrixBase<Derived>::EigenvaluesReturnType const
run(const MatrixBase<Derived>& m)
{
typedef typename Derived::PlainObject PlainObject;
PlainObject m_eval(m);
return ComplexEigenSolver<PlainObject>(m_eval, false).eigenvalues();
}
};
template<typename Derived>
struct eigenvalues_selector<Derived, false>
{
static inline typename MatrixBase<Derived>::EigenvaluesReturnType const
run(const MatrixBase<Derived>& m)
{
typedef typename Derived::PlainObject PlainObject;
PlainObject m_eval(m);
return EigenSolver<PlainObject>(m_eval, false).eigenvalues();
}
};
} // end namespace internal
/** \brief Computes the eigenvalues of a matrix
* \returns Column vector containing the eigenvalues.
*
* \eigenvalues_module
* This function computes the eigenvalues with the help of the EigenSolver
* class (for real matrices) or the ComplexEigenSolver class (for complex
* matrices).
*
* The eigenvalues are repeated according to their algebraic multiplicity,
* so there are as many eigenvalues as rows in the matrix.
*
* The SelfAdjointView class provides a better algorithm for selfadjoint
* matrices.
*
* Example: \include MatrixBase_eigenvalues.cpp
* Output: \verbinclude MatrixBase_eigenvalues.out
*
* \sa EigenSolver::eigenvalues(), ComplexEigenSolver::eigenvalues(),
* SelfAdjointView::eigenvalues()
*/
template<typename Derived>
inline typename MatrixBase<Derived>::EigenvaluesReturnType
MatrixBase<Derived>::eigenvalues() const
{
return internal::eigenvalues_selector<Derived, NumTraits<Scalar>::IsComplex>::run(derived());
}
/** \brief Computes the eigenvalues of a matrix
* \returns Column vector containing the eigenvalues.
*
* \eigenvalues_module
* This function computes the eigenvalues with the help of the
* SelfAdjointEigenSolver class. The eigenvalues are repeated according to
* their algebraic multiplicity, so there are as many eigenvalues as rows in
* the matrix.
*
* Example: \include SelfAdjointView_eigenvalues.cpp
* Output: \verbinclude SelfAdjointView_eigenvalues.out
*
* \sa SelfAdjointEigenSolver::eigenvalues(), MatrixBase::eigenvalues()
*/
template<typename MatrixType, unsigned int UpLo>
inline typename SelfAdjointView<MatrixType, UpLo>::EigenvaluesReturnType
SelfAdjointView<MatrixType, UpLo>::eigenvalues() const
{
PlainObject thisAsMatrix(*this);
return SelfAdjointEigenSolver<PlainObject>(thisAsMatrix, false).eigenvalues();
}
/** \brief Computes the L2 operator norm
* \returns Operator norm of the matrix.
*
* \eigenvalues_module
* This function computes the L2 operator norm of a matrix, which is also
* known as the spectral norm. The norm of a matrix \f$ A \f$ is defined to be
* \f[ \|A\|_2 = \max_x \frac{\|Ax\|_2}{\|x\|_2} \f]
* where the maximum is over all vectors and the norm on the right is the
* Euclidean vector norm. The norm equals the largest singular value, which is
* the square root of the largest eigenvalue of the positive semi-definite
* matrix \f$ A^*A \f$.
*
* The current implementation uses the eigenvalues of \f$ A^*A \f$, as computed
* by SelfAdjointView::eigenvalues(), to compute the operator norm of a
* matrix. The SelfAdjointView class provides a better algorithm for
* selfadjoint matrices.
*
* Example: \include MatrixBase_operatorNorm.cpp
* Output: \verbinclude MatrixBase_operatorNorm.out
*
* \sa SelfAdjointView::eigenvalues(), SelfAdjointView::operatorNorm()
*/
template<typename Derived>
inline typename MatrixBase<Derived>::RealScalar
MatrixBase<Derived>::operatorNorm() const
{
using std::sqrt;
typename Derived::PlainObject m_eval(derived());
// FIXME if it is really guaranteed that the eigenvalues are already sorted,
// then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough.
return sqrt((m_eval*m_eval.adjoint())
.eval()
.template selfadjointView<Lower>()
.eigenvalues()
.maxCoeff()
);
}
/** \brief Computes the L2 operator norm
* \returns Operator norm of the matrix.
*
* \eigenvalues_module
* This function computes the L2 operator norm of a self-adjoint matrix. For a
* self-adjoint matrix, the operator norm is the largest eigenvalue.
*
* The current implementation uses the eigenvalues of the matrix, as computed
* by eigenvalues(), to compute the operator norm of the matrix.
*
* Example: \include SelfAdjointView_operatorNorm.cpp
* Output: \verbinclude SelfAdjointView_operatorNorm.out
*
* \sa eigenvalues(), MatrixBase::operatorNorm()
*/
template<typename MatrixType, unsigned int UpLo>
inline typename SelfAdjointView<MatrixType, UpLo>::RealScalar
SelfAdjointView<MatrixType, UpLo>::operatorNorm() const
{
return eigenvalues().cwiseAbs().maxCoeff();
}
} // end namespace Eigen
#endif
| Unknown |
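A minimal usage sketch (not part of the file above) for the members documented in MatrixBaseEigenvalues.h: MatrixBase::eigenvalues(), SelfAdjointView::eigenvalues(), and operatorNorm(). The 2x2 test matrix is chosen purely for illustration, and Eigen 3.3 headers are assumed to be on the include path.
#include <Eigen/Dense>
#include <iostream>
int main()
{
  Eigen::Matrix2d A;
  A << 2, 1,
       1, 2;                       // toy symmetric matrix, illustration only
  // General path: eigenvalues of a real matrix are returned as complex numbers.
  Eigen::Vector2cd ev = A.eigenvalues();
  std::cout << "eigenvalues: " << ev.transpose() << "\n";
  // Self-adjoint path: faster algorithm, real eigenvalues.
  Eigen::Vector2d evSA = A.selfadjointView<Eigen::Lower>().eigenvalues();
  std::cout << "self-adjoint eigenvalues: " << evSA.transpose() << "\n";
  // L2 operator norm = largest singular value.
  std::cout << "operator norm: " << A.operatorNorm() << "\n";
}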
| 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h | .h | 4,178 | 92 |
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to LAPACKe
 *     Complex Schur decomposition needed for complex unsymmetric eigenvalues/eigenvectors.
********************************************************************************
*/
#ifndef EIGEN_COMPLEX_SCHUR_LAPACKE_H
#define EIGEN_COMPLEX_SCHUR_LAPACKE_H
namespace Eigen {
/** \internal Specialization for the data types supported by LAPACKe */
#define EIGEN_LAPACKE_SCHUR_COMPLEX(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX, LAPACKE_PREFIX_U, EIGCOLROW, LAPACKE_COLROW) \
template<> template<typename InputType> inline \
ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const EigenBase<InputType>& matrix, bool computeU) \
{ \
typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> MatrixType; \
typedef MatrixType::RealScalar RealScalar; \
typedef std::complex<RealScalar> ComplexScalar; \
\
eigen_assert(matrix.cols() == matrix.rows()); \
\
m_matUisUptodate = false; \
if(matrix.cols() == 1) \
{ \
m_matT = matrix.derived().template cast<ComplexScalar>(); \
if(computeU) m_matU = ComplexMatrixType::Identity(1,1); \
m_info = Success; \
m_isInitialized = true; \
m_matUisUptodate = computeU; \
return *this; \
} \
lapack_int n = internal::convert_index<lapack_int>(matrix.cols()), sdim, info; \
lapack_int matrix_order = LAPACKE_COLROW; \
char jobvs, sort='N'; \
LAPACK_##LAPACKE_PREFIX_U##_SELECT1 select = 0; \
jobvs = (computeU) ? 'V' : 'N'; \
m_matU.resize(n, n); \
lapack_int ldvs = internal::convert_index<lapack_int>(m_matU.outerStride()); \
m_matT = matrix; \
lapack_int lda = internal::convert_index<lapack_int>(m_matT.outerStride()); \
Matrix<EIGTYPE, Dynamic, Dynamic> w; \
w.resize(n, 1);\
info = LAPACKE_##LAPACKE_PREFIX##gees( matrix_order, jobvs, sort, select, n, (LAPACKE_TYPE*)m_matT.data(), lda, &sdim, (LAPACKE_TYPE*)w.data(), (LAPACKE_TYPE*)m_matU.data(), ldvs ); \
if(info == 0) \
m_info = Success; \
else \
m_info = NoConvergence; \
\
m_isInitialized = true; \
m_matUisUptodate = computeU; \
return *this; \
\
}
EIGEN_LAPACKE_SCHUR_COMPLEX(dcomplex, lapack_complex_double, z, Z, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SCHUR_COMPLEX(scomplex, lapack_complex_float, c, C, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SCHUR_COMPLEX(dcomplex, lapack_complex_double, z, Z, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SCHUR_COMPLEX(scomplex, lapack_complex_float, c, C, RowMajor, LAPACK_ROW_MAJOR)
} // end namespace Eigen
#endif // EIGEN_COMPLEX_SCHUR_LAPACKE_H
| Unknown |
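For context, the LAPACKE gees binding above only replaces the backend of ComplexSchur; it is compiled in when Eigen is built with EIGEN_USE_LAPACKE (or EIGEN_USE_MKL_ALL), and the class is used the same way either way. A minimal sketch with an arbitrary 3x3 complex matrix:
#include <Eigen/Dense>
#include <iostream>
int main()
{
  Eigen::MatrixXcd A = Eigen::MatrixXcd::Random(3, 3);
  // Complex Schur decomposition: A = U T U^*, with T upper triangular.
  Eigen::ComplexSchur<Eigen::MatrixXcd> schur(A);   // computeU = true by default
  if (schur.info() == Eigen::Success)
  {
    std::cout << "T =\n" << schur.matrixT() << "\n";
    std::cout << "U =\n" << schur.matrixU() << "\n";
    // The eigenvalues of A appear on the diagonal of T.
    std::cout << "diag(T) = " << schur.matrixT().diagonal().transpose() << "\n";
  }
}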
| 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h | .h | 4,104 | 88 |
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to LAPACKe
* Self-adjoint eigenvalues/eigenvectors.
********************************************************************************
*/
#ifndef EIGEN_SAEIGENSOLVER_LAPACKE_H
#define EIGEN_SAEIGENSOLVER_LAPACKE_H
namespace Eigen {
/** \internal Specialization for the data types supported by LAPACKe */
#define EIGEN_LAPACKE_EIG_SELFADJ_2(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME, EIGCOLROW ) \
template<> template<typename InputType> inline \
SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const EigenBase<InputType>& matrix, int options) \
{ \
eigen_assert(matrix.cols() == matrix.rows()); \
eigen_assert((options&~(EigVecMask|GenEigMask))==0 \
&& (options&EigVecMask)!=EigVecMask \
&& "invalid option parameter"); \
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; \
lapack_int n = internal::convert_index<lapack_int>(matrix.cols()), lda, info; \
m_eivalues.resize(n,1); \
m_subdiag.resize(n-1); \
m_eivec = matrix; \
\
if(n==1) \
{ \
m_eivalues.coeffRef(0,0) = numext::real(m_eivec.coeff(0,0)); \
if(computeEigenvectors) m_eivec.setOnes(n,n); \
m_info = Success; \
m_isInitialized = true; \
m_eigenvectorsOk = computeEigenvectors; \
return *this; \
} \
\
lda = internal::convert_index<lapack_int>(m_eivec.outerStride()); \
char jobz, uplo='L'/*, range='A'*/; \
jobz = computeEigenvectors ? 'V' : 'N'; \
\
info = LAPACKE_##LAPACKE_NAME( LAPACK_COL_MAJOR, jobz, uplo, n, (LAPACKE_TYPE*)m_eivec.data(), lda, (LAPACKE_RTYPE*)m_eivalues.data() ); \
m_info = (info==0) ? Success : NoConvergence; \
m_isInitialized = true; \
m_eigenvectorsOk = computeEigenvectors; \
return *this; \
}
#define EIGEN_LAPACKE_EIG_SELFADJ(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME ) \
EIGEN_LAPACKE_EIG_SELFADJ_2(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME, ColMajor ) \
EIGEN_LAPACKE_EIG_SELFADJ_2(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME, RowMajor )
EIGEN_LAPACKE_EIG_SELFADJ(double, double, double, dsyev)
EIGEN_LAPACKE_EIG_SELFADJ(float, float, float, ssyev)
EIGEN_LAPACKE_EIG_SELFADJ(dcomplex, lapack_complex_double, double, zheev)
EIGEN_LAPACKE_EIG_SELFADJ(scomplex, lapack_complex_float, float, cheev)
} // end namespace Eigen
#endif // EIGEN_SAEIGENSOLVER_H
| Unknown |
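The syev/heev bindings above specialize SelfAdjointEigenSolver::compute(); typical use of the solver itself looks like this (a sketch with a small symmetric matrix, not taken from the Eigen documentation):
#include <Eigen/Dense>
#include <iostream>
int main()
{
  Eigen::MatrixXd A(3, 3);
  A << 4, 1, 0,
       1, 3, 1,
       0, 1, 2;                    // symmetric input; only the lower triangle is referenced
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(A);  // ComputeEigenvectors by default
  if (es.info() == Eigen::Success)
  {
    std::cout << "eigenvalues (ascending): " << es.eigenvalues().transpose() << "\n";
    std::cout << "eigenvectors:\n" << es.eigenvectors() << "\n";
  }
}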
| 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h | .h | 3,650 | 78 |
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to LAPACKe
 *     Real Schur decomposition needed for real unsymmetric eigenvalues/eigenvectors.
********************************************************************************
*/
#ifndef EIGEN_REAL_SCHUR_LAPACKE_H
#define EIGEN_REAL_SCHUR_LAPACKE_H
namespace Eigen {
/** \internal Specialization for the data types supported by LAPACKe */
#define EIGEN_LAPACKE_SCHUR_REAL(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX, LAPACKE_PREFIX_U, EIGCOLROW, LAPACKE_COLROW) \
template<> template<typename InputType> inline \
RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const EigenBase<InputType>& matrix, bool computeU) \
{ \
eigen_assert(matrix.cols() == matrix.rows()); \
\
lapack_int n = internal::convert_index<lapack_int>(matrix.cols()), sdim, info; \
lapack_int matrix_order = LAPACKE_COLROW; \
char jobvs, sort='N'; \
LAPACK_##LAPACKE_PREFIX_U##_SELECT2 select = 0; \
jobvs = (computeU) ? 'V' : 'N'; \
m_matU.resize(n, n); \
lapack_int ldvs = internal::convert_index<lapack_int>(m_matU.outerStride()); \
m_matT = matrix; \
lapack_int lda = internal::convert_index<lapack_int>(m_matT.outerStride()); \
Matrix<EIGTYPE, Dynamic, Dynamic> wr, wi; \
wr.resize(n, 1); wi.resize(n, 1); \
info = LAPACKE_##LAPACKE_PREFIX##gees( matrix_order, jobvs, sort, select, n, (LAPACKE_TYPE*)m_matT.data(), lda, &sdim, (LAPACKE_TYPE*)wr.data(), (LAPACKE_TYPE*)wi.data(), (LAPACKE_TYPE*)m_matU.data(), ldvs ); \
if(info == 0) \
m_info = Success; \
else \
m_info = NoConvergence; \
\
m_isInitialized = true; \
m_matUisUptodate = computeU; \
return *this; \
\
}
EIGEN_LAPACKE_SCHUR_REAL(double, double, d, D, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SCHUR_REAL(float, float, s, S, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SCHUR_REAL(double, double, d, D, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SCHUR_REAL(float, float, s, S, RowMajor, LAPACK_ROW_MAJOR)
} // end namespace Eigen
#endif // EIGEN_REAL_SCHUR_LAPACKE_H
| Unknown |
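A quick numerical check of the real Schur relation A = U T U^T as computed by RealSchur (the gees binding above swaps in LAPACK for the same decomposition). A sketch with a random 4x4 matrix:
#include <Eigen/Dense>
#include <iostream>
int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
  Eigen::RealSchur<Eigen::MatrixXd> schur(A);        // computeU = true by default
  const Eigen::MatrixXd& T = schur.matrixT();        // quasi-triangular factor
  const Eigen::MatrixXd& U = schur.matrixU();        // orthogonal factor
  // The residual of the decomposition should be at round-off level.
  std::cout << "||A - U T U^T|| = " << (A - U * T * U.transpose()).norm() << "\n";
}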
| 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/Eigenvalues/ComplexEigenSolver.h | .h | 12,558 | 347 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Claire Maurice
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COMPLEX_EIGEN_SOLVER_H
#define EIGEN_COMPLEX_EIGEN_SOLVER_H
#include "./ComplexSchur.h"
namespace Eigen {
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class ComplexEigenSolver
*
* \brief Computes eigenvalues and eigenvectors of general complex matrices
*
* \tparam _MatrixType the type of the matrix of which we are
* computing the eigendecomposition; this is expected to be an
* instantiation of the Matrix class template.
*
* The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars
* \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v
* \f$. If \f$ D \f$ is a diagonal matrix with the eigenvalues on
* the diagonal, and \f$ V \f$ is a matrix with the eigenvectors as
* its columns, then \f$ A V = V D \f$. The matrix \f$ V \f$ is
* almost always invertible, in which case we have \f$ A = V D V^{-1}
* \f$. This is called the eigendecomposition.
*
* The main function in this class is compute(), which computes the
* eigenvalues and eigenvectors of a given matrix. The
* documentation for that function contains an example showing the
* main features of the class.
*
* \sa class EigenSolver, class SelfAdjointEigenSolver
*/
template<typename _MatrixType> class ComplexEigenSolver
{
public:
/** \brief Synonym for the template parameter \p _MatrixType. */
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
/** \brief Scalar type for matrices of type #MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** \brief Complex scalar type for #MatrixType.
*
* This is \c std::complex<Scalar> if #Scalar is real (e.g.,
* \c float or \c double) and just \c Scalar if #Scalar is
* complex.
*/
typedef std::complex<RealScalar> ComplexScalar;
/** \brief Type for vector of eigenvalues as returned by eigenvalues().
*
* This is a column vector with entries of type #ComplexScalar.
* The length of the vector is the size of #MatrixType.
*/
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options&(~RowMajor), MaxColsAtCompileTime, 1> EigenvalueType;
/** \brief Type for matrix of eigenvectors as returned by eigenvectors().
*
* This is a square matrix with entries of type #ComplexScalar.
* The size is the same as the size of #MatrixType.
*/
typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorType;
/** \brief Default constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via compute().
*/
ComplexEigenSolver()
: m_eivec(),
m_eivalues(),
m_schur(),
m_isInitialized(false),
m_eigenvectorsOk(false),
m_matX()
{}
/** \brief Default Constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem \a size.
* \sa ComplexEigenSolver()
*/
explicit ComplexEigenSolver(Index size)
: m_eivec(size, size),
m_eivalues(size),
m_schur(size),
m_isInitialized(false),
m_eigenvectorsOk(false),
m_matX(size, size)
{}
/** \brief Constructor; computes eigendecomposition of given matrix.
*
* \param[in] matrix Square matrix whose eigendecomposition is to be computed.
* \param[in] computeEigenvectors If true, both the eigenvectors and the
* eigenvalues are computed; if false, only the eigenvalues are
* computed.
*
* This constructor calls compute() to compute the eigendecomposition.
*/
template<typename InputType>
explicit ComplexEigenSolver(const EigenBase<InputType>& matrix, bool computeEigenvectors = true)
: m_eivec(matrix.rows(),matrix.cols()),
m_eivalues(matrix.cols()),
m_schur(matrix.rows()),
m_isInitialized(false),
m_eigenvectorsOk(false),
m_matX(matrix.rows(),matrix.cols())
{
compute(matrix.derived(), computeEigenvectors);
}
/** \brief Returns the eigenvectors of given matrix.
*
* \returns A const reference to the matrix whose columns are the eigenvectors.
*
* \pre Either the constructor
* ComplexEigenSolver(const MatrixType& matrix, bool) or the member
* function compute(const MatrixType& matrix, bool) has been called before
* to compute the eigendecomposition of a matrix, and
* \p computeEigenvectors was set to true (the default).
*
* This function returns a matrix whose columns are the eigenvectors. Column
* \f$ k \f$ is an eigenvector corresponding to eigenvalue number \f$ k
* \f$ as returned by eigenvalues(). The eigenvectors are normalized to
* have (Euclidean) norm equal to one. The matrix returned by this
* function is the matrix \f$ V \f$ in the eigendecomposition \f$ A = V D
* V^{-1} \f$, if it exists.
*
* Example: \include ComplexEigenSolver_eigenvectors.cpp
* Output: \verbinclude ComplexEigenSolver_eigenvectors.out
*/
const EigenvectorType& eigenvectors() const
{
eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
return m_eivec;
}
/** \brief Returns the eigenvalues of given matrix.
*
* \returns A const reference to the column vector containing the eigenvalues.
*
* \pre Either the constructor
* ComplexEigenSolver(const MatrixType& matrix, bool) or the member
* function compute(const MatrixType& matrix, bool) has been called before
* to compute the eigendecomposition of a matrix.
*
* This function returns a column vector containing the
* eigenvalues. Eigenvalues are repeated according to their
* algebraic multiplicity, so there are as many eigenvalues as
* rows in the matrix. The eigenvalues are not sorted in any particular
* order.
*
* Example: \include ComplexEigenSolver_eigenvalues.cpp
* Output: \verbinclude ComplexEigenSolver_eigenvalues.out
*/
const EigenvalueType& eigenvalues() const
{
eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
return m_eivalues;
}
/** \brief Computes eigendecomposition of given matrix.
*
* \param[in] matrix Square matrix whose eigendecomposition is to be computed.
* \param[in] computeEigenvectors If true, both the eigenvectors and the
* eigenvalues are computed; if false, only the eigenvalues are
* computed.
* \returns Reference to \c *this
*
* This function computes the eigenvalues of the complex matrix \p matrix.
* The eigenvalues() function can be used to retrieve them. If
* \p computeEigenvectors is true, then the eigenvectors are also computed
* and can be retrieved by calling eigenvectors().
*
* The matrix is first reduced to Schur form using the
* ComplexSchur class. The Schur decomposition is then used to
* compute the eigenvalues and eigenvectors.
*
* The cost of the computation is dominated by the cost of the
* Schur decomposition, which is \f$ O(n^3) \f$ where \f$ n \f$
* is the size of the matrix.
*
* Example: \include ComplexEigenSolver_compute.cpp
* Output: \verbinclude ComplexEigenSolver_compute.out
*/
template<typename InputType>
ComplexEigenSolver& compute(const EigenBase<InputType>& matrix, bool computeEigenvectors = true);
/** \brief Reports whether previous computation was successful.
*
* \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
return m_schur.info();
}
/** \brief Sets the maximum number of iterations allowed. */
ComplexEigenSolver& setMaxIterations(Index maxIters)
{
m_schur.setMaxIterations(maxIters);
return *this;
}
/** \brief Returns the maximum number of iterations. */
Index getMaxIterations()
{
return m_schur.getMaxIterations();
}
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
EigenvectorType m_eivec;
EigenvalueType m_eivalues;
ComplexSchur<MatrixType> m_schur;
bool m_isInitialized;
bool m_eigenvectorsOk;
EigenvectorType m_matX;
private:
void doComputeEigenvectors(RealScalar matrixnorm);
void sortEigenvalues(bool computeEigenvectors);
};
template<typename MatrixType>
template<typename InputType>
ComplexEigenSolver<MatrixType>&
ComplexEigenSolver<MatrixType>::compute(const EigenBase<InputType>& matrix, bool computeEigenvectors)
{
check_template_parameters();
// this code is inspired from Jampack
eigen_assert(matrix.cols() == matrix.rows());
// Do a complex Schur decomposition, A = U T U^*
// The eigenvalues are on the diagonal of T.
m_schur.compute(matrix.derived(), computeEigenvectors);
if(m_schur.info() == Success)
{
m_eivalues = m_schur.matrixT().diagonal();
if(computeEigenvectors)
doComputeEigenvectors(m_schur.matrixT().norm());
sortEigenvalues(computeEigenvectors);
}
m_isInitialized = true;
m_eigenvectorsOk = computeEigenvectors;
return *this;
}
template<typename MatrixType>
void ComplexEigenSolver<MatrixType>::doComputeEigenvectors(RealScalar matrixnorm)
{
const Index n = m_eivalues.size();
matrixnorm = numext::maxi(matrixnorm,(std::numeric_limits<RealScalar>::min)());
// Compute X such that T = X D X^(-1), where D is the diagonal of T.
// The matrix X is unit triangular.
m_matX = EigenvectorType::Zero(n, n);
for(Index k=n-1 ; k>=0 ; k--)
{
m_matX.coeffRef(k,k) = ComplexScalar(1.0,0.0);
// Compute X(i,k) using the (i,k) entry of the equation X T = D X
for(Index i=k-1 ; i>=0 ; i--)
{
m_matX.coeffRef(i,k) = -m_schur.matrixT().coeff(i,k);
if(k-i-1>0)
m_matX.coeffRef(i,k) -= (m_schur.matrixT().row(i).segment(i+1,k-i-1) * m_matX.col(k).segment(i+1,k-i-1)).value();
ComplexScalar z = m_schur.matrixT().coeff(i,i) - m_schur.matrixT().coeff(k,k);
if(z==ComplexScalar(0))
{
// If the i-th and k-th eigenvalues are equal, then z equals 0.
// Use a small value instead, to prevent division by zero.
numext::real_ref(z) = NumTraits<RealScalar>::epsilon() * matrixnorm;
}
m_matX.coeffRef(i,k) = m_matX.coeff(i,k) / z;
}
}
// Compute V as V = U X; now A = U T U^* = U X D X^(-1) U^* = V D V^(-1)
m_eivec.noalias() = m_schur.matrixU() * m_matX;
// .. and normalize the eigenvectors
for(Index k=0 ; k<n ; k++)
{
m_eivec.col(k).normalize();
}
}
template<typename MatrixType>
void ComplexEigenSolver<MatrixType>::sortEigenvalues(bool computeEigenvectors)
{
const Index n = m_eivalues.size();
for (Index i=0; i<n; i++)
{
Index k;
m_eivalues.cwiseAbs().tail(n-i).minCoeff(&k);
if (k != 0)
{
k += i;
std::swap(m_eivalues[k],m_eivalues[i]);
if(computeEigenvectors)
m_eivec.col(i).swap(m_eivec.col(k));
}
}
}
} // end namespace Eigen
#endif // EIGEN_COMPLEX_EIGEN_SOLVER_H
| Unknown |
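A sketch of ComplexEigenSolver in use, verifying A v = lambda v for the first eigenpair returned by eigenvalues()/eigenvectors(); the random 4x4 complex matrix is illustrative only:
#include <Eigen/Dense>
#include <complex>
#include <iostream>
int main()
{
  Eigen::MatrixXcd A = Eigen::MatrixXcd::Random(4, 4);
  Eigen::ComplexEigenSolver<Eigen::MatrixXcd> ces(A);  // computeEigenvectors = true by default
  if (ces.info() == Eigen::Success)
  {
    std::complex<double> lambda = ces.eigenvalues()[0];
    Eigen::VectorXcd v = ces.eigenvectors().col(0);
    // A*v and lambda*v should agree up to rounding errors.
    std::cout << "||A v - lambda v|| = " << (A * v - lambda * v).norm() << "\n";
  }
}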
| 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/Eigenvalues/RealSchur.h | .h | 20,749 | 554 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_REAL_SCHUR_H
#define EIGEN_REAL_SCHUR_H
#include "./HessenbergDecomposition.h"
namespace Eigen {
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class RealSchur
*
* \brief Performs a real Schur decomposition of a square matrix
*
* \tparam _MatrixType the type of the matrix of which we are computing the
* real Schur decomposition; this is expected to be an instantiation of the
* Matrix class template.
*
* Given a real square matrix A, this class computes the real Schur
* decomposition: \f$ A = U T U^T \f$ where U is a real orthogonal matrix and
* T is a real quasi-triangular matrix. An orthogonal matrix is a matrix whose
* inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular
* matrix is a block-triangular matrix whose diagonal consists of 1-by-1
* blocks and 2-by-2 blocks with complex eigenvalues. The eigenvalues of the
* blocks on the diagonal of T are the same as the eigenvalues of the matrix
* A, and thus the real Schur decomposition is used in EigenSolver to compute
* the eigendecomposition of a matrix.
*
* Call the function compute() to compute the real Schur decomposition of a
* given matrix. Alternatively, you can use the RealSchur(const MatrixType&, bool)
* constructor which computes the real Schur decomposition at construction
* time. Once the decomposition is computed, you can use the matrixU() and
* matrixT() functions to retrieve the matrices U and T in the decomposition.
*
* The documentation of RealSchur(const MatrixType&, bool) contains an example
* of the typical use of this class.
*
* \note The implementation is adapted from
* <a href="http://math.nist.gov/javanumerics/jama/">JAMA</a> (public domain).
* Their code is based on EISPACK.
*
* \sa class ComplexSchur, class EigenSolver, class ComplexEigenSolver
*/
template<typename _MatrixType> class RealSchur
{
public:
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
typedef typename MatrixType::Scalar Scalar;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
/** \brief Default constructor.
*
* \param [in] size Positive integer, size of the matrix whose Schur decomposition will be computed.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via compute(). The \p size parameter is only
* used as a hint. It is not an error to give a wrong \p size, but it may
* impair performance.
*
* \sa compute() for an example.
*/
explicit RealSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
: m_matT(size, size),
m_matU(size, size),
m_workspaceVector(size),
m_hess(size),
m_isInitialized(false),
m_matUisUptodate(false),
m_maxIters(-1)
{ }
/** \brief Constructor; computes real Schur decomposition of given matrix.
*
* \param[in] matrix Square matrix whose Schur decomposition is to be computed.
* \param[in] computeU If true, both T and U are computed; if false, only T is computed.
*
* This constructor calls compute() to compute the Schur decomposition.
*
* Example: \include RealSchur_RealSchur_MatrixType.cpp
* Output: \verbinclude RealSchur_RealSchur_MatrixType.out
*/
template<typename InputType>
explicit RealSchur(const EigenBase<InputType>& matrix, bool computeU = true)
: m_matT(matrix.rows(),matrix.cols()),
m_matU(matrix.rows(),matrix.cols()),
m_workspaceVector(matrix.rows()),
m_hess(matrix.rows()),
m_isInitialized(false),
m_matUisUptodate(false),
m_maxIters(-1)
{
compute(matrix.derived(), computeU);
}
/** \brief Returns the orthogonal matrix in the Schur decomposition.
*
* \returns A const reference to the matrix U.
*
* \pre Either the constructor RealSchur(const MatrixType&, bool) or the
* member function compute(const MatrixType&, bool) has been called before
* to compute the Schur decomposition of a matrix, and \p computeU was set
* to true (the default value).
*
* \sa RealSchur(const MatrixType&, bool) for an example
*/
const MatrixType& matrixU() const
{
eigen_assert(m_isInitialized && "RealSchur is not initialized.");
eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the RealSchur decomposition.");
return m_matU;
}
/** \brief Returns the quasi-triangular matrix in the Schur decomposition.
*
* \returns A const reference to the matrix T.
*
* \pre Either the constructor RealSchur(const MatrixType&, bool) or the
* member function compute(const MatrixType&, bool) has been called before
* to compute the Schur decomposition of a matrix.
*
* \sa RealSchur(const MatrixType&, bool) for an example
*/
const MatrixType& matrixT() const
{
eigen_assert(m_isInitialized && "RealSchur is not initialized.");
return m_matT;
}
/** \brief Computes Schur decomposition of given matrix.
*
* \param[in] matrix Square matrix whose Schur decomposition is to be computed.
* \param[in] computeU If true, both T and U are computed; if false, only T is computed.
* \returns Reference to \c *this
*
* The Schur decomposition is computed by first reducing the matrix to
* Hessenberg form using the class HessenbergDecomposition. The Hessenberg
* matrix is then reduced to triangular form by performing Francis QR
* iterations with implicit double shift. The cost of computing the Schur
* decomposition depends on the number of iterations; as a rough guide, it
* may be taken to be \f$25n^3\f$ flops if \a computeU is true and
* \f$10n^3\f$ flops if \a computeU is false.
*
* Example: \include RealSchur_compute.cpp
* Output: \verbinclude RealSchur_compute.out
*
* \sa compute(const MatrixType&, bool, Index)
*/
template<typename InputType>
RealSchur& compute(const EigenBase<InputType>& matrix, bool computeU = true);
/** \brief Computes Schur decomposition of a Hessenberg matrix H = Z T Z^T
* \param[in] matrixH Matrix in Hessenberg form H
* \param[in] matrixQ orthogonal matrix Q that transforms a matrix A to H : A = Q H Q^T
* \param computeU Computes the matrix U of the Schur vectors
* \return Reference to \c *this
*
* This routine assumes that the matrix is already reduced to Hessenberg form matrixH
* using either the class HessenbergDecomposition or some other means.
* It computes the upper quasi-triangular matrix T of the Schur decomposition of H.
* When computeU is true, this routine computes the matrix U such that
* A = U T U^T = (QZ) T (QZ)^T = Q H Q^T, where A is the initial matrix.
*
* NOTE Q is referenced if computeU is true; so, if the initial orthogonal matrix
* is not available, the user should give an identity matrix (Q.setIdentity())
*
* \sa compute(const MatrixType&, bool)
*/
template<typename HessMatrixType, typename OrthMatrixType>
RealSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU);
/** \brief Reports whether previous computation was successful.
*
* \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "RealSchur is not initialized.");
return m_info;
}
/** \brief Sets the maximum number of iterations allowed.
*
* If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size
* of the matrix.
*/
RealSchur& setMaxIterations(Index maxIters)
{
m_maxIters = maxIters;
return *this;
}
/** \brief Returns the maximum number of iterations. */
Index getMaxIterations()
{
return m_maxIters;
}
/** \brief Maximum number of iterations per row.
*
* If not otherwise specified, the maximum number of iterations is this number times the size of the
* matrix. It is currently set to 40.
*/
static const int m_maxIterationsPerRow = 40;
private:
MatrixType m_matT;
MatrixType m_matU;
ColumnVectorType m_workspaceVector;
HessenbergDecomposition<MatrixType> m_hess;
ComputationInfo m_info;
bool m_isInitialized;
bool m_matUisUptodate;
Index m_maxIters;
typedef Matrix<Scalar,3,1> Vector3s;
Scalar computeNormOfT();
Index findSmallSubdiagEntry(Index iu, const Scalar& considerAsZero);
void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift);
void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
void performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace);
};
template<typename MatrixType>
template<typename InputType>
RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const EigenBase<InputType>& matrix, bool computeU)
{
const Scalar considerAsZero = (std::numeric_limits<Scalar>::min)();
eigen_assert(matrix.cols() == matrix.rows());
Index maxIters = m_maxIters;
if (maxIters == -1)
maxIters = m_maxIterationsPerRow * matrix.rows();
Scalar scale = matrix.derived().cwiseAbs().maxCoeff();
if(scale<considerAsZero)
{
m_matT.setZero(matrix.rows(),matrix.cols());
if(computeU)
m_matU.setIdentity(matrix.rows(),matrix.cols());
m_info = Success;
m_isInitialized = true;
m_matUisUptodate = computeU;
return *this;
}
// Step 1. Reduce to Hessenberg form
m_hess.compute(matrix.derived()/scale);
// Step 2. Reduce to real Schur form
computeFromHessenberg(m_hess.matrixH(), m_hess.matrixQ(), computeU);
m_matT *= scale;
return *this;
}
template<typename MatrixType>
template<typename HessMatrixType, typename OrthMatrixType>
RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU)
{
using std::abs;
m_matT = matrixH;
if(computeU)
m_matU = matrixQ;
Index maxIters = m_maxIters;
if (maxIters == -1)
maxIters = m_maxIterationsPerRow * matrixH.rows();
m_workspaceVector.resize(m_matT.cols());
Scalar* workspace = &m_workspaceVector.coeffRef(0);
// The matrix m_matT is divided in three parts.
// Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
// Rows il,...,iu is the part we are working on (the active window).
// Rows iu+1,...,end are already brought in triangular form.
Index iu = m_matT.cols() - 1;
Index iter = 0; // iteration count for current eigenvalue
Index totalIter = 0; // iteration count for whole matrix
Scalar exshift(0); // sum of exceptional shifts
Scalar norm = computeNormOfT();
// sub-diagonal entries smaller than considerAsZero will be treated as zero.
// We use eps^2 to enable more precision in small eigenvalues.
Scalar considerAsZero = numext::maxi<Scalar>( norm * numext::abs2(NumTraits<Scalar>::epsilon()),
(std::numeric_limits<Scalar>::min)() );
if(norm!=Scalar(0))
{
while (iu >= 0)
{
Index il = findSmallSubdiagEntry(iu,considerAsZero);
// Check for convergence
if (il == iu) // One root found
{
m_matT.coeffRef(iu,iu) = m_matT.coeff(iu,iu) + exshift;
if (iu > 0)
m_matT.coeffRef(iu, iu-1) = Scalar(0);
iu--;
iter = 0;
}
else if (il == iu-1) // Two roots found
{
splitOffTwoRows(iu, computeU, exshift);
iu -= 2;
iter = 0;
}
else // No convergence yet
{
// The firstHouseholderVector vector has to be initialized to something to get rid of a silly GCC warning (-O1 -Wall -DNDEBUG )
Vector3s firstHouseholderVector = Vector3s::Zero(), shiftInfo;
computeShift(iu, iter, exshift, shiftInfo);
iter = iter + 1;
totalIter = totalIter + 1;
if (totalIter > maxIters) break;
Index im;
initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector);
performFrancisQRStep(il, im, iu, computeU, firstHouseholderVector, workspace);
}
}
}
if(totalIter <= maxIters)
m_info = Success;
else
m_info = NoConvergence;
m_isInitialized = true;
m_matUisUptodate = computeU;
return *this;
}
/** \internal Computes and returns vector L1 norm of T */
template<typename MatrixType>
inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
{
const Index size = m_matT.cols();
// FIXME to be efficient the following would require a triangular reduction code
// Scalar norm = m_matT.upper().cwiseAbs().sum()
// + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum();
Scalar norm(0);
for (Index j = 0; j < size; ++j)
norm += m_matT.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum();
return norm;
}
/** \internal Look for single small sub-diagonal element and returns its index */
template<typename MatrixType>
inline Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, const Scalar& considerAsZero)
{
using std::abs;
Index res = iu;
while (res > 0)
{
Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res));
s = numext::maxi<Scalar>(s * NumTraits<Scalar>::epsilon(), considerAsZero);
if (abs(m_matT.coeff(res,res-1)) <= s)
break;
res--;
}
return res;
}
/** \internal Update T given that rows iu-1 and iu decouple from the rest. */
template<typename MatrixType>
inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift)
{
using std::sqrt;
using std::abs;
const Index size = m_matT.cols();
// The eigenvalues of the 2x2 matrix [a b; c d] are
// trace +/- sqrt(discr/4) where discr = tr^2 - 4*det, tr = a + d, det = ad - bc
Scalar p = Scalar(0.5) * (m_matT.coeff(iu-1,iu-1) - m_matT.coeff(iu,iu));
Scalar q = p * p + m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu); // q = tr^2 / 4 - det = discr/4
m_matT.coeffRef(iu,iu) += exshift;
m_matT.coeffRef(iu-1,iu-1) += exshift;
if (q >= Scalar(0)) // Two real eigenvalues
{
Scalar z = sqrt(abs(q));
JacobiRotation<Scalar> rot;
if (p >= Scalar(0))
rot.makeGivens(p + z, m_matT.coeff(iu, iu-1));
else
rot.makeGivens(p - z, m_matT.coeff(iu, iu-1));
m_matT.rightCols(size-iu+1).applyOnTheLeft(iu-1, iu, rot.adjoint());
m_matT.topRows(iu+1).applyOnTheRight(iu-1, iu, rot);
m_matT.coeffRef(iu, iu-1) = Scalar(0);
if (computeU)
m_matU.applyOnTheRight(iu-1, iu, rot);
}
if (iu > 1)
m_matT.coeffRef(iu-1, iu-2) = Scalar(0);
}
/** \internal Form shift in shiftInfo, and update exshift if an exceptional shift is performed. */
template<typename MatrixType>
inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo)
{
using std::sqrt;
using std::abs;
shiftInfo.coeffRef(0) = m_matT.coeff(iu,iu);
shiftInfo.coeffRef(1) = m_matT.coeff(iu-1,iu-1);
shiftInfo.coeffRef(2) = m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu);
// Wilkinson's original ad hoc shift
if (iter == 10)
{
exshift += shiftInfo.coeff(0);
for (Index i = 0; i <= iu; ++i)
m_matT.coeffRef(i,i) -= shiftInfo.coeff(0);
Scalar s = abs(m_matT.coeff(iu,iu-1)) + abs(m_matT.coeff(iu-1,iu-2));
shiftInfo.coeffRef(0) = Scalar(0.75) * s;
shiftInfo.coeffRef(1) = Scalar(0.75) * s;
shiftInfo.coeffRef(2) = Scalar(-0.4375) * s * s;
}
// MATLAB's new ad hoc shift
if (iter == 30)
{
Scalar s = (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);
s = s * s + shiftInfo.coeff(2);
if (s > Scalar(0))
{
s = sqrt(s);
if (shiftInfo.coeff(1) < shiftInfo.coeff(0))
s = -s;
s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);
s = shiftInfo.coeff(0) - shiftInfo.coeff(2) / s;
exshift += s;
for (Index i = 0; i <= iu; ++i)
m_matT.coeffRef(i,i) -= s;
shiftInfo.setConstant(Scalar(0.964));
}
}
}
/** \internal Compute index im at which Francis QR step starts and the first Householder vector. */
template<typename MatrixType>
inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector)
{
using std::abs;
Vector3s& v = firstHouseholderVector; // alias to save typing
for (im = iu-2; im >= il; --im)
{
const Scalar Tmm = m_matT.coeff(im,im);
const Scalar r = shiftInfo.coeff(0) - Tmm;
const Scalar s = shiftInfo.coeff(1) - Tmm;
v.coeffRef(0) = (r * s - shiftInfo.coeff(2)) / m_matT.coeff(im+1,im) + m_matT.coeff(im,im+1);
v.coeffRef(1) = m_matT.coeff(im+1,im+1) - Tmm - r - s;
v.coeffRef(2) = m_matT.coeff(im+2,im+1);
if (im == il) {
break;
}
const Scalar lhs = m_matT.coeff(im,im-1) * (abs(v.coeff(1)) + abs(v.coeff(2)));
const Scalar rhs = v.coeff(0) * (abs(m_matT.coeff(im-1,im-1)) + abs(Tmm) + abs(m_matT.coeff(im+1,im+1)));
if (abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)
break;
}
}
/** \internal Perform a Francis QR step involving rows il:iu and columns im:iu. */
template<typename MatrixType>
inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace)
{
eigen_assert(im >= il);
eigen_assert(im <= iu-2);
const Index size = m_matT.cols();
for (Index k = im; k <= iu-2; ++k)
{
bool firstIteration = (k == im);
Vector3s v;
if (firstIteration)
v = firstHouseholderVector;
else
v = m_matT.template block<3,1>(k,k-1);
Scalar tau, beta;
Matrix<Scalar, 2, 1> ess;
v.makeHouseholder(ess, tau, beta);
if (beta != Scalar(0)) // if v is not zero
{
if (firstIteration && k > il)
m_matT.coeffRef(k,k-1) = -m_matT.coeff(k,k-1);
else if (!firstIteration)
m_matT.coeffRef(k,k-1) = beta;
// These Householder transformations form the O(n^3) part of the algorithm
m_matT.block(k, k, 3, size-k).applyHouseholderOnTheLeft(ess, tau, workspace);
m_matT.block(0, k, (std::min)(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace);
if (computeU)
m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace);
}
}
Matrix<Scalar, 2, 1> v = m_matT.template block<2,1>(iu-1, iu-2);
Scalar tau, beta;
Matrix<Scalar, 1, 1> ess;
v.makeHouseholder(ess, tau, beta);
if (beta != Scalar(0)) // if v is not zero
{
m_matT.coeffRef(iu-1, iu-2) = beta;
m_matT.block(iu-1, iu-1, 2, size-iu+1).applyHouseholderOnTheLeft(ess, tau, workspace);
m_matT.block(0, iu-1, iu+1, 2).applyHouseholderOnTheRight(ess, tau, workspace);
if (computeU)
m_matU.block(0, iu-1, size, 2).applyHouseholderOnTheRight(ess, tau, workspace);
}
// clean up pollution due to round-off errors
for (Index i = im+2; i <= iu; ++i)
{
m_matT.coeffRef(i,i-2) = Scalar(0);
if (i > im+2)
m_matT.coeffRef(i,i-3) = Scalar(0);
}
}
} // end namespace Eigen
#endif // EIGEN_REAL_SCHUR_H
| Unknown |
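The two-stage pipeline described in compute() (Hessenberg reduction, then Francis QR iterations) can also be driven manually through computeFromHessenberg(), reusing the orthogonal factor Q as the documentation above requires when computeU is true. A sketch under the assumption of a random 5x5 matrix:
#include <Eigen/Dense>
#include <iostream>
int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
  // Step 1: reduce A to Hessenberg form, A = Q H Q^T.
  Eigen::HessenbergDecomposition<Eigen::MatrixXd> hess(A);
  Eigen::MatrixXd H = hess.matrixH();
  Eigen::MatrixXd Q = hess.matrixQ();
  // Step 2: reduce H to real Schur form, reusing the orthogonal factor Q.
  Eigen::RealSchur<Eigen::MatrixXd> schur(A.rows());
  schur.computeFromHessenberg(H, Q, /*computeU=*/true);
  if (schur.info() == Eigen::Success)
    std::cout << "||A - U T U^T|| = "
              << (A - schur.matrixU() * schur.matrixT() * schur.matrixU().transpose()).norm()
              << "\n";
}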
| 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/Eigenvalues/HessenbergDecomposition.h | .h | 14,351 | 375 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_HESSENBERGDECOMPOSITION_H
#define EIGEN_HESSENBERGDECOMPOSITION_H
namespace Eigen {
namespace internal {
template<typename MatrixType> struct HessenbergDecompositionMatrixHReturnType;
template<typename MatrixType>
struct traits<HessenbergDecompositionMatrixHReturnType<MatrixType> >
{
typedef MatrixType ReturnType;
};
}
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class HessenbergDecomposition
*
* \brief Reduces a square matrix to Hessenberg form by an orthogonal similarity transformation
*
* \tparam _MatrixType the type of the matrix of which we are computing the Hessenberg decomposition
*
* This class performs an Hessenberg decomposition of a matrix \f$ A \f$. In
* the real case, the Hessenberg decomposition consists of an orthogonal
* matrix \f$ Q \f$ and a Hessenberg matrix \f$ H \f$ such that \f$ A = Q H
* Q^T \f$. An orthogonal matrix is a matrix whose inverse equals its
* transpose (\f$ Q^{-1} = Q^T \f$). A Hessenberg matrix has zeros below the
* subdiagonal, so it is almost upper triangular. The Hessenberg decomposition
* of a complex matrix is \f$ A = Q H Q^* \f$ with \f$ Q \f$ unitary (that is,
* \f$ Q^{-1} = Q^* \f$).
*
* Call the function compute() to compute the Hessenberg decomposition of a
* given matrix. Alternatively, you can use the
* HessenbergDecomposition(const MatrixType&) constructor which computes the
* Hessenberg decomposition at construction time. Once the decomposition is
* computed, you can use the matrixH() and matrixQ() functions to construct
* the matrices H and Q in the decomposition.
*
* The documentation for matrixH() contains an example of the typical use of
* this class.
*
* \sa class ComplexSchur, class Tridiagonalization, \ref QR_Module "QR Module"
*/
template<typename _MatrixType> class HessenbergDecomposition
{
public:
/** \brief Synonym for the template parameter \p _MatrixType. */
typedef _MatrixType MatrixType;
enum {
Size = MatrixType::RowsAtCompileTime,
SizeMinusOne = Size == Dynamic ? Dynamic : Size - 1,
Options = MatrixType::Options,
MaxSize = MatrixType::MaxRowsAtCompileTime,
MaxSizeMinusOne = MaxSize == Dynamic ? Dynamic : MaxSize - 1
};
/** \brief Scalar type for matrices of type #MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** \brief Type for vector of Householder coefficients.
*
* This is a column vector with entries of type #Scalar. The length of the
* vector is one less than the size of #MatrixType, if it is a fixed-size
* type.
*/
typedef Matrix<Scalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> CoeffVectorType;
/** \brief Return type of matrixQ() */
typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename CoeffVectorType::ConjugateReturnType>::type> HouseholderSequenceType;
typedef internal::HessenbergDecompositionMatrixHReturnType<MatrixType> MatrixHReturnType;
/** \brief Default constructor; the decomposition will be computed later.
*
* \param [in] size The size of the matrix whose Hessenberg decomposition will be computed.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via compute(). The \p size parameter is only
* used as a hint. It is not an error to give a wrong \p size, but it may
* impair performance.
*
* \sa compute() for an example.
*/
explicit HessenbergDecomposition(Index size = Size==Dynamic ? 2 : Size)
: m_matrix(size,size),
m_temp(size),
m_isInitialized(false)
{
if(size>1)
m_hCoeffs.resize(size-1);
}
/** \brief Constructor; computes Hessenberg decomposition of given matrix.
*
* \param[in] matrix Square matrix whose Hessenberg decomposition is to be computed.
*
* This constructor calls compute() to compute the Hessenberg
* decomposition.
*
* \sa matrixH() for an example.
*/
template<typename InputType>
explicit HessenbergDecomposition(const EigenBase<InputType>& matrix)
: m_matrix(matrix.derived()),
m_temp(matrix.rows()),
m_isInitialized(false)
{
if(matrix.rows()<2)
{
m_isInitialized = true;
return;
}
m_hCoeffs.resize(matrix.rows()-1,1);
_compute(m_matrix, m_hCoeffs, m_temp);
m_isInitialized = true;
}
/** \brief Computes Hessenberg decomposition of given matrix.
*
* \param[in] matrix Square matrix whose Hessenberg decomposition is to be computed.
* \returns Reference to \c *this
*
* The Hessenberg decomposition is computed by bringing the columns of the
* matrix successively in the required form using Householder reflections
* (see, e.g., Algorithm 7.4.2 in Golub \& Van Loan, <i>%Matrix
* Computations</i>). The cost is \f$ 10n^3/3 \f$ flops, where \f$ n \f$
* denotes the size of the given matrix.
*
* This method reuses the allocated data in the HessenbergDecomposition
* object.
*
* Example: \include HessenbergDecomposition_compute.cpp
* Output: \verbinclude HessenbergDecomposition_compute.out
*/
template<typename InputType>
HessenbergDecomposition& compute(const EigenBase<InputType>& matrix)
{
m_matrix = matrix.derived();
if(matrix.rows()<2)
{
m_isInitialized = true;
return *this;
}
m_hCoeffs.resize(matrix.rows()-1,1);
_compute(m_matrix, m_hCoeffs, m_temp);
m_isInitialized = true;
return *this;
}
/** \brief Returns the Householder coefficients.
*
* \returns a const reference to the vector of Householder coefficients
*
* \pre Either the constructor HessenbergDecomposition(const MatrixType&)
* or the member function compute(const MatrixType&) has been called
* before to compute the Hessenberg decomposition of a matrix.
*
* The Householder coefficients allow the reconstruction of the matrix
* \f$ Q \f$ in the Hessenberg decomposition from the packed data.
*
* \sa packedMatrix(), \ref Householder_Module "Householder module"
*/
const CoeffVectorType& householderCoefficients() const
{
eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
return m_hCoeffs;
}
/** \brief Returns the internal representation of the decomposition
*
* \returns a const reference to a matrix with the internal representation
* of the decomposition.
*
* \pre Either the constructor HessenbergDecomposition(const MatrixType&)
* or the member function compute(const MatrixType&) has been called
* before to compute the Hessenberg decomposition of a matrix.
*
* The returned matrix contains the following information:
* - the upper part and lower sub-diagonal represent the Hessenberg matrix H
* - the rest of the lower part contains the Householder vectors that, combined with
* Householder coefficients returned by householderCoefficients(),
* allow the matrix Q to be reconstructed as
* \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
* Here, the matrices \f$ H_i \f$ are the Householder transformations
* \f$ H_i = (I - h_i v_i v_i^T) \f$
* where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and
* \f$ v_i \f$ is the Householder vector defined by
* \f$ v_i = [ 0, \ldots, 0, 1, M(i+2,i), \ldots, M(N-1,i) ]^T \f$
* with M the matrix returned by this function.
*
* See LAPACK for further details on this packed storage.
*
* Example: \include HessenbergDecomposition_packedMatrix.cpp
* Output: \verbinclude HessenbergDecomposition_packedMatrix.out
*
* \sa householderCoefficients()
*/
const MatrixType& packedMatrix() const
{
eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
return m_matrix;
}
/** \brief Reconstructs the orthogonal matrix Q in the decomposition
*
* \returns object representing the matrix Q
*
* \pre Either the constructor HessenbergDecomposition(const MatrixType&)
* or the member function compute(const MatrixType&) has been called
* before to compute the Hessenberg decomposition of a matrix.
*
* This function returns a light-weight object of template class
* HouseholderSequence. You can either apply it directly to a matrix or
* you can convert it to a matrix of type #MatrixType.
*
* \sa matrixH() for an example, class HouseholderSequence
*/
HouseholderSequenceType matrixQ() const
{
eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate())
.setLength(m_matrix.rows() - 1)
.setShift(1);
}
/** \brief Constructs the Hessenberg matrix H in the decomposition
*
* \returns expression object representing the matrix H
*
* \pre Either the constructor HessenbergDecomposition(const MatrixType&)
* or the member function compute(const MatrixType&) has been called
* before to compute the Hessenberg decomposition of a matrix.
*
* The object returned by this function constructs the Hessenberg matrix H
* when it is assigned to a matrix or otherwise evaluated. The matrix H is
* constructed from the packed matrix as returned by packedMatrix(): The
* upper part (including the subdiagonal) of the packed matrix contains
* the matrix H. It may sometimes be better to directly use the packed
* matrix instead of constructing the matrix H.
*
* Example: \include HessenbergDecomposition_matrixH.cpp
* Output: \verbinclude HessenbergDecomposition_matrixH.out
*
* \sa matrixQ(), packedMatrix()
*/
MatrixHReturnType matrixH() const
{
eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
return MatrixHReturnType(*this);
}
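// Usage sketch (illustrative; the names A, hd, H and Q are hypothetical): a typical round
// trip with this class, checking that A is recovered from Q and H.
//
//   #include <Eigen/Dense>
//   #include <iostream>
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,5);
//   Eigen::HessenbergDecomposition<Eigen::MatrixXd> hd(A);
//   Eigen::MatrixXd H = hd.matrixH();      // upper Hessenberg
//   Eigen::MatrixXd Q = hd.matrixQ();      // unitary (orthogonal for real A)
//   std::cout << (Q * H * Q.adjoint() - A).norm() << std::endl;   // ~0 up to rounding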
private:
typedef Matrix<Scalar, 1, Size, Options | RowMajor, 1, MaxSize> VectorType;
typedef typename NumTraits<Scalar>::Real RealScalar;
static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp);
protected:
MatrixType m_matrix;
CoeffVectorType m_hCoeffs;
VectorType m_temp;
bool m_isInitialized;
};
/** \internal
  * Performs a Hessenberg decomposition of \a matA in place.
  *
  * \param matA the input matrix
  * \param hCoeffs returned Householder coefficients
  * \param temp workspace vector, resized to \a matA.rows() internally
  *
  * The result is written into \a matA in the packed format described in
  * packedMatrix(): the Hessenberg matrix H in the upper part including the
  * sub-diagonal, and the essential parts of the Householder vectors below it.
  *
  * Householder reduction to Hessenberg form, following Golub's "%Matrix Computations".
*
* \sa packedMatrix()
*/
template<typename MatrixType>
void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp)
{
eigen_assert(matA.rows()==matA.cols());
Index n = matA.rows();
temp.resize(n);
for (Index i = 0; i<n-1; ++i)
{
// let's consider the vector v = i-th column starting at position i+1
Index remainingSize = n-i-1;
RealScalar beta;
Scalar h;
matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);
matA.col(i).coeffRef(i+1) = beta;
hCoeffs.coeffRef(i) = h;
// Apply similarity transformation to remaining columns,
// i.e., compute A = H A H'
// A = H A
matA.bottomRightCorner(remainingSize, remainingSize)
.applyHouseholderOnTheLeft(matA.col(i).tail(remainingSize-1), h, &temp.coeffRef(0));
// A = A H'
matA.rightCols(remainingSize)
.applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1).conjugate(), numext::conj(h), &temp.coeffRef(0));
}
}
namespace internal {
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \brief Expression type for return value of HessenbergDecomposition::matrixH()
*
* \tparam MatrixType type of matrix in the Hessenberg decomposition
*
* Objects of this type represent the Hessenberg matrix in the Hessenberg
* decomposition of some matrix. The object holds a reference to the
 * HessenbergDecomposition class until it is assigned or evaluated for
 * some other reason (the reference should remain valid during the lifetime
* of this object). This class is the return type of
* HessenbergDecomposition::matrixH(); there is probably no other use for this
* class.
*/
template<typename MatrixType> struct HessenbergDecompositionMatrixHReturnType
: public ReturnByValue<HessenbergDecompositionMatrixHReturnType<MatrixType> >
{
public:
/** \brief Constructor.
*
* \param[in] hess Hessenberg decomposition
*/
HessenbergDecompositionMatrixHReturnType(const HessenbergDecomposition<MatrixType>& hess) : m_hess(hess) { }
/** \brief Hessenberg matrix in decomposition.
*
* \param[out] result Hessenberg matrix in decomposition \p hess which
* was passed to the constructor
*/
template <typename ResultType>
inline void evalTo(ResultType& result) const
{
result = m_hess.packedMatrix();
Index n = result.rows();
if (n>2)
result.bottomLeftCorner(n-2, n-2).template triangularView<Lower>().setZero();
}
Index rows() const { return m_hess.packedMatrix().rows(); }
Index cols() const { return m_hess.packedMatrix().cols(); }
protected:
const HessenbergDecomposition<MatrixType>& m_hess;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_HESSENBERGDECOMPOSITION_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/Eigenvalues/Tridiagonalization.h
|
.h
| 22,444
| 557
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_TRIDIAGONALIZATION_H
#define EIGEN_TRIDIAGONALIZATION_H
namespace Eigen {
namespace internal {
template<typename MatrixType> struct TridiagonalizationMatrixTReturnType;
template<typename MatrixType>
struct traits<TridiagonalizationMatrixTReturnType<MatrixType> >
: public traits<typename MatrixType::PlainObject>
{
typedef typename MatrixType::PlainObject ReturnType; // FIXME shall it be a BandMatrix?
enum { Flags = 0 };
};
template<typename MatrixType, typename CoeffVectorType>
void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs);
}
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class Tridiagonalization
*
* \brief Tridiagonal decomposition of a selfadjoint matrix
*
* \tparam _MatrixType the type of the matrix of which we are computing the
* tridiagonal decomposition; this is expected to be an instantiation of the
* Matrix class template.
*
* This class performs a tridiagonal decomposition of a selfadjoint matrix \f$ A \f$ such that:
* \f$ A = Q T Q^* \f$ where \f$ Q \f$ is unitary and \f$ T \f$ a real symmetric tridiagonal matrix.
*
* A tridiagonal matrix is a matrix which has nonzero elements only on the
* main diagonal and the first diagonal below and above it. The Hessenberg
* decomposition of a selfadjoint matrix is in fact a tridiagonal
* decomposition. This class is used in SelfAdjointEigenSolver to compute the
* eigenvalues and eigenvectors of a selfadjoint matrix.
*
* Call the function compute() to compute the tridiagonal decomposition of a
* given matrix. Alternatively, you can use the Tridiagonalization(const MatrixType&)
 * constructor which computes the tridiagonal decomposition at
* construction time. Once the decomposition is computed, you can use the
* matrixQ() and matrixT() functions to retrieve the matrices Q and T in the
* decomposition.
*
* The documentation of Tridiagonalization(const MatrixType&) contains an
* example of the typical use of this class.
*
* \sa class HessenbergDecomposition, class SelfAdjointEigenSolver
*/
template<typename _MatrixType> class Tridiagonalization
{
public:
/** \brief Synonym for the template parameter \p _MatrixType. */
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
enum {
Size = MatrixType::RowsAtCompileTime,
SizeMinusOne = Size == Dynamic ? Dynamic : (Size > 1 ? Size - 1 : 1),
Options = MatrixType::Options,
MaxSize = MatrixType::MaxRowsAtCompileTime,
MaxSizeMinusOne = MaxSize == Dynamic ? Dynamic : (MaxSize > 1 ? MaxSize - 1 : 1)
};
typedef Matrix<Scalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> CoeffVectorType;
typedef typename internal::plain_col_type<MatrixType, RealScalar>::type DiagonalType;
typedef Matrix<RealScalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> SubDiagonalType;
typedef typename internal::remove_all<typename MatrixType::RealReturnType>::type MatrixTypeRealView;
typedef internal::TridiagonalizationMatrixTReturnType<MatrixTypeRealView> MatrixTReturnType;
typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
typename internal::add_const_on_value_type<typename Diagonal<const MatrixType>::RealReturnType>::type,
const Diagonal<const MatrixType>
>::type DiagonalReturnType;
typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
typename internal::add_const_on_value_type<typename Diagonal<const MatrixType, -1>::RealReturnType>::type,
const Diagonal<const MatrixType, -1>
>::type SubDiagonalReturnType;
/** \brief Return type of matrixQ() */
typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename CoeffVectorType::ConjugateReturnType>::type> HouseholderSequenceType;
/** \brief Default constructor.
*
* \param [in] size Positive integer, size of the matrix whose tridiagonal
* decomposition will be computed.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via compute(). The \p size parameter is only
* used as a hint. It is not an error to give a wrong \p size, but it may
* impair performance.
*
* \sa compute() for an example.
*/
explicit Tridiagonalization(Index size = Size==Dynamic ? 2 : Size)
: m_matrix(size,size),
m_hCoeffs(size > 1 ? size-1 : 1),
m_isInitialized(false)
{}
/** \brief Constructor; computes tridiagonal decomposition of given matrix.
*
* \param[in] matrix Selfadjoint matrix whose tridiagonal decomposition
* is to be computed.
*
* This constructor calls compute() to compute the tridiagonal decomposition.
*
* Example: \include Tridiagonalization_Tridiagonalization_MatrixType.cpp
* Output: \verbinclude Tridiagonalization_Tridiagonalization_MatrixType.out
*/
template<typename InputType>
explicit Tridiagonalization(const EigenBase<InputType>& matrix)
: m_matrix(matrix.derived()),
m_hCoeffs(matrix.cols() > 1 ? matrix.cols()-1 : 1),
m_isInitialized(false)
{
internal::tridiagonalization_inplace(m_matrix, m_hCoeffs);
m_isInitialized = true;
}
/** \brief Computes tridiagonal decomposition of given matrix.
*
* \param[in] matrix Selfadjoint matrix whose tridiagonal decomposition
* is to be computed.
* \returns Reference to \c *this
*
* The tridiagonal decomposition is computed by bringing the columns of
* the matrix successively in the required form using Householder
* reflections. The cost is \f$ 4n^3/3 \f$ flops, where \f$ n \f$ denotes
* the size of the given matrix.
*
 * This method reuses the allocated data in the Tridiagonalization
* object, if the size of the matrix does not change.
*
* Example: \include Tridiagonalization_compute.cpp
* Output: \verbinclude Tridiagonalization_compute.out
*/
template<typename InputType>
Tridiagonalization& compute(const EigenBase<InputType>& matrix)
{
m_matrix = matrix.derived();
m_hCoeffs.resize(matrix.rows()-1, 1);
internal::tridiagonalization_inplace(m_matrix, m_hCoeffs);
m_isInitialized = true;
return *this;
}
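// Usage sketch (illustrative; the names X, A, tri, T and Q are hypothetical): decomposing a
// selfadjoint matrix and reusing the object via compute().
//
//   #include <Eigen/Dense>
//   #include <iostream>
//   Eigen::MatrixXd X = Eigen::MatrixXd::Random(5,5);
//   Eigen::MatrixXd A = X + X.transpose();                      // make it selfadjoint
//   Eigen::Tridiagonalization<Eigen::MatrixXd> tri(A);
//   Eigen::MatrixXd T = tri.matrixT();
//   Eigen::MatrixXd Q = tri.matrixQ();
//   std::cout << (Q * T * Q.adjoint() - A).norm() << std::endl;  // ~0 up to rounding
//   tri.compute(2 * A);                                          // reuses the allocated storage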
/** \brief Returns the Householder coefficients.
*
* \returns a const reference to the vector of Householder coefficients
*
* \pre Either the constructor Tridiagonalization(const MatrixType&) or
* the member function compute(const MatrixType&) has been called before
* to compute the tridiagonal decomposition of a matrix.
*
* The Householder coefficients allow the reconstruction of the matrix
* \f$ Q \f$ in the tridiagonal decomposition from the packed data.
*
* Example: \include Tridiagonalization_householderCoefficients.cpp
* Output: \verbinclude Tridiagonalization_householderCoefficients.out
*
* \sa packedMatrix(), \ref Householder_Module "Householder module"
*/
inline CoeffVectorType householderCoefficients() const
{
eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
return m_hCoeffs;
}
/** \brief Returns the internal representation of the decomposition
*
* \returns a const reference to a matrix with the internal representation
* of the decomposition.
*
* \pre Either the constructor Tridiagonalization(const MatrixType&) or
* the member function compute(const MatrixType&) has been called before
* to compute the tridiagonal decomposition of a matrix.
*
* The returned matrix contains the following information:
* - the strict upper triangular part is equal to the input matrix A.
* - the diagonal and lower sub-diagonal represent the real tridiagonal
* symmetric matrix T.
* - the rest of the lower part contains the Householder vectors that,
* combined with Householder coefficients returned by
 *    householderCoefficients(), allow one to reconstruct the matrix Q as
* \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
* Here, the matrices \f$ H_i \f$ are the Householder transformations
* \f$ H_i = (I - h_i v_i v_i^T) \f$
* where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and
* \f$ v_i \f$ is the Householder vector defined by
* \f$ v_i = [ 0, \ldots, 0, 1, M(i+2,i), \ldots, M(N-1,i) ]^T \f$
* with M the matrix returned by this function.
*
* See LAPACK for further details on this packed storage.
*
* Example: \include Tridiagonalization_packedMatrix.cpp
* Output: \verbinclude Tridiagonalization_packedMatrix.out
*
* \sa householderCoefficients()
*/
inline const MatrixType& packedMatrix() const
{
eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
return m_matrix;
}
/** \brief Returns the unitary matrix Q in the decomposition
*
* \returns object representing the matrix Q
*
* \pre Either the constructor Tridiagonalization(const MatrixType&) or
* the member function compute(const MatrixType&) has been called before
* to compute the tridiagonal decomposition of a matrix.
*
* This function returns a light-weight object of template class
* HouseholderSequence. You can either apply it directly to a matrix or
* you can convert it to a matrix of type #MatrixType.
*
* \sa Tridiagonalization(const MatrixType&) for an example,
* matrixT(), class HouseholderSequence
*/
HouseholderSequenceType matrixQ() const
{
eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate())
.setLength(m_matrix.rows() - 1)
.setShift(1);
}
/** \brief Returns an expression of the tridiagonal matrix T in the decomposition
*
* \returns expression object representing the matrix T
*
* \pre Either the constructor Tridiagonalization(const MatrixType&) or
* the member function compute(const MatrixType&) has been called before
* to compute the tridiagonal decomposition of a matrix.
*
* Currently, this function can be used to extract the matrix T from internal
* data and copy it to a dense matrix object. In most cases, it may be
* sufficient to directly use the packed matrix or the vector expressions
* returned by diagonal() and subDiagonal() instead of creating a new
 * dense copy of the matrix with this function.
*
* \sa Tridiagonalization(const MatrixType&) for an example,
* matrixQ(), packedMatrix(), diagonal(), subDiagonal()
*/
MatrixTReturnType matrixT() const
{
eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
return MatrixTReturnType(m_matrix.real());
}
/** \brief Returns the diagonal of the tridiagonal matrix T in the decomposition.
*
* \returns expression representing the diagonal of T
*
* \pre Either the constructor Tridiagonalization(const MatrixType&) or
* the member function compute(const MatrixType&) has been called before
* to compute the tridiagonal decomposition of a matrix.
*
* Example: \include Tridiagonalization_diagonal.cpp
* Output: \verbinclude Tridiagonalization_diagonal.out
*
* \sa matrixT(), subDiagonal()
*/
DiagonalReturnType diagonal() const;
/** \brief Returns the subdiagonal of the tridiagonal matrix T in the decomposition.
*
* \returns expression representing the subdiagonal of T
*
* \pre Either the constructor Tridiagonalization(const MatrixType&) or
* the member function compute(const MatrixType&) has been called before
* to compute the tridiagonal decomposition of a matrix.
*
* \sa diagonal() for an example, matrixT()
*/
SubDiagonalReturnType subDiagonal() const;
protected:
MatrixType m_matrix;
CoeffVectorType m_hCoeffs;
bool m_isInitialized;
};
template<typename MatrixType>
typename Tridiagonalization<MatrixType>::DiagonalReturnType
Tridiagonalization<MatrixType>::diagonal() const
{
eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
return m_matrix.diagonal().real();
}
template<typename MatrixType>
typename Tridiagonalization<MatrixType>::SubDiagonalReturnType
Tridiagonalization<MatrixType>::subDiagonal() const
{
eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
return m_matrix.template diagonal<-1>().real();
}
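// Usage sketch (illustrative; the names X, A, tri, n and T are hypothetical): assembling the
// dense tridiagonal matrix T from diagonal() and subDiagonal() without going through matrixT().
//
//   Eigen::MatrixXd X = Eigen::MatrixXd::Random(4,4), A = X + X.transpose();
//   Eigen::Tridiagonalization<Eigen::MatrixXd> tri(A);
//   Eigen::Index n = A.rows();
//   Eigen::MatrixXd T = Eigen::MatrixXd::Zero(n, n);
//   T.diagonal()   = tri.diagonal();
//   T.diagonal(-1) = tri.subDiagonal();
//   T.diagonal(1)  = tri.subDiagonal();   // T is real symmetric, so the super-diagonal matches
//   // T now equals tri.matrixT() evaluated into a dense matrix.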
namespace internal {
/** \internal
* Performs a tridiagonal decomposition of the selfadjoint matrix \a matA in-place.
*
* \param[in,out] matA On input the selfadjoint matrix. Only the \b lower triangular part is referenced.
* On output, the strict upper part is left unchanged, and the lower triangular part
 *                    represents the T and Q matrices in packed format as detailed below.
* \param[out] hCoeffs returned Householder coefficients (see below)
*
* On output, the tridiagonal selfadjoint matrix T is stored in the diagonal
* and lower sub-diagonal of the matrix \a matA.
* The unitary matrix Q is represented in a compact way as a product of
* Householder reflectors \f$ H_i \f$ such that:
* \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
* The Householder reflectors are defined as
* \f$ H_i = (I - h_i v_i v_i^T) \f$
* where \f$ h_i = hCoeffs[i]\f$ is the \f$ i \f$th Householder coefficient and
* \f$ v_i \f$ is the Householder vector defined by
* \f$ v_i = [ 0, \ldots, 0, 1, matA(i+2,i), \ldots, matA(N-1,i) ]^T \f$.
*
* Implemented from Golub's "Matrix Computations", algorithm 8.3.1.
*
* \sa Tridiagonalization::packedMatrix()
*/
template<typename MatrixType, typename CoeffVectorType>
void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs)
{
using numext::conj;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
Index n = matA.rows();
eigen_assert(n==matA.cols());
eigen_assert(n==hCoeffs.size()+1 || n==1);
for (Index i = 0; i<n-1; ++i)
{
Index remainingSize = n-i-1;
RealScalar beta;
Scalar h;
matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);
// Apply similarity transformation to remaining columns,
// i.e., A = H A H' where H = I - h v v' and v = matA.col(i).tail(n-i-1)
matA.col(i).coeffRef(i+1) = 1;
hCoeffs.tail(n-i-1).noalias() = (matA.bottomRightCorner(remainingSize,remainingSize).template selfadjointView<Lower>()
* (conj(h) * matA.col(i).tail(remainingSize)));
hCoeffs.tail(n-i-1) += (conj(h)*RealScalar(-0.5)*(hCoeffs.tail(remainingSize).dot(matA.col(i).tail(remainingSize)))) * matA.col(i).tail(n-i-1);
matA.bottomRightCorner(remainingSize, remainingSize).template selfadjointView<Lower>()
.rankUpdate(matA.col(i).tail(remainingSize), hCoeffs.tail(remainingSize), Scalar(-1));
matA.col(i).coeffRef(i+1) = beta;
hCoeffs.coeffRef(i) = h;
}
}
// forward declaration, implementation at the end of this file
template<typename MatrixType,
int Size=MatrixType::ColsAtCompileTime,
bool IsComplex=NumTraits<typename MatrixType::Scalar>::IsComplex>
struct tridiagonalization_inplace_selector;
/** \brief Performs a full tridiagonalization in place
*
* \param[in,out] mat On input, the selfadjoint matrix whose tridiagonal
 *    decomposition is to be computed. Only the lower triangular part is referenced;
 *    the rest is left unchanged. On output, it holds the orthogonal matrix Q
 *    of the decomposition if \p extractQ is true.
* \param[out] diag The diagonal of the tridiagonal matrix T in the
* decomposition.
* \param[out] subdiag The subdiagonal of the tridiagonal matrix T in
* the decomposition.
* \param[in] extractQ If true, the orthogonal matrix Q in the
* decomposition is computed and stored in \p mat.
*
* Computes the tridiagonal decomposition of the selfadjoint matrix \p mat in place
* such that \f$ mat = Q T Q^* \f$ where \f$ Q \f$ is unitary and \f$ T \f$ a real
* symmetric tridiagonal matrix.
*
* The tridiagonal matrix T is passed to the output parameters \p diag and \p subdiag. If
* \p extractQ is true, then the orthogonal matrix Q is passed to \p mat. Otherwise the lower
* part of the matrix \p mat is destroyed.
*
* The vectors \p diag and \p subdiag are not resized. The function
* assumes that they are already of the correct size. The length of the
* vector \p diag should equal the number of rows in \p mat, and the
 * length of the vector \p subdiag should be one less.
*
* This implementation contains an optimized path for 3-by-3 matrices
* which is especially useful for plane fitting.
*
* \note Currently, it requires two temporary vectors to hold the intermediate
* Householder coefficients, and to reconstruct the matrix Q from the Householder
* reflectors.
*
* Example (this uses the same matrix as the example in
* Tridiagonalization::Tridiagonalization(const MatrixType&)):
* \include Tridiagonalization_decomposeInPlace.cpp
* Output: \verbinclude Tridiagonalization_decomposeInPlace.out
*
* \sa class Tridiagonalization
*/
template<typename MatrixType, typename DiagonalType, typename SubDiagonalType>
void tridiagonalization_inplace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
{
eigen_assert(mat.cols()==mat.rows() && diag.size()==mat.rows() && subdiag.size()==mat.rows()-1);
tridiagonalization_inplace_selector<MatrixType>::run(mat, diag, subdiag, extractQ);
}
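// Usage sketch (illustrative; this is an internal helper, so the call below is an assumption
// about internal API rather than documented public usage; X, A, mat, diag, subdiag are hypothetical):
//
//   Eigen::MatrixXd X = Eigen::MatrixXd::Random(4,4), A = X + X.transpose();
//   Eigen::MatrixXd mat = A;
//   Eigen::VectorXd diag(A.rows());            // must already have the right size
//   Eigen::VectorXd subdiag(A.rows() - 1);     // one entry fewer than diag
//   Eigen::internal::tridiagonalization_inplace(mat, diag, subdiag, /*extractQ=*/true);
//   // diag/subdiag now hold T, and mat holds the orthogonal factor Q.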
/** \internal
* General full tridiagonalization
*/
template<typename MatrixType, int Size, bool IsComplex>
struct tridiagonalization_inplace_selector
{
typedef typename Tridiagonalization<MatrixType>::CoeffVectorType CoeffVectorType;
typedef typename Tridiagonalization<MatrixType>::HouseholderSequenceType HouseholderSequenceType;
template<typename DiagonalType, typename SubDiagonalType>
static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
{
CoeffVectorType hCoeffs(mat.cols()-1);
tridiagonalization_inplace(mat,hCoeffs);
diag = mat.diagonal().real();
subdiag = mat.template diagonal<-1>().real();
if(extractQ)
mat = HouseholderSequenceType(mat, hCoeffs.conjugate())
.setLength(mat.rows() - 1)
.setShift(1);
}
};
/** \internal
* Specialization for 3x3 real matrices.
* Especially useful for plane fitting.
*/
template<typename MatrixType>
struct tridiagonalization_inplace_selector<MatrixType,3,false>
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
template<typename DiagonalType, typename SubDiagonalType>
static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
{
using std::sqrt;
const RealScalar tol = (std::numeric_limits<RealScalar>::min)();
diag[0] = mat(0,0);
RealScalar v1norm2 = numext::abs2(mat(2,0));
if(v1norm2 <= tol)
{
diag[1] = mat(1,1);
diag[2] = mat(2,2);
subdiag[0] = mat(1,0);
subdiag[1] = mat(2,1);
if (extractQ)
mat.setIdentity();
}
else
{
RealScalar beta = sqrt(numext::abs2(mat(1,0)) + v1norm2);
RealScalar invBeta = RealScalar(1)/beta;
Scalar m01 = mat(1,0) * invBeta;
Scalar m02 = mat(2,0) * invBeta;
Scalar q = RealScalar(2)*m01*mat(2,1) + m02*(mat(2,2) - mat(1,1));
diag[1] = mat(1,1) + m02*q;
diag[2] = mat(2,2) - m02*q;
subdiag[0] = beta;
subdiag[1] = mat(2,1) - m01 * q;
if (extractQ)
{
mat << 1, 0, 0,
0, m01, m02,
0, m02, -m01;
}
}
}
};
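// Sketch of the plane-fitting use case mentioned above (illustrative; the data in P is made up
// and SelfAdjointEigenSolver is defined in SelfAdjointEigenSolver.h): the normal of the
// least-squares plane through centered points is the eigenvector of the 3x3 covariance matrix
// associated with its smallest eigenvalue.
//
//   Eigen::Matrix<double, 3, Eigen::Dynamic> P = Eigen::Matrix<double, 3, Eigen::Dynamic>::Random(3, 100);
//   Eigen::Vector3d mean = P.rowwise().mean();
//   P.colwise() -= mean;                                   // center the points
//   Eigen::Matrix3d cov = P * P.transpose();
//   Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es(cov);
//   Eigen::Vector3d normal = es.eigenvectors().col(0);     // eigenvalues are sorted increasingly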
/** \internal
* Trivial specialization for 1x1 matrices
*/
template<typename MatrixType, bool IsComplex>
struct tridiagonalization_inplace_selector<MatrixType,1,IsComplex>
{
typedef typename MatrixType::Scalar Scalar;
template<typename DiagonalType, typename SubDiagonalType>
static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType&, bool extractQ)
{
diag(0,0) = numext::real(mat(0,0));
if(extractQ)
mat(0,0) = Scalar(1);
}
};
/** \internal
* \eigenvalues_module \ingroup Eigenvalues_Module
*
* \brief Expression type for return value of Tridiagonalization::matrixT()
*
* \tparam MatrixType type of underlying dense matrix
*/
template<typename MatrixType> struct TridiagonalizationMatrixTReturnType
: public ReturnByValue<TridiagonalizationMatrixTReturnType<MatrixType> >
{
public:
/** \brief Constructor.
*
* \param[in] mat The underlying dense matrix
*/
TridiagonalizationMatrixTReturnType(const MatrixType& mat) : m_matrix(mat) { }
template <typename ResultType>
inline void evalTo(ResultType& result) const
{
result.setZero();
result.template diagonal<1>() = m_matrix.template diagonal<-1>().conjugate();
result.diagonal() = m_matrix.diagonal();
result.template diagonal<-1>() = m_matrix.template diagonal<-1>();
}
Index rows() const { return m_matrix.rows(); }
Index cols() const { return m_matrix.cols(); }
protected:
typename MatrixType::Nested m_matrix;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_TRIDIAGONALIZATION_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
|
.h
| 33,705
| 872
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SELFADJOINTEIGENSOLVER_H
#define EIGEN_SELFADJOINTEIGENSOLVER_H
#include "./Tridiagonalization.h"
namespace Eigen {
template<typename _MatrixType>
class GeneralizedSelfAdjointEigenSolver;
namespace internal {
template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues;
template<typename MatrixType, typename DiagType, typename SubDiagType>
ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec);
}
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class SelfAdjointEigenSolver
*
* \brief Computes eigenvalues and eigenvectors of selfadjoint matrices
*
* \tparam _MatrixType the type of the matrix of which we are computing the
* eigendecomposition; this is expected to be an instantiation of the Matrix
* class template.
*
* A matrix \f$ A \f$ is selfadjoint if it equals its adjoint. For real
* matrices, this means that the matrix is symmetric: it equals its
* transpose. This class computes the eigenvalues and eigenvectors of a
* selfadjoint matrix. These are the scalars \f$ \lambda \f$ and vectors
* \f$ v \f$ such that \f$ Av = \lambda v \f$. The eigenvalues of a
* selfadjoint matrix are always real. If \f$ D \f$ is a diagonal matrix with
* the eigenvalues on the diagonal, and \f$ V \f$ is a matrix with the
* eigenvectors as its columns, then \f$ A = V D V^{-1} \f$ (for selfadjoint
* matrices, the matrix \f$ V \f$ is always invertible). This is called the
* eigendecomposition.
*
* The algorithm exploits the fact that the matrix is selfadjoint, making it
* faster and more accurate than the general purpose eigenvalue algorithms
* implemented in EigenSolver and ComplexEigenSolver.
*
* Only the \b lower \b triangular \b part of the input matrix is referenced.
*
* Call the function compute() to compute the eigenvalues and eigenvectors of
* a given matrix. Alternatively, you can use the
* SelfAdjointEigenSolver(const MatrixType&, int) constructor which computes
* the eigenvalues and eigenvectors at construction time. Once the eigenvalue
* and eigenvectors are computed, they can be retrieved with the eigenvalues()
* and eigenvectors() functions.
*
* The documentation for SelfAdjointEigenSolver(const MatrixType&, int)
* contains an example of the typical use of this class.
*
* To solve the \em generalized eigenvalue problem \f$ Av = \lambda Bv \f$ and
* the likes, see the class GeneralizedSelfAdjointEigenSolver.
*
* \sa MatrixBase::eigenvalues(), class EigenSolver, class ComplexEigenSolver
*/
template<typename _MatrixType> class SelfAdjointEigenSolver
{
public:
typedef _MatrixType MatrixType;
enum {
Size = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
/** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef Matrix<Scalar,Size,Size,ColMajor,MaxColsAtCompileTime,MaxColsAtCompileTime> EigenvectorsType;
/** \brief Real scalar type for \p _MatrixType.
*
* This is just \c Scalar if #Scalar is real (e.g., \c float or
* \c double), and the type of the real part of \c Scalar if #Scalar is
* complex.
*/
typedef typename NumTraits<Scalar>::Real RealScalar;
friend struct internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>;
/** \brief Type for vector of eigenvalues as returned by eigenvalues().
*
* This is a column vector with entries of type #RealScalar.
* The length of the vector is the size of \p _MatrixType.
*/
typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVectorType;
typedef Tridiagonalization<MatrixType> TridiagonalizationType;
typedef typename TridiagonalizationType::SubDiagonalType SubDiagonalType;
/** \brief Default constructor for fixed-size matrices.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via compute(). This constructor
* can only be used if \p _MatrixType is a fixed-size matrix; use
* SelfAdjointEigenSolver(Index) for dynamic-size matrices.
*
* Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp
* Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out
*/
EIGEN_DEVICE_FUNC
SelfAdjointEigenSolver()
: m_eivec(),
m_eivalues(),
m_subdiag(),
m_isInitialized(false)
{ }
/** \brief Constructor, pre-allocates memory for dynamic-size matrices.
*
* \param [in] size Positive integer, size of the matrix whose
* eigenvalues and eigenvectors will be computed.
*
* This constructor is useful for dynamic-size matrices, when the user
* intends to perform decompositions via compute(). The \p size
* parameter is only used as a hint. It is not an error to give a wrong
* \p size, but it may impair performance.
*
* \sa compute() for an example
*/
EIGEN_DEVICE_FUNC
explicit SelfAdjointEigenSolver(Index size)
: m_eivec(size, size),
m_eivalues(size),
m_subdiag(size > 1 ? size - 1 : 1),
m_isInitialized(false)
{}
/** \brief Constructor; computes eigendecomposition of given matrix.
*
* \param[in] matrix Selfadjoint matrix whose eigendecomposition is to
* be computed. Only the lower triangular part of the matrix is referenced.
* \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
*
* This constructor calls compute(const MatrixType&, int) to compute the
* eigenvalues of the matrix \p matrix. The eigenvectors are computed if
* \p options equals #ComputeEigenvectors.
*
* Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp
* Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.out
*
* \sa compute(const MatrixType&, int)
*/
template<typename InputType>
EIGEN_DEVICE_FUNC
explicit SelfAdjointEigenSolver(const EigenBase<InputType>& matrix, int options = ComputeEigenvectors)
: m_eivec(matrix.rows(), matrix.cols()),
m_eivalues(matrix.cols()),
m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1),
m_isInitialized(false)
{
compute(matrix.derived(), options);
}
/** \brief Computes eigendecomposition of given matrix.
*
* \param[in] matrix Selfadjoint matrix whose eigendecomposition is to
* be computed. Only the lower triangular part of the matrix is referenced.
* \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
* \returns Reference to \c *this
*
* This function computes the eigenvalues of \p matrix. The eigenvalues()
* function can be used to retrieve them. If \p options equals #ComputeEigenvectors,
* then the eigenvectors are also computed and can be retrieved by
* calling eigenvectors().
*
* This implementation uses a symmetric QR algorithm. The matrix is first
* reduced to tridiagonal form using the Tridiagonalization class. The
* tridiagonal matrix is then brought to diagonal form with implicit
* symmetric QR steps with Wilkinson shift. Details can be found in
* Section 8.3 of Golub \& Van Loan, <i>%Matrix Computations</i>.
*
* The cost of the computation is about \f$ 9n^3 \f$ if the eigenvectors
* are required and \f$ 4n^3/3 \f$ if they are not required.
*
* This method reuses the memory in the SelfAdjointEigenSolver object that
* was allocated when the object was constructed, if the size of the
* matrix does not change.
*
* Example: \include SelfAdjointEigenSolver_compute_MatrixType.cpp
* Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType.out
*
* \sa SelfAdjointEigenSolver(const MatrixType&, int)
*/
template<typename InputType>
EIGEN_DEVICE_FUNC
SelfAdjointEigenSolver& compute(const EigenBase<InputType>& matrix, int options = ComputeEigenvectors);
/** \brief Computes eigendecomposition of given matrix using a closed-form algorithm
*
* This is a variant of compute(const MatrixType&, int options) which
* directly solves the underlying polynomial equation.
*
* Currently only 2x2 and 3x3 matrices for which the sizes are known at compile time are supported (e.g., Matrix3d).
*
* This method is usually significantly faster than the QR iterative algorithm
* but it might also be less accurate. It is also worth noting that
* for 3x3 matrices it involves trigonometric operations which are
* not necessarily available for all scalar types.
*
* For the 3x3 case, we observed the following worst case relative error regarding the eigenvalues:
* - double: 1e-8
* - float: 1e-3
*
* \sa compute(const MatrixType&, int options)
*/
EIGEN_DEVICE_FUNC
SelfAdjointEigenSolver& computeDirect(const MatrixType& matrix, int options = ComputeEigenvectors);
/**
 * \brief Computes the eigendecomposition from a tridiagonal symmetric matrix
*
* \param[in] diag The vector containing the diagonal of the matrix.
* \param[in] subdiag The subdiagonal of the matrix.
* \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
* \returns Reference to \c *this
*
* This function assumes that the matrix has been reduced to tridiagonal form.
*
* \sa compute(const MatrixType&, int) for more information
*/
SelfAdjointEigenSolver& computeFromTridiagonal(const RealVectorType& diag, const SubDiagonalType& subdiag , int options=ComputeEigenvectors);
/** \brief Returns the eigenvectors of given matrix.
*
* \returns A const reference to the matrix whose columns are the eigenvectors.
*
* \pre The eigenvectors have been computed before.
*
* Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
* to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The
* eigenvectors are normalized to have (Euclidean) norm equal to one. If
* this object was used to solve the eigenproblem for the selfadjoint
* matrix \f$ A \f$, then the matrix returned by this function is the
* matrix \f$ V \f$ in the eigendecomposition \f$ A = V D V^{-1} \f$.
*
* Example: \include SelfAdjointEigenSolver_eigenvectors.cpp
* Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out
*
* \sa eigenvalues()
*/
EIGEN_DEVICE_FUNC
const EigenvectorsType& eigenvectors() const
{
eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
return m_eivec;
}
/** \brief Returns the eigenvalues of given matrix.
*
* \returns A const reference to the column vector containing the eigenvalues.
*
* \pre The eigenvalues have been computed before.
*
* The eigenvalues are repeated according to their algebraic multiplicity,
* so there are as many eigenvalues as rows in the matrix. The eigenvalues
* are sorted in increasing order.
*
* Example: \include SelfAdjointEigenSolver_eigenvalues.cpp
* Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out
*
* \sa eigenvectors(), MatrixBase::eigenvalues()
*/
EIGEN_DEVICE_FUNC
const RealVectorType& eigenvalues() const
{
eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
return m_eivalues;
}
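// Usage sketch (illustrative; the names X, A, es, lambda and V are hypothetical): solving a
// small selfadjoint eigenproblem and checking A * V = V * diag(lambda).
//
//   #include <Eigen/Dense>
//   #include <iostream>
//   Eigen::MatrixXd X = Eigen::MatrixXd::Random(4,4), A = X + X.transpose();
//   Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(A);
//   if (es.info() == Eigen::Success)
//   {
//     Eigen::VectorXd lambda = es.eigenvalues();     // increasing order
//     Eigen::MatrixXd V = es.eigenvectors();         // orthonormal columns
//     std::cout << (A * V - V * lambda.asDiagonal()).norm() << std::endl;  // ~0
//   }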
/** \brief Computes the positive-definite square root of the matrix.
*
* \returns the positive-definite square root of the matrix
*
* \pre The eigenvalues and eigenvectors of a positive-definite matrix
* have been computed before.
*
* The square root of a positive-definite matrix \f$ A \f$ is the
* positive-definite matrix whose square equals \f$ A \f$. This function
* uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the
* square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$.
*
* Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp
* Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out
*
* \sa operatorInverseSqrt(), <a href="unsupported/group__MatrixFunctions__Module.html">MatrixFunctions Module</a>
*/
EIGEN_DEVICE_FUNC
MatrixType operatorSqrt() const
{
eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint();
}
/** \brief Computes the inverse square root of the matrix.
*
* \returns the inverse positive-definite square root of the matrix
*
* \pre The eigenvalues and eigenvectors of a positive-definite matrix
* have been computed before.
*
* This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to
* compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is
* cheaper than first computing the square root with operatorSqrt() and
* then its inverse with MatrixBase::inverse().
*
* Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp
* Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out
*
* \sa operatorSqrt(), MatrixBase::inverse(), <a href="unsupported/group__MatrixFunctions__Module.html">MatrixFunctions Module</a>
*/
EIGEN_DEVICE_FUNC
MatrixType operatorInverseSqrt() const
{
eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint();
}
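// Usage sketch (illustrative; the names X, A, es, S and Sinv are hypothetical): matrix square
// root and inverse square root of a positive-definite matrix, e.g. for whitening.
//
//   Eigen::MatrixXd X = Eigen::MatrixXd::Random(4,4);
//   Eigen::MatrixXd A = X * X.transpose() + 0.1 * Eigen::MatrixXd::Identity(4,4);  // positive definite
//   Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(A);
//   Eigen::MatrixXd S    = es.operatorSqrt();         // S * S is A up to rounding
//   Eigen::MatrixXd Sinv = es.operatorInverseSqrt();  // Sinv * A * Sinv is the identity up to rounding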
/** \brief Reports whether previous computation was successful.
*
 * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
EIGEN_DEVICE_FUNC
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
return m_info;
}
/** \brief Maximum number of iterations.
*
* The algorithm terminates if it does not converge within m_maxIterations * n iterations, where n
* denotes the size of the matrix. This value is currently set to 30 (copied from LAPACK).
*/
static const int m_maxIterations = 30;
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
EigenvectorsType m_eivec;
RealVectorType m_eivalues;
typename TridiagonalizationType::SubDiagonalType m_subdiag;
ComputationInfo m_info;
bool m_isInitialized;
bool m_eigenvectorsOk;
};
namespace internal {
/** \internal
*
* \eigenvalues_module \ingroup Eigenvalues_Module
*
* Performs a QR step on a tridiagonal symmetric matrix represented as a
* pair of two vectors \a diag and \a subdiag.
*
* \param diag the diagonal part of the input selfadjoint tridiagonal matrix
* \param subdiag the sub-diagonal part of the input selfadjoint tridiagonal matrix
* \param start starting index of the submatrix to work on
* \param end last+1 index of the submatrix to work on
* \param matrixQ pointer to the column-major matrix holding the eigenvectors, can be 0
* \param n size of the input matrix
*
 * For compilation efficiency reasons, this procedure does not use Eigen expressions
 * for its arguments.
*
* Implemented from Golub's "Matrix Computations", algorithm 8.3.2:
* "implicit symmetric QR step with Wilkinson shift"
*/
template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
EIGEN_DEVICE_FUNC
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);
}
template<typename MatrixType>
template<typename InputType>
EIGEN_DEVICE_FUNC
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
::compute(const EigenBase<InputType>& a_matrix, int options)
{
check_template_parameters();
const InputType &matrix(a_matrix.derived());
using std::abs;
eigen_assert(matrix.cols() == matrix.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
&& "invalid option parameter");
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
Index n = matrix.cols();
m_eivalues.resize(n,1);
if(n==1)
{
m_eivec = matrix;
m_eivalues.coeffRef(0,0) = numext::real(m_eivec.coeff(0,0));
if(computeEigenvectors)
m_eivec.setOnes(n,n);
m_info = Success;
m_isInitialized = true;
m_eigenvectorsOk = computeEigenvectors;
return *this;
}
// declare some aliases
RealVectorType& diag = m_eivalues;
EigenvectorsType& mat = m_eivec;
// map the matrix coefficients to [-1:1] to avoid over- and underflow.
mat = matrix.template triangularView<Lower>();
RealScalar scale = mat.cwiseAbs().maxCoeff();
if(scale==RealScalar(0)) scale = RealScalar(1);
mat.template triangularView<Lower>() /= scale;
m_subdiag.resize(n-1);
internal::tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors);
m_info = internal::computeFromTridiagonal_impl(diag, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);
  // scale back the eigenvalues
m_eivalues *= scale;
m_isInitialized = true;
m_eigenvectorsOk = computeEigenvectors;
return *this;
}
template<typename MatrixType>
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
::computeFromTridiagonal(const RealVectorType& diag, const SubDiagonalType& subdiag , int options)
{
//TODO : Add an option to scale the values beforehand
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
m_eivalues = diag;
m_subdiag = subdiag;
if (computeEigenvectors)
{
m_eivec.setIdentity(diag.size(), diag.size());
}
m_info = internal::computeFromTridiagonal_impl(m_eivalues, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);
m_isInitialized = true;
m_eigenvectorsOk = computeEigenvectors;
return *this;
}
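// Usage sketch (illustrative; the tridiagonal data below is made up): feeding an already
// tridiagonal problem directly to the solver.
//
//   Eigen::VectorXd diag(5), subdiag(4);
//   diag    << 2, 2, 2, 2, 2;
//   subdiag << 1, 1, 1, 1;
//   Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es;
//   es.computeFromTridiagonal(diag, subdiag);          // eigenvectors computed by default
//   Eigen::VectorXd lambda = es.eigenvalues();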
namespace internal {
/**
* \internal
* \brief Compute the eigendecomposition from a tridiagonal matrix
*
* \param[in,out] diag : On input, the diagonal of the matrix, on output the eigenvalues
* \param[in,out] subdiag : The subdiagonal part of the matrix (entries are modified during the decomposition)
* \param[in] maxIterations : the maximum number of iterations
* \param[in] computeEigenvectors : whether the eigenvectors have to be computed or not
* \param[out] eivec : The matrix to store the eigenvectors if computeEigenvectors==true. Must be allocated on input.
* \returns \c Success or \c NoConvergence
*/
template<typename MatrixType, typename DiagType, typename SubDiagType>
ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec)
{
using std::abs;
ComputationInfo info;
typedef typename MatrixType::Scalar Scalar;
Index n = diag.size();
Index end = n-1;
Index start = 0;
Index iter = 0; // total number of iterations
typedef typename DiagType::RealScalar RealScalar;
const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
const RealScalar precision = RealScalar(2)*NumTraits<RealScalar>::epsilon();
while (end>0)
{
for (Index i = start; i<end; ++i)
if (internal::isMuchSmallerThan(abs(subdiag[i]),(abs(diag[i])+abs(diag[i+1])),precision) || abs(subdiag[i]) <= considerAsZero)
subdiag[i] = 0;
// find the largest unreduced block
while (end>0 && subdiag[end-1]==RealScalar(0))
{
end--;
}
if (end<=0)
break;
// if we spent too many iterations, we give up
iter++;
if(iter > maxIterations * n) break;
start = end - 1;
while (start>0 && subdiag[start-1]!=0)
start--;
internal::tridiagonal_qr_step<MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor>(diag.data(), subdiag.data(), start, end, computeEigenvectors ? eivec.data() : (Scalar*)0, n);
}
if (iter <= maxIterations * n)
info = Success;
else
info = NoConvergence;
// Sort eigenvalues and corresponding vectors.
// TODO make the sort optional ?
// TODO use a better sort algorithm !!
if (info == Success)
{
for (Index i = 0; i < n-1; ++i)
{
Index k;
diag.segment(i,n-i).minCoeff(&k);
if (k > 0)
{
std::swap(diag[i], diag[k+i]);
if(computeEigenvectors)
eivec.col(i).swap(eivec.col(k+i));
}
}
}
return info;
}
template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues
{
EIGEN_DEVICE_FUNC
static inline void run(SolverType& eig, const typename SolverType::MatrixType& A, int options)
{ eig.compute(A,options); }
};
template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3,false>
{
typedef typename SolverType::MatrixType MatrixType;
typedef typename SolverType::RealVectorType VectorType;
typedef typename SolverType::Scalar Scalar;
typedef typename SolverType::EigenvectorsType EigenvectorsType;
/** \internal
* Computes the roots of the characteristic polynomial of \a m.
   * For numerical stability, m.trace() should be near zero and, to avoid over- or underflow, m should be normalized.
*/
EIGEN_DEVICE_FUNC
static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
EIGEN_USING_STD_MATH(sqrt)
EIGEN_USING_STD_MATH(atan2)
EIGEN_USING_STD_MATH(cos)
EIGEN_USING_STD_MATH(sin)
const Scalar s_inv3 = Scalar(1)/Scalar(3);
const Scalar s_sqrt3 = sqrt(Scalar(3));
// The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0. The
// eigenvalues are the roots to this equation, all guaranteed to be
// real-valued, because the matrix is symmetric.
Scalar c0 = m(0,0)*m(1,1)*m(2,2) + Scalar(2)*m(1,0)*m(2,0)*m(2,1) - m(0,0)*m(2,1)*m(2,1) - m(1,1)*m(2,0)*m(2,0) - m(2,2)*m(1,0)*m(1,0);
Scalar c1 = m(0,0)*m(1,1) - m(1,0)*m(1,0) + m(0,0)*m(2,2) - m(2,0)*m(2,0) + m(1,1)*m(2,2) - m(2,1)*m(2,1);
Scalar c2 = m(0,0) + m(1,1) + m(2,2);
// Construct the parameters used in classifying the roots of the equation
// and in solving the equation for the roots in closed form.
Scalar c2_over_3 = c2*s_inv3;
Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3;
a_over_3 = numext::maxi(a_over_3, Scalar(0));
Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1));
Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b;
q = numext::maxi(q, Scalar(0));
// Compute the eigenvalues by solving for the roots of the polynomial.
Scalar rho = sqrt(a_over_3);
Scalar theta = atan2(sqrt(q),half_b)*s_inv3; // since sqrt(q) > 0, atan2 is in [0, pi] and theta is in [0, pi/3]
Scalar cos_theta = cos(theta);
Scalar sin_theta = sin(theta);
// roots are already sorted, since cos is monotonically decreasing on [0, pi]
roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3)
roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3)
roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta;
}
EIGEN_DEVICE_FUNC
static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative)
{
EIGEN_USING_STD_MATH(sqrt)
EIGEN_USING_STD_MATH(abs)
Index i0;
// Find non-zero column i0 (by construction, there must exist a non zero coefficient on the diagonal):
mat.diagonal().cwiseAbs().maxCoeff(&i0);
// mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector,
// so let's save it:
representative = mat.col(i0);
Scalar n0, n1;
VectorType c0, c1;
n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm();
n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm();
if(n0>n1) res = c0/sqrt(n0);
else res = c1/sqrt(n1);
return true;
}
EIGEN_DEVICE_FUNC
static inline void run(SolverType& solver, const MatrixType& mat, int options)
{
eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
&& "invalid option parameter");
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
EigenvectorsType& eivecs = solver.m_eivec;
VectorType& eivals = solver.m_eivalues;
// Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
Scalar shift = mat.trace() / Scalar(3);
// TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later
MatrixType scaledMat = mat.template selfadjointView<Lower>();
scaledMat.diagonal().array() -= shift;
Scalar scale = scaledMat.cwiseAbs().maxCoeff();
if(scale > 0) scaledMat /= scale; // TODO for scale==0 we could save the remaining operations
// compute the eigenvalues
computeRoots(scaledMat,eivals);
// compute the eigenvectors
if(computeEigenvectors)
{
if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())
{
// All three eigenvalues are numerically the same
eivecs.setIdentity();
}
else
{
MatrixType tmp;
tmp = scaledMat;
// Compute the eigenvector of the most distinct eigenvalue
Scalar d0 = eivals(2) - eivals(1);
Scalar d1 = eivals(1) - eivals(0);
Index k(0), l(2);
if(d0 > d1)
{
numext::swap(k,l);
d0 = d1;
}
// Compute the eigenvector of index k
{
tmp.diagonal().array () -= eivals(k);
// By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector.
extract_kernel(tmp, eivecs.col(k), eivecs.col(l));
}
// Compute eigenvector of index l
if(d0<=2*Eigen::NumTraits<Scalar>::epsilon()*d1)
{
// If d0 is too small, then the two other eigenvalues are numerically the same,
// and thus we only have to ortho-normalize the near orthogonal vector we saved above.
          eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(k);  // remove the component along eivecs.col(k)
eivecs.col(l).normalize();
}
else
{
tmp = scaledMat;
tmp.diagonal().array () -= eivals(l);
VectorType dummy;
extract_kernel(tmp, eivecs.col(l), dummy);
}
// Compute last eigenvector from the other two
eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized();
}
}
// Rescale back to the original size.
eivals *= scale;
eivals.array() += shift;
solver.m_info = Success;
solver.m_isInitialized = true;
solver.m_eigenvectorsOk = computeEigenvectors;
}
};
// 2x2 direct eigenvalues decomposition, code from Hauke Heibel
template<typename SolverType>
struct direct_selfadjoint_eigenvalues<SolverType,2,false>
{
typedef typename SolverType::MatrixType MatrixType;
typedef typename SolverType::RealVectorType VectorType;
typedef typename SolverType::Scalar Scalar;
typedef typename SolverType::EigenvectorsType EigenvectorsType;
EIGEN_DEVICE_FUNC
static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
using std::sqrt;
const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0)));
const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
roots(0) = t1 - t0;
roots(1) = t1 + t0;
}
EIGEN_DEVICE_FUNC
static inline void run(SolverType& solver, const MatrixType& mat, int options)
{
EIGEN_USING_STD_MATH(sqrt);
EIGEN_USING_STD_MATH(abs);
eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
&& "invalid option parameter");
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
EigenvectorsType& eivecs = solver.m_eivec;
VectorType& eivals = solver.m_eivalues;
// Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
Scalar shift = mat.trace() / Scalar(2);
MatrixType scaledMat = mat;
scaledMat.coeffRef(0,1) = mat.coeff(1,0);
scaledMat.diagonal().array() -= shift;
Scalar scale = scaledMat.cwiseAbs().maxCoeff();
if(scale > Scalar(0))
scaledMat /= scale;
// Compute the eigenvalues
computeRoots(scaledMat,eivals);
    // compute the eigenvectors
if(computeEigenvectors)
{
if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits<Scalar>::epsilon())
{
eivecs.setIdentity();
}
else
{
scaledMat.diagonal().array () -= eivals(1);
Scalar a2 = numext::abs2(scaledMat(0,0));
Scalar c2 = numext::abs2(scaledMat(1,1));
Scalar b2 = numext::abs2(scaledMat(1,0));
if(a2>c2)
{
eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
eivecs.col(1) /= sqrt(a2+b2);
}
else
{
eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
eivecs.col(1) /= sqrt(c2+b2);
}
eivecs.col(0) << eivecs.col(1).unitOrthogonal();
}
}
// Rescale back to the original size.
eivals *= scale;
eivals.array() += shift;
solver.m_info = Success;
solver.m_isInitialized = true;
solver.m_eigenvectorsOk = computeEigenvectors;
}
};
}
template<typename MatrixType>
EIGEN_DEVICE_FUNC
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
::computeDirect(const MatrixType& matrix, int options)
{
internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>::run(*this,matrix,options);
return *this;
}
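// Usage sketch (illustrative; the names X, A, es and lambda are hypothetical): the closed-form
// path for small fixed-size matrices, trading some accuracy for speed as documented above.
//
//   Eigen::Matrix3f X = Eigen::Matrix3f::Random();
//   Eigen::Matrix3f A = X + X.transpose();
//   Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> es;
//   es.computeDirect(A);                       // 3x3 closed-form solver
//   Eigen::Vector3f lambda = es.eigenvalues();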
namespace internal {
template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
EIGEN_DEVICE_FUNC
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
{
using std::abs;
RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
RealScalar e = subdiag[end-1];
// Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still
// underflow thus leading to inf/NaN values when using the following commented code:
// RealScalar e2 = numext::abs2(subdiag[end-1]);
// RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
  // This explains the following, somewhat more complicated, version:
RealScalar mu = diag[end];
if(td==RealScalar(0))
mu -= abs(e);
else
{
RealScalar e2 = numext::abs2(subdiag[end-1]);
RealScalar h = numext::hypot(td,e);
if(e2==RealScalar(0)) mu -= (e / (td + (td>RealScalar(0) ? RealScalar(1) : RealScalar(-1)))) * (e / h);
else mu -= e2 / (td + (td>RealScalar(0) ? h : -h));
}
RealScalar x = diag[start] - mu;
RealScalar z = subdiag[start];
for (Index k = start; k < end; ++k)
{
JacobiRotation<RealScalar> rot;
rot.makeGivens(x, z);
// do T = G' T G
RealScalar sdk = rot.s() * diag[k] + rot.c() * subdiag[k];
RealScalar dkp1 = rot.s() * subdiag[k] + rot.c() * diag[k+1];
diag[k] = rot.c() * (rot.c() * diag[k] - rot.s() * subdiag[k]) - rot.s() * (rot.c() * subdiag[k] - rot.s() * diag[k+1]);
diag[k+1] = rot.s() * sdk + rot.c() * dkp1;
subdiag[k] = rot.c() * sdk - rot.s() * dkp1;
if (k > start)
subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z;
x = subdiag[k];
if (k < end - 1)
{
z = -rot.s() * subdiag[k+1];
subdiag[k + 1] = rot.c() * subdiag[k+1];
}
    // accumulate the Givens rotation into the eigenvector matrix: Q = Q * G
if (matrixQ)
{
// FIXME if StorageOrder == RowMajor this operation is not very efficient
Map<Matrix<Scalar,Dynamic,Dynamic,StorageOrder> > q(matrixQ,n,n);
q.applyOnTheRight(k,k+1,rot);
}
}
}
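// Minimal sketch of the Givens rotation used above (illustrative values; v and rot are
// hypothetical names): makeGivens(p, q) builds a rotation G such that applying G^* on the
// left of (p, q)^T annihilates the second entry.
//
//   Eigen::JacobiRotation<double> rot;
//   rot.makeGivens(3.0, 4.0);
//   Eigen::MatrixXd v(2, 1);
//   v << 3.0, 4.0;
//   v.applyOnTheLeft(0, 1, rot.adjoint());   // v(1,0) becomes 0; v(0,0) becomes the norm of (3,4) up to sign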
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SELFADJOINTEIGENSOLVER_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
|
.h
| 17,176
| 419
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
// Copyright (C) 2016 Tobias Wood <tobias@spinicist.org.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERALIZEDEIGENSOLVER_H
#define EIGEN_GENERALIZEDEIGENSOLVER_H
#include "./RealQZ.h"
namespace Eigen {
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class GeneralizedEigenSolver
*
* \brief Computes the generalized eigenvalues and eigenvectors of a pair of general matrices
*
* \tparam _MatrixType the type of the matrices of which we are computing the
* eigen-decomposition; this is expected to be an instantiation of the Matrix
* class template. Currently, only real matrices are supported.
*
* The generalized eigenvalues and eigenvectors of a matrix pair \f$ A \f$ and \f$ B \f$ are scalars
* \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda Bv \f$. If
* \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and
* \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V =
* B V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we
* have \f$ A = B V D V^{-1} \f$. This is called the generalized eigen-decomposition.
*
* The generalized eigenvalues and eigenvectors of a matrix pair may be complex, even when the
* matrices are real. Moreover, the generalized eigenvalue might be infinite if the matrix B is
 * singular. To work around this difficulty, the eigenvalues are provided as a pair of complex \f$ \alpha \f$
 * and real \f$ \beta \f$ such that: \f$ \lambda_i = \alpha_i / \beta_i \f$. If \f$ \beta_i \f$ is (nearly) zero,
 * then one can consider the well-defined left eigenvalue \f$ \mu = \beta_i / \alpha_i\f$ such that:
* \f$ \mu_i A v_i = B v_i \f$, or even \f$ \mu_i u_i^T A = u_i^T B \f$ where \f$ u_i \f$ is
* called the left eigenvector.
*
* Call the function compute() to compute the generalized eigenvalues and eigenvectors of
* a given matrix pair. Alternatively, you can use the
* GeneralizedEigenSolver(const MatrixType&, const MatrixType&, bool) constructor which computes the
* eigenvalues and eigenvectors at construction time. Once the eigenvalue and
* eigenvectors are computed, they can be retrieved with the eigenvalues() and
* eigenvectors() functions.
*
  * Here is a usage example of this class:
* Example: \include GeneralizedEigenSolver.cpp
* Output: \verbinclude GeneralizedEigenSolver.out
*
* \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver
*/
template<typename _MatrixType> class GeneralizedEigenSolver
{
public:
/** \brief Synonym for the template parameter \p _MatrixType. */
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
/** \brief Scalar type for matrices of type #MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** \brief Complex scalar type for #MatrixType.
*
* This is \c std::complex<Scalar> if #Scalar is real (e.g.,
* \c float or \c double) and just \c Scalar if #Scalar is
* complex.
*/
typedef std::complex<RealScalar> ComplexScalar;
    /** \brief Type for the vector of real scalar values (the \f$ \beta_i \f$ components of the eigenvalues) as returned by betas().
*
* This is a column vector with entries of type #Scalar.
* The length of the vector is the size of #MatrixType.
*/
typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> VectorType;
    /** \brief Type for the vector of complex scalar values (the \f$ \alpha_i \f$ components of the eigenvalues) as returned by alphas().
*
* This is a column vector with entries of type #ComplexScalar.
* The length of the vector is the size of #MatrixType.
*/
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ComplexVectorType;
/** \brief Expression type for the eigenvalues as returned by eigenvalues().
*/
typedef CwiseBinaryOp<internal::scalar_quotient_op<ComplexScalar,Scalar>,ComplexVectorType,VectorType> EigenvalueType;
/** \brief Type for matrix of eigenvectors as returned by eigenvectors().
*
* This is a square matrix with entries of type #ComplexScalar.
* The size is the same as the size of #MatrixType.
*/
typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType;
/** \brief Default constructor.
*
* The default constructor is useful in cases in which the user intends to
      * perform decompositions via GeneralizedEigenSolver::compute(const MatrixType&, const MatrixType&, bool).
*
* \sa compute() for an example.
*/
GeneralizedEigenSolver()
: m_eivec(),
m_alphas(),
m_betas(),
m_valuesOkay(false),
m_vectorsOkay(false),
m_realQZ()
{}
/** \brief Default constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem \a size.
* \sa GeneralizedEigenSolver()
*/
explicit GeneralizedEigenSolver(Index size)
: m_eivec(size, size),
m_alphas(size),
m_betas(size),
m_valuesOkay(false),
m_vectorsOkay(false),
m_realQZ(size),
m_tmp(size)
{}
/** \brief Constructor; computes the generalized eigendecomposition of given matrix pair.
*
* \param[in] A Square matrix whose eigendecomposition is to be computed.
* \param[in] B Square matrix whose eigendecomposition is to be computed.
* \param[in] computeEigenvectors If true, both the eigenvectors and the
* eigenvalues are computed; if false, only the eigenvalues are computed.
*
* This constructor calls compute() to compute the generalized eigenvalues
* and eigenvectors.
*
* \sa compute()
*/
GeneralizedEigenSolver(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true)
: m_eivec(A.rows(), A.cols()),
m_alphas(A.cols()),
m_betas(A.cols()),
m_valuesOkay(false),
m_vectorsOkay(false),
m_realQZ(A.cols()),
m_tmp(A.cols())
{
compute(A, B, computeEigenvectors);
}
    /** \brief Returns the computed generalized eigenvectors.
*
* \returns %Matrix whose columns are the (possibly complex) right eigenvectors.
* i.e. the eigenvectors that solve (A - l*B)x = 0. The ordering matches the eigenvalues.
*
* \pre Either the constructor
* GeneralizedEigenSolver(const MatrixType&,const MatrixType&, bool) or the member function
      * compute(const MatrixType&, const MatrixType&, bool) has been called before, and
* \p computeEigenvectors was set to true (the default).
*
* \sa eigenvalues()
*/
EigenvectorsType eigenvectors() const {
eigen_assert(m_vectorsOkay && "Eigenvectors for GeneralizedEigenSolver were not calculated.");
return m_eivec;
}
/** \brief Returns an expression of the computed generalized eigenvalues.
*
* \returns An expression of the column vector containing the eigenvalues.
*
* It is a shortcut for \code this->alphas().cwiseQuotient(this->betas()); \endcode
      * Note that betas might contain zeros. It is therefore not recommended to use this function,
      * but rather to deal directly with the alphas and betas vectors.
*
* \pre Either the constructor
* GeneralizedEigenSolver(const MatrixType&,const MatrixType&,bool) or the member function
* compute(const MatrixType&,const MatrixType&,bool) has been called before.
*
* The eigenvalues are repeated according to their algebraic multiplicity,
* so there are as many eigenvalues as rows in the matrix. The eigenvalues
* are not sorted in any particular order.
*
* \sa alphas(), betas(), eigenvectors()
*/
EigenvalueType eigenvalues() const
{
eigen_assert(m_valuesOkay && "GeneralizedEigenSolver is not initialized.");
return EigenvalueType(m_alphas,m_betas);
}
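    /* Editor's sketch (not one of Eigen's shipped examples): a minimal, self-contained
     * illustration of the recommendation above -- work with alphas() and betas() directly
     * so that (nearly) zero betas, i.e. infinite eigenvalues, can be detected instead of
     * silently dividing by zero. Random 4x4 double matrices are used purely as test data.
     * \code
     * #include <Eigen/Eigenvalues>
     * #include <iostream>
     * #include <cmath>
     * #include <limits>
     *
     * int main()
     * {
     *   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
     *   Eigen::MatrixXd B = Eigen::MatrixXd::Random(4,4);
     *   Eigen::GeneralizedEigenSolver<Eigen::MatrixXd> ges(A, B);
     *   for (Eigen::Index i = 0; i < A.rows(); ++i)
     *   {
     *     double beta = ges.betas()(i);
     *     if (std::abs(beta) < (std::numeric_limits<double>::min)())
     *       std::cout << "eigenvalue " << i << " is infinite (beta ~ 0)\n";
     *     else
     *       std::cout << "lambda_" << i << " = " << ges.alphas()(i) / beta << "\n";
     *   }
     * }
     * \endcode
     */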
    /** \returns The vector containing the alpha values
      *
      * This vector permits the reconstruction of the j-th eigenvalue as alphas(j)/betas(j).
*
* \sa betas(), eigenvalues() */
ComplexVectorType alphas() const
{
eigen_assert(m_valuesOkay && "GeneralizedEigenSolver is not initialized.");
return m_alphas;
}
    /** \returns The vector containing the beta values
      *
      * This vector permits the reconstruction of the j-th eigenvalue as alphas(j)/betas(j).
*
* \sa alphas(), eigenvalues() */
VectorType betas() const
{
eigen_assert(m_valuesOkay && "GeneralizedEigenSolver is not initialized.");
return m_betas;
}
/** \brief Computes generalized eigendecomposition of given matrix.
*
* \param[in] A Square matrix whose eigendecomposition is to be computed.
* \param[in] B Square matrix whose eigendecomposition is to be computed.
* \param[in] computeEigenvectors If true, both the eigenvectors and the
* eigenvalues are computed; if false, only the eigenvalues are
* computed.
* \returns Reference to \c *this
*
      * This function computes the generalized eigenvalues of the real matrix pair \p A and \p B.
* The eigenvalues() function can be used to retrieve them. If
* \p computeEigenvectors is true, then the eigenvectors are also computed
* and can be retrieved by calling eigenvectors().
*
* The matrix is first reduced to real generalized Schur form using the RealQZ
* class. The generalized Schur decomposition is then used to compute the eigenvalues
* and eigenvectors.
*
* The cost of the computation is dominated by the cost of the
* generalized Schur decomposition.
*
      * This method reuses the allocated data in the GeneralizedEigenSolver object.
*/
GeneralizedEigenSolver& compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true);
ComputationInfo info() const
{
eigen_assert(m_valuesOkay && "EigenSolver is not initialized.");
return m_realQZ.info();
}
/** Sets the maximal number of iterations allowed.
*/
GeneralizedEigenSolver& setMaxIterations(Index maxIters)
{
m_realQZ.setMaxIterations(maxIters);
return *this;
}
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
}
EigenvectorsType m_eivec;
ComplexVectorType m_alphas;
VectorType m_betas;
bool m_valuesOkay, m_vectorsOkay;
RealQZ<MatrixType> m_realQZ;
ComplexVectorType m_tmp;
};
template<typename MatrixType>
GeneralizedEigenSolver<MatrixType>&
GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors)
{
check_template_parameters();
using std::sqrt;
using std::abs;
eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows());
Index size = A.cols();
m_valuesOkay = false;
m_vectorsOkay = false;
// Reduce to generalized real Schur form:
// A = Q S Z and B = Q T Z
m_realQZ.compute(A, B, computeEigenvectors);
if (m_realQZ.info() == Success)
{
// Resize storage
m_alphas.resize(size);
m_betas.resize(size);
if (computeEigenvectors)
{
m_eivec.resize(size,size);
m_tmp.resize(size);
}
// Aliases:
Map<VectorType> v(reinterpret_cast<Scalar*>(m_tmp.data()), size);
ComplexVectorType &cv = m_tmp;
const MatrixType &mS = m_realQZ.matrixS();
const MatrixType &mT = m_realQZ.matrixT();
Index i = 0;
while (i < size)
{
if (i == size - 1 || mS.coeff(i+1, i) == Scalar(0))
{
// Real eigenvalue
m_alphas.coeffRef(i) = mS.diagonal().coeff(i);
m_betas.coeffRef(i) = mT.diagonal().coeff(i);
if (computeEigenvectors)
{
v.setConstant(Scalar(0.0));
v.coeffRef(i) = Scalar(1.0);
// For singular eigenvalues do nothing more
if(abs(m_betas.coeffRef(i)) >= (std::numeric_limits<RealScalar>::min)())
{
// Non-singular eigenvalue
const Scalar alpha = real(m_alphas.coeffRef(i));
const Scalar beta = m_betas.coeffRef(i);
for (Index j = i-1; j >= 0; j--)
{
const Index st = j+1;
const Index sz = i-j;
if (j > 0 && mS.coeff(j, j-1) != Scalar(0))
{
// 2x2 block
Matrix<Scalar, 2, 1> rhs = (alpha*mT.template block<2,Dynamic>(j-1,st,2,sz) - beta*mS.template block<2,Dynamic>(j-1,st,2,sz)) .lazyProduct( v.segment(st,sz) );
Matrix<Scalar, 2, 2> lhs = beta * mS.template block<2,2>(j-1,j-1) - alpha * mT.template block<2,2>(j-1,j-1);
v.template segment<2>(j-1) = lhs.partialPivLu().solve(rhs);
j--;
}
else
{
v.coeffRef(j) = -v.segment(st,sz).transpose().cwiseProduct(beta*mS.block(j,st,1,sz) - alpha*mT.block(j,st,1,sz)).sum() / (beta*mS.coeffRef(j,j) - alpha*mT.coeffRef(j,j));
}
}
}
m_eivec.col(i).real().noalias() = m_realQZ.matrixZ().transpose() * v;
m_eivec.col(i).real().normalize();
m_eivec.col(i).imag().setConstant(0);
}
++i;
}
else
{
// We need to extract the generalized eigenvalues of the pair of a general 2x2 block S and a positive diagonal 2x2 block T
        // Then, taking beta = T_00*T_11, we can avoid any division, and the alphas are the eigenvalues of A = (U^-1 * S * U) * diag(T_11,T_00):
// T = [a 0]
// [0 b]
RealScalar a = mT.diagonal().coeff(i),
b = mT.diagonal().coeff(i+1);
const RealScalar beta = m_betas.coeffRef(i) = m_betas.coeffRef(i+1) = a*b;
        // ^^ NOTE: using diagonal()(i) instead of coeff(i,i) works around an MSVC bug.
Matrix<RealScalar,2,2> S2 = mS.template block<2,2>(i,i) * Matrix<Scalar,2,1>(b,a).asDiagonal();
Scalar p = Scalar(0.5) * (S2.coeff(0,0) - S2.coeff(1,1));
Scalar z = sqrt(abs(p * p + S2.coeff(1,0) * S2.coeff(0,1)));
const ComplexScalar alpha = ComplexScalar(S2.coeff(1,1) + p, (beta > 0) ? z : -z);
m_alphas.coeffRef(i) = conj(alpha);
m_alphas.coeffRef(i+1) = alpha;
if (computeEigenvectors) {
// Compute eigenvector in position (i+1) and then position (i) is just the conjugate
cv.setZero();
cv.coeffRef(i+1) = Scalar(1.0);
// here, the "static_cast" workaound expression template issues.
cv.coeffRef(i) = -(static_cast<Scalar>(beta*mS.coeffRef(i,i+1)) - alpha*mT.coeffRef(i,i+1))
/ (static_cast<Scalar>(beta*mS.coeffRef(i,i)) - alpha*mT.coeffRef(i,i));
for (Index j = i-1; j >= 0; j--)
{
const Index st = j+1;
const Index sz = i+1-j;
if (j > 0 && mS.coeff(j, j-1) != Scalar(0))
{
// 2x2 block
Matrix<ComplexScalar, 2, 1> rhs = (alpha*mT.template block<2,Dynamic>(j-1,st,2,sz) - beta*mS.template block<2,Dynamic>(j-1,st,2,sz)) .lazyProduct( cv.segment(st,sz) );
Matrix<ComplexScalar, 2, 2> lhs = beta * mS.template block<2,2>(j-1,j-1) - alpha * mT.template block<2,2>(j-1,j-1);
cv.template segment<2>(j-1) = lhs.partialPivLu().solve(rhs);
j--;
} else {
cv.coeffRef(j) = cv.segment(st,sz).transpose().cwiseProduct(beta*mS.block(j,st,1,sz) - alpha*mT.block(j,st,1,sz)).sum()
/ (alpha*mT.coeffRef(j,j) - static_cast<Scalar>(beta*mS.coeffRef(j,j)));
}
}
m_eivec.col(i+1).noalias() = (m_realQZ.matrixZ().transpose() * cv);
m_eivec.col(i+1).normalize();
m_eivec.col(i) = m_eivec.col(i+1).conjugate();
}
i += 2;
}
}
m_valuesOkay = true;
m_vectorsOkay = computeEigenvectors;
}
return *this;
}
} // end namespace Eigen
#endif // EIGEN_GENERALIZEDEIGENSOLVER_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/Eigenvalues/EigenSolver.h
|
.h
| 22,944
| 623
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_EIGENSOLVER_H
#define EIGEN_EIGENSOLVER_H
#include "./RealSchur.h"
namespace Eigen {
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class EigenSolver
*
* \brief Computes eigenvalues and eigenvectors of general matrices
*
* \tparam _MatrixType the type of the matrix of which we are computing the
* eigendecomposition; this is expected to be an instantiation of the Matrix
* class template. Currently, only real matrices are supported.
*
* The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars
* \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v \f$. If
* \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and
* \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V =
* V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we
* have \f$ A = V D V^{-1} \f$. This is called the eigendecomposition.
*
* The eigenvalues and eigenvectors of a matrix may be complex, even when the
* matrix is real. However, we can choose real matrices \f$ V \f$ and \f$ D
* \f$ satisfying \f$ A V = V D \f$, just like the eigendecomposition, if the
* matrix \f$ D \f$ is not required to be diagonal, but if it is allowed to
* have blocks of the form
* \f[ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f]
* (where \f$ u \f$ and \f$ v \f$ are real numbers) on the diagonal. These
* blocks correspond to complex eigenvalue pairs \f$ u \pm iv \f$. We call
* this variant of the eigendecomposition the pseudo-eigendecomposition.
*
* Call the function compute() to compute the eigenvalues and eigenvectors of
* a given matrix. Alternatively, you can use the
* EigenSolver(const MatrixType&, bool) constructor which computes the
* eigenvalues and eigenvectors at construction time. Once the eigenvalue and
* eigenvectors are computed, they can be retrieved with the eigenvalues() and
* eigenvectors() functions. The pseudoEigenvalueMatrix() and
* pseudoEigenvectors() methods allow the construction of the
* pseudo-eigendecomposition.
*
* The documentation for EigenSolver(const MatrixType&, bool) contains an
* example of the typical use of this class.
*
* \note The implementation is adapted from
* <a href="http://math.nist.gov/javanumerics/jama/">JAMA</a> (public domain).
* Their code is based on EISPACK.
*
* \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver
*/
template<typename _MatrixType> class EigenSolver
{
public:
/** \brief Synonym for the template parameter \p _MatrixType. */
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
/** \brief Scalar type for matrices of type #MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** \brief Complex scalar type for #MatrixType.
*
* This is \c std::complex<Scalar> if #Scalar is real (e.g.,
* \c float or \c double) and just \c Scalar if #Scalar is
* complex.
*/
typedef std::complex<RealScalar> ComplexScalar;
/** \brief Type for vector of eigenvalues as returned by eigenvalues().
*
* This is a column vector with entries of type #ComplexScalar.
* The length of the vector is the size of #MatrixType.
*/
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
/** \brief Type for matrix of eigenvectors as returned by eigenvectors().
*
* This is a square matrix with entries of type #ComplexScalar.
* The size is the same as the size of #MatrixType.
*/
typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType;
/** \brief Default constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via EigenSolver::compute(const MatrixType&, bool).
*
* \sa compute() for an example.
*/
EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_realSchur(), m_matT(), m_tmp() {}
/** \brief Default constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem \a size.
* \sa EigenSolver()
*/
explicit EigenSolver(Index size)
: m_eivec(size, size),
m_eivalues(size),
m_isInitialized(false),
m_eigenvectorsOk(false),
m_realSchur(size),
m_matT(size, size),
m_tmp(size)
{}
/** \brief Constructor; computes eigendecomposition of given matrix.
*
* \param[in] matrix Square matrix whose eigendecomposition is to be computed.
* \param[in] computeEigenvectors If true, both the eigenvectors and the
* eigenvalues are computed; if false, only the eigenvalues are
* computed.
*
* This constructor calls compute() to compute the eigenvalues
* and eigenvectors.
*
* Example: \include EigenSolver_EigenSolver_MatrixType.cpp
* Output: \verbinclude EigenSolver_EigenSolver_MatrixType.out
*
* \sa compute()
*/
template<typename InputType>
explicit EigenSolver(const EigenBase<InputType>& matrix, bool computeEigenvectors = true)
: m_eivec(matrix.rows(), matrix.cols()),
m_eivalues(matrix.cols()),
m_isInitialized(false),
m_eigenvectorsOk(false),
m_realSchur(matrix.cols()),
m_matT(matrix.rows(), matrix.cols()),
m_tmp(matrix.cols())
{
compute(matrix.derived(), computeEigenvectors);
}
/** \brief Returns the eigenvectors of given matrix.
*
* \returns %Matrix whose columns are the (possibly complex) eigenvectors.
*
* \pre Either the constructor
* EigenSolver(const MatrixType&,bool) or the member function
* compute(const MatrixType&, bool) has been called before, and
* \p computeEigenvectors was set to true (the default).
*
* Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
* to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The
* eigenvectors are normalized to have (Euclidean) norm equal to one. The
* matrix returned by this function is the matrix \f$ V \f$ in the
* eigendecomposition \f$ A = V D V^{-1} \f$, if it exists.
*
* Example: \include EigenSolver_eigenvectors.cpp
* Output: \verbinclude EigenSolver_eigenvectors.out
*
* \sa eigenvalues(), pseudoEigenvectors()
*/
EigenvectorsType eigenvectors() const;
/** \brief Returns the pseudo-eigenvectors of given matrix.
*
* \returns Const reference to matrix whose columns are the pseudo-eigenvectors.
*
* \pre Either the constructor
* EigenSolver(const MatrixType&,bool) or the member function
* compute(const MatrixType&, bool) has been called before, and
* \p computeEigenvectors was set to true (the default).
*
* The real matrix \f$ V \f$ returned by this function and the
* block-diagonal matrix \f$ D \f$ returned by pseudoEigenvalueMatrix()
* satisfy \f$ AV = VD \f$.
*
* Example: \include EigenSolver_pseudoEigenvectors.cpp
* Output: \verbinclude EigenSolver_pseudoEigenvectors.out
*
* \sa pseudoEigenvalueMatrix(), eigenvectors()
*/
const MatrixType& pseudoEigenvectors() const
{
eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
return m_eivec;
}
/** \brief Returns the block-diagonal matrix in the pseudo-eigendecomposition.
*
* \returns A block-diagonal matrix.
*
* \pre Either the constructor
* EigenSolver(const MatrixType&,bool) or the member function
* compute(const MatrixType&, bool) has been called before.
*
* The matrix \f$ D \f$ returned by this function is real and
* block-diagonal. The blocks on the diagonal are either 1-by-1 or 2-by-2
* blocks of the form
* \f$ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f$.
* These blocks are not sorted in any particular order.
* The matrix \f$ D \f$ and the matrix \f$ V \f$ returned by
* pseudoEigenvectors() satisfy \f$ AV = VD \f$.
*
* \sa pseudoEigenvectors() for an example, eigenvalues()
*/
MatrixType pseudoEigenvalueMatrix() const;
/** \brief Returns the eigenvalues of given matrix.
*
* \returns A const reference to the column vector containing the eigenvalues.
*
* \pre Either the constructor
* EigenSolver(const MatrixType&,bool) or the member function
* compute(const MatrixType&, bool) has been called before.
*
* The eigenvalues are repeated according to their algebraic multiplicity,
* so there are as many eigenvalues as rows in the matrix. The eigenvalues
* are not sorted in any particular order.
*
* Example: \include EigenSolver_eigenvalues.cpp
* Output: \verbinclude EigenSolver_eigenvalues.out
*
* \sa eigenvectors(), pseudoEigenvalueMatrix(),
* MatrixBase::eigenvalues()
*/
const EigenvalueType& eigenvalues() const
{
eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
return m_eivalues;
}
/** \brief Computes eigendecomposition of given matrix.
*
* \param[in] matrix Square matrix whose eigendecomposition is to be computed.
* \param[in] computeEigenvectors If true, both the eigenvectors and the
* eigenvalues are computed; if false, only the eigenvalues are
* computed.
* \returns Reference to \c *this
*
* This function computes the eigenvalues of the real matrix \p matrix.
* The eigenvalues() function can be used to retrieve them. If
* \p computeEigenvectors is true, then the eigenvectors are also computed
* and can be retrieved by calling eigenvectors().
*
* The matrix is first reduced to real Schur form using the RealSchur
* class. The Schur decomposition is then used to compute the eigenvalues
* and eigenvectors.
*
* The cost of the computation is dominated by the cost of the
* Schur decomposition, which is very approximately \f$ 25n^3 \f$
* (where \f$ n \f$ is the size of the matrix) if \p computeEigenvectors
* is true, and \f$ 10n^3 \f$ if \p computeEigenvectors is false.
*
      * This method reuses the allocated data in the EigenSolver object.
*
* Example: \include EigenSolver_compute.cpp
* Output: \verbinclude EigenSolver_compute.out
*/
template<typename InputType>
EigenSolver& compute(const EigenBase<InputType>& matrix, bool computeEigenvectors = true);
    /** \returns NumericalIssue if the input contains INF or NaN values or overflow occurred. Returns Success otherwise. */
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
return m_info;
}
/** \brief Sets the maximum number of iterations allowed. */
EigenSolver& setMaxIterations(Index maxIters)
{
m_realSchur.setMaxIterations(maxIters);
return *this;
}
/** \brief Returns the maximum number of iterations. */
Index getMaxIterations()
{
return m_realSchur.getMaxIterations();
}
private:
void doComputeEigenvectors();
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
}
MatrixType m_eivec;
EigenvalueType m_eivalues;
bool m_isInitialized;
bool m_eigenvectorsOk;
ComputationInfo m_info;
RealSchur<MatrixType> m_realSchur;
MatrixType m_matT;
typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
ColumnVectorType m_tmp;
};
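/* Editor's sketch (not one of Eigen's shipped examples): a minimal end-to-end use of the
 * class declared above -- compute the decomposition, check info(), and verify the defining
 * relation A*v = lambda*v for the first eigenpair. The random 4x4 matrix is test data only.
 * \code
 * #include <Eigen/Eigenvalues>
 * #include <iostream>
 * #include <complex>
 *
 * int main()
 * {
 *   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
 *   Eigen::EigenSolver<Eigen::MatrixXd> es(A);   // eigenvalues and eigenvectors
 *   if (es.info() != Eigen::Success)
 *     return 1;
 *   std::cout << "eigenvalues:\n" << es.eigenvalues() << "\n";
 *   Eigen::VectorXcd v0 = es.eigenvectors().col(0);
 *   std::complex<double> l0 = es.eigenvalues()(0);
 *   std::cout << "residual of first pair: "
 *             << (A.cast<std::complex<double> >() * v0 - l0 * v0).norm() << "\n";
 * }
 * \endcode
 */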
template<typename MatrixType>
MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const
{
eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
const RealScalar precision = RealScalar(2)*NumTraits<RealScalar>::epsilon();
Index n = m_eivalues.rows();
MatrixType matD = MatrixType::Zero(n,n);
for (Index i=0; i<n; ++i)
{
if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i)), precision))
matD.coeffRef(i,i) = numext::real(m_eivalues.coeff(i));
else
{
matD.template block<2,2>(i,i) << numext::real(m_eivalues.coeff(i)), numext::imag(m_eivalues.coeff(i)),
-numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i));
++i;
}
}
return matD;
}
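/* Editor's sketch (not one of Eigen's shipped examples): the pseudo-eigendecomposition
 * stays entirely in real arithmetic, so A*V = V*D can be checked with real matrices even
 * when some eigenvalues are complex. The random 5x5 matrix is test data only.
 * \code
 * #include <Eigen/Eigenvalues>
 * #include <iostream>
 *
 * int main()
 * {
 *   Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,5);
 *   Eigen::EigenSolver<Eigen::MatrixXd> es(A);
 *   Eigen::MatrixXd V = es.pseudoEigenvectors();      // real
 *   Eigen::MatrixXd D = es.pseudoEigenvalueMatrix();  // real, block-diagonal
 *   std::cout << "||A*V - V*D|| = " << (A*V - V*D).norm() << "\n";
 * }
 * \endcode
 */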
template<typename MatrixType>
typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eigenvectors() const
{
eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
const RealScalar precision = RealScalar(2)*NumTraits<RealScalar>::epsilon();
Index n = m_eivec.cols();
EigenvectorsType matV(n,n);
for (Index j=0; j<n; ++j)
{
if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(j)), numext::real(m_eivalues.coeff(j)), precision) || j+1==n)
{
// we have a real eigen value
matV.col(j) = m_eivec.col(j).template cast<ComplexScalar>();
matV.col(j).normalize();
}
else
{
// we have a pair of complex eigen values
for (Index i=0; i<n; ++i)
{
matV.coeffRef(i,j) = ComplexScalar(m_eivec.coeff(i,j), m_eivec.coeff(i,j+1));
matV.coeffRef(i,j+1) = ComplexScalar(m_eivec.coeff(i,j), -m_eivec.coeff(i,j+1));
}
matV.col(j).normalize();
matV.col(j+1).normalize();
++j;
}
}
return matV;
}
template<typename MatrixType>
template<typename InputType>
EigenSolver<MatrixType>&
EigenSolver<MatrixType>::compute(const EigenBase<InputType>& matrix, bool computeEigenvectors)
{
check_template_parameters();
using std::sqrt;
using std::abs;
using numext::isfinite;
eigen_assert(matrix.cols() == matrix.rows());
// Reduce to real Schur form.
m_realSchur.compute(matrix.derived(), computeEigenvectors);
m_info = m_realSchur.info();
if (m_info == Success)
{
m_matT = m_realSchur.matrixT();
if (computeEigenvectors)
m_eivec = m_realSchur.matrixU();
// Compute eigenvalues from matT
m_eivalues.resize(matrix.cols());
Index i = 0;
while (i < matrix.cols())
{
if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0))
{
m_eivalues.coeffRef(i) = m_matT.coeff(i, i);
if(!(isfinite)(m_eivalues.coeffRef(i)))
{
m_isInitialized = true;
m_eigenvectorsOk = false;
m_info = NumericalIssue;
return *this;
}
++i;
}
else
{
Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1));
Scalar z;
// Compute z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
// without overflow
{
Scalar t0 = m_matT.coeff(i+1, i);
Scalar t1 = m_matT.coeff(i, i+1);
Scalar maxval = numext::maxi<Scalar>(abs(p),numext::maxi<Scalar>(abs(t0),abs(t1)));
t0 /= maxval;
t1 /= maxval;
Scalar p0 = p/maxval;
z = maxval * sqrt(abs(p0 * p0 + t0 * t1));
}
m_eivalues.coeffRef(i) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z);
m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z);
if(!((isfinite)(m_eivalues.coeffRef(i)) && (isfinite)(m_eivalues.coeffRef(i+1))))
{
m_isInitialized = true;
m_eigenvectorsOk = false;
m_info = NumericalIssue;
return *this;
}
i += 2;
}
}
// Compute eigenvectors.
if (computeEigenvectors)
doComputeEigenvectors();
}
m_isInitialized = true;
m_eigenvectorsOk = computeEigenvectors;
return *this;
}
template<typename MatrixType>
void EigenSolver<MatrixType>::doComputeEigenvectors()
{
using std::abs;
const Index size = m_eivec.cols();
const Scalar eps = NumTraits<Scalar>::epsilon();
// inefficient! this is already computed in RealSchur
Scalar norm(0);
for (Index j = 0; j < size; ++j)
{
norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();
}
// Backsubstitute to find vectors of upper triangular form
if (norm == Scalar(0))
{
return;
}
for (Index n = size-1; n >= 0; n--)
{
Scalar p = m_eivalues.coeff(n).real();
Scalar q = m_eivalues.coeff(n).imag();
// Scalar vector
if (q == Scalar(0))
{
Scalar lastr(0), lastw(0);
Index l = n;
m_matT.coeffRef(n,n) = Scalar(1);
for (Index i = n-1; i >= 0; i--)
{
Scalar w = m_matT.coeff(i,i) - p;
Scalar r = m_matT.row(i).segment(l,n-l+1).dot(m_matT.col(n).segment(l, n-l+1));
if (m_eivalues.coeff(i).imag() < Scalar(0))
{
lastw = w;
lastr = r;
}
else
{
l = i;
if (m_eivalues.coeff(i).imag() == Scalar(0))
{
if (w != Scalar(0))
m_matT.coeffRef(i,n) = -r / w;
else
m_matT.coeffRef(i,n) = -r / (eps * norm);
}
else // Solve real equations
{
Scalar x = m_matT.coeff(i,i+1);
Scalar y = m_matT.coeff(i+1,i);
Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag();
Scalar t = (x * lastr - lastw * r) / denom;
m_matT.coeffRef(i,n) = t;
if (abs(x) > abs(lastw))
m_matT.coeffRef(i+1,n) = (-r - w * t) / x;
else
m_matT.coeffRef(i+1,n) = (-lastr - y * t) / lastw;
}
// Overflow control
Scalar t = abs(m_matT.coeff(i,n));
if ((eps * t) * t > Scalar(1))
m_matT.col(n).tail(size-i) /= t;
}
}
}
else if (q < Scalar(0) && n > 0) // Complex vector
{
Scalar lastra(0), lastsa(0), lastw(0);
Index l = n-1;
// Last vector component imaginary so matrix is triangular
if (abs(m_matT.coeff(n,n-1)) > abs(m_matT.coeff(n-1,n)))
{
m_matT.coeffRef(n-1,n-1) = q / m_matT.coeff(n,n-1);
m_matT.coeffRef(n-1,n) = -(m_matT.coeff(n,n) - p) / m_matT.coeff(n,n-1);
}
else
{
ComplexScalar cc = ComplexScalar(Scalar(0),-m_matT.coeff(n-1,n)) / ComplexScalar(m_matT.coeff(n-1,n-1)-p,q);
m_matT.coeffRef(n-1,n-1) = numext::real(cc);
m_matT.coeffRef(n-1,n) = numext::imag(cc);
}
m_matT.coeffRef(n,n-1) = Scalar(0);
m_matT.coeffRef(n,n) = Scalar(1);
for (Index i = n-2; i >= 0; i--)
{
Scalar ra = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n-1).segment(l, n-l+1));
Scalar sa = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n).segment(l, n-l+1));
Scalar w = m_matT.coeff(i,i) - p;
if (m_eivalues.coeff(i).imag() < Scalar(0))
{
lastw = w;
lastra = ra;
lastsa = sa;
}
else
{
l = i;
if (m_eivalues.coeff(i).imag() == RealScalar(0))
{
ComplexScalar cc = ComplexScalar(-ra,-sa) / ComplexScalar(w,q);
m_matT.coeffRef(i,n-1) = numext::real(cc);
m_matT.coeffRef(i,n) = numext::imag(cc);
}
else
{
// Solve complex equations
Scalar x = m_matT.coeff(i,i+1);
Scalar y = m_matT.coeff(i+1,i);
Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q;
Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q;
if ((vr == Scalar(0)) && (vi == Scalar(0)))
vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw));
ComplexScalar cc = ComplexScalar(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra) / ComplexScalar(vr,vi);
m_matT.coeffRef(i,n-1) = numext::real(cc);
m_matT.coeffRef(i,n) = numext::imag(cc);
if (abs(x) > (abs(lastw) + abs(q)))
{
m_matT.coeffRef(i+1,n-1) = (-ra - w * m_matT.coeff(i,n-1) + q * m_matT.coeff(i,n)) / x;
m_matT.coeffRef(i+1,n) = (-sa - w * m_matT.coeff(i,n) - q * m_matT.coeff(i,n-1)) / x;
}
else
{
cc = ComplexScalar(-lastra-y*m_matT.coeff(i,n-1),-lastsa-y*m_matT.coeff(i,n)) / ComplexScalar(lastw,q);
m_matT.coeffRef(i+1,n-1) = numext::real(cc);
m_matT.coeffRef(i+1,n) = numext::imag(cc);
}
}
// Overflow control
Scalar t = numext::maxi<Scalar>(abs(m_matT.coeff(i,n-1)),abs(m_matT.coeff(i,n)));
if ((eps * t) * t > Scalar(1))
m_matT.block(i, n-1, size-i, 2) /= t;
}
}
// We handled a pair of complex conjugate eigenvalues, so need to skip them both
n--;
}
else
{
eigen_assert(0 && "Internal bug in EigenSolver (INF or NaN has not been detected)"); // this should not happen
}
}
// Back transformation to get eigenvectors of original matrix
for (Index j = size-1; j >= 0; j--)
{
m_tmp.noalias() = m_eivec.leftCols(j+1) * m_matT.col(j).segment(0, j+1);
m_eivec.col(j) = m_tmp;
}
}
} // end namespace Eigen
#endif // EIGEN_EIGENSOLVER_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/Eigenvalues/RealQZ.h
|
.h
| 23,586
| 655
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Alexey Korepanov <kaikaikai@yandex.ru>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_REAL_QZ_H
#define EIGEN_REAL_QZ_H
namespace Eigen {
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class RealQZ
*
* \brief Performs a real QZ decomposition of a pair of square matrices
*
* \tparam _MatrixType the type of the matrix of which we are computing the
* real QZ decomposition; this is expected to be an instantiation of the
* Matrix class template.
*
  * Given real square matrices A and B, this class computes the real QZ
  * decomposition: \f$ A = Q S Z \f$, \f$ B = Q T Z \f$ where Q and Z are
  * real orthogonal matrices, T is an upper-triangular matrix, and S is an upper
* quasi-triangular matrix. An orthogonal matrix is a matrix whose
* inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular
* matrix is a block-triangular matrix whose diagonal consists of 1-by-1
* blocks and 2-by-2 blocks where further reduction is impossible due to
* complex eigenvalues.
*
* The eigenvalues of the pencil \f$ A - z B \f$ can be obtained from
* 1x1 and 2x2 blocks on the diagonals of S and T.
*
* Call the function compute() to compute the real QZ decomposition of a
* given pair of matrices. Alternatively, you can use the
  * RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ)
* constructor which computes the real QZ decomposition at construction
* time. Once the decomposition is computed, you can use the matrixS(),
* matrixT(), matrixQ() and matrixZ() functions to retrieve the matrices
* S, T, Q and Z in the decomposition. If computeQZ==false, some time
* is saved by not computing matrices Q and Z.
*
* Example: \include RealQZ_compute.cpp
  * Output: \verbinclude RealQZ_compute.out
*
* \note The implementation is based on the algorithm in "Matrix Computations"
* by Gene H. Golub and Charles F. Van Loan, and a paper "An algorithm for
* generalized eigenvalue problems" by C.B.Moler and G.W.Stewart.
*
* \sa class RealSchur, class ComplexSchur, class EigenSolver, class ComplexEigenSolver
*/
template<typename _MatrixType> class RealQZ
{
public:
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
typedef typename MatrixType::Scalar Scalar;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
/** \brief Default constructor.
*
* \param [in] size Positive integer, size of the matrix whose QZ decomposition will be computed.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via compute(). The \p size parameter is only
* used as a hint. It is not an error to give a wrong \p size, but it may
* impair performance.
*
* \sa compute() for an example.
*/
explicit RealQZ(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) :
m_S(size, size),
m_T(size, size),
m_Q(size, size),
m_Z(size, size),
m_workspace(size*2),
m_maxIters(400),
m_isInitialized(false)
{ }
/** \brief Constructor; computes real QZ decomposition of given matrices
*
* \param[in] A Matrix A.
* \param[in] B Matrix B.
      * \param[in] computeQZ If false, Q and Z are not computed.
*
* This constructor calls compute() to compute the QZ decomposition.
*/
RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ = true) :
m_S(A.rows(),A.cols()),
m_T(A.rows(),A.cols()),
m_Q(A.rows(),A.cols()),
m_Z(A.rows(),A.cols()),
m_workspace(A.rows()*2),
m_maxIters(400),
m_isInitialized(false) {
compute(A, B, computeQZ);
}
/** \brief Returns matrix Q in the QZ decomposition.
*
* \returns A const reference to the matrix Q.
*/
const MatrixType& matrixQ() const {
eigen_assert(m_isInitialized && "RealQZ is not initialized.");
eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition.");
return m_Q;
}
/** \brief Returns matrix Z in the QZ decomposition.
*
* \returns A const reference to the matrix Z.
*/
const MatrixType& matrixZ() const {
eigen_assert(m_isInitialized && "RealQZ is not initialized.");
eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition.");
return m_Z;
}
/** \brief Returns matrix S in the QZ decomposition.
*
* \returns A const reference to the matrix S.
*/
const MatrixType& matrixS() const {
eigen_assert(m_isInitialized && "RealQZ is not initialized.");
return m_S;
}
    /** \brief Returns matrix T in the QZ decomposition.
      *
      * \returns A const reference to the matrix T.
*/
const MatrixType& matrixT() const {
eigen_assert(m_isInitialized && "RealQZ is not initialized.");
return m_T;
}
/** \brief Computes QZ decomposition of given matrix.
*
* \param[in] A Matrix A.
* \param[in] B Matrix B.
      * \param[in] computeQZ If false, Q and Z are not computed.
* \returns Reference to \c *this
*/
RealQZ& compute(const MatrixType& A, const MatrixType& B, bool computeQZ = true);
/** \brief Reports whether previous computation was successful.
*
      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "RealQZ is not initialized.");
return m_info;
}
/** \brief Returns number of performed QR-like iterations.
*/
Index iterations() const
{
eigen_assert(m_isInitialized && "RealQZ is not initialized.");
return m_global_iter;
}
/** Sets the maximal number of iterations allowed to converge to one eigenvalue
* or decouple the problem.
*/
RealQZ& setMaxIterations(Index maxIters)
{
m_maxIters = maxIters;
return *this;
}
private:
MatrixType m_S, m_T, m_Q, m_Z;
Matrix<Scalar,Dynamic,1> m_workspace;
ComputationInfo m_info;
Index m_maxIters;
bool m_isInitialized;
bool m_computeQZ;
Scalar m_normOfT, m_normOfS;
Index m_global_iter;
typedef Matrix<Scalar,3,1> Vector3s;
typedef Matrix<Scalar,2,1> Vector2s;
typedef Matrix<Scalar,2,2> Matrix2s;
typedef JacobiRotation<Scalar> JRs;
void hessenbergTriangular();
void computeNorms();
Index findSmallSubdiagEntry(Index iu);
Index findSmallDiagEntry(Index f, Index l);
void splitOffTwoRows(Index i);
void pushDownZero(Index z, Index f, Index l);
void step(Index f, Index l, Index iter);
}; // RealQZ
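/* Editor's sketch (not one of Eigen's shipped examples): a minimal check of the QZ
 * factorization computed by the class above, A = Q*S*Z and B = Q*T*Z. Random 4x4
 * double matrices are used purely as test data.
 * \code
 * #include <Eigen/Eigenvalues>
 * #include <iostream>
 *
 * int main()
 * {
 *   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
 *   Eigen::MatrixXd B = Eigen::MatrixXd::Random(4,4);
 *   Eigen::RealQZ<Eigen::MatrixXd> qz(A, B);          // computes S, T, Q, Z
 *   if (qz.info() != Eigen::Success)
 *     return 1;
 *   std::cout << "||A - Q*S*Z|| = " << (A - qz.matrixQ()*qz.matrixS()*qz.matrixZ()).norm() << "\n";
 *   std::cout << "||B - Q*T*Z|| = " << (B - qz.matrixQ()*qz.matrixT()*qz.matrixZ()).norm() << "\n";
 * }
 * \endcode
 */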
/** \internal Reduces S and T to upper Hessenberg - triangular form */
template<typename MatrixType>
void RealQZ<MatrixType>::hessenbergTriangular()
{
const Index dim = m_S.cols();
// perform QR decomposition of T, overwrite T with R, save Q
HouseholderQR<MatrixType> qrT(m_T);
m_T = qrT.matrixQR();
m_T.template triangularView<StrictlyLower>().setZero();
m_Q = qrT.householderQ();
// overwrite S with Q* S
m_S.applyOnTheLeft(m_Q.adjoint());
// init Z as Identity
if (m_computeQZ)
m_Z = MatrixType::Identity(dim,dim);
// reduce S to upper Hessenberg with Givens rotations
for (Index j=0; j<=dim-3; j++) {
for (Index i=dim-1; i>=j+2; i--) {
JRs G;
// kill S(i,j)
if(m_S.coeff(i,j) != 0)
{
G.makeGivens(m_S.coeff(i-1,j), m_S.coeff(i,j), &m_S.coeffRef(i-1, j));
m_S.coeffRef(i,j) = Scalar(0.0);
m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint());
m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint());
// update Q
if (m_computeQZ)
m_Q.applyOnTheRight(i-1,i,G);
}
// kill T(i,i-1)
if(m_T.coeff(i,i-1)!=Scalar(0))
{
G.makeGivens(m_T.coeff(i,i), m_T.coeff(i,i-1), &m_T.coeffRef(i,i));
m_T.coeffRef(i,i-1) = Scalar(0.0);
m_S.applyOnTheRight(i,i-1,G);
m_T.topRows(i).applyOnTheRight(i,i-1,G);
// update Z
if (m_computeQZ)
m_Z.applyOnTheLeft(i,i-1,G.adjoint());
}
}
}
}
/** \internal Computes vector L1 norms of S and T when in Hessenberg-Triangular form already */
template<typename MatrixType>
inline void RealQZ<MatrixType>::computeNorms()
{
const Index size = m_S.cols();
m_normOfS = Scalar(0.0);
m_normOfT = Scalar(0.0);
for (Index j = 0; j < size; ++j)
{
m_normOfS += m_S.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum();
m_normOfT += m_T.row(j).segment(j, size - j).cwiseAbs().sum();
}
}
/** \internal Look for single small sub-diagonal element S(res, res-1) and return res (or 0) */
template<typename MatrixType>
inline Index RealQZ<MatrixType>::findSmallSubdiagEntry(Index iu)
{
using std::abs;
Index res = iu;
while (res > 0)
{
Scalar s = abs(m_S.coeff(res-1,res-1)) + abs(m_S.coeff(res,res));
if (s == Scalar(0.0))
s = m_normOfS;
if (abs(m_S.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
break;
res--;
}
return res;
}
/** \internal Look for single small diagonal element T(res, res) for res between f and l, and return res (or f-1) */
template<typename MatrixType>
inline Index RealQZ<MatrixType>::findSmallDiagEntry(Index f, Index l)
{
using std::abs;
Index res = l;
while (res >= f) {
if (abs(m_T.coeff(res,res)) <= NumTraits<Scalar>::epsilon() * m_normOfT)
break;
res--;
}
return res;
}
/** \internal decouple 2x2 diagonal block in rows i, i+1 if eigenvalues are real */
template<typename MatrixType>
inline void RealQZ<MatrixType>::splitOffTwoRows(Index i)
{
using std::abs;
using std::sqrt;
const Index dim=m_S.cols();
if (abs(m_S.coeff(i+1,i))==Scalar(0))
return;
Index j = findSmallDiagEntry(i,i+1);
if (j==i-1)
{
// block of (S T^{-1})
Matrix2s STi = m_T.template block<2,2>(i,i).template triangularView<Upper>().
template solve<OnTheRight>(m_S.template block<2,2>(i,i));
Scalar p = Scalar(0.5)*(STi(0,0)-STi(1,1));
Scalar q = p*p + STi(1,0)*STi(0,1);
if (q>=0) {
Scalar z = sqrt(q);
// one QR-like iteration for ABi - lambda I
// is enough - when we know exact eigenvalue in advance,
// convergence is immediate
JRs G;
if (p>=0)
G.makeGivens(p + z, STi(1,0));
else
G.makeGivens(p - z, STi(1,0));
m_S.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint());
m_T.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint());
// update Q
if (m_computeQZ)
m_Q.applyOnTheRight(i,i+1,G);
G.makeGivens(m_T.coeff(i+1,i+1), m_T.coeff(i+1,i));
m_S.topRows(i+2).applyOnTheRight(i+1,i,G);
m_T.topRows(i+2).applyOnTheRight(i+1,i,G);
// update Z
if (m_computeQZ)
m_Z.applyOnTheLeft(i+1,i,G.adjoint());
m_S.coeffRef(i+1,i) = Scalar(0.0);
m_T.coeffRef(i+1,i) = Scalar(0.0);
}
}
else
{
pushDownZero(j,i,i+1);
}
}
/** \internal use zero in T(z,z) to zero S(l,l-1), working in block f..l */
template<typename MatrixType>
inline void RealQZ<MatrixType>::pushDownZero(Index z, Index f, Index l)
{
JRs G;
const Index dim = m_S.cols();
for (Index zz=z; zz<l; zz++)
{
// push 0 down
Index firstColS = zz>f ? (zz-1) : zz;
G.makeGivens(m_T.coeff(zz, zz+1), m_T.coeff(zz+1, zz+1));
m_S.rightCols(dim-firstColS).applyOnTheLeft(zz,zz+1,G.adjoint());
m_T.rightCols(dim-zz).applyOnTheLeft(zz,zz+1,G.adjoint());
m_T.coeffRef(zz+1,zz+1) = Scalar(0.0);
// update Q
if (m_computeQZ)
m_Q.applyOnTheRight(zz,zz+1,G);
// kill S(zz+1, zz-1)
if (zz>f)
{
G.makeGivens(m_S.coeff(zz+1, zz), m_S.coeff(zz+1,zz-1));
m_S.topRows(zz+2).applyOnTheRight(zz, zz-1,G);
m_T.topRows(zz+1).applyOnTheRight(zz, zz-1,G);
m_S.coeffRef(zz+1,zz-1) = Scalar(0.0);
// update Z
if (m_computeQZ)
m_Z.applyOnTheLeft(zz,zz-1,G.adjoint());
}
}
// finally kill S(l,l-1)
G.makeGivens(m_S.coeff(l,l), m_S.coeff(l,l-1));
m_S.applyOnTheRight(l,l-1,G);
m_T.applyOnTheRight(l,l-1,G);
m_S.coeffRef(l,l-1)=Scalar(0.0);
// update Z
if (m_computeQZ)
m_Z.applyOnTheLeft(l,l-1,G.adjoint());
}
/** \internal QR-like iterative step for block f..l */
template<typename MatrixType>
inline void RealQZ<MatrixType>::step(Index f, Index l, Index iter)
{
using std::abs;
const Index dim = m_S.cols();
// x, y, z
Scalar x, y, z;
if (iter==10)
{
// Wilkinson ad hoc shift
const Scalar
a11=m_S.coeff(f+0,f+0), a12=m_S.coeff(f+0,f+1),
a21=m_S.coeff(f+1,f+0), a22=m_S.coeff(f+1,f+1), a32=m_S.coeff(f+2,f+1),
b12=m_T.coeff(f+0,f+1),
b11i=Scalar(1.0)/m_T.coeff(f+0,f+0),
b22i=Scalar(1.0)/m_T.coeff(f+1,f+1),
a87=m_S.coeff(l-1,l-2),
a98=m_S.coeff(l-0,l-1),
b77i=Scalar(1.0)/m_T.coeff(l-2,l-2),
b88i=Scalar(1.0)/m_T.coeff(l-1,l-1);
Scalar ss = abs(a87*b77i) + abs(a98*b88i),
lpl = Scalar(1.5)*ss,
ll = ss*ss;
x = ll + a11*a11*b11i*b11i - lpl*a11*b11i + a12*a21*b11i*b22i
- a11*a21*b12*b11i*b11i*b22i;
y = a11*a21*b11i*b11i - lpl*a21*b11i + a21*a22*b11i*b22i
- a21*a21*b12*b11i*b11i*b22i;
z = a21*a32*b11i*b22i;
}
else if (iter==16)
{
// another exceptional shift
x = m_S.coeff(f,f)/m_T.coeff(f,f)-m_S.coeff(l,l)/m_T.coeff(l,l) + m_S.coeff(l,l-1)*m_T.coeff(l-1,l) /
(m_T.coeff(l-1,l-1)*m_T.coeff(l,l));
y = m_S.coeff(f+1,f)/m_T.coeff(f,f);
z = 0;
}
else if (iter>23 && !(iter%8))
{
// extremely exceptional shift
x = internal::random<Scalar>(-1.0,1.0);
y = internal::random<Scalar>(-1.0,1.0);
z = internal::random<Scalar>(-1.0,1.0);
}
else
{
// Compute the shifts: (x,y,z,0...) = (AB^-1 - l1 I) (AB^-1 - l2 I) e1
// where l1 and l2 are the eigenvalues of the 2x2 matrix C = U V^-1 where
// U and V are 2x2 bottom right sub matrices of A and B. Thus:
// = AB^-1AB^-1 + l1 l2 I - (l1+l2)(AB^-1)
// = AB^-1AB^-1 + det(M) - tr(M)(AB^-1)
// Since we are only interested in having x, y, z with a correct ratio, we have:
const Scalar
a11 = m_S.coeff(f,f), a12 = m_S.coeff(f,f+1),
a21 = m_S.coeff(f+1,f), a22 = m_S.coeff(f+1,f+1),
a32 = m_S.coeff(f+2,f+1),
a88 = m_S.coeff(l-1,l-1), a89 = m_S.coeff(l-1,l),
a98 = m_S.coeff(l,l-1), a99 = m_S.coeff(l,l),
b11 = m_T.coeff(f,f), b12 = m_T.coeff(f,f+1),
b22 = m_T.coeff(f+1,f+1),
b88 = m_T.coeff(l-1,l-1), b89 = m_T.coeff(l-1,l),
b99 = m_T.coeff(l,l);
x = ( (a88/b88 - a11/b11)*(a99/b99 - a11/b11) - (a89/b99)*(a98/b88) + (a98/b88)*(b89/b99)*(a11/b11) ) * (b11/a21)
+ a12/b22 - (a11/b11)*(b12/b22);
y = (a22/b22-a11/b11) - (a21/b11)*(b12/b22) - (a88/b88-a11/b11) - (a99/b99-a11/b11) + (a98/b88)*(b89/b99);
z = a32/b22;
}
JRs G;
for (Index k=f; k<=l-2; k++)
{
// variables for Householder reflections
Vector2s essential2;
Scalar tau, beta;
Vector3s hr(x,y,z);
// Q_k to annihilate S(k+1,k-1) and S(k+2,k-1)
hr.makeHouseholderInPlace(tau, beta);
essential2 = hr.template bottomRows<2>();
Index fc=(std::max)(k-1,Index(0)); // first col to update
m_S.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data());
m_T.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data());
if (m_computeQZ)
m_Q.template middleCols<3>(k).applyHouseholderOnTheRight(essential2, tau, m_workspace.data());
if (k>f)
m_S.coeffRef(k+2,k-1) = m_S.coeffRef(k+1,k-1) = Scalar(0.0);
// Z_{k1} to annihilate T(k+2,k+1) and T(k+2,k)
hr << m_T.coeff(k+2,k+2),m_T.coeff(k+2,k),m_T.coeff(k+2,k+1);
hr.makeHouseholderInPlace(tau, beta);
essential2 = hr.template bottomRows<2>();
{
Index lr = (std::min)(k+4,dim); // last row to update
Map<Matrix<Scalar,Dynamic,1> > tmp(m_workspace.data(),lr);
// S
tmp = m_S.template middleCols<2>(k).topRows(lr) * essential2;
tmp += m_S.col(k+2).head(lr);
m_S.col(k+2).head(lr) -= tau*tmp;
m_S.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint();
// T
tmp = m_T.template middleCols<2>(k).topRows(lr) * essential2;
tmp += m_T.col(k+2).head(lr);
m_T.col(k+2).head(lr) -= tau*tmp;
m_T.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint();
}
if (m_computeQZ)
{
// Z
Map<Matrix<Scalar,1,Dynamic> > tmp(m_workspace.data(),dim);
tmp = essential2.adjoint()*(m_Z.template middleRows<2>(k));
tmp += m_Z.row(k+2);
m_Z.row(k+2) -= tau*tmp;
m_Z.template middleRows<2>(k) -= essential2 * (tau*tmp);
}
m_T.coeffRef(k+2,k) = m_T.coeffRef(k+2,k+1) = Scalar(0.0);
// Z_{k2} to annihilate T(k+1,k)
G.makeGivens(m_T.coeff(k+1,k+1), m_T.coeff(k+1,k));
m_S.applyOnTheRight(k+1,k,G);
m_T.applyOnTheRight(k+1,k,G);
// update Z
if (m_computeQZ)
m_Z.applyOnTheLeft(k+1,k,G.adjoint());
m_T.coeffRef(k+1,k) = Scalar(0.0);
// update x,y,z
x = m_S.coeff(k+1,k);
y = m_S.coeff(k+2,k);
if (k < l-2)
z = m_S.coeff(k+3,k);
} // loop over k
// Q_{n-1} to annihilate y = S(l,l-2)
G.makeGivens(x,y);
m_S.applyOnTheLeft(l-1,l,G.adjoint());
m_T.applyOnTheLeft(l-1,l,G.adjoint());
if (m_computeQZ)
m_Q.applyOnTheRight(l-1,l,G);
m_S.coeffRef(l,l-2) = Scalar(0.0);
// Z_{n-1} to annihilate T(l,l-1)
G.makeGivens(m_T.coeff(l,l),m_T.coeff(l,l-1));
m_S.applyOnTheRight(l,l-1,G);
m_T.applyOnTheRight(l,l-1,G);
if (m_computeQZ)
m_Z.applyOnTheLeft(l,l-1,G.adjoint());
m_T.coeffRef(l,l-1) = Scalar(0.0);
}
template<typename MatrixType>
RealQZ<MatrixType>& RealQZ<MatrixType>::compute(const MatrixType& A_in, const MatrixType& B_in, bool computeQZ)
{
const Index dim = A_in.cols();
eigen_assert (A_in.rows()==dim && A_in.cols()==dim
&& B_in.rows()==dim && B_in.cols()==dim
&& "Need square matrices of the same dimension");
m_isInitialized = true;
m_computeQZ = computeQZ;
m_S = A_in; m_T = B_in;
m_workspace.resize(dim*2);
m_global_iter = 0;
// entrance point: hessenberg triangular decomposition
hessenbergTriangular();
// compute L1 vector norms of T, S into m_normOfS, m_normOfT
computeNorms();
Index l = dim-1,
f,
local_iter = 0;
while (l>0 && local_iter<m_maxIters)
{
f = findSmallSubdiagEntry(l);
// now rows and columns f..l (including) decouple from the rest of the problem
if (f>0) m_S.coeffRef(f,f-1) = Scalar(0.0);
if (f == l) // One root found
{
l--;
local_iter = 0;
}
else if (f == l-1) // Two roots found
{
splitOffTwoRows(f);
l -= 2;
local_iter = 0;
}
else // No convergence yet
{
// if there's zero on diagonal of T, we can isolate an eigenvalue with Givens rotations
Index z = findSmallDiagEntry(f,l);
if (z>=f)
{
// zero found
pushDownZero(z,f,l);
}
else
{
          // We are now sure that S.block(f,f, l-f+1,l-f+1) is an unreduced upper-Hessenberg matrix
          // and T.block(f,f, l-f+1,l-f+1) is an invertible upper-triangular matrix, which allows us to
          // apply a QR-like iteration to rows and columns f..l.
step(f,l, local_iter);
local_iter++;
m_global_iter++;
}
}
}
// check if we converged before reaching iterations limit
m_info = (local_iter<m_maxIters) ? Success : NoConvergence;
// For each non triangular 2x2 diagonal block of S,
// reduce the respective 2x2 diagonal block of T to positive diagonal form using 2x2 SVD.
// This step is not mandatory for QZ, but it does help further extraction of eigenvalues/eigenvectors,
  // and is on par with Lapack/Matlab QZ.
if(m_info==Success)
{
for(Index i=0; i<dim-1; ++i)
{
if(m_S.coeff(i+1, i) != Scalar(0))
{
JacobiRotation<Scalar> j_left, j_right;
internal::real_2x2_jacobi_svd(m_T, i, i+1, &j_left, &j_right);
// Apply resulting Jacobi rotations
m_S.applyOnTheLeft(i,i+1,j_left);
m_S.applyOnTheRight(i,i+1,j_right);
m_T.applyOnTheLeft(i,i+1,j_left);
m_T.applyOnTheRight(i,i+1,j_right);
m_T(i+1,i) = m_T(i,i+1) = Scalar(0);
if(m_computeQZ) {
m_Q.applyOnTheRight(i,i+1,j_left.transpose());
m_Z.applyOnTheLeft(i,i+1,j_right.transpose());
}
i++;
}
}
}
return *this;
} // end compute
} // end namespace Eigen
#endif // EIGEN_REAL_QZ_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/Eigenvalues/ComplexSchur.h
|
.h
| 17,273
| 463
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Claire Maurice
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COMPLEX_SCHUR_H
#define EIGEN_COMPLEX_SCHUR_H
#include "./HessenbergDecomposition.h"
namespace Eigen {
namespace internal {
template<typename MatrixType, bool IsComplex> struct complex_schur_reduce_to_hessenberg;
}
/** \eigenvalues_module \ingroup Eigenvalues_Module
*
*
* \class ComplexSchur
*
* \brief Performs a complex Schur decomposition of a real or complex square matrix
*
* \tparam _MatrixType the type of the matrix of which we are
* computing the Schur decomposition; this is expected to be an
* instantiation of the Matrix class template.
*
* Given a real or complex square matrix A, this class computes the
* Schur decomposition: \f$ A = U T U^*\f$ where U is a unitary
* complex matrix, and T is a complex upper triangular matrix. The
* diagonal of the matrix T corresponds to the eigenvalues of the
* matrix A.
*
* Call the function compute() to compute the Schur decomposition of
* a given matrix. Alternatively, you can use the
* ComplexSchur(const MatrixType&, bool) constructor which computes
* the Schur decomposition at construction time. Once the
* decomposition is computed, you can use the matrixU() and matrixT()
  * functions to retrieve the matrices U and T in the decomposition.
*
* \note This code is inspired from Jampack
*
* \sa class RealSchur, class EigenSolver, class ComplexEigenSolver
*/
template<typename _MatrixType> class ComplexSchur
{
public:
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
Options = MatrixType::Options,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
/** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
/** \brief Complex scalar type for \p _MatrixType.
*
* This is \c std::complex<Scalar> if #Scalar is real (e.g.,
* \c float or \c double) and just \c Scalar if #Scalar is
* complex.
*/
typedef std::complex<RealScalar> ComplexScalar;
/** \brief Type for the matrices in the Schur decomposition.
*
* This is a square matrix with entries of type #ComplexScalar.
* The size is the same as the size of \p _MatrixType.
*/
typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> ComplexMatrixType;
/** \brief Default constructor.
*
* \param [in] size Positive integer, size of the matrix whose Schur decomposition will be computed.
*
* The default constructor is useful in cases in which the user
* intends to perform decompositions via compute(). The \p size
* parameter is only used as a hint. It is not an error to give a
* wrong \p size, but it may impair performance.
*
* \sa compute() for an example.
*/
explicit ComplexSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
: m_matT(size,size),
m_matU(size,size),
m_hess(size),
m_isInitialized(false),
m_matUisUptodate(false),
m_maxIters(-1)
{}
/** \brief Constructor; computes Schur decomposition of given matrix.
*
* \param[in] matrix Square matrix whose Schur decomposition is to be computed.
* \param[in] computeU If true, both T and U are computed; if false, only T is computed.
*
* This constructor calls compute() to compute the Schur decomposition.
*
* \sa matrixT() and matrixU() for examples.
*/
template<typename InputType>
explicit ComplexSchur(const EigenBase<InputType>& matrix, bool computeU = true)
: m_matT(matrix.rows(),matrix.cols()),
m_matU(matrix.rows(),matrix.cols()),
m_hess(matrix.rows()),
m_isInitialized(false),
m_matUisUptodate(false),
m_maxIters(-1)
{
compute(matrix.derived(), computeU);
}
/** \brief Returns the unitary matrix in the Schur decomposition.
*
* \returns A const reference to the matrix U.
*
* It is assumed that either the constructor
* ComplexSchur(const MatrixType& matrix, bool computeU) or the
* member function compute(const MatrixType& matrix, bool computeU)
* has been called before to compute the Schur decomposition of a
* matrix, and that \p computeU was set to true (the default
* value).
*
* Example: \include ComplexSchur_matrixU.cpp
* Output: \verbinclude ComplexSchur_matrixU.out
*/
const ComplexMatrixType& matrixU() const
{
eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the ComplexSchur decomposition.");
return m_matU;
}
/** \brief Returns the triangular matrix in the Schur decomposition.
*
* \returns A const reference to the matrix T.
*
* It is assumed that either the constructor
* ComplexSchur(const MatrixType& matrix, bool computeU) or the
* member function compute(const MatrixType& matrix, bool computeU)
* has been called before to compute the Schur decomposition of a
* matrix.
*
* Note that this function returns a plain square matrix. If you want to reference
* only the upper triangular part, use:
* \code schur.matrixT().triangularView<Upper>() \endcode
*
* Example: \include ComplexSchur_matrixT.cpp
* Output: \verbinclude ComplexSchur_matrixT.out
*/
const ComplexMatrixType& matrixT() const
{
eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
return m_matT;
}
/** \brief Computes Schur decomposition of given matrix.
*
* \param[in] matrix Square matrix whose Schur decomposition is to be computed.
* \param[in] computeU If true, both T and U are computed; if false, only T is computed.
* \returns Reference to \c *this
*
* The Schur decomposition is computed by first reducing the
* matrix to Hessenberg form using the class
* HessenbergDecomposition. The Hessenberg matrix is then reduced
* to triangular form by performing QR iterations with a single
* shift. The cost of computing the Schur decomposition depends
* on the number of iterations; as a rough guide, it may be taken
* to be \f$25n^3\f$ complex flops, or \f$10n^3\f$ complex flops
* if \a computeU is false.
*
* Example: \include ComplexSchur_compute.cpp
* Output: \verbinclude ComplexSchur_compute.out
*
* \sa compute(const MatrixType&, bool, Index)
*/
template<typename InputType>
ComplexSchur& compute(const EigenBase<InputType>& matrix, bool computeU = true);
/** \brief Compute Schur decomposition from a given Hessenberg matrix
   * \param[in] matrixH Matrix in Hessenberg form H
   * \param[in] matrixQ Unitary matrix Q that transforms a matrix A to H : A = Q H Q^*
   * \param computeU Computes the matrix U of the Schur vectors
   * \return Reference to \c *this
   *
   * This routine assumes that the matrix is already reduced to the Hessenberg form matrixH
   * using either the class HessenbergDecomposition or some other means.
   * It computes the upper triangular matrix T of the Schur decomposition of H.
   * When computeU is true, this routine computes the matrix U such that
   * A = U T U^* = (QZ) T (QZ)^* = Q H Q^*, where A is the initial matrix.
   *
   * NOTE: Q is referenced if computeU is true; so, if the initial unitary matrix
   * is not available, the user should pass an identity matrix (Q.setIdentity()).
*
* \sa compute(const MatrixType&, bool)
*/
template<typename HessMatrixType, typename OrthMatrixType>
ComplexSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU=true);
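    // Illustrative sketch (added; not part of the upstream Eigen header): reusing an
    // existing Hessenberg reduction, assuming a dense complex matrix A:
    //
    //   Eigen::MatrixXcf A = Eigen::MatrixXcf::Random(4,4);
    //   Eigen::HessenbergDecomposition<Eigen::MatrixXcf> hess(A);
    //   Eigen::ComplexSchur<Eigen::MatrixXcf> schur(A.rows());
    //   schur.computeFromHessenberg(hess.matrixH(), hess.matrixQ(), true);
    //   // schur.matrixU() * schur.matrixT() * schur.matrixU().adjoint() ~= A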
/** \brief Reports whether previous computation was successful.
*
   * \returns \c Success if computation was successful, \c NoConvergence otherwise.
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
return m_info;
}
/** \brief Sets the maximum number of iterations allowed.
*
* If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size
* of the matrix.
*/
ComplexSchur& setMaxIterations(Index maxIters)
{
m_maxIters = maxIters;
return *this;
}
/** \brief Returns the maximum number of iterations. */
Index getMaxIterations()
{
return m_maxIters;
}
/** \brief Maximum number of iterations per row.
*
* If not otherwise specified, the maximum number of iterations is this number times the size of the
* matrix. It is currently set to 30.
*/
static const int m_maxIterationsPerRow = 30;
protected:
ComplexMatrixType m_matT, m_matU;
HessenbergDecomposition<MatrixType> m_hess;
ComputationInfo m_info;
bool m_isInitialized;
bool m_matUisUptodate;
Index m_maxIters;
private:
bool subdiagonalEntryIsNeglegible(Index i);
ComplexScalar computeShift(Index iu, Index iter);
void reduceToTriangularForm(bool computeU);
friend struct internal::complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>;
};
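// Usage sketch (added for illustration; not part of the upstream Eigen header),
// assuming a small dense complex matrix:
//
//   Eigen::MatrixXcf A = Eigen::MatrixXcf::Random(4,4);
//   Eigen::ComplexSchur<Eigen::MatrixXcf> schur(A);   // computes T and, by default, U
//   if (schur.info() == Eigen::Success)
//   {
//     Eigen::MatrixXcf T = schur.matrixT();           // upper triangular
//     Eigen::MatrixXcf U = schur.matrixU();           // unitary
//     // U * T * U.adjoint() reproduces A up to rounding error
//   }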
/** If m_matT(i+1,i) is negligible in floating point arithmetic
* compared to m_matT(i,i) and m_matT(j,j), then set it to zero and
* return true, else return false. */
template<typename MatrixType>
inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(Index i)
{
RealScalar d = numext::norm1(m_matT.coeff(i,i)) + numext::norm1(m_matT.coeff(i+1,i+1));
RealScalar sd = numext::norm1(m_matT.coeff(i+1,i));
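  // Added note: for these nonnegative real values, isMuchSmallerThan(sd, d, eps)
  // amounts to sd <= eps * d, i.e. the subdiagonal entry is dropped when it is
  // negligible relative to its two neighbouring diagonal entries.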
if (internal::isMuchSmallerThan(sd, d, NumTraits<RealScalar>::epsilon()))
{
m_matT.coeffRef(i+1,i) = ComplexScalar(0);
return true;
}
return false;
}
/** Compute the shift in the current QR iteration. */
template<typename MatrixType>
typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::computeShift(Index iu, Index iter)
{
using std::abs;
if (iter == 10 || iter == 20)
{
// exceptional shift, taken from http://www.netlib.org/eispack/comqr.f
return abs(numext::real(m_matT.coeff(iu,iu-1))) + abs(numext::real(m_matT.coeff(iu-1,iu-2)));
}
// compute the shift as one of the eigenvalues of t, the 2x2
// diagonal block on the bottom of the active submatrix
Matrix<ComplexScalar,2,2> t = m_matT.template block<2,2>(iu-1,iu-1);
RealScalar normt = t.cwiseAbs().sum();
  t /= normt;     // the normalization by normt is to avoid under/overflow
ComplexScalar b = t.coeff(0,1) * t.coeff(1,0);
ComplexScalar c = t.coeff(0,0) - t.coeff(1,1);
ComplexScalar disc = sqrt(c*c + RealScalar(4)*b);
ComplexScalar det = t.coeff(0,0) * t.coeff(1,1) - b;
ComplexScalar trace = t.coeff(0,0) + t.coeff(1,1);
ComplexScalar eival1 = (trace + disc) / RealScalar(2);
ComplexScalar eival2 = (trace - disc) / RealScalar(2);
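  // Added note: eival1 and eival2 are the roots of lambda^2 - trace*lambda + det
  // for the scaled 2x2 block, because c*c + 4*b = (t00 - t11)^2 + 4*t01*t10
  // = trace^2 - 4*det, so disc plays the role of sqrt(trace^2 - 4*det) in the
  // quadratic formula.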
RealScalar eival1_norm = numext::norm1(eival1);
RealScalar eival2_norm = numext::norm1(eival2);
// A division by zero can only occur if eival1==eival2==0.
// In this case, det==0, and all we have to do is checking that eival2_norm!=0
if(eival1_norm > eival2_norm)
eival2 = det / eival1;
else if(eival2_norm!=RealScalar(0))
eival1 = det / eival2;
// choose the eigenvalue closest to the bottom entry of the diagonal
if(numext::norm1(eival1-t.coeff(1,1)) < numext::norm1(eival2-t.coeff(1,1)))
return normt * eival1;
else
return normt * eival2;
}
template<typename MatrixType>
template<typename InputType>
ComplexSchur<MatrixType>& ComplexSchur<MatrixType>::compute(const EigenBase<InputType>& matrix, bool computeU)
{
m_matUisUptodate = false;
eigen_assert(matrix.cols() == matrix.rows());
if(matrix.cols() == 1)
{
m_matT = matrix.derived().template cast<ComplexScalar>();
if(computeU) m_matU = ComplexMatrixType::Identity(1,1);
m_info = Success;
m_isInitialized = true;
m_matUisUptodate = computeU;
return *this;
}
internal::complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>::run(*this, matrix.derived(), computeU);
computeFromHessenberg(m_matT, m_matU, computeU);
return *this;
}
template<typename MatrixType>
template<typename HessMatrixType, typename OrthMatrixType>
ComplexSchur<MatrixType>& ComplexSchur<MatrixType>::computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU)
{
m_matT = matrixH;
if(computeU)
m_matU = matrixQ;
reduceToTriangularForm(computeU);
return *this;
}
namespace internal {
/* Reduce given matrix to Hessenberg form */
template<typename MatrixType, bool IsComplex>
struct complex_schur_reduce_to_hessenberg
{
// this is the implementation for the case IsComplex = true
static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)
{
_this.m_hess.compute(matrix);
_this.m_matT = _this.m_hess.matrixH();
if(computeU) _this.m_matU = _this.m_hess.matrixQ();
}
};
template<typename MatrixType>
struct complex_schur_reduce_to_hessenberg<MatrixType, false>
{
static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)
{
typedef typename ComplexSchur<MatrixType>::ComplexScalar ComplexScalar;
// Note: m_hess is over RealScalar; m_matT and m_matU is over ComplexScalar
_this.m_hess.compute(matrix);
_this.m_matT = _this.m_hess.matrixH().template cast<ComplexScalar>();
if(computeU)
{
// This may cause an allocation which seems to be avoidable
MatrixType Q = _this.m_hess.matrixQ();
_this.m_matU = Q.template cast<ComplexScalar>();
}
}
};
} // end namespace internal
// Reduce the Hessenberg matrix m_matT to triangular form by QR iteration.
template<typename MatrixType>
void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
{
Index maxIters = m_maxIters;
if (maxIters == -1)
maxIters = m_maxIterationsPerRow * m_matT.rows();
// The matrix m_matT is divided in three parts.
// Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
// Rows il,...,iu is the part we are working on (the active submatrix).
// Rows iu+1,...,end are already brought in triangular form.
Index iu = m_matT.cols() - 1;
Index il;
Index iter = 0; // number of iterations we are working on the (iu,iu) element
Index totalIter = 0; // number of iterations for whole matrix
while(true)
{
// find iu, the bottom row of the active submatrix
while(iu > 0)
{
if(!subdiagonalEntryIsNeglegible(iu-1)) break;
iter = 0;
--iu;
}
// if iu is zero then we are done; the whole matrix is triangularized
if(iu==0) break;
// if we spent too many iterations, we give up
iter++;
totalIter++;
if(totalIter > maxIters) break;
// find il, the top row of the active submatrix
il = iu-1;
while(il > 0 && !subdiagonalEntryIsNeglegible(il-1))
{
--il;
}
/* perform the QR step using Givens rotations. The first rotation
creates a bulge; the (il+2,il) element becomes nonzero. This
bulge is chased down to the bottom of the active submatrix. */
ComplexScalar shift = computeShift(iu, iter);
JacobiRotation<ComplexScalar> rot;
rot.makeGivens(m_matT.coeff(il,il) - shift, m_matT.coeff(il+1,il));
m_matT.rightCols(m_matT.cols()-il).applyOnTheLeft(il, il+1, rot.adjoint());
m_matT.topRows((std::min)(il+2,iu)+1).applyOnTheRight(il, il+1, rot);
if(computeU) m_matU.applyOnTheRight(il, il+1, rot);
for(Index i=il+1 ; i<iu ; i++)
{
rot.makeGivens(m_matT.coeffRef(i,i-1), m_matT.coeffRef(i+1,i-1), &m_matT.coeffRef(i,i-1));
m_matT.coeffRef(i+1,i-1) = ComplexScalar(0);
m_matT.rightCols(m_matT.cols()-i).applyOnTheLeft(i, i+1, rot.adjoint());
m_matT.topRows((std::min)(i+2,iu)+1).applyOnTheRight(i, i+1, rot);
if(computeU) m_matU.applyOnTheRight(i, i+1, rot);
}
}
if(totalIter <= maxIters)
m_info = Success;
else
m_info = NoConvergence;
m_isInitialized = true;
m_matUisUptodate = computeU;
}
} // end namespace Eigen
#endif // EIGEN_COMPLEX_SCHUR_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/PaStiXSupport/PaStiXSupport.h
|
.h
| 22,248
| 679
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PASTIXSUPPORT_H
#define EIGEN_PASTIXSUPPORT_H
namespace Eigen {
#if defined(DCOMPLEX)
#define PASTIX_COMPLEX COMPLEX
#define PASTIX_DCOMPLEX DCOMPLEX
#else
#define PASTIX_COMPLEX std::complex<float>
#define PASTIX_DCOMPLEX std::complex<double>
#endif
/** \ingroup PaStiXSupport_Module
* \brief Interface to the PaStix solver
*
* This class is used to solve the linear systems A.X = B via the PaStix library.
* The matrix can be either real or complex, symmetric or not.
*
* \sa TutorialSparseDirectSolvers
*/
template<typename _MatrixType, bool IsStrSym = false> class PastixLU;
template<typename _MatrixType, int Options> class PastixLLT;
template<typename _MatrixType, int Options> class PastixLDLT;
namespace internal
{
template<class Pastix> struct pastix_traits;
template<typename _MatrixType>
struct pastix_traits< PastixLU<_MatrixType> >
{
typedef _MatrixType MatrixType;
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef typename _MatrixType::StorageIndex StorageIndex;
};
template<typename _MatrixType, int Options>
struct pastix_traits< PastixLLT<_MatrixType,Options> >
{
typedef _MatrixType MatrixType;
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef typename _MatrixType::StorageIndex StorageIndex;
};
template<typename _MatrixType, int Options>
struct pastix_traits< PastixLDLT<_MatrixType,Options> >
{
typedef _MatrixType MatrixType;
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
typedef typename _MatrixType::StorageIndex StorageIndex;
};
inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int * invp, float *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
s_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm);
}
inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int * invp, double *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
d_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm);
}
inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<float> *vals, int *perm, int * invp, std::complex<float> *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<PASTIX_COMPLEX*>(vals), perm, invp, reinterpret_cast<PASTIX_COMPLEX*>(x), nbrhs, iparm, dparm);
}
inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<double> *vals, int *perm, int * invp, std::complex<double> *x, int nbrhs, int *iparm, double *dparm)
{
if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
if (nbrhs == 0) {x = NULL; nbrhs=1;}
z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<PASTIX_DCOMPLEX*>(vals), perm, invp, reinterpret_cast<PASTIX_DCOMPLEX*>(x), nbrhs, iparm, dparm);
}
// Convert the matrix to Fortran-style Numbering
template <typename MatrixType>
void c_to_fortran_numbering (MatrixType& mat)
{
if ( !(mat.outerIndexPtr()[0]) )
{
int i;
for(i = 0; i <= mat.rows(); ++i)
++mat.outerIndexPtr()[i];
for(i = 0; i < mat.nonZeros(); ++i)
++mat.innerIndexPtr()[i];
}
}
// Convert to C-style Numbering
template <typename MatrixType>
void fortran_to_c_numbering (MatrixType& mat)
{
// Check the Numbering
if ( mat.outerIndexPtr()[0] == 1 )
{ // Convert to C-style numbering
int i;
for(i = 0; i <= mat.rows(); ++i)
--mat.outerIndexPtr()[i];
for(i = 0; i < mat.nonZeros(); ++i)
--mat.innerIndexPtr()[i];
}
}
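// Illustrative example (added): for a 3x3 sparse identity stored in compressed
// column format, the C-style arrays  outer = {0,1,2,3}, inner = {0,1,2}  become
// outer = {1,2,3,4}, inner = {1,2,3} after c_to_fortran_numbering(), and
// fortran_to_c_numbering() undoes the shift.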
}
// This is the base class to interface with PaStiX functions.
// Users should not use this class directly.
template <class Derived>
class PastixBase : public SparseSolverBase<Derived>
{
protected:
typedef SparseSolverBase<Derived> Base;
using Base::derived;
using Base::m_isInitialized;
public:
using Base::_solve_impl;
typedef typename internal::pastix_traits<Derived>::MatrixType _MatrixType;
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef SparseMatrix<Scalar, ColMajor> ColSpMatrix;
enum {
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
public:
PastixBase() : m_initisOk(false), m_analysisIsOk(false), m_factorizationIsOk(false), m_pastixdata(0), m_size(0)
{
init();
}
~PastixBase()
{
clean();
}
template<typename Rhs,typename Dest>
bool _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &x) const;
/** Returns a reference to the integer vector IPARM of PaStiX parameters
* to modify the default parameters.
* The statistics related to the different phases of factorization and solve are saved here as well
* \sa analyzePattern() factorize()
*/
Array<StorageIndex,IPARM_SIZE,1>& iparm()
{
return m_iparm;
}
/** Return a reference to a particular index parameter of the IPARM vector
* \sa iparm()
*/
int& iparm(int idxparam)
{
return m_iparm(idxparam);
}
/** Returns a reference to the double vector DPARM of PaStiX parameters
* The statistics related to the different phases of factorization and solve are saved here as well
* \sa analyzePattern() factorize()
*/
Array<double,DPARM_SIZE,1>& dparm()
{
return m_dparm;
}
/** Return a reference to a particular index parameter of the DPARM vector
* \sa dparm()
*/
double& dparm(int idxparam)
{
return m_dparm(idxparam);
}
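    // Illustrative sketch (added; not part of the upstream header): the parameter
    // arrays can be adjusted between construction and compute(), e.g.
    //
    //   Eigen::PastixLU<Eigen::SparseMatrix<double> > solver;
    //   solver.iparm(IPARM_VERBOSE) = API_VERBOSE_NOT;  // keep PaStiX silent (the default)
    //   solver.compute(A);                              // A: square SparseMatrix<double>
    //   // afterwards, iparm()/dparm() also expose the statistics reported by PaStiX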
inline Index cols() const { return m_size; }
inline Index rows() const { return m_size; }
/** \brief Reports whether previous computation was successful.
*
      * \returns \c Success if computation was successful,
      *          \c NumericalIssue if PaStiX reports a problem,
* \c InvalidInput if the input matrix is invalid
*
* \sa iparm()
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return m_info;
}
protected:
// Initialize the Pastix data structure, check the matrix
void init();
// Compute the ordering and the symbolic factorization
void analyzePattern(ColSpMatrix& mat);
// Compute the numerical factorization
void factorize(ColSpMatrix& mat);
// Free all the data allocated by Pastix
void clean()
{
eigen_assert(m_initisOk && "The Pastix structure should be allocated first");
m_iparm(IPARM_START_TASK) = API_TASK_CLEAN;
m_iparm(IPARM_END_TASK) = API_TASK_CLEAN;
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0,
m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());
}
void compute(ColSpMatrix& mat);
int m_initisOk;
int m_analysisIsOk;
int m_factorizationIsOk;
mutable ComputationInfo m_info;
mutable pastix_data_t *m_pastixdata; // Data structure for pastix
mutable int m_comm; // The MPI communicator identifier
mutable Array<int,IPARM_SIZE,1> m_iparm; // integer vector for the input parameters
mutable Array<double,DPARM_SIZE,1> m_dparm; // Scalar vector for the input parameters
mutable Matrix<StorageIndex,Dynamic,1> m_perm; // Permutation vector
mutable Matrix<StorageIndex,Dynamic,1> m_invp; // Inverse permutation vector
mutable int m_size; // Size of the matrix
};
/** Initialize the PaStiX data structure.
 * A first call to this function fills iparm and dparm with the default PaStiX parameters.
* \sa iparm() dparm()
*/
template <class Derived>
void PastixBase<Derived>::init()
{
m_size = 0;
m_iparm.setZero(IPARM_SIZE);
m_dparm.setZero(DPARM_SIZE);
m_iparm(IPARM_MODIFY_PARAMETER) = API_NO;
pastix(&m_pastixdata, MPI_COMM_WORLD,
0, 0, 0, 0,
0, 0, 0, 1, m_iparm.data(), m_dparm.data());
m_iparm[IPARM_MATRIX_VERIFICATION] = API_NO;
m_iparm[IPARM_VERBOSE] = API_VERBOSE_NOT;
m_iparm[IPARM_ORDERING] = API_ORDER_SCOTCH;
m_iparm[IPARM_INCOMPLETE] = API_NO;
m_iparm[IPARM_OOC_LIMIT] = 2000;
m_iparm[IPARM_RHS_MAKING] = API_RHS_B;
m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO;
m_iparm(IPARM_START_TASK) = API_TASK_INIT;
m_iparm(IPARM_END_TASK) = API_TASK_INIT;
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0,
0, 0, 0, 0, m_iparm.data(), m_dparm.data());
// Check the returned error
if(m_iparm(IPARM_ERROR_NUMBER)) {
m_info = InvalidInput;
m_initisOk = false;
}
else {
m_info = Success;
m_initisOk = true;
}
}
template <class Derived>
void PastixBase<Derived>::compute(ColSpMatrix& mat)
{
  eigen_assert(mat.rows() == mat.cols() && "The input matrix should be square");
analyzePattern(mat);
factorize(mat);
m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO;
}
template <class Derived>
void PastixBase<Derived>::analyzePattern(ColSpMatrix& mat)
{
eigen_assert(m_initisOk && "The initialization of PaSTiX failed");
// clean previous calls
if(m_size>0)
clean();
m_size = internal::convert_index<int>(mat.rows());
m_perm.resize(m_size);
m_invp.resize(m_size);
m_iparm(IPARM_START_TASK) = API_TASK_ORDERING;
m_iparm(IPARM_END_TASK) = API_TASK_ANALYSE;
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(),
mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());
// Check the returned error
if(m_iparm(IPARM_ERROR_NUMBER))
{
m_info = NumericalIssue;
m_analysisIsOk = false;
}
else
{
m_info = Success;
m_analysisIsOk = true;
}
}
template <class Derived>
void PastixBase<Derived>::factorize(ColSpMatrix& mat)
{
// if(&m_cpyMat != &mat) m_cpyMat = mat;
eigen_assert(m_analysisIsOk && "The analysis phase should be called before the factorization phase");
m_iparm(IPARM_START_TASK) = API_TASK_NUMFACT;
m_iparm(IPARM_END_TASK) = API_TASK_NUMFACT;
m_size = internal::convert_index<int>(mat.rows());
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(),
mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());
// Check the returned error
if(m_iparm(IPARM_ERROR_NUMBER))
{
m_info = NumericalIssue;
m_factorizationIsOk = false;
m_isInitialized = false;
}
else
{
m_info = Success;
m_factorizationIsOk = true;
m_isInitialized = true;
}
}
/* Solve the system */
template<typename Base>
template<typename Rhs,typename Dest>
bool PastixBase<Base>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &x) const
{
eigen_assert(m_isInitialized && "The matrix should be factorized first");
EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
int rhs = 1;
x = b; /* on return, x is overwritten by the computed solution */
for (int i = 0; i < b.cols(); i++){
m_iparm[IPARM_START_TASK] = API_TASK_SOLVE;
m_iparm[IPARM_END_TASK] = API_TASK_REFINE;
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, internal::convert_index<int>(x.rows()), 0, 0, 0,
m_perm.data(), m_invp.data(), &x(0, i), rhs, m_iparm.data(), m_dparm.data());
}
// Check the returned error
m_info = m_iparm(IPARM_ERROR_NUMBER)==0 ? Success : NumericalIssue;
return m_iparm(IPARM_ERROR_NUMBER)==0;
}
/** \ingroup PaStiXSupport_Module
* \class PastixLU
* \brief Sparse direct LU solver based on PaStiX library
*
* This class is used to solve the linear systems A.X = B with a supernodal LU
 * factorization in the PaStiX library. The matrix A should be square and nonsingular.
 * PaStiX requires that the matrix A has a symmetric structural pattern;
 * this interface can symmetrize the input matrix if it does not.
* The vectors or matrices X and B can be either dense or sparse.
*
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam IsStrSym Indicates if the input matrix has a symmetric pattern, default is false
 * NOTE: If the analysis and factorization phases are called separately, the input
 * matrix will be symmetrized at each call; hence it is advised to symmetrize the
 * matrix in the end-user program and set \p IsStrSym to true.
*
* \implsparsesolverconcept
*
* \sa \ref TutorialSparseSolverConcept, class SparseLU
*
*/
template<typename _MatrixType, bool IsStrSym>
class PastixLU : public PastixBase< PastixLU<_MatrixType> >
{
public:
typedef _MatrixType MatrixType;
typedef PastixBase<PastixLU<MatrixType> > Base;
typedef typename Base::ColSpMatrix ColSpMatrix;
typedef typename MatrixType::StorageIndex StorageIndex;
public:
PastixLU() : Base()
{
init();
}
explicit PastixLU(const MatrixType& matrix):Base()
{
init();
compute(matrix);
}
/** Compute the LU supernodal factorization of \p matrix.
* iparm and dparm can be used to tune the PaStiX parameters.
* see the PaStiX user's manual
* \sa analyzePattern() factorize()
*/
void compute (const MatrixType& matrix)
{
m_structureIsUptodate = false;
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::compute(temp);
}
/** Compute the LU symbolic factorization of \p matrix using its sparsity pattern.
* Several ordering methods can be used at this step. See the PaStiX user's manual.
* The result of this operation can be used with successive matrices having the same pattern as \p matrix
* \sa factorize()
*/
void analyzePattern(const MatrixType& matrix)
{
m_structureIsUptodate = false;
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::analyzePattern(temp);
}
/** Compute the LU supernodal factorization of \p matrix
     * WARNING The matrix \p matrix should have the same structural pattern
     * as the one used in the analysis phase.
* \sa analyzePattern()
*/
void factorize(const MatrixType& matrix)
{
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::factorize(temp);
}
protected:
void init()
{
m_structureIsUptodate = false;
m_iparm(IPARM_SYM) = API_SYM_NO;
m_iparm(IPARM_FACTORIZATION) = API_FACT_LU;
}
void grabMatrix(const MatrixType& matrix, ColSpMatrix& out)
{
if(IsStrSym)
out = matrix;
else
{
if(!m_structureIsUptodate)
{
// update the transposed structure
m_transposedStructure = matrix.transpose();
// Set the elements of the matrix to zero
for (Index j=0; j<m_transposedStructure.outerSize(); ++j)
for(typename ColSpMatrix::InnerIterator it(m_transposedStructure, j); it; ++it)
it.valueRef() = 0.0;
m_structureIsUptodate = true;
}
out = m_transposedStructure + matrix;
}
internal::c_to_fortran_numbering(out);
}
using Base::m_iparm;
using Base::m_dparm;
ColSpMatrix m_transposedStructure;
bool m_structureIsUptodate;
};
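// Usage sketch (added for illustration; assumes Eigen was built with PaStiX support,
// A is a square SparseMatrix<double> and b a VectorXd with b.rows() == A.rows()):
//
//   Eigen::PastixLU<Eigen::SparseMatrix<double> > lu;
//   lu.compute(A);                      // ordering + symbolic + numerical factorization
//   if (lu.info() == Eigen::Success)
//   {
//     Eigen::VectorXd x = lu.solve(b);  // solves A x = b
//   }
//   // for a sequence of matrices sharing the pattern of A:
//   //   lu.analyzePattern(A); lu.factorize(A1); x1 = lu.solve(b1); lu.factorize(A2); ...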
/** \ingroup PaStiXSupport_Module
* \class PastixLLT
* \brief A sparse direct supernodal Cholesky (LLT) factorization and solver based on the PaStiX library
*
* This class is used to solve the linear systems A.X = B via a LL^T supernodal Cholesky factorization
* available in the PaStiX library. The matrix A should be symmetric and positive definite
* WARNING Selfadjoint complex matrices are not supported in the current version of PaStiX
* The vectors or matrices X and B can be either dense or sparse
*
* \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam UpLo The part of the matrix to use : Lower or Upper. The default is Lower as required by PaStiX
*
* \implsparsesolverconcept
*
* \sa \ref TutorialSparseSolverConcept, class SimplicialLLT
*/
template<typename _MatrixType, int _UpLo>
class PastixLLT : public PastixBase< PastixLLT<_MatrixType, _UpLo> >
{
public:
typedef _MatrixType MatrixType;
typedef PastixBase<PastixLLT<MatrixType, _UpLo> > Base;
typedef typename Base::ColSpMatrix ColSpMatrix;
public:
enum { UpLo = _UpLo };
PastixLLT() : Base()
{
init();
}
explicit PastixLLT(const MatrixType& matrix):Base()
{
init();
compute(matrix);
}
/** Compute the L factor of the LL^T supernodal factorization of \p matrix
* \sa analyzePattern() factorize()
*/
void compute (const MatrixType& matrix)
{
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::compute(temp);
}
/** Compute the LL^T symbolic factorization of \p matrix using its sparsity pattern
* The result of this operation can be used with successive matrices having the same pattern as \p matrix
* \sa factorize()
*/
void analyzePattern(const MatrixType& matrix)
{
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::analyzePattern(temp);
}
/** Compute the LL^T supernodal numerical factorization of \p matrix
* \sa analyzePattern()
*/
void factorize(const MatrixType& matrix)
{
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::factorize(temp);
}
protected:
using Base::m_iparm;
void init()
{
m_iparm(IPARM_SYM) = API_SYM_YES;
m_iparm(IPARM_FACTORIZATION) = API_FACT_LLT;
}
void grabMatrix(const MatrixType& matrix, ColSpMatrix& out)
{
out.resize(matrix.rows(), matrix.cols());
// Pastix supports only lower, column-major matrices
out.template selfadjointView<Lower>() = matrix.template selfadjointView<UpLo>();
internal::c_to_fortran_numbering(out);
}
};
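// Usage sketch (added for illustration): for a symmetric positive definite
// SparseMatrix<double> A whose lower triangular part is stored,
//
//   Eigen::PastixLLT<Eigen::SparseMatrix<double>, Eigen::Lower> llt(A);
//   Eigen::VectorXd x = llt.solve(b);   // b: VectorXd with b.rows() == A.rows()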
/** \ingroup PaStiXSupport_Module
* \class PastixLDLT
  * \brief A sparse direct supernodal Cholesky (LDLT) factorization and solver based on the PaStiX library
*
* This class is used to solve the linear systems A.X = B via a LDL^T supernodal Cholesky factorization
* available in the PaStiX library. The matrix A should be symmetric and positive definite
* WARNING Selfadjoint complex matrices are not supported in the current version of PaStiX
* The vectors or matrices X and B can be either dense or sparse
*
* \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam UpLo The part of the matrix to use : Lower or Upper. The default is Lower as required by PaStiX
*
* \implsparsesolverconcept
*
* \sa \ref TutorialSparseSolverConcept, class SimplicialLDLT
*/
template<typename _MatrixType, int _UpLo>
class PastixLDLT : public PastixBase< PastixLDLT<_MatrixType, _UpLo> >
{
public:
typedef _MatrixType MatrixType;
typedef PastixBase<PastixLDLT<MatrixType, _UpLo> > Base;
typedef typename Base::ColSpMatrix ColSpMatrix;
public:
enum { UpLo = _UpLo };
PastixLDLT():Base()
{
init();
}
explicit PastixLDLT(const MatrixType& matrix):Base()
{
init();
compute(matrix);
}
/** Compute the L and D factors of the LDL^T factorization of \p matrix
* \sa analyzePattern() factorize()
*/
void compute (const MatrixType& matrix)
{
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::compute(temp);
}
/** Compute the LDL^T symbolic factorization of \p matrix using its sparsity pattern
* The result of this operation can be used with successive matrices having the same pattern as \p matrix
* \sa factorize()
*/
void analyzePattern(const MatrixType& matrix)
{
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::analyzePattern(temp);
}
/** Compute the LDL^T supernodal numerical factorization of \p matrix
*
*/
void factorize(const MatrixType& matrix)
{
ColSpMatrix temp;
grabMatrix(matrix, temp);
Base::factorize(temp);
}
protected:
using Base::m_iparm;
void init()
{
m_iparm(IPARM_SYM) = API_SYM_YES;
m_iparm(IPARM_FACTORIZATION) = API_FACT_LDLT;
}
void grabMatrix(const MatrixType& matrix, ColSpMatrix& out)
{
// Pastix supports only lower, column-major matrices
out.resize(matrix.rows(), matrix.cols());
out.template selfadjointView<Lower>() = matrix.template selfadjointView<UpLo>();
internal::c_to_fortran_numbering(out);
}
};
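// Usage sketch (added for illustration): the LDL^T variant follows the same pattern;
// splitting the phases lets the symbolic analysis be reused across factorizations.
//
//   Eigen::PastixLDLT<Eigen::SparseMatrix<double>, Eigen::Lower> ldlt;
//   ldlt.analyzePattern(A);             // A: symmetric sparse matrix
//   ldlt.factorize(A);                  // numerical factorization, pattern reused
//   Eigen::VectorXd x = ldlt.solve(b);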
} // end namespace Eigen
#endif
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/LU/FullPivLU.h
|
.h
| 32,803
| 892
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_LU_H
#define EIGEN_LU_H
namespace Eigen {
namespace internal {
template<typename _MatrixType> struct traits<FullPivLU<_MatrixType> >
: traits<_MatrixType>
{
typedef MatrixXpr XprKind;
typedef SolverStorage StorageKind;
enum { Flags = 0 };
};
} // end namespace internal
/** \ingroup LU_Module
*
* \class FullPivLU
*
* \brief LU decomposition of a matrix with complete pivoting, and related features
*
* \tparam _MatrixType the type of the matrix of which we are computing the LU decomposition
*
* This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A is
* decomposed as \f$ A = P^{-1} L U Q^{-1} \f$ where L is unit-lower-triangular, U is
* upper-triangular, and P and Q are permutation matrices. This is a rank-revealing LU
* decomposition. The eigenvalues (diagonal coefficients) of U are sorted in such a way that any
* zeros are at the end.
*
* This decomposition provides the generic approach to solving systems of linear equations, computing
* the rank, invertibility, inverse, kernel, and determinant.
*
* This LU decomposition is very stable and well tested with large matrices. However there are use cases where the SVD
* decomposition is inherently more stable and/or flexible. For example, when computing the kernel of a matrix,
 * working with the SVD allows one to select the smallest singular values of the matrix, something that
* the LU decomposition doesn't see.
*
* The data of the LU decomposition can be directly accessed through the methods matrixLU(),
* permutationP(), permutationQ().
*
 * As an example, here is how the original matrix can be retrieved:
* \include class_FullPivLU.cpp
* Output: \verbinclude class_FullPivLU.out
*
* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
*
* \sa MatrixBase::fullPivLu(), MatrixBase::determinant(), MatrixBase::inverse()
*/
template<typename _MatrixType> class FullPivLU
: public SolverBase<FullPivLU<_MatrixType> >
{
public:
typedef _MatrixType MatrixType;
typedef SolverBase<FullPivLU> Base;
EIGEN_GENERIC_PUBLIC_INTERFACE(FullPivLU)
// FIXME StorageIndex defined in EIGEN_GENERIC_PUBLIC_INTERFACE should be int
enum {
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
typedef typename internal::plain_row_type<MatrixType, StorageIndex>::type IntRowVectorType;
typedef typename internal::plain_col_type<MatrixType, StorageIndex>::type IntColVectorType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationQType;
typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationPType;
typedef typename MatrixType::PlainObject PlainObject;
/**
* \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via LU::compute(const MatrixType&).
*/
FullPivLU();
/** \brief Default Constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem \a size.
* \sa FullPivLU()
*/
FullPivLU(Index rows, Index cols);
/** Constructor.
*
* \param matrix the matrix of which to compute the LU decomposition.
* It is required to be nonzero.
*/
template<typename InputType>
explicit FullPivLU(const EigenBase<InputType>& matrix);
/** \brief Constructs a LU factorization from a given matrix
*
* This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is a Eigen::Ref.
*
* \sa FullPivLU(const EigenBase&)
*/
template<typename InputType>
explicit FullPivLU(EigenBase<InputType>& matrix);
/** Computes the LU decomposition of the given matrix.
*
* \param matrix the matrix of which to compute the LU decomposition.
* It is required to be nonzero.
*
* \returns a reference to *this
*/
template<typename InputType>
FullPivLU& compute(const EigenBase<InputType>& matrix) {
m_lu = matrix.derived();
computeInPlace();
return *this;
}
/** \returns the LU decomposition matrix: the upper-triangular part is U, the
* unit-lower-triangular part is L (at least for square matrices; in the non-square
* case, special care is needed, see the documentation of class FullPivLU).
*
* \sa matrixL(), matrixU()
*/
inline const MatrixType& matrixLU() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return m_lu;
}
/** \returns the number of nonzero pivots in the LU decomposition.
* Here nonzero is meant in the exact sense, not in a fuzzy sense.
* So that notion isn't really intrinsically interesting, but it is
* still useful when implementing algorithms.
*
* \sa rank()
*/
inline Index nonzeroPivots() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return m_nonzero_pivots;
}
/** \returns the absolute value of the biggest pivot, i.e. the biggest
* diagonal coefficient of U.
*/
RealScalar maxPivot() const { return m_maxpivot; }
/** \returns the permutation matrix P
*
* \sa permutationQ()
*/
EIGEN_DEVICE_FUNC inline const PermutationPType& permutationP() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return m_p;
}
/** \returns the permutation matrix Q
*
* \sa permutationP()
*/
inline const PermutationQType& permutationQ() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return m_q;
}
/** \returns the kernel of the matrix, also called its null-space. The columns of the returned matrix
* will form a basis of the kernel.
*
* \note If the kernel has dimension zero, then the returned matrix is a column-vector filled with zeros.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*
* Example: \include FullPivLU_kernel.cpp
* Output: \verbinclude FullPivLU_kernel.out
*
* \sa image()
*/
inline const internal::kernel_retval<FullPivLU> kernel() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return internal::kernel_retval<FullPivLU>(*this);
}
/** \returns the image of the matrix, also called its column-space. The columns of the returned matrix
* will form a basis of the image (column-space).
*
* \param originalMatrix the original matrix, of which *this is the LU decomposition.
* The reason why it is needed to pass it here, is that this allows
* a large optimization, as otherwise this method would need to reconstruct it
* from the LU decomposition.
*
* \note If the image has dimension zero, then the returned matrix is a column-vector filled with zeros.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*
* Example: \include FullPivLU_image.cpp
* Output: \verbinclude FullPivLU_image.out
*
* \sa kernel()
*/
inline const internal::image_retval<FullPivLU>
image(const MatrixType& originalMatrix) const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return internal::image_retval<FullPivLU>(*this, originalMatrix);
}
/** \return a solution x to the equation Ax=b, where A is the matrix of which
* *this is the LU decomposition.
*
* \param b the right-hand-side of the equation to solve. Can be a vector or a matrix,
* the only requirement in order for the equation to make sense is that
* b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition.
*
* \returns a solution.
*
* \note_about_checking_solutions
*
* \note_about_arbitrary_choice_of_solution
* \note_about_using_kernel_to_study_multiple_solutions
*
* Example: \include FullPivLU_solve.cpp
* Output: \verbinclude FullPivLU_solve.out
*
* \sa TriangularView::solve(), kernel(), inverse()
*/
// FIXME this is a copy-paste of the base-class member to add the isInitialized assertion.
template<typename Rhs>
inline const Solve<FullPivLU, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return Solve<FullPivLU, Rhs>(*this, b.derived());
}
/** \returns an estimate of the reciprocal condition number of the matrix of which \c *this is
the LU decomposition.
*/
inline RealScalar rcond() const
{
eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
return internal::rcond_estimate_helper(m_l1_norm, *this);
}
/** \returns the determinant of the matrix of which
* *this is the LU decomposition. It has only linear complexity
* (that is, O(n) where n is the dimension of the square matrix)
* as the LU decomposition has already been computed.
*
* \note This is only for square matrices.
*
* \note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers
* optimized paths.
*
* \warning a determinant can be very big or small, so for matrices
* of large enough dimension, there is a risk of overflow/underflow.
*
* \sa MatrixBase::determinant()
*/
typename internal::traits<MatrixType>::Scalar determinant() const;
    /** Allows one to prescribe a threshold to be used by certain methods, such as rank(),
      * which need to determine when pivots are to be considered nonzero. This is not used for the
* LU decomposition itself.
*
* When it needs to get the threshold value, Eigen calls threshold(). By default, this
* uses a formula to automatically determine a reasonable threshold.
* Once you have called the present method setThreshold(const RealScalar&),
* your value is used instead.
*
* \param threshold The new value to use as the threshold.
*
      * A pivot will be considered nonzero if its absolute value is strictly greater than
      *  \f$ threshold \times \vert maxpivot \vert \f$
* where maxpivot is the biggest pivot.
*
* If you want to come back to the default behavior, call setThreshold(Default_t)
*/
FullPivLU& setThreshold(const RealScalar& threshold)
{
m_usePrescribedThreshold = true;
m_prescribedThreshold = threshold;
return *this;
}
    /** Allows one to come back to the default behavior, letting Eigen use its default formula for
* determining the threshold.
*
* You should pass the special object Eigen::Default as parameter here.
* \code lu.setThreshold(Eigen::Default); \endcode
*
* See the documentation of setThreshold(const RealScalar&).
*/
FullPivLU& setThreshold(Default_t)
{
m_usePrescribedThreshold = false;
return *this;
}
/** Returns the threshold that will be used by certain methods such as rank().
*
* See the documentation of setThreshold(const RealScalar&).
*/
RealScalar threshold() const
{
eigen_assert(m_isInitialized || m_usePrescribedThreshold);
return m_usePrescribedThreshold ? m_prescribedThreshold
// this formula comes from experimenting (see "LU precision tuning" thread on the list)
// and turns out to be identical to Higham's formula used already in LDLt.
: NumTraits<Scalar>::epsilon() * m_lu.diagonalSize();
}
/** \returns the rank of the matrix of which *this is the LU decomposition.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline Index rank() const
{
using std::abs;
eigen_assert(m_isInitialized && "LU is not initialized.");
RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold();
Index result = 0;
for(Index i = 0; i < m_nonzero_pivots; ++i)
result += (abs(m_lu.coeff(i,i)) > premultiplied_threshold);
return result;
}
/** \returns the dimension of the kernel of the matrix of which *this is the LU decomposition.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline Index dimensionOfKernel() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return cols() - rank();
}
/** \returns true if the matrix of which *this is the LU decomposition represents an injective
* linear map, i.e. has trivial kernel; false otherwise.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline bool isInjective() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return rank() == cols();
}
/** \returns true if the matrix of which *this is the LU decomposition represents a surjective
* linear map; false otherwise.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline bool isSurjective() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return rank() == rows();
}
/** \returns true if the matrix of which *this is the LU decomposition is invertible.
*
* \note This method has to determine which pivots should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline bool isInvertible() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
return isInjective() && (m_lu.rows() == m_lu.cols());
}
/** \returns the inverse of the matrix of which *this is the LU decomposition.
*
* \note If this matrix is not invertible, the returned matrix has undefined coefficients.
* Use isInvertible() to first determine whether this matrix is invertible.
*
* \sa MatrixBase::inverse()
*/
inline const Inverse<FullPivLU> inverse() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the inverse of a non-square matrix!");
return Inverse<FullPivLU>(*this);
}
MatrixType reconstructedMatrix() const;
EIGEN_DEVICE_FUNC inline Index rows() const { return m_lu.rows(); }
EIGEN_DEVICE_FUNC inline Index cols() const { return m_lu.cols(); }
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
void _solve_impl(const RhsType &rhs, DstType &dst) const;
template<bool Conjugate, typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
void computeInPlace();
MatrixType m_lu;
PermutationPType m_p;
PermutationQType m_q;
IntColVectorType m_rowsTranspositions;
IntRowVectorType m_colsTranspositions;
Index m_nonzero_pivots;
RealScalar m_l1_norm;
RealScalar m_maxpivot, m_prescribedThreshold;
signed char m_det_pq;
bool m_isInitialized, m_usePrescribedThreshold;
};
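// Usage sketch (added for illustration): typical rank-revealing queries on a dense
// matrix, assuming MatrixXd:
//
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,5);
//   Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
//   Eigen::Index r    = lu.rank();              // numerical rank (threshold-based)
//   Eigen::MatrixXd K = lu.kernel();            // columns form a basis of the null space
//   if (lu.isInvertible())
//   {
//     Eigen::VectorXd x    = lu.solve(Eigen::VectorXd::Ones(5));  // solves A x = b
//     Eigen::MatrixXd Ainv = lu.inverse();
//   }
//   lu.setThreshold(1e-10);                     // custom pivot threshold used by rank()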
template<typename MatrixType>
FullPivLU<MatrixType>::FullPivLU()
: m_isInitialized(false), m_usePrescribedThreshold(false)
{
}
template<typename MatrixType>
FullPivLU<MatrixType>::FullPivLU(Index rows, Index cols)
: m_lu(rows, cols),
m_p(rows),
m_q(cols),
m_rowsTranspositions(rows),
m_colsTranspositions(cols),
m_isInitialized(false),
m_usePrescribedThreshold(false)
{
}
template<typename MatrixType>
template<typename InputType>
FullPivLU<MatrixType>::FullPivLU(const EigenBase<InputType>& matrix)
: m_lu(matrix.rows(), matrix.cols()),
m_p(matrix.rows()),
m_q(matrix.cols()),
m_rowsTranspositions(matrix.rows()),
m_colsTranspositions(matrix.cols()),
m_isInitialized(false),
m_usePrescribedThreshold(false)
{
compute(matrix.derived());
}
template<typename MatrixType>
template<typename InputType>
FullPivLU<MatrixType>::FullPivLU(EigenBase<InputType>& matrix)
: m_lu(matrix.derived()),
m_p(matrix.rows()),
m_q(matrix.cols()),
m_rowsTranspositions(matrix.rows()),
m_colsTranspositions(matrix.cols()),
m_isInitialized(false),
m_usePrescribedThreshold(false)
{
computeInPlace();
}
template<typename MatrixType>
void FullPivLU<MatrixType>::computeInPlace()
{
check_template_parameters();
// the permutations are stored as int indices, so just to be sure:
eigen_assert(m_lu.rows()<=NumTraits<int>::highest() && m_lu.cols()<=NumTraits<int>::highest());
m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff();
const Index size = m_lu.diagonalSize();
const Index rows = m_lu.rows();
const Index cols = m_lu.cols();
// will store the transpositions, before we accumulate them at the end.
// can't accumulate on-the-fly because that will be done in reverse order for the rows.
m_rowsTranspositions.resize(m_lu.rows());
m_colsTranspositions.resize(m_lu.cols());
Index number_of_transpositions = 0; // number of NONTRIVIAL transpositions, i.e. m_rowsTranspositions[i]!=i
m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
m_maxpivot = RealScalar(0);
for(Index k = 0; k < size; ++k)
{
// First, we need to find the pivot.
// biggest coefficient in the remaining bottom-right corner (starting at row k, col k)
Index row_of_biggest_in_corner, col_of_biggest_in_corner;
typedef internal::scalar_score_coeff_op<Scalar> Scoring;
typedef typename Scoring::result_type Score;
Score biggest_in_corner;
biggest_in_corner = m_lu.bottomRightCorner(rows-k, cols-k)
.unaryExpr(Scoring())
.maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner);
row_of_biggest_in_corner += k; // correct the values! since they were computed in the corner,
col_of_biggest_in_corner += k; // need to add k to them.
if(biggest_in_corner==Score(0))
{
// before exiting, make sure to initialize the still uninitialized transpositions
// in a sane state without destroying what we already have.
m_nonzero_pivots = k;
for(Index i = k; i < size; ++i)
{
m_rowsTranspositions.coeffRef(i) = i;
m_colsTranspositions.coeffRef(i) = i;
}
break;
}
RealScalar abs_pivot = internal::abs_knowing_score<Scalar>()(m_lu(row_of_biggest_in_corner, col_of_biggest_in_corner), biggest_in_corner);
if(abs_pivot > m_maxpivot) m_maxpivot = abs_pivot;
// Now that we've found the pivot, we need to apply the row/col swaps to
// bring it to the location (k,k).
m_rowsTranspositions.coeffRef(k) = row_of_biggest_in_corner;
m_colsTranspositions.coeffRef(k) = col_of_biggest_in_corner;
if(k != row_of_biggest_in_corner) {
m_lu.row(k).swap(m_lu.row(row_of_biggest_in_corner));
++number_of_transpositions;
}
if(k != col_of_biggest_in_corner) {
m_lu.col(k).swap(m_lu.col(col_of_biggest_in_corner));
++number_of_transpositions;
}
// Now that the pivot is at the right location, we update the remaining
// bottom-right corner by Gaussian elimination.
if(k<rows-1)
m_lu.col(k).tail(rows-k-1) /= m_lu.coeff(k,k);
if(k<size-1)
m_lu.block(k+1,k+1,rows-k-1,cols-k-1).noalias() -= m_lu.col(k).tail(rows-k-1) * m_lu.row(k).tail(cols-k-1);
}
// the main loop is over, we still have to accumulate the transpositions to find the
// permutations P and Q
m_p.setIdentity(rows);
for(Index k = size-1; k >= 0; --k)
m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k));
m_q.setIdentity(cols);
for(Index k = 0; k < size; ++k)
m_q.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k));
m_det_pq = (number_of_transpositions%2) ? -1 : 1;
m_isInitialized = true;
}
template<typename MatrixType>
typename internal::traits<MatrixType>::Scalar FullPivLU<MatrixType>::determinant() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the determinant of a non-square matrix!");
return Scalar(m_det_pq) * Scalar(m_lu.diagonal().prod());
}
/** \returns the matrix represented by the decomposition,
* i.e., it returns the product: \f$ P^{-1} L U Q^{-1} \f$.
* This function is provided for debug purposes. */
template<typename MatrixType>
MatrixType FullPivLU<MatrixType>::reconstructedMatrix() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
const Index smalldim = (std::min)(m_lu.rows(), m_lu.cols());
// LU
MatrixType res(m_lu.rows(),m_lu.cols());
// FIXME the .toDenseMatrix() should not be needed...
res = m_lu.leftCols(smalldim)
.template triangularView<UnitLower>().toDenseMatrix()
* m_lu.topRows(smalldim)
.template triangularView<Upper>().toDenseMatrix();
// P^{-1}(LU)
res = m_p.inverse() * res;
// (P^{-1}LU)Q^{-1}
res = res * m_q.inverse();
return res;
}
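// Illustrative check (added): reconstructedMatrix() can be used to validate a
// decomposition numerically, assuming A is the MatrixXd that was decomposed:
//
//   Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
//   double err = (lu.reconstructedMatrix() - A).norm();  // expected to be tiny relative to A.norm()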
/********* Implementation of kernel() **************************************************/
namespace internal {
template<typename _MatrixType>
struct kernel_retval<FullPivLU<_MatrixType> >
: kernel_retval_base<FullPivLU<_MatrixType> >
{
EIGEN_MAKE_KERNEL_HELPERS(FullPivLU<_MatrixType>)
enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(
MatrixType::MaxColsAtCompileTime,
MatrixType::MaxRowsAtCompileTime)
};
template<typename Dest> void evalTo(Dest& dst) const
{
using std::abs;
const Index cols = dec().matrixLU().cols(), dimker = cols - rank();
if(dimker == 0)
{
// The Kernel is just {0}, so it doesn't have a basis properly speaking, but let's
// avoid crashing/asserting as that depends on floating point calculations. Let's
// just return a single column vector filled with zeros.
dst.setZero();
return;
}
/* Let us use the following lemma:
*
* Lemma: If the matrix A has the LU decomposition PAQ = LU,
* then Ker A = Q(Ker U).
*
* Proof: trivial: just keep in mind that P, Q, L are invertible.
*/
/* Thus, all we need to do is to compute Ker U, and then apply Q.
*
* U is upper triangular, with eigenvalues sorted so that any zeros appear at the end.
* Thus, the diagonal of U ends with exactly
* dimKer zero's. Let us use that to construct dimKer linearly
* independent vectors in Ker U.
*/
Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
Index p = 0;
for(Index i = 0; i < dec().nonzeroPivots(); ++i)
if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
pivots.coeffRef(p++) = i;
eigen_internal_assert(p == rank());
    // we construct a temporary trapezoid matrix m, by taking the U matrix and
// permuting the rows and cols to bring the nonnegligible pivots to the top of
// the main diagonal. We need that to be able to apply our triangular solvers.
// FIXME when we get triangularView-for-rectangular-matrices, this can be simplified
Matrix<typename MatrixType::Scalar, Dynamic, Dynamic, MatrixType::Options,
MaxSmallDimAtCompileTime, MatrixType::MaxColsAtCompileTime>
m(dec().matrixLU().block(0, 0, rank(), cols));
for(Index i = 0; i < rank(); ++i)
{
if(i) m.row(i).head(i).setZero();
m.row(i).tail(cols-i) = dec().matrixLU().row(pivots.coeff(i)).tail(cols-i);
}
m.block(0, 0, rank(), rank());
m.block(0, 0, rank(), rank()).template triangularView<StrictlyLower>().setZero();
for(Index i = 0; i < rank(); ++i)
m.col(i).swap(m.col(pivots.coeff(i)));
// ok, we have our trapezoid matrix, we can apply the triangular solver.
// notice that the math behind this suggests that we should apply this to the
// negative of the RHS, but for performance we just put the negative sign elsewhere, see below.
m.topLeftCorner(rank(), rank())
.template triangularView<Upper>().solveInPlace(
m.topRightCorner(rank(), dimker)
);
// now we must undo the column permutation that we had applied!
for(Index i = rank()-1; i >= 0; --i)
m.col(i).swap(m.col(pivots.coeff(i)));
// see the negative sign in the next line, that's what we were talking about above.
for(Index i = 0; i < rank(); ++i) dst.row(dec().permutationQ().indices().coeff(i)) = -m.row(i).tail(dimker);
for(Index i = rank(); i < cols; ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero();
for(Index k = 0; k < dimker; ++k) dst.coeffRef(dec().permutationQ().indices().coeff(rank()+k), k) = Scalar(1);
}
};
/***** Implementation of image() *****************************************************/
template<typename _MatrixType>
struct image_retval<FullPivLU<_MatrixType> >
: image_retval_base<FullPivLU<_MatrixType> >
{
EIGEN_MAKE_IMAGE_HELPERS(FullPivLU<_MatrixType>)
enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(
MatrixType::MaxColsAtCompileTime,
MatrixType::MaxRowsAtCompileTime)
};
template<typename Dest> void evalTo(Dest& dst) const
{
using std::abs;
if(rank() == 0)
{
// The Image is just {0}, so it doesn't have a basis properly speaking, but let's
// avoid crashing/asserting as that depends on floating point calculations. Let's
// just return a single column vector filled with zeros.
dst.setZero();
return;
}
Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
Index p = 0;
for(Index i = 0; i < dec().nonzeroPivots(); ++i)
if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
pivots.coeffRef(p++) = i;
eigen_internal_assert(p == rank());
for(Index i = 0; i < rank(); ++i)
dst.col(i) = originalMatrix().col(dec().permutationQ().indices().coeff(pivots.coeff(i)));
}
};
/***** Implementation of solve() *****************************************************/
} // end namespace internal
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename _MatrixType>
template<typename RhsType, typename DstType>
void FullPivLU<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
/* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1}.
* So we proceed as follows:
* Step 1: compute c = P * rhs.
* Step 2: replace c by the solution x to Lx = c. Exists because L is invertible.
* Step 3: replace c by the solution x to Ux = c. May or may not exist.
* Step 4: result = Q * c;
*/
const Index rows = this->rows(),
cols = this->cols(),
nonzero_pivots = this->rank();
eigen_assert(rhs.rows() == rows);
const Index smalldim = (std::min)(rows, cols);
if(nonzero_pivots == 0)
{
dst.setZero();
return;
}
typename RhsType::PlainObject c(rhs.rows(), rhs.cols());
// Step 1
c = permutationP() * rhs;
// Step 2
m_lu.topLeftCorner(smalldim,smalldim)
.template triangularView<UnitLower>()
.solveInPlace(c.topRows(smalldim));
if(rows>cols)
c.bottomRows(rows-cols) -= m_lu.bottomRows(rows-cols) * c.topRows(cols);
// Step 3
m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots)
.template triangularView<Upper>()
.solveInPlace(c.topRows(nonzero_pivots));
// Step 4
for(Index i = 0; i < nonzero_pivots; ++i)
dst.row(permutationQ().indices().coeff(i)) = c.row(i);
for(Index i = nonzero_pivots; i < m_lu.cols(); ++i)
dst.row(permutationQ().indices().coeff(i)).setZero();
}
template<typename _MatrixType>
template<bool Conjugate, typename RhsType, typename DstType>
void FullPivLU<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
{
/* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1},
* and since permutations are real and unitary, we can write this
* as A^T = Q U^T L^T P,
* So we proceed as follows:
* Step 1: compute c = Q^T rhs.
* Step 2: replace c by the solution x to U^T x = c. May or may not exist.
* Step 3: replace c by the solution x to L^T x = c.
* Step 4: result = P^T c.
* If Conjugate is true, replace "^T" by "^*" above.
*/
const Index rows = this->rows(), cols = this->cols(),
nonzero_pivots = this->rank();
eigen_assert(rhs.rows() == cols);
const Index smalldim = (std::min)(rows, cols);
if(nonzero_pivots == 0)
{
dst.setZero();
return;
}
typename RhsType::PlainObject c(rhs.rows(), rhs.cols());
// Step 1
c = permutationQ().inverse() * rhs;
if (Conjugate) {
// Step 2
m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots)
.template triangularView<Upper>()
.adjoint()
.solveInPlace(c.topRows(nonzero_pivots));
// Step 3
m_lu.topLeftCorner(smalldim, smalldim)
.template triangularView<UnitLower>()
.adjoint()
.solveInPlace(c.topRows(smalldim));
} else {
// Step 2
m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots)
.template triangularView<Upper>()
.transpose()
.solveInPlace(c.topRows(nonzero_pivots));
// Step 3
m_lu.topLeftCorner(smalldim, smalldim)
.template triangularView<UnitLower>()
.transpose()
.solveInPlace(c.topRows(smalldim));
}
// Step 4
PermutationPType invp = permutationP().inverse().eval();
for(Index i = 0; i < smalldim; ++i)
dst.row(invp.indices().coeff(i)) = c.row(i);
for(Index i = smalldim; i < rows; ++i)
dst.row(invp.indices().coeff(i)).setZero();
}
#endif
namespace internal {
/***** Implementation of inverse() *****************************************************/
template<typename DstXprType, typename MatrixType>
struct Assignment<DstXprType, Inverse<FullPivLU<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename FullPivLU<MatrixType>::Scalar>, Dense2Dense>
{
typedef FullPivLU<MatrixType> LuType;
typedef Inverse<LuType> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename MatrixType::Scalar> &)
{
dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
}
};
} // end namespace internal
/******* MatrixBase methods *****************************************************************/
/** \lu_module
*
* \return the full-pivoting LU decomposition of \c *this.
*
* \sa class FullPivLU
*/
template<typename Derived>
inline const FullPivLU<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::fullPivLu() const
{
return FullPivLU<PlainObject>(eval());
}
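// Illustrative sketch (not part of the Eigen sources): solving a square system through
// fullPivLu(); this goes through the _solve_impl() steps documented above.
//
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
//   Eigen::VectorXd b = Eigen::VectorXd::Random(5);
//   Eigen::FullPivLU<Eigen::MatrixXd> lu = A.fullPivLu();
//   Eigen::VectorXd x = lu.solve(b);
//   bool consistent = (A * x).isApprox(b);           // false when b is not in the image of A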
} // end namespace Eigen
#endif // EIGEN_LU_H
| Unknown |
| 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/LU/PartialPivLU_LAPACKE.h | .h | 3,555 | 84 |
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to LAPACKe
* LU decomposition with partial pivoting based on LAPACKE_?getrf function.
********************************************************************************
*/
#ifndef EIGEN_PARTIALLU_LAPACK_H
#define EIGEN_PARTIALLU_LAPACK_H
namespace Eigen {
namespace internal {
/** \internal Specialization for the data types supported by LAPACKe */
#define EIGEN_LAPACKE_LU_PARTPIV(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX) \
template<int StorageOrder> \
struct partial_lu_impl<EIGTYPE, StorageOrder, lapack_int> \
{ \
/* \internal performs the LU decomposition in-place of the matrix represented */ \
static lapack_int blocked_lu(Index rows, Index cols, EIGTYPE* lu_data, Index luStride, lapack_int* row_transpositions, lapack_int& nb_transpositions, lapack_int maxBlockSize=256) \
{ \
EIGEN_UNUSED_VARIABLE(maxBlockSize);\
lapack_int matrix_order, first_zero_pivot; \
lapack_int m, n, lda, *ipiv, info; \
EIGTYPE* a; \
/* Set up parameters for ?getrf */ \
matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
lda = convert_index<lapack_int>(luStride); \
a = lu_data; \
ipiv = row_transpositions; \
m = convert_index<lapack_int>(rows); \
n = convert_index<lapack_int>(cols); \
nb_transpositions = 0; \
\
info = LAPACKE_##LAPACKE_PREFIX##getrf( matrix_order, m, n, (LAPACKE_TYPE*)a, lda, ipiv ); \
\
for(int i=0;i<m;i++) { ipiv[i]--; if (ipiv[i]!=i) nb_transpositions++; } \
\
eigen_assert(info >= 0); \
/* something should be done with nb_transpositions */ \
\
first_zero_pivot = info; \
return first_zero_pivot; \
} \
};
EIGEN_LAPACKE_LU_PARTPIV(double, double, d)
EIGEN_LAPACKE_LU_PARTPIV(float, float, s)
EIGEN_LAPACKE_LU_PARTPIV(dcomplex, lapack_complex_double, z)
EIGEN_LAPACKE_LU_PARTPIV(scomplex, lapack_complex_float, c)
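// LAPACKE_?getrf returns 1-based pivot indices in ipiv, whereas Eigen stores 0-based row
// transpositions, hence the `ipiv[i]--` loop in the macro above. For example, if getrf
// returns ipiv = {3, 3, 3} for a 3x3 matrix, the loop rewrites it to {2, 2, 2} and sets
// nb_transpositions = 2, since only entries 0 and 1 differ from their own index.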
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARTIALLU_LAPACK_H
| Unknown |
| 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/LU/InverseImpl.h | .h | 15,064 | 416 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_INVERSE_IMPL_H
#define EIGEN_INVERSE_IMPL_H
namespace Eigen {
namespace internal {
/**********************************
*** General case implementation ***
**********************************/
template<typename MatrixType, typename ResultType, int Size = MatrixType::RowsAtCompileTime>
struct compute_inverse
{
EIGEN_DEVICE_FUNC
static inline void run(const MatrixType& matrix, ResultType& result)
{
result = matrix.partialPivLu().inverse();
}
};
template<typename MatrixType, typename ResultType, int Size = MatrixType::RowsAtCompileTime>
struct compute_inverse_and_det_with_check { /* nothing! general case not supported. */ };
/****************************
*** Size 1 implementation ***
****************************/
template<typename MatrixType, typename ResultType>
struct compute_inverse<MatrixType, ResultType, 1>
{
EIGEN_DEVICE_FUNC
static inline void run(const MatrixType& matrix, ResultType& result)
{
typedef typename MatrixType::Scalar Scalar;
internal::evaluator<MatrixType> matrixEval(matrix);
result.coeffRef(0,0) = Scalar(1) / matrixEval.coeff(0,0);
}
};
template<typename MatrixType, typename ResultType>
struct compute_inverse_and_det_with_check<MatrixType, ResultType, 1>
{
EIGEN_DEVICE_FUNC
static inline void run(
const MatrixType& matrix,
const typename MatrixType::RealScalar& absDeterminantThreshold,
ResultType& result,
typename ResultType::Scalar& determinant,
bool& invertible
)
{
using std::abs;
determinant = matrix.coeff(0,0);
invertible = abs(determinant) > absDeterminantThreshold;
if(invertible) result.coeffRef(0,0) = typename ResultType::Scalar(1) / determinant;
}
};
/****************************
*** Size 2 implementation ***
****************************/
template<typename MatrixType, typename ResultType>
EIGEN_DEVICE_FUNC
inline void compute_inverse_size2_helper(
const MatrixType& matrix, const typename ResultType::Scalar& invdet,
ResultType& result)
{
result.coeffRef(0,0) = matrix.coeff(1,1) * invdet;
result.coeffRef(1,0) = -matrix.coeff(1,0) * invdet;
result.coeffRef(0,1) = -matrix.coeff(0,1) * invdet;
result.coeffRef(1,1) = matrix.coeff(0,0) * invdet;
}
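// For reference, this is the closed-form 2x2 inverse: with M = [a b; c d] and
// invdet = 1/(a*d - b*c),
//   M^{-1} = invdet * [  d  -b ]
//                     [ -c   a ]
// which is exactly the coefficient pattern written into `result` above.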
template<typename MatrixType, typename ResultType>
struct compute_inverse<MatrixType, ResultType, 2>
{
EIGEN_DEVICE_FUNC
static inline void run(const MatrixType& matrix, ResultType& result)
{
typedef typename ResultType::Scalar Scalar;
const Scalar invdet = typename MatrixType::Scalar(1) / matrix.determinant();
compute_inverse_size2_helper(matrix, invdet, result);
}
};
template<typename MatrixType, typename ResultType>
struct compute_inverse_and_det_with_check<MatrixType, ResultType, 2>
{
EIGEN_DEVICE_FUNC
static inline void run(
const MatrixType& matrix,
const typename MatrixType::RealScalar& absDeterminantThreshold,
ResultType& inverse,
typename ResultType::Scalar& determinant,
bool& invertible
)
{
using std::abs;
typedef typename ResultType::Scalar Scalar;
determinant = matrix.determinant();
invertible = abs(determinant) > absDeterminantThreshold;
if(!invertible) return;
const Scalar invdet = Scalar(1) / determinant;
compute_inverse_size2_helper(matrix, invdet, inverse);
}
};
/****************************
*** Size 3 implementation ***
****************************/
template<typename MatrixType, int i, int j>
EIGEN_DEVICE_FUNC
inline typename MatrixType::Scalar cofactor_3x3(const MatrixType& m)
{
enum {
i1 = (i+1) % 3,
i2 = (i+2) % 3,
j1 = (j+1) % 3,
j2 = (j+2) % 3
};
return m.coeff(i1, j1) * m.coeff(i2, j2)
- m.coeff(i1, j2) * m.coeff(i2, j1);
}
template<typename MatrixType, typename ResultType>
EIGEN_DEVICE_FUNC
inline void compute_inverse_size3_helper(
const MatrixType& matrix,
const typename ResultType::Scalar& invdet,
const Matrix<typename ResultType::Scalar,3,1>& cofactors_col0,
ResultType& result)
{
result.row(0) = cofactors_col0 * invdet;
result.coeffRef(1,0) = cofactor_3x3<MatrixType,0,1>(matrix) * invdet;
result.coeffRef(1,1) = cofactor_3x3<MatrixType,1,1>(matrix) * invdet;
result.coeffRef(1,2) = cofactor_3x3<MatrixType,2,1>(matrix) * invdet;
result.coeffRef(2,0) = cofactor_3x3<MatrixType,0,2>(matrix) * invdet;
result.coeffRef(2,1) = cofactor_3x3<MatrixType,1,2>(matrix) * invdet;
result.coeffRef(2,2) = cofactor_3x3<MatrixType,2,2>(matrix) * invdet;
}
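// For reference, the 3x3 path uses inverse = adjugate / det, the adjugate being the transpose
// of the cofactor matrix: cofactor_3x3<MatrixType,i,j> lands at entry (j,i) of the result,
// and the cyclic index construction above already folds in the (-1)^(i+j) sign.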
template<typename MatrixType, typename ResultType>
struct compute_inverse<MatrixType, ResultType, 3>
{
EIGEN_DEVICE_FUNC
static inline void run(const MatrixType& matrix, ResultType& result)
{
typedef typename ResultType::Scalar Scalar;
Matrix<typename MatrixType::Scalar,3,1> cofactors_col0;
cofactors_col0.coeffRef(0) = cofactor_3x3<MatrixType,0,0>(matrix);
cofactors_col0.coeffRef(1) = cofactor_3x3<MatrixType,1,0>(matrix);
cofactors_col0.coeffRef(2) = cofactor_3x3<MatrixType,2,0>(matrix);
const Scalar det = (cofactors_col0.cwiseProduct(matrix.col(0))).sum();
const Scalar invdet = Scalar(1) / det;
compute_inverse_size3_helper(matrix, invdet, cofactors_col0, result);
}
};
template<typename MatrixType, typename ResultType>
struct compute_inverse_and_det_with_check<MatrixType, ResultType, 3>
{
EIGEN_DEVICE_FUNC
static inline void run(
const MatrixType& matrix,
const typename MatrixType::RealScalar& absDeterminantThreshold,
ResultType& inverse,
typename ResultType::Scalar& determinant,
bool& invertible
)
{
using std::abs;
typedef typename ResultType::Scalar Scalar;
Matrix<Scalar,3,1> cofactors_col0;
cofactors_col0.coeffRef(0) = cofactor_3x3<MatrixType,0,0>(matrix);
cofactors_col0.coeffRef(1) = cofactor_3x3<MatrixType,1,0>(matrix);
cofactors_col0.coeffRef(2) = cofactor_3x3<MatrixType,2,0>(matrix);
determinant = (cofactors_col0.cwiseProduct(matrix.col(0))).sum();
invertible = abs(determinant) > absDeterminantThreshold;
if(!invertible) return;
const Scalar invdet = Scalar(1) / determinant;
compute_inverse_size3_helper(matrix, invdet, cofactors_col0, inverse);
}
};
/****************************
*** Size 4 implementation ***
****************************/
template<typename Derived>
EIGEN_DEVICE_FUNC
inline const typename Derived::Scalar general_det3_helper
(const MatrixBase<Derived>& matrix, int i1, int i2, int i3, int j1, int j2, int j3)
{
return matrix.coeff(i1,j1)
* (matrix.coeff(i2,j2) * matrix.coeff(i3,j3) - matrix.coeff(i2,j3) * matrix.coeff(i3,j2));
}
template<typename MatrixType, int i, int j>
EIGEN_DEVICE_FUNC
inline typename MatrixType::Scalar cofactor_4x4(const MatrixType& matrix)
{
enum {
i1 = (i+1) % 4,
i2 = (i+2) % 4,
i3 = (i+3) % 4,
j1 = (j+1) % 4,
j2 = (j+2) % 4,
j3 = (j+3) % 4
};
return general_det3_helper(matrix, i1, i2, i3, j1, j2, j3)
+ general_det3_helper(matrix, i2, i3, i1, j1, j2, j3)
+ general_det3_helper(matrix, i3, i1, i2, j1, j2, j3);
}
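// cofactor_4x4<MatrixType,i,j> evaluates the determinant of the 3x3 submatrix obtained by
// deleting row i and column j (rows and columns are taken in cyclic order, which leaves the
// determinant unchanged). The (-1)^(i+j) checkerboard signs are applied at the call site,
// in compute_inverse_size4::run() below.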
template<int Arch, typename Scalar, typename MatrixType, typename ResultType>
struct compute_inverse_size4
{
EIGEN_DEVICE_FUNC
static void run(const MatrixType& matrix, ResultType& result)
{
result.coeffRef(0,0) = cofactor_4x4<MatrixType,0,0>(matrix);
result.coeffRef(1,0) = -cofactor_4x4<MatrixType,0,1>(matrix);
result.coeffRef(2,0) = cofactor_4x4<MatrixType,0,2>(matrix);
result.coeffRef(3,0) = -cofactor_4x4<MatrixType,0,3>(matrix);
result.coeffRef(0,2) = cofactor_4x4<MatrixType,2,0>(matrix);
result.coeffRef(1,2) = -cofactor_4x4<MatrixType,2,1>(matrix);
result.coeffRef(2,2) = cofactor_4x4<MatrixType,2,2>(matrix);
result.coeffRef(3,2) = -cofactor_4x4<MatrixType,2,3>(matrix);
result.coeffRef(0,1) = -cofactor_4x4<MatrixType,1,0>(matrix);
result.coeffRef(1,1) = cofactor_4x4<MatrixType,1,1>(matrix);
result.coeffRef(2,1) = -cofactor_4x4<MatrixType,1,2>(matrix);
result.coeffRef(3,1) = cofactor_4x4<MatrixType,1,3>(matrix);
result.coeffRef(0,3) = -cofactor_4x4<MatrixType,3,0>(matrix);
result.coeffRef(1,3) = cofactor_4x4<MatrixType,3,1>(matrix);
result.coeffRef(2,3) = -cofactor_4x4<MatrixType,3,2>(matrix);
result.coeffRef(3,3) = cofactor_4x4<MatrixType,3,3>(matrix);
result /= (matrix.col(0).cwiseProduct(result.row(0).transpose())).sum();
}
};
template<typename MatrixType, typename ResultType>
struct compute_inverse<MatrixType, ResultType, 4>
: compute_inverse_size4<Architecture::Target, typename MatrixType::Scalar,
MatrixType, ResultType>
{
};
template<typename MatrixType, typename ResultType>
struct compute_inverse_and_det_with_check<MatrixType, ResultType, 4>
{
EIGEN_DEVICE_FUNC
static inline void run(
const MatrixType& matrix,
const typename MatrixType::RealScalar& absDeterminantThreshold,
ResultType& inverse,
typename ResultType::Scalar& determinant,
bool& invertible
)
{
using std::abs;
determinant = matrix.determinant();
invertible = abs(determinant) > absDeterminantThreshold;
if(invertible) compute_inverse<MatrixType, ResultType>::run(matrix, inverse);
}
};
/*************************
*** MatrixBase methods ***
*************************/
} // end namespace internal
namespace internal {
// Specialization for "dense = dense_xpr.inverse()"
template<typename DstXprType, typename XprType>
struct Assignment<DstXprType, Inverse<XprType>, internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar>, Dense2Dense>
{
typedef Inverse<XprType> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar> &)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
const int Size = EIGEN_PLAIN_ENUM_MIN(XprType::ColsAtCompileTime,DstXprType::ColsAtCompileTime);
EIGEN_ONLY_USED_FOR_DEBUG(Size);
eigen_assert(( (Size<=1) || (Size>4) || (extract_data(src.nestedExpression())!=extract_data(dst)))
&& "Aliasing problem detected in inverse(), you need to do inverse().eval() here.");
typedef typename internal::nested_eval<XprType,XprType::ColsAtCompileTime>::type ActualXprType;
typedef typename internal::remove_all<ActualXprType>::type ActualXprTypeCleanded;
ActualXprType actual_xpr(src.nestedExpression());
compute_inverse<ActualXprTypeCleanded, DstXprType>::run(actual_xpr, dst);
}
};
} // end namespace internal
/** \lu_module
*
* \returns the matrix inverse of this matrix.
*
* For small fixed sizes up to 4x4, this method uses cofactors.
* In the general case, this method uses class PartialPivLU.
*
* \note This matrix must be invertible, otherwise the result is undefined. If you need an
* invertibility check, do the following:
* \li for fixed sizes up to 4x4, use computeInverseAndDetWithCheck().
* \li for the general case, use class FullPivLU.
*
* Example: \include MatrixBase_inverse.cpp
* Output: \verbinclude MatrixBase_inverse.out
*
* \sa computeInverseAndDetWithCheck()
*/
template<typename Derived>
inline const Inverse<Derived> MatrixBase<Derived>::inverse() const
{
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsInteger,THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)
eigen_assert(rows() == cols());
return Inverse<Derived>(derived());
}
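// Illustrative sketch (not part of the Eigen sources): typical use, including the aliasing
// caveat enforced by the Assignment specialization above for fixed sizes 2 to 4.
//
//   Eigen::Matrix3d A = Eigen::Matrix3d::Random();
//   Eigen::Matrix3d Ainv = A.inverse();              // fine: distinct destination
//   // A = A.inverse();                              // would trigger the aliasing assertion
//   A = A.inverse().eval();                          // fine: evaluates into a temporary first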
/** \lu_module
*
* Computation of matrix inverse and determinant, with invertibility check.
*
* This is only for fixed-size square matrices of size up to 4x4.
*
* \param inverse Reference to the matrix in which to store the inverse.
* \param determinant Reference to the variable in which to store the determinant.
* \param invertible Reference to the bool variable in which to store whether the matrix is invertible.
* \param absDeterminantThreshold Optional parameter controlling the invertibility check.
* The matrix will be declared invertible if the absolute value of its
* determinant is greater than this threshold.
*
* Example: \include MatrixBase_computeInverseAndDetWithCheck.cpp
* Output: \verbinclude MatrixBase_computeInverseAndDetWithCheck.out
*
* \sa inverse(), computeInverseWithCheck()
*/
template<typename Derived>
template<typename ResultType>
inline void MatrixBase<Derived>::computeInverseAndDetWithCheck(
ResultType& inverse,
typename ResultType::Scalar& determinant,
bool& invertible,
const RealScalar& absDeterminantThreshold
) const
{
// I'd love to put some static assertions there, but SFINAE means that they have no effect...
eigen_assert(rows() == cols());
// for 2x2, it's worth giving a chance to avoid evaluating.
// for larger sizes, evaluating has negligible cost and limits code size.
typedef typename internal::conditional<
RowsAtCompileTime == 2,
typename internal::remove_all<typename internal::nested_eval<Derived, 2>::type>::type,
PlainObject
>::type MatrixType;
internal::compute_inverse_and_det_with_check<MatrixType, ResultType>::run
(derived(), absDeterminantThreshold, inverse, determinant, invertible);
}
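// Illustrative sketch (not part of the Eigen sources): the checked variant on a fixed-size
// matrix; the threshold argument is optional, as documented above.
//
//   Eigen::Matrix2d m;
//   m << 1, 2,
//        2, 4;                                       // singular: determinant is 0
//   Eigen::Matrix2d inv;
//   double det;
//   bool invertible;
//   m.computeInverseAndDetWithCheck(inv, det, invertible);
//   // here det == 0 and invertible == false; inv is left untouched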
/** \lu_module
*
* Computation of matrix inverse, with invertibility check.
*
* This is only for fixed-size square matrices of size up to 4x4.
*
* \param inverse Reference to the matrix in which to store the inverse.
* \param invertible Reference to the bool variable in which to store whether the matrix is invertible.
* \param absDeterminantThreshold Optional parameter controlling the invertibility check.
* The matrix will be declared invertible if the absolute value of its
* determinant is greater than this threshold.
*
* Example: \include MatrixBase_computeInverseWithCheck.cpp
* Output: \verbinclude MatrixBase_computeInverseWithCheck.out
*
* \sa inverse(), computeInverseAndDetWithCheck()
*/
template<typename Derived>
template<typename ResultType>
inline void MatrixBase<Derived>::computeInverseWithCheck(
ResultType& inverse,
bool& invertible,
const RealScalar& absDeterminantThreshold
) const
{
Scalar determinant;
// I'd love to put some static assertions there, but SFINAE means that they have no effect...
eigen_assert(rows() == cols());
computeInverseAndDetWithCheck(inverse,determinant,invertible,absDeterminantThreshold);
}
} // end namespace Eigen
#endif // EIGEN_INVERSE_IMPL_H
| Unknown |
| 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/LU/PartialPivLU.h | .h | 21,538 | 615 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARTIALLU_H
#define EIGEN_PARTIALLU_H
namespace Eigen {
namespace internal {
template<typename _MatrixType> struct traits<PartialPivLU<_MatrixType> >
: traits<_MatrixType>
{
typedef MatrixXpr XprKind;
typedef SolverStorage StorageKind;
typedef traits<_MatrixType> BaseTraits;
enum {
Flags = BaseTraits::Flags & RowMajorBit,
CoeffReadCost = Dynamic
};
};
template<typename T,typename Derived>
struct enable_if_ref;
// {
// typedef Derived type;
// };
template<typename T,typename Derived>
struct enable_if_ref<Ref<T>,Derived> {
typedef Derived type;
};
} // end namespace internal
/** \ingroup LU_Module
*
* \class PartialPivLU
*
* \brief LU decomposition of a matrix with partial pivoting, and related features
*
* \tparam _MatrixType the type of the matrix of which we are computing the LU decomposition
*
* This class represents a LU decomposition of a \b square \b invertible matrix, with partial pivoting: the matrix A
* is decomposed as A = PLU where L is unit-lower-triangular, U is upper-triangular, and P
* is a permutation matrix.
*
* Typically, partial pivoting LU decomposition is only considered numerically stable for square invertible
* matrices. Thus LAPACK's dgesv and dgesvx require the matrix to be square and invertible. The present class
* does the same. It will assert that the matrix is square, but it won't (actually it can't) check that the
* matrix is invertible: it is your task to check that you only use this decomposition on invertible matrices.
*
* The guaranteed safe alternative, working for all matrices, is the full pivoting LU decomposition, provided
* by class FullPivLU.
*
* This is \b not a rank-revealing LU decomposition. Many features are intentionally absent from this class,
* such as rank computation. If you need these features, use class FullPivLU.
*
* This LU decomposition is suitable to invert invertible matrices. It is what MatrixBase::inverse() uses
* in the general case.
* On the other hand, it is \b not suitable to determine whether a given matrix is invertible.
*
* The data of the LU decomposition can be directly accessed through the methods matrixLU(), permutationP().
*
* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
*
* \sa MatrixBase::partialPivLu(), MatrixBase::determinant(), MatrixBase::inverse(), MatrixBase::computeInverse(), class FullPivLU
*/
template<typename _MatrixType> class PartialPivLU
: public SolverBase<PartialPivLU<_MatrixType> >
{
public:
typedef _MatrixType MatrixType;
typedef SolverBase<PartialPivLU> Base;
EIGEN_GENERIC_PUBLIC_INTERFACE(PartialPivLU)
// FIXME StorageIndex defined in EIGEN_GENERIC_PUBLIC_INTERFACE should be int
enum {
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;
typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
typedef typename MatrixType::PlainObject PlainObject;
/**
* \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via PartialPivLU::compute(const MatrixType&).
*/
PartialPivLU();
/** \brief Default Constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem \a size.
* \sa PartialPivLU()
*/
explicit PartialPivLU(Index size);
/** Constructor.
*
* \param matrix the matrix of which to compute the LU decomposition.
*
* \warning The matrix should have full rank (e.g. if it's square, it should be invertible).
* If you need to deal with non-full rank, use class FullPivLU instead.
*/
template<typename InputType>
explicit PartialPivLU(const EigenBase<InputType>& matrix);
/** Constructor for \link InplaceDecomposition inplace decomposition \endlink
*
* \param matrix the matrix of which to compute the LU decomposition.
*
* \warning The matrix should have full rank (e.g. if it's square, it should be invertible).
* If you need to deal with non-full rank, use class FullPivLU instead.
*/
template<typename InputType>
explicit PartialPivLU(EigenBase<InputType>& matrix);
template<typename InputType>
PartialPivLU& compute(const EigenBase<InputType>& matrix) {
m_lu = matrix.derived();
compute();
return *this;
}
/** \returns the LU decomposition matrix: the upper-triangular part is U, the
* unit-lower-triangular part is L (at least for square matrices; in the non-square
* case, special care is needed, see the documentation of class FullPivLU).
*
* \sa matrixL(), matrixU()
*/
inline const MatrixType& matrixLU() const
{
eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
return m_lu;
}
/** \returns the permutation matrix P.
*/
inline const PermutationType& permutationP() const
{
eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
return m_p;
}
/** This method returns the solution x to the equation Ax=b, where A is the matrix of which
* *this is the LU decomposition.
*
* \param b the right-hand-side of the equation to solve. Can be a vector or a matrix,
* the only requirement in order for the equation to make sense is that
* b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition.
*
* \returns the solution.
*
* Example: \include PartialPivLU_solve.cpp
* Output: \verbinclude PartialPivLU_solve.out
*
* Since this PartialPivLU class assumes anyway that the matrix A is invertible, the solution
* theoretically exists and is unique regardless of b.
*
* \sa TriangularView::solve(), inverse(), computeInverse()
*/
// FIXME this is a copy-paste of the base-class member to add the isInitialized assertion.
template<typename Rhs>
inline const Solve<PartialPivLU, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
return Solve<PartialPivLU, Rhs>(*this, b.derived());
}
/** \returns an estimate of the reciprocal condition number of the matrix of which \c *this is
the LU decomposition.
*/
inline RealScalar rcond() const
{
eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
return internal::rcond_estimate_helper(m_l1_norm, *this);
}
/** \returns the inverse of the matrix of which *this is the LU decomposition.
*
* \warning The matrix being decomposed here is assumed to be invertible. If you need to check for
* invertibility, use class FullPivLU instead.
*
* \sa MatrixBase::inverse(), LU::inverse()
*/
inline const Inverse<PartialPivLU> inverse() const
{
eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
return Inverse<PartialPivLU>(*this);
}
/** \returns the determinant of the matrix of which
* *this is the LU decomposition. It has only linear complexity
* (that is, O(n) where n is the dimension of the square matrix)
* as the LU decomposition has already been computed.
*
* \note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers
* optimized paths.
*
* \warning a determinant can be very big or small, so for matrices
* of large enough dimension, there is a risk of overflow/underflow.
*
* \sa MatrixBase::determinant()
*/
Scalar determinant() const;
MatrixType reconstructedMatrix() const;
inline Index rows() const { return m_lu.rows(); }
inline Index cols() const { return m_lu.cols(); }
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
void _solve_impl(const RhsType &rhs, DstType &dst) const {
/* The decomposition PA = LU can be rewritten as A = P^{-1} L U.
* So we proceed as follows:
* Step 1: compute c = Pb.
* Step 2: replace c by the solution x to Lx = c.
* Step 3: replace c by the solution x to Ux = c.
*/
eigen_assert(rhs.rows() == m_lu.rows());
// Step 1
dst = permutationP() * rhs;
// Step 2
m_lu.template triangularView<UnitLower>().solveInPlace(dst);
// Step 3
m_lu.template triangularView<Upper>().solveInPlace(dst);
}
template<bool Conjugate, typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const {
/* The decomposition PA = LU can be rewritten as A = P^{-1} L U.
* So we proceed as follows:
* Step 1: compute c = Pb.
* Step 2: replace c by the solution x to Lx = c.
* Step 3: replace c by the solution x to Ux = c.
*/
eigen_assert(rhs.rows() == m_lu.cols());
if (Conjugate) {
// Step 1
dst = m_lu.template triangularView<Upper>().adjoint().solve(rhs);
// Step 2
m_lu.template triangularView<UnitLower>().adjoint().solveInPlace(dst);
} else {
// Step 1
dst = m_lu.template triangularView<Upper>().transpose().solve(rhs);
// Step 2
m_lu.template triangularView<UnitLower>().transpose().solveInPlace(dst);
}
// Step 3
dst = permutationP().transpose() * dst;
}
#endif
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
void compute();
MatrixType m_lu;
PermutationType m_p;
TranspositionType m_rowsTranspositions;
RealScalar m_l1_norm;
signed char m_det_p;
bool m_isInitialized;
};
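// Illustrative sketch (not part of the Eigen sources): typical use of PartialPivLU on a
// square matrix assumed to be invertible; one factorization serves several right-hand sides.
//
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
//   Eigen::PartialPivLU<Eigen::MatrixXd> lu(A);
//   Eigen::VectorXd x1 = lu.solve(Eigen::VectorXd::Ones(4));
//   Eigen::VectorXd x2 = lu.solve(Eigen::VectorXd::Random(4));
//   double d = lu.determinant();                     // O(n) once the factorization exists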
template<typename MatrixType>
PartialPivLU<MatrixType>::PartialPivLU()
: m_lu(),
m_p(),
m_rowsTranspositions(),
m_l1_norm(0),
m_det_p(0),
m_isInitialized(false)
{
}
template<typename MatrixType>
PartialPivLU<MatrixType>::PartialPivLU(Index size)
: m_lu(size, size),
m_p(size),
m_rowsTranspositions(size),
m_l1_norm(0),
m_det_p(0),
m_isInitialized(false)
{
}
template<typename MatrixType>
template<typename InputType>
PartialPivLU<MatrixType>::PartialPivLU(const EigenBase<InputType>& matrix)
: m_lu(matrix.rows(),matrix.cols()),
m_p(matrix.rows()),
m_rowsTranspositions(matrix.rows()),
m_l1_norm(0),
m_det_p(0),
m_isInitialized(false)
{
compute(matrix.derived());
}
template<typename MatrixType>
template<typename InputType>
PartialPivLU<MatrixType>::PartialPivLU(EigenBase<InputType>& matrix)
: m_lu(matrix.derived()),
m_p(matrix.rows()),
m_rowsTranspositions(matrix.rows()),
m_l1_norm(0),
m_det_p(0),
m_isInitialized(false)
{
compute();
}
namespace internal {
/** \internal Partial-pivoting LU implementation; blocked_lu() below is the blocked version of unblocked_lu() */
template<typename Scalar, int StorageOrder, typename PivIndex>
struct partial_lu_impl
{
// FIXME add a stride to Map, so that the following mapping becomes easier,
// another option would be to create an expression being able to automatically
// wrap any Map, Matrix, and Block expression as a unique type, but since that's exactly
// a Map + stride, why not add a stride to Map, plus convenient ctors from a Matrix,
// and Block.
typedef Map<Matrix<Scalar, Dynamic, Dynamic, StorageOrder> > MapLU;
typedef Block<MapLU, Dynamic, Dynamic> MatrixType;
typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
typedef typename MatrixType::RealScalar RealScalar;
/** \internal performs the LU decomposition in-place of the matrix \a lu
* using an unblocked algorithm.
*
* In addition, this function returns the row transpositions in the
* vector \a row_transpositions which must have a size equal to the number
* of columns of the matrix \a lu, and an integer \a nb_transpositions
* which returns the actual number of transpositions.
*
* \returns The index of the first pivot which is exactly zero if any, or a negative number otherwise.
*/
static Index unblocked_lu(MatrixType& lu, PivIndex* row_transpositions, PivIndex& nb_transpositions)
{
typedef scalar_score_coeff_op<Scalar> Scoring;
typedef typename Scoring::result_type Score;
const Index rows = lu.rows();
const Index cols = lu.cols();
const Index size = (std::min)(rows,cols);
nb_transpositions = 0;
Index first_zero_pivot = -1;
for(Index k = 0; k < size; ++k)
{
Index rrows = rows-k-1;
Index rcols = cols-k-1;
Index row_of_biggest_in_col;
Score biggest_in_corner
= lu.col(k).tail(rows-k).unaryExpr(Scoring()).maxCoeff(&row_of_biggest_in_col);
row_of_biggest_in_col += k;
row_transpositions[k] = PivIndex(row_of_biggest_in_col);
if(biggest_in_corner != Score(0))
{
if(k != row_of_biggest_in_col)
{
lu.row(k).swap(lu.row(row_of_biggest_in_col));
++nb_transpositions;
}
// FIXME shall we introduce a safe quotient expression in case 1/lu.coeff(k,k)
// overflows but not the actual quotient?
lu.col(k).tail(rrows) /= lu.coeff(k,k);
}
else if(first_zero_pivot==-1)
{
// the pivot is exactly zero, we record the index of the first pivot which is exactly 0,
// and continue the factorization such that we still have A = PLU
first_zero_pivot = k;
}
if(k<rows-1)
lu.bottomRightCorner(rrows,rcols).noalias() -= lu.col(k).tail(rrows) * lu.row(k).tail(rcols);
}
return first_zero_pivot;
}
/** \internal performs the LU decomposition in-place of the matrix represented
* by the variables \a rows, \a cols, \a lu_data, and \a lu_stride using a
* recursive, blocked algorithm.
*
* In addition, this function returns the row transpositions in the
* vector \a row_transpositions which must have a size equal to the number
* of columns of the matrix \a lu, and an integer \a nb_transpositions
* which returns the actual number of transpositions.
*
* \returns The index of the first pivot which is exactly zero if any, or a negative number otherwise.
*
* \note This very low level interface using pointers, etc. is to:
* 1 - reduce the number of instantiations to the strict minimum
* 2 - avoid infinite recursion of the instantiations with Block<Block<Block<...> > >
*/
static Index blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, PivIndex* row_transpositions, PivIndex& nb_transpositions, Index maxBlockSize=256)
{
MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols);
MatrixType lu(lu1,0,0,rows,cols);
const Index size = (std::min)(rows,cols);
// if the matrix is too small, no blocking:
if(size<=16)
{
return unblocked_lu(lu, row_transpositions, nb_transpositions);
}
// automatically adjust the number of subdivisions to the size
// of the matrix so that there is enough sub blocks:
Index blockSize;
{
blockSize = size/8;
blockSize = (blockSize/16)*16;
blockSize = (std::min)((std::max)(blockSize,Index(8)), maxBlockSize);
}
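// As a concrete instance of the formula above: for size == 1000 and the default
// maxBlockSize == 256, blockSize = 1000/8 = 125, rounded down to a multiple of 16 -> 112,
// then clamped into [8, 256] -> 112. For size == 40 the same steps give 5 -> 0 -> 8.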
nb_transpositions = 0;
Index first_zero_pivot = -1;
for(Index k = 0; k < size; k+=blockSize)
{
Index bs = (std::min)(size-k,blockSize); // actual size of the block
Index trows = rows - k - bs; // trailing rows
Index tsize = size - k - bs; // trailing size
// partition the matrix:
// A00 | A01 | A02
// lu = A_0 | A_1 | A_2 = A10 | A11 | A12
// A20 | A21 | A22
BlockType A_0(lu,0,0,rows,k);
BlockType A_2(lu,0,k+bs,rows,tsize);
BlockType A11(lu,k,k,bs,bs);
BlockType A12(lu,k,k+bs,bs,tsize);
BlockType A21(lu,k+bs,k,trows,bs);
BlockType A22(lu,k+bs,k+bs,trows,tsize);
PivIndex nb_transpositions_in_panel;
// recursively call the blocked LU algorithm on [A11^T A21^T]^T
// with a very small blocking size:
Index ret = blocked_lu(trows+bs, bs, &lu.coeffRef(k,k), luStride,
row_transpositions+k, nb_transpositions_in_panel, 16);
if(ret>=0 && first_zero_pivot==-1)
first_zero_pivot = k+ret;
nb_transpositions += nb_transpositions_in_panel;
// update permutations and apply them to A_0
for(Index i=k; i<k+bs; ++i)
{
Index piv = (row_transpositions[i] += internal::convert_index<PivIndex>(k));
A_0.row(i).swap(A_0.row(piv));
}
if(trows)
{
// apply permutations to A_2
for(Index i=k;i<k+bs; ++i)
A_2.row(i).swap(A_2.row(row_transpositions[i]));
// A12 = A11^-1 A12
A11.template triangularView<UnitLower>().solveInPlace(A12);
A22.noalias() -= A21 * A12;
}
}
return first_zero_pivot;
}
};
/** \internal performs the LU decomposition with partial pivoting in-place.
*/
template<typename MatrixType, typename TranspositionType>
void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename TranspositionType::StorageIndex& nb_transpositions)
{
eigen_assert(lu.cols() == row_transpositions.size());
eigen_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1);
partial_lu_impl
<typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor, typename TranspositionType::StorageIndex>
::blocked_lu(lu.rows(), lu.cols(), &lu.coeffRef(0,0), lu.outerStride(), &row_transpositions.coeffRef(0), nb_transpositions);
}
} // end namespace internal
template<typename MatrixType>
void PartialPivLU<MatrixType>::compute()
{
check_template_parameters();
// the row permutation is stored as int indices, so just to be sure:
eigen_assert(m_lu.rows()<NumTraits<int>::highest());
if(m_lu.cols()>0)
m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff();
else
m_l1_norm = RealScalar(0);
eigen_assert(m_lu.rows() == m_lu.cols() && "PartialPivLU is only for square (and moreover invertible) matrices");
const Index size = m_lu.rows();
m_rowsTranspositions.resize(size);
typename TranspositionType::StorageIndex nb_transpositions;
internal::partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions);
m_det_p = (nb_transpositions%2) ? -1 : 1;
m_p = m_rowsTranspositions;
m_isInitialized = true;
}
template<typename MatrixType>
typename PartialPivLU<MatrixType>::Scalar PartialPivLU<MatrixType>::determinant() const
{
eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
return Scalar(m_det_p) * m_lu.diagonal().prod();
}
/** \returns the matrix represented by the decomposition,
* i.e., it returns the product: P^{-1} L U.
* This function is provided for debug purposes. */
template<typename MatrixType>
MatrixType PartialPivLU<MatrixType>::reconstructedMatrix() const
{
eigen_assert(m_isInitialized && "LU is not initialized.");
// LU
MatrixType res = m_lu.template triangularView<UnitLower>().toDenseMatrix()
* m_lu.template triangularView<Upper>();
// P^{-1}(LU)
res = m_p.inverse() * res;
return res;
}
/***** Implementation details *****************************************************/
namespace internal {
/***** Implementation of inverse() *****************************************************/
template<typename DstXprType, typename MatrixType>
struct Assignment<DstXprType, Inverse<PartialPivLU<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename PartialPivLU<MatrixType>::Scalar>, Dense2Dense>
{
typedef PartialPivLU<MatrixType> LuType;
typedef Inverse<LuType> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename LuType::Scalar> &)
{
dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));
}
};
} // end namespace internal
/******** MatrixBase methods *******/
/** \lu_module
*
* \return the partial-pivoting LU decomposition of \c *this.
*
* \sa class PartialPivLU
*/
template<typename Derived>
inline const PartialPivLU<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::partialPivLu() const
{
return PartialPivLU<PlainObject>(eval());
}
/** \lu_module
*
* Synonym of partialPivLu().
*
* \return the partial-pivoting LU decomposition of \c *this.
*
* \sa class PartialPivLU
*/
template<typename Derived>
inline const PartialPivLU<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::lu() const
{
return PartialPivLU<PlainObject>(eval());
}
} // end namespace Eigen
#endif // EIGEN_PARTIALLU_H
| Unknown |
| 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/LU/Determinant.h | .h | 3,057 | 102 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_DETERMINANT_H
#define EIGEN_DETERMINANT_H
namespace Eigen {
namespace internal {
template<typename Derived>
inline const typename Derived::Scalar bruteforce_det3_helper
(const MatrixBase<Derived>& matrix, int a, int b, int c)
{
return matrix.coeff(0,a)
* (matrix.coeff(1,b) * matrix.coeff(2,c) - matrix.coeff(1,c) * matrix.coeff(2,b));
}
template<typename Derived>
const typename Derived::Scalar bruteforce_det4_helper
(const MatrixBase<Derived>& matrix, int j, int k, int m, int n)
{
return (matrix.coeff(j,0) * matrix.coeff(k,1) - matrix.coeff(k,0) * matrix.coeff(j,1))
* (matrix.coeff(m,2) * matrix.coeff(n,3) - matrix.coeff(n,2) * matrix.coeff(m,3));
}
template<typename Derived,
int DeterminantType = Derived::RowsAtCompileTime
> struct determinant_impl
{
static inline typename traits<Derived>::Scalar run(const Derived& m)
{
if(Derived::ColsAtCompileTime==Dynamic && m.rows()==0)
return typename traits<Derived>::Scalar(1);
return m.partialPivLu().determinant();
}
};
template<typename Derived> struct determinant_impl<Derived, 1>
{
static inline typename traits<Derived>::Scalar run(const Derived& m)
{
return m.coeff(0,0);
}
};
template<typename Derived> struct determinant_impl<Derived, 2>
{
static inline typename traits<Derived>::Scalar run(const Derived& m)
{
return m.coeff(0,0) * m.coeff(1,1) - m.coeff(1,0) * m.coeff(0,1);
}
};
template<typename Derived> struct determinant_impl<Derived, 3>
{
static inline typename traits<Derived>::Scalar run(const Derived& m)
{
return bruteforce_det3_helper(m,0,1,2)
- bruteforce_det3_helper(m,1,0,2)
+ bruteforce_det3_helper(m,2,0,1);
}
};
template<typename Derived> struct determinant_impl<Derived, 4>
{
static typename traits<Derived>::Scalar run(const Derived& m)
{
// trick by Martin Costabel to compute 4x4 det with only 30 muls
return bruteforce_det4_helper(m,0,1,2,3)
- bruteforce_det4_helper(m,0,2,1,3)
+ bruteforce_det4_helper(m,0,3,1,2)
+ bruteforce_det4_helper(m,1,2,0,3)
- bruteforce_det4_helper(m,1,3,0,2)
+ bruteforce_det4_helper(m,2,3,0,1);
}
};
} // end namespace internal
/** \lu_module
*
* \returns the determinant of this matrix
*/
template<typename Derived>
inline typename internal::traits<Derived>::Scalar MatrixBase<Derived>::determinant() const
{
eigen_assert(rows() == cols());
typedef typename internal::nested_eval<Derived,Base::RowsAtCompileTime>::type Nested;
return internal::determinant_impl<typename internal::remove_all<Nested>::type>::run(derived());
}
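// Illustrative sketch (not part of the Eigen sources): for fixed sizes up to 4 the call
// reduces to the closed-form specializations above, e.g. determinant_impl<Derived,2>:
//
//   Eigen::Matrix2d m;
//   m << 1, 2,
//        3, 4;
//   double d = m.determinant();                      // 1*4 - 3*2 == -2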
} // end namespace Eigen
#endif // EIGEN_DETERMINANT_H
| Unknown |
| 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/LU/arch/Inverse_SSE.h | .h | 13,662 | 339 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2001 Intel Corporation
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
// The SSE code for the 4x4 float and double matrix inverse in this file
// comes from the following Intel's library:
// http://software.intel.com/en-us/articles/optimized-matrix-library-for-use-with-the-intel-pentiumr-4-processors-sse2-instructions/
//
// Here is the respective copyright and license statement:
//
// Copyright (c) 2001 Intel Corporation.
//
// Permission is granted to use, copy, distribute and prepare derivative works
// of this library for any purpose and without fee, provided, that the above
// copyright notice and this statement appear in all copies.
// Intel makes no representations about the suitability of this software for
// any purpose, and specifically disclaims all warranties.
// See LEGAL.TXT for all the legal information.
#ifndef EIGEN_INVERSE_SSE_H
#define EIGEN_INVERSE_SSE_H
namespace Eigen {
namespace internal {
template<typename MatrixType, typename ResultType>
struct compute_inverse_size4<Architecture::SSE, float, MatrixType, ResultType>
{
enum {
MatrixAlignment = traits<MatrixType>::Alignment,
ResultAlignment = traits<ResultType>::Alignment,
StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
};
typedef typename conditional<(MatrixType::Flags&LinearAccessBit),MatrixType const &,typename MatrixType::PlainObject>::type ActualMatrixType;
static void run(const MatrixType& mat, ResultType& result)
{
ActualMatrixType matrix(mat);
const Packet4f p4f_sign_PNNP = _mm_castsi128_ps(_mm_set_epi32(0x00000000, 0x80000000, 0x80000000, 0x00000000));
// Load the full matrix into registers
__m128 _L1 = matrix.template packet<MatrixAlignment>( 0);
__m128 _L2 = matrix.template packet<MatrixAlignment>( 4);
__m128 _L3 = matrix.template packet<MatrixAlignment>( 8);
__m128 _L4 = matrix.template packet<MatrixAlignment>(12);
// The inverse is calculated using a "Divide and Conquer" technique. The
// original matrix is divided into four 2x2 sub-matrices. Since each
// register holds four matrix elements, each smaller matrix is
// represented by a single register. Hence we get better locality
// in the calculations.
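// Summary of the block scheme spelled out step by step below: writing the matrix as
// M = [A B; C D] with 2x2 blocks, X# the adjugate of block X and |X| its determinant,
// the code forms det(M) = |A|*|D| + |B|*|C| - trace(A#*B*D#*C) together with
// iA = A*|D| - B*D#*C, iB = C*|B| - D*B#*A, iC = B*|C| - A*C#*D, iD = D*|A| - C*A#*B,
// then scales by the reciprocal determinant (with the PNNP sign pattern) and shuffles the
// pieces into the result.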
__m128 A, B, C, D; // the four sub-matrices
if(!StorageOrdersMatch)
{
A = _mm_unpacklo_ps(_L1, _L2);
B = _mm_unpacklo_ps(_L3, _L4);
C = _mm_unpackhi_ps(_L1, _L2);
D = _mm_unpackhi_ps(_L3, _L4);
}
else
{
A = _mm_movelh_ps(_L1, _L2);
B = _mm_movehl_ps(_L2, _L1);
C = _mm_movelh_ps(_L3, _L4);
D = _mm_movehl_ps(_L4, _L3);
}
__m128 iA, iB, iC, iD, // partial inverse of the sub-matrices
DC, AB;
__m128 dA, dB, dC, dD; // determinant of the sub-matrices
__m128 det, d, d1, d2;
__m128 rd; // reciprocal of the determinant
// AB = A# * B
AB = _mm_mul_ps(_mm_shuffle_ps(A,A,0x0F), B);
AB = _mm_sub_ps(AB,_mm_mul_ps(_mm_shuffle_ps(A,A,0xA5), _mm_shuffle_ps(B,B,0x4E)));
// DC = D# * C
DC = _mm_mul_ps(_mm_shuffle_ps(D,D,0x0F), C);
DC = _mm_sub_ps(DC,_mm_mul_ps(_mm_shuffle_ps(D,D,0xA5), _mm_shuffle_ps(C,C,0x4E)));
// dA = |A|
dA = _mm_mul_ps(_mm_shuffle_ps(A, A, 0x5F),A);
dA = _mm_sub_ss(dA, _mm_movehl_ps(dA,dA));
// dB = |B|
dB = _mm_mul_ps(_mm_shuffle_ps(B, B, 0x5F),B);
dB = _mm_sub_ss(dB, _mm_movehl_ps(dB,dB));
// dC = |C|
dC = _mm_mul_ps(_mm_shuffle_ps(C, C, 0x5F),C);
dC = _mm_sub_ss(dC, _mm_movehl_ps(dC,dC));
// dD = |D|
dD = _mm_mul_ps(_mm_shuffle_ps(D, D, 0x5F),D);
dD = _mm_sub_ss(dD, _mm_movehl_ps(dD,dD));
// d = trace(AB*DC) = trace(A#*B*D#*C)
d = _mm_mul_ps(_mm_shuffle_ps(DC,DC,0xD8),AB);
// iD = C*A#*B
iD = _mm_mul_ps(_mm_shuffle_ps(C,C,0xA0), _mm_movelh_ps(AB,AB));
iD = _mm_add_ps(iD,_mm_mul_ps(_mm_shuffle_ps(C,C,0xF5), _mm_movehl_ps(AB,AB)));
// iA = B*D#*C
iA = _mm_mul_ps(_mm_shuffle_ps(B,B,0xA0), _mm_movelh_ps(DC,DC));
iA = _mm_add_ps(iA,_mm_mul_ps(_mm_shuffle_ps(B,B,0xF5), _mm_movehl_ps(DC,DC)));
// d = trace(AB*DC) = trace(A#*B*D#*C) [continue]
d = _mm_add_ps(d, _mm_movehl_ps(d, d));
d = _mm_add_ss(d, _mm_shuffle_ps(d, d, 1));
d1 = _mm_mul_ss(dA,dD);
d2 = _mm_mul_ss(dB,dC);
// iD = D*|A| - C*A#*B
iD = _mm_sub_ps(_mm_mul_ps(D,_mm_shuffle_ps(dA,dA,0)), iD);
// iA = A*|D| - B*D#*C;
iA = _mm_sub_ps(_mm_mul_ps(A,_mm_shuffle_ps(dD,dD,0)), iA);
// det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
det = _mm_sub_ss(_mm_add_ss(d1,d2),d);
rd = _mm_div_ss(_mm_set_ss(1.0f), det);
// #ifdef ZERO_SINGULAR
// rd = _mm_and_ps(_mm_cmpneq_ss(det,_mm_setzero_ps()), rd);
// #endif
// iB = D * (A#B)# = D*B#*A
iB = _mm_mul_ps(D, _mm_shuffle_ps(AB,AB,0x33));
iB = _mm_sub_ps(iB, _mm_mul_ps(_mm_shuffle_ps(D,D,0xB1), _mm_shuffle_ps(AB,AB,0x66)));
// iC = A * (D#C)# = A*C#*D
iC = _mm_mul_ps(A, _mm_shuffle_ps(DC,DC,0x33));
iC = _mm_sub_ps(iC, _mm_mul_ps(_mm_shuffle_ps(A,A,0xB1), _mm_shuffle_ps(DC,DC,0x66)));
rd = _mm_shuffle_ps(rd,rd,0);
rd = _mm_xor_ps(rd, p4f_sign_PNNP);
// iB = C*|B| - D*B#*A
iB = _mm_sub_ps(_mm_mul_ps(C,_mm_shuffle_ps(dB,dB,0)), iB);
// iC = B*|C| - A*C#*D;
iC = _mm_sub_ps(_mm_mul_ps(B,_mm_shuffle_ps(dC,dC,0)), iC);
// iX = iX / det
iA = _mm_mul_ps(rd,iA);
iB = _mm_mul_ps(rd,iB);
iC = _mm_mul_ps(rd,iC);
iD = _mm_mul_ps(rd,iD);
Index res_stride = result.outerStride();
float* res = result.data();
pstoret<float, Packet4f, ResultAlignment>(res+0, _mm_shuffle_ps(iA,iB,0x77));
pstoret<float, Packet4f, ResultAlignment>(res+res_stride, _mm_shuffle_ps(iA,iB,0x22));
pstoret<float, Packet4f, ResultAlignment>(res+2*res_stride, _mm_shuffle_ps(iC,iD,0x77));
pstoret<float, Packet4f, ResultAlignment>(res+3*res_stride, _mm_shuffle_ps(iC,iD,0x22));
}
};
template<typename MatrixType, typename ResultType>
struct compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultType>
{
enum {
MatrixAlignment = traits<MatrixType>::Alignment,
ResultAlignment = traits<ResultType>::Alignment,
StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
};
typedef typename conditional<(MatrixType::Flags&LinearAccessBit),MatrixType const &,typename MatrixType::PlainObject>::type ActualMatrixType;
static void run(const MatrixType& mat, ResultType& result)
{
ActualMatrixType matrix(mat);
const __m128d _Sign_NP = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
const __m128d _Sign_PN = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
// The inverse is calculated using a "Divide and Conquer" technique. The
// original matrix is divided into four 2x2 sub-matrices. Since each
// register of the matrix holds two elements, each smaller matrix
// consists of two registers. Hence we get better locality
// in the calculations.
// the four sub-matrices
__m128d A1, A2, B1, B2, C1, C2, D1, D2;
if(StorageOrdersMatch)
{
A1 = matrix.template packet<MatrixAlignment>( 0); B1 = matrix.template packet<MatrixAlignment>( 2);
A2 = matrix.template packet<MatrixAlignment>( 4); B2 = matrix.template packet<MatrixAlignment>( 6);
C1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
C2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
}
else
{
__m128d tmp;
A1 = matrix.template packet<MatrixAlignment>( 0); C1 = matrix.template packet<MatrixAlignment>( 2);
A2 = matrix.template packet<MatrixAlignment>( 4); C2 = matrix.template packet<MatrixAlignment>( 6);
tmp = A1;
A1 = _mm_unpacklo_pd(A1,A2);
A2 = _mm_unpackhi_pd(tmp,A2);
tmp = C1;
C1 = _mm_unpacklo_pd(C1,C2);
C2 = _mm_unpackhi_pd(tmp,C2);
B1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
B2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
tmp = B1;
B1 = _mm_unpacklo_pd(B1,B2);
B2 = _mm_unpackhi_pd(tmp,B2);
tmp = D1;
D1 = _mm_unpacklo_pd(D1,D2);
D2 = _mm_unpackhi_pd(tmp,D2);
}
__m128d iA1, iA2, iB1, iB2, iC1, iC2, iD1, iD2, // partial inverses of the sub-matrices
DC1, DC2, AB1, AB2;
__m128d dA, dB, dC, dD; // determinant of the sub-matrices
__m128d det, d1, d2, rd;
// dA = |A|
dA = _mm_shuffle_pd(A2, A2, 1);
dA = _mm_mul_pd(A1, dA);
dA = _mm_sub_sd(dA, _mm_shuffle_pd(dA,dA,3));
// dB = |B|
dB = _mm_shuffle_pd(B2, B2, 1);
dB = _mm_mul_pd(B1, dB);
dB = _mm_sub_sd(dB, _mm_shuffle_pd(dB,dB,3));
// AB = A# * B
AB1 = _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,3));
AB2 = _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,0));
AB1 = _mm_sub_pd(AB1, _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,3)));
AB2 = _mm_sub_pd(AB2, _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,0)));
// dC = |C|
dC = _mm_shuffle_pd(C2, C2, 1);
dC = _mm_mul_pd(C1, dC);
dC = _mm_sub_sd(dC, _mm_shuffle_pd(dC,dC,3));
// dD = |D|
dD = _mm_shuffle_pd(D2, D2, 1);
dD = _mm_mul_pd(D1, dD);
dD = _mm_sub_sd(dD, _mm_shuffle_pd(dD,dD,3));
// DC = D# * C
DC1 = _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,3));
DC2 = _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,0));
DC1 = _mm_sub_pd(DC1, _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,3)));
DC2 = _mm_sub_pd(DC2, _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,0)));
// rd = trace(AB*DC) = trace(A#*B*D#*C)
d1 = _mm_mul_pd(AB1, _mm_shuffle_pd(DC1, DC2, 0));
d2 = _mm_mul_pd(AB2, _mm_shuffle_pd(DC1, DC2, 3));
rd = _mm_add_pd(d1, d2);
rd = _mm_add_sd(rd, _mm_shuffle_pd(rd, rd,3));
// iD = C*A#*B
iD1 = _mm_mul_pd(AB1, _mm_shuffle_pd(C1,C1,0));
iD2 = _mm_mul_pd(AB1, _mm_shuffle_pd(C2,C2,0));
iD1 = _mm_add_pd(iD1, _mm_mul_pd(AB2, _mm_shuffle_pd(C1,C1,3)));
iD2 = _mm_add_pd(iD2, _mm_mul_pd(AB2, _mm_shuffle_pd(C2,C2,3)));
// iA = B*D#*C
iA1 = _mm_mul_pd(DC1, _mm_shuffle_pd(B1,B1,0));
iA2 = _mm_mul_pd(DC1, _mm_shuffle_pd(B2,B2,0));
iA1 = _mm_add_pd(iA1, _mm_mul_pd(DC2, _mm_shuffle_pd(B1,B1,3)));
iA2 = _mm_add_pd(iA2, _mm_mul_pd(DC2, _mm_shuffle_pd(B2,B2,3)));
// iD = D*|A| - C*A#*B
dA = _mm_shuffle_pd(dA,dA,0);
iD1 = _mm_sub_pd(_mm_mul_pd(D1, dA), iD1);
iD2 = _mm_sub_pd(_mm_mul_pd(D2, dA), iD2);
// iA = A*|D| - B*D#*C;
dD = _mm_shuffle_pd(dD,dD,0);
iA1 = _mm_sub_pd(_mm_mul_pd(A1, dD), iA1);
iA2 = _mm_sub_pd(_mm_mul_pd(A2, dD), iA2);
d1 = _mm_mul_sd(dA, dD);
d2 = _mm_mul_sd(dB, dC);
// iB = D * (A#B)# = D*B#*A
iB1 = _mm_mul_pd(D1, _mm_shuffle_pd(AB2,AB1,1));
iB2 = _mm_mul_pd(D2, _mm_shuffle_pd(AB2,AB1,1));
iB1 = _mm_sub_pd(iB1, _mm_mul_pd(_mm_shuffle_pd(D1,D1,1), _mm_shuffle_pd(AB2,AB1,2)));
iB2 = _mm_sub_pd(iB2, _mm_mul_pd(_mm_shuffle_pd(D2,D2,1), _mm_shuffle_pd(AB2,AB1,2)));
// det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
det = _mm_add_sd(d1, d2);
det = _mm_sub_sd(det, rd);
// iC = A * (D#C)# = A*C#*D
iC1 = _mm_mul_pd(A1, _mm_shuffle_pd(DC2,DC1,1));
iC2 = _mm_mul_pd(A2, _mm_shuffle_pd(DC2,DC1,1));
iC1 = _mm_sub_pd(iC1, _mm_mul_pd(_mm_shuffle_pd(A1,A1,1), _mm_shuffle_pd(DC2,DC1,2)));
iC2 = _mm_sub_pd(iC2, _mm_mul_pd(_mm_shuffle_pd(A2,A2,1), _mm_shuffle_pd(DC2,DC1,2)));
rd = _mm_div_sd(_mm_set_sd(1.0), det);
// #ifdef ZERO_SINGULAR
// rd = _mm_and_pd(_mm_cmpneq_sd(det,_mm_setzero_pd()), rd);
// #endif
rd = _mm_shuffle_pd(rd,rd,0);
// iB = C*|B| - D*B#*A
dB = _mm_shuffle_pd(dB,dB,0);
iB1 = _mm_sub_pd(_mm_mul_pd(C1, dB), iB1);
iB2 = _mm_sub_pd(_mm_mul_pd(C2, dB), iB2);
d1 = _mm_xor_pd(rd, _Sign_PN);
d2 = _mm_xor_pd(rd, _Sign_NP);
// iC = B*|C| - A*C#*D;
dC = _mm_shuffle_pd(dC,dC,0);
iC1 = _mm_sub_pd(_mm_mul_pd(B1, dC), iC1);
iC2 = _mm_sub_pd(_mm_mul_pd(B2, dC), iC2);
Index res_stride = result.outerStride();
double* res = result.data();
pstoret<double, Packet2d, ResultAlignment>(res+0, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1));
pstoret<double, Packet2d, ResultAlignment>(res+res_stride, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2));
pstoret<double, Packet2d, ResultAlignment>(res+2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1));
pstoret<double, Packet2d, ResultAlignment>(res+res_stride+2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2));
pstoret<double, Packet2d, ResultAlignment>(res+2*res_stride, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1));
pstoret<double, Packet2d, ResultAlignment>(res+3*res_stride, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2));
pstoret<double, Packet2d, ResultAlignment>(res+2*res_stride+2,_mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1));
pstoret<double, Packet2d, ResultAlignment>(res+3*res_stride+2,_mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2));
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_INVERSE_SSE_H
| Unknown |
| 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/SVD/SVDBase.h | .h | 12,752 | 316 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SVDBASE_H
#define EIGEN_SVDBASE_H
namespace Eigen {
/** \ingroup SVD_Module
*
*
* \class SVDBase
*
* \brief Base class of SVD algorithms
*
* \tparam Derived the type of the actual SVD decomposition
*
* SVD decomposition consists in decomposing any n-by-p matrix \a A as a product
* \f[ A = U S V^* \f]
* where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
* the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
* and right \em singular \em vectors of \a A respectively.
*
* Singular values are always sorted in decreasing order.
*
*
* You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
* smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
* singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
* and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
*
* If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
* terminate in finite (and reasonable) time.
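 *
 * A minimal usage sketch (illustrative only; the matrix and the concrete decomposition class are
 * arbitrary choices, and any SVD class deriving from SVDBase works the same way):
 * \code
 * Eigen::MatrixXf A = Eigen::MatrixXf::Random(5, 3);
 * Eigen::JacobiSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
 * Eigen::VectorXf sv = svd.singularValues(); // 3 values, sorted in decreasing order
 * Eigen::MatrixXf U = svd.matrixU();         // 5-by-3 because thin U was requested
 * Eigen::MatrixXf V = svd.matrixV();         // 3-by-3
 * \endcode
 *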
* \sa class BDCSVD, class JacobiSVD
*/
template<typename Derived>
class SVDBase
{
public:
typedef typename internal::traits<Derived>::MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
MatrixOptions = MatrixType::Options
};
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixUType;
typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime> MatrixVType;
typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
Derived& derived() { return *static_cast<Derived*>(this); }
const Derived& derived() const { return *static_cast<const Derived*>(this); }
/** \returns the \a U matrix.
*
* For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
* the U matrix is n-by-n if you asked for \link Eigen::ComputeFullU ComputeFullU \endlink, and is n-by-m if you asked for \link Eigen::ComputeThinU ComputeThinU \endlink.
*
* The \a m first columns of \a U are the left singular vectors of the matrix being decomposed.
*
* This method asserts that you asked for \a U to be computed.
*/
const MatrixUType& matrixU() const
{
eigen_assert(m_isInitialized && "SVD is not initialized.");
eigen_assert(computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
return m_matrixU;
}
/** \returns the \a V matrix.
*
* For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
* the V matrix is p-by-p if you asked for \link Eigen::ComputeFullV ComputeFullV \endlink, and is p-by-m if you asked for \link Eigen::ComputeThinV ComputeThinV \endlink.
*
* The \a m first columns of \a V are the right singular vectors of the matrix being decomposed.
*
* This method asserts that you asked for \a V to be computed.
*/
const MatrixVType& matrixV() const
{
eigen_assert(m_isInitialized && "SVD is not initialized.");
eigen_assert(computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
return m_matrixV;
}
/** \returns the vector of singular values.
*
* For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the
* returned vector has size \a m. Singular values are always sorted in decreasing order.
*/
const SingularValuesType& singularValues() const
{
eigen_assert(m_isInitialized && "SVD is not initialized.");
return m_singularValues;
}
/** \returns the number of singular values that are not exactly 0 */
Index nonzeroSingularValues() const
{
eigen_assert(m_isInitialized && "SVD is not initialized.");
return m_nonzeroSingularValues;
}
/** \returns the rank of the matrix of which \c *this is the SVD.
*
* \note This method has to determine which singular values should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline Index rank() const
{
using std::abs;
eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
if(m_singularValues.size()==0) return 0;
RealScalar premultiplied_threshold = numext::maxi<RealScalar>(m_singularValues.coeff(0) * threshold(), (std::numeric_limits<RealScalar>::min)());
Index i = m_nonzeroSingularValues-1;
while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
return i+1;
}
/** Allows one to prescribe a threshold to be used by certain methods, such as rank() and solve(),
* which need to determine when singular values are to be considered nonzero.
* This is not used for the SVD decomposition itself.
*
* When it needs to get the threshold value, Eigen calls threshold().
 * The default is \c NumTraits<Scalar>::epsilon() times the diagonal size of the matrix (see threshold()).
*
* \param threshold The new value to use as the threshold.
*
 * A singular value will be considered nonzero if it is strictly greater than
 * \f$ threshold \times \vert max singular value \vert \f$.
*
* If you want to come back to the default behavior, call setThreshold(Default_t)
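 *
 * A short illustrative sketch (the matrix and the threshold value are arbitrary):
 * \code
 * Eigen::MatrixXd A = Eigen::MatrixXd::Random(8, 8);
 * Eigen::JacobiSVD<Eigen::MatrixXd> svd(A);
 * svd.setThreshold(1e-8);
 * Eigen::Index r = svd.rank(); // counts singular values larger than 1e-8 * (largest singular value)
 * \endcode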
*/
Derived& setThreshold(const RealScalar& threshold)
{
m_usePrescribedThreshold = true;
m_prescribedThreshold = threshold;
return derived();
}
/** Allows one to come back to the default behavior, letting Eigen use its default formula for
* determining the threshold.
*
* You should pass the special object Eigen::Default as parameter here.
* \code svd.setThreshold(Eigen::Default); \endcode
*
* See the documentation of setThreshold(const RealScalar&).
*/
Derived& setThreshold(Default_t)
{
m_usePrescribedThreshold = false;
return derived();
}
/** Returns the threshold that will be used by certain methods such as rank().
*
* See the documentation of setThreshold(const RealScalar&).
*/
RealScalar threshold() const
{
eigen_assert(m_isInitialized || m_usePrescribedThreshold);
// this temporary is needed to work around an MSVC issue
Index diagSize = (std::max<Index>)(1,m_diagSize);
return m_usePrescribedThreshold ? m_prescribedThreshold
: RealScalar(diagSize)*NumTraits<Scalar>::epsilon();
}
/** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
inline bool computeU() const { return m_computeFullU || m_computeThinU; }
/** \returns true if \a V (full or thin) is asked for in this SVD decomposition */
inline bool computeV() const { return m_computeFullV || m_computeThinV; }
inline Index rows() const { return m_rows; }
inline Index cols() const { return m_cols; }
/** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
*
* \param b the right-hand-side of the equation to solve.
*
* \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V.
*
* \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
* In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
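 *
 * A minimal least-squares sketch (illustrative only; the sizes and the BDCSVD choice are arbitrary):
 * \code
 * Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);
 * Eigen::VectorXd b = Eigen::VectorXd::Random(6);
 * Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
 * Eigen::VectorXd x = svd.solve(b); // minimizes ||A x - b||
 * \endcode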
*/
template<typename Rhs>
inline const Solve<Derived, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "SVD is not initialized.");
eigen_assert(computeU() && computeV() && "SVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
return Solve<Derived, Rhs>(derived(), b.derived());
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
void _solve_impl(const RhsType &rhs, DstType &dst) const;
#endif
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
// return true if already allocated
bool allocate(Index rows, Index cols, unsigned int computationOptions);
MatrixUType m_matrixU;
MatrixVType m_matrixV;
SingularValuesType m_singularValues;
bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold;
bool m_computeFullU, m_computeThinU;
bool m_computeFullV, m_computeThinV;
unsigned int m_computationOptions;
Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
RealScalar m_prescribedThreshold;
/** \brief Default Constructor.
*
* Default constructor of SVDBase
*/
SVDBase()
: m_isInitialized(false),
m_isAllocated(false),
m_usePrescribedThreshold(false),
m_computationOptions(0),
m_rows(-1), m_cols(-1), m_diagSize(0)
{
check_template_parameters();
}
};
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename Derived>
template<typename RhsType, typename DstType>
void SVDBase<Derived>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
eigen_assert(rhs.rows() == rows());
// A = U S V^*
// So A^{-1} = V S^{-1} U^*
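  // Only the leading rank() singular triplets are used below, so singular values below the
  // threshold (see setThreshold()) are treated as exact zeros and the result is the corresponding
  // truncated least-squares solution x = V_r * S_r^{-1} * U_r^* * rhs.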
Matrix<Scalar, Dynamic, RhsType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, RhsType::MaxColsAtCompileTime> tmp;
Index l_rank = rank();
tmp.noalias() = m_matrixU.leftCols(l_rank).adjoint() * rhs;
tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp;
dst = m_matrixV.leftCols(l_rank) * tmp;
}
#endif
template<typename MatrixType>
bool SVDBase<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
{
eigen_assert(rows >= 0 && cols >= 0);
if (m_isAllocated &&
rows == m_rows &&
cols == m_cols &&
computationOptions == m_computationOptions)
{
return true;
}
m_rows = rows;
m_cols = cols;
m_isInitialized = false;
m_isAllocated = true;
m_computationOptions = computationOptions;
m_computeFullU = (computationOptions & ComputeFullU) != 0;
m_computeThinU = (computationOptions & ComputeThinU) != 0;
m_computeFullV = (computationOptions & ComputeFullV) != 0;
m_computeThinV = (computationOptions & ComputeThinV) != 0;
eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U");
eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V");
eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
"SVDBase: thin U and V are only available when your matrix has a dynamic number of columns.");
m_diagSize = (std::min)(m_rows, m_cols);
m_singularValues.resize(m_diagSize);
if(RowsAtCompileTime==Dynamic)
m_matrixU.resize(m_rows, m_computeFullU ? m_rows : m_computeThinU ? m_diagSize : 0);
if(ColsAtCompileTime==Dynamic)
m_matrixV.resize(m_cols, m_computeFullV ? m_cols : m_computeThinV ? m_diagSize : 0);
return false;
}
}// end namespace
#endif // EIGEN_SVDBASE_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SVD/JacobiSVD_LAPACKE.h
|
.h
| 5,099
| 92
|
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to LAPACKe
* Singular Value Decomposition - SVD.
********************************************************************************
*/
#ifndef EIGEN_JACOBISVD_LAPACKE_H
#define EIGEN_JACOBISVD_LAPACKE_H
namespace Eigen {
/** \internal Specialization for the data types supported by LAPACKe */
#define EIGEN_LAPACKE_SVD(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_PREFIX, EIGCOLROW, LAPACKE_COLROW) \
template<> inline \
JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>& \
JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>& matrix, unsigned int computationOptions) \
{ \
typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \
/*typedef MatrixType::Scalar Scalar;*/ \
/*typedef MatrixType::RealScalar RealScalar;*/ \
allocate(matrix.rows(), matrix.cols(), computationOptions); \
\
/*const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();*/ \
m_nonzeroSingularValues = m_diagSize; \
\
lapack_int lda = internal::convert_index<lapack_int>(matrix.outerStride()), ldu, ldvt; \
lapack_int matrix_order = LAPACKE_COLROW; \
char jobu, jobvt; \
LAPACKE_TYPE *u, *vt, dummy; \
jobu = (m_computeFullU) ? 'A' : (m_computeThinU) ? 'S' : 'N'; \
jobvt = (m_computeFullV) ? 'A' : (m_computeThinV) ? 'S' : 'N'; \
if (computeU()) { \
ldu = internal::convert_index<lapack_int>(m_matrixU.outerStride()); \
u = (LAPACKE_TYPE*)m_matrixU.data(); \
} else { ldu=1; u=&dummy; }\
MatrixType localV; \
lapack_int vt_rows = (m_computeFullV) ? internal::convert_index<lapack_int>(m_cols) : (m_computeThinV) ? internal::convert_index<lapack_int>(m_diagSize) : 1; \
if (computeV()) { \
localV.resize(vt_rows, m_cols); \
ldvt = internal::convert_index<lapack_int>(localV.outerStride()); \
vt = (LAPACKE_TYPE*)localV.data(); \
} else { ldvt=1; vt=&dummy; }\
Matrix<LAPACKE_RTYPE, Dynamic, Dynamic> superb; superb.resize(m_diagSize, 1); \
MatrixType m_temp; m_temp = matrix; \
LAPACKE_##LAPACKE_PREFIX##gesvd( matrix_order, jobu, jobvt, internal::convert_index<lapack_int>(m_rows), internal::convert_index<lapack_int>(m_cols), (LAPACKE_TYPE*)m_temp.data(), lda, (LAPACKE_RTYPE*)m_singularValues.data(), u, ldu, vt, ldvt, superb.data()); \
if (computeV()) m_matrixV = localV.adjoint(); \
/* for(int i=0;i<m_diagSize;i++) if (m_singularValues.coeffRef(i) < precision) { m_nonzeroSingularValues--; m_singularValues.coeffRef(i)=RealScalar(0);}*/ \
m_isInitialized = true; \
return *this; \
}
EIGEN_LAPACKE_SVD(double, double, double, d, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(float, float, float , s, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(scomplex, lapack_complex_float, float , c, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(double, double, double, d, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SVD(float, float, float , s, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SVD(scomplex, lapack_complex_float, float , c, RowMajor, LAPACK_ROW_MAJOR)
} // end namespace Eigen
#endif // EIGEN_JACOBISVD_LAPACKE_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SVD/BDCSVD.h
|
.h
| 49,870
| 1,278
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// We used the "A Divide-And-Conquer Algorithm for the Bidiagonal SVD"
// research report written by Ming Gu and Stanley C. Eisenstat
// The code variable names correspond to the names they used in their
// report
//
// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
// Copyright (C) 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>
// Copyright (C) 2014-2017 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_BDCSVD_H
#define EIGEN_BDCSVD_H
// #define EIGEN_BDCSVD_DEBUG_VERBOSE
// #define EIGEN_BDCSVD_SANITY_CHECKS
namespace Eigen {
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
IOFormat bdcsvdfmt(8, 0, ", ", "\n", " [", "]");
#endif
template<typename _MatrixType> class BDCSVD;
namespace internal {
template<typename _MatrixType>
struct traits<BDCSVD<_MatrixType> >
{
typedef _MatrixType MatrixType;
};
} // end namespace internal
/** \ingroup SVD_Module
*
*
* \class BDCSVD
*
* \brief class Bidiagonal Divide and Conquer SVD
*
* \tparam _MatrixType the type of the matrix of which we are computing the SVD decomposition
*
* This class first reduces the input matrix to bi-diagonal form using class UpperBidiagonalization,
* and then performs a divide-and-conquer diagonalization. Small blocks are diagonalized using class JacobiSVD.
 * You can control the switching size with the setSwitchSize() method; the default is 16.
 * For small matrices (size < 16), it is thus preferable to use JacobiSVD directly. For larger ones, BDCSVD is highly
 * recommended and can be several orders of magnitude faster.
*
 * \warning this algorithm is unlikely to provide accurate results when compiled with unsafe math optimizations.
 * For instance, this concerns Intel's compiler (ICC), which performs such optimizations by default unless
* you compile with the \c -fp-model \c precise option. Likewise, the \c -ffast-math option of GCC or clang will
* significantly degrade the accuracy.
*
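 * A minimal usage sketch (illustrative only, with an arbitrary dynamic-size real matrix):
 * \code
 * Eigen::MatrixXd A = Eigen::MatrixXd::Random(1000, 300);
 * Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
 * Eigen::VectorXd sv = svd.singularValues(); // 300 values, sorted in decreasing order
 * \endcode
 *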
* \sa class JacobiSVD
*/
template<typename _MatrixType>
class BDCSVD : public SVDBase<BDCSVD<_MatrixType> >
{
typedef SVDBase<BDCSVD> Base;
public:
using Base::rows;
using Base::cols;
using Base::computeU;
using Base::computeV;
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename NumTraits<RealScalar>::Literal Literal;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime),
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime),
MatrixOptions = MatrixType::Options
};
typedef typename Base::MatrixUType MatrixUType;
typedef typename Base::MatrixVType MatrixVType;
typedef typename Base::SingularValuesType SingularValuesType;
typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> MatrixX;
typedef Matrix<RealScalar, Dynamic, Dynamic, ColMajor> MatrixXr;
typedef Matrix<RealScalar, Dynamic, 1> VectorType;
typedef Array<RealScalar, Dynamic, 1> ArrayXr;
typedef Array<Index,1,Dynamic> ArrayXi;
typedef Ref<ArrayXr> ArrayRef;
typedef Ref<ArrayXi> IndicesRef;
/** \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via BDCSVD::compute(const MatrixType&).
*/
BDCSVD() : m_algoswap(16), m_numIters(0)
{}
/** \brief Default Constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem size.
* \sa BDCSVD()
*/
BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)
: m_algoswap(16), m_numIters(0)
{
allocate(rows, cols, computationOptions);
}
/** \brief Constructor performing the decomposition of given matrix.
*
* \param matrix the matrix to decompose
* \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
 * By default, none is computed. This is a bit-field; the possible bits are #ComputeFullU, #ComputeThinU,
* #ComputeFullV, #ComputeThinV.
*
 * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
 * available with the (non-default) FullPivHouseholderQR preconditioner.
*/
BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
: m_algoswap(16), m_numIters(0)
{
compute(matrix, computationOptions);
}
~BDCSVD()
{
}
/** \brief Method performing the decomposition of given matrix using custom options.
*
* \param matrix the matrix to decompose
* \param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.
 * By default, none is computed. This is a bit-field; the possible bits are #ComputeFullU, #ComputeThinU,
* #ComputeFullV, #ComputeThinV.
*
 * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
 * available with the (non-default) FullPivHouseholderQR preconditioner.
*/
BDCSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
/** \brief Method performing the decomposition of given matrix using current options.
*
* \param matrix the matrix to decompose
*
* This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
*/
BDCSVD& compute(const MatrixType& matrix)
{
return compute(matrix, this->m_computationOptions);
}
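  /** \brief Sets the block size below which the divide-and-conquer recursion falls back to JacobiSVD.
   *
   * The default is 16, and the assertion inside requires \c s to be greater than 3.
   * A hedged usage sketch (assuming \c svd is an existing BDCSVD object):
   * \code svd.setSwitchSize(32); \endcode
   */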
void setSwitchSize(int s)
{
eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be greater than 3");
m_algoswap = s;
}
private:
void allocate(Index rows, Index cols, unsigned int computationOptions);
void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift);
void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V);
void computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, VectorType& singVals, ArrayRef shifts, ArrayRef mus);
void perturbCol0(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat);
void computeSingVecs(const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V);
void deflation43(Index firstCol, Index shift, Index i, Index size);
void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size);
void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift);
template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
void copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naivev);
void structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1);
static RealScalar secularEq(RealScalar x, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift);
protected:
MatrixXr m_naiveU, m_naiveV;
MatrixXr m_computed;
Index m_nRec;
ArrayXr m_workspace;
ArrayXi m_workspaceI;
int m_algoswap;
bool m_isTranspose, m_compU, m_compV;
using Base::m_singularValues;
using Base::m_diagSize;
using Base::m_computeFullU;
using Base::m_computeFullV;
using Base::m_computeThinU;
using Base::m_computeThinV;
using Base::m_matrixU;
using Base::m_matrixV;
using Base::m_isInitialized;
using Base::m_nonzeroSingularValues;
public:
int m_numIters;
}; //end class BDCSVD
// Method to allocate and initialize matrix and attributes
template<typename MatrixType>
void BDCSVD<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
{
m_isTranspose = (cols > rows);
if (Base::allocate(rows, cols, computationOptions))
return;
m_computed = MatrixXr::Zero(m_diagSize + 1, m_diagSize );
m_compU = computeV();
m_compV = computeU();
if (m_isTranspose)
std::swap(m_compU, m_compV);
if (m_compU) m_naiveU = MatrixXr::Zero(m_diagSize + 1, m_diagSize + 1 );
else m_naiveU = MatrixXr::Zero(2, m_diagSize + 1 );
if (m_compV) m_naiveV = MatrixXr::Zero(m_diagSize, m_diagSize);
m_workspace.resize((m_diagSize+1)*(m_diagSize+1)*3);
m_workspaceI.resize(3*m_diagSize);
}// end allocate
template<typename MatrixType>
BDCSVD<MatrixType>& BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "\n\n\n======================================================================================================================\n\n\n";
#endif
allocate(matrix.rows(), matrix.cols(), computationOptions);
using std::abs;
const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
//**** step -1 - If the problem is too small, directly fall back to JacobiSVD and return
if(matrix.cols() < m_algoswap)
{
// FIXME this line involves temporaries
JacobiSVD<MatrixType> jsvd(matrix,computationOptions);
if(computeU()) m_matrixU = jsvd.matrixU();
if(computeV()) m_matrixV = jsvd.matrixV();
m_singularValues = jsvd.singularValues();
m_nonzeroSingularValues = jsvd.nonzeroSingularValues();
m_isInitialized = true;
return *this;
}
//**** step 0 - Copy the input matrix and apply scaling to reduce over/under-flows
RealScalar scale = matrix.cwiseAbs().maxCoeff();
if(scale==Literal(0)) scale = Literal(1);
MatrixX copy;
if (m_isTranspose) copy = matrix.adjoint()/scale;
else copy = matrix/scale;
//**** step 1 - Bidiagonalization
// FIXME this line involves temporaries
internal::UpperBidiagonalization<MatrixX> bid(copy);
//**** step 2 - Divide & Conquer
m_naiveU.setZero();
m_naiveV.setZero();
// FIXME this line involves a temporary matrix
m_computed.topRows(m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose();
m_computed.template bottomRows<1>().setZero();
divide(0, m_diagSize - 1, 0, 0, 0);
//**** step 3 - Copy singular values and vectors
for (int i=0; i<m_diagSize; i++)
{
RealScalar a = abs(m_computed.coeff(i, i));
m_singularValues.coeffRef(i) = a * scale;
if (a<considerZero)
{
m_nonzeroSingularValues = i;
m_singularValues.tail(m_diagSize - i - 1).setZero();
break;
}
else if (i == m_diagSize - 1)
{
m_nonzeroSingularValues = i + 1;
break;
}
}
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
// std::cout << "m_naiveU\n" << m_naiveU << "\n\n";
// std::cout << "m_naiveV\n" << m_naiveV << "\n\n";
#endif
if(m_isTranspose) copyUV(bid.householderV(), bid.householderU(), m_naiveV, m_naiveU);
else copyUV(bid.householderU(), bid.householderV(), m_naiveU, m_naiveV);
m_isInitialized = true;
return *this;
}// end compute
template<typename MatrixType>
template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>
void BDCSVD<MatrixType>::copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naiveV)
{
// Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa
if (computeU())
{
Index Ucols = m_computeThinU ? m_diagSize : householderU.cols();
m_matrixU = MatrixX::Identity(householderU.cols(), Ucols);
m_matrixU.topLeftCorner(m_diagSize, m_diagSize) = naiveV.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
householderU.applyThisOnTheLeft(m_matrixU); // FIXME this line involves a temporary buffer
}
if (computeV())
{
Index Vcols = m_computeThinV ? m_diagSize : householderV.cols();
m_matrixV = MatrixX::Identity(householderV.cols(), Vcols);
m_matrixV.topLeftCorner(m_diagSize, m_diagSize) = naiveU.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);
householderV.applyThisOnTheLeft(m_matrixV); // FIXME this line involves a temporary buffer
}
}
/** \internal
* Performs A = A * B exploiting the special structure of the matrix A. Splitting A as:
* A = [A1]
* [A2]
* such that A1.rows()==n1, then we assume that at least half of the columns of A1 and A2 are zeros.
 * We can thus pack them prior to the matrix product. However, this is only worth the effort if the matrix is large
* enough.
*/
template<typename MatrixType>
void BDCSVD<MatrixType>::structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1)
{
Index n = A.rows();
if(n>100)
{
// If the matrices are large enough, let's exploit the sparse structure of A by
// splitting it in half (wrt n1), and packing the non-zero columns.
Index n2 = n - n1;
Map<MatrixXr> A1(m_workspace.data() , n1, n);
Map<MatrixXr> A2(m_workspace.data()+ n1*n, n2, n);
Map<MatrixXr> B1(m_workspace.data()+ n*n, n, n);
Map<MatrixXr> B2(m_workspace.data()+2*n*n, n, n);
Index k1=0, k2=0;
for(Index j=0; j<n; ++j)
{
if( (A.col(j).head(n1).array()!=Literal(0)).any() )
{
A1.col(k1) = A.col(j).head(n1);
B1.row(k1) = B.row(j);
++k1;
}
if( (A.col(j).tail(n2).array()!=Literal(0)).any() )
{
A2.col(k2) = A.col(j).tail(n2);
B2.row(k2) = B.row(j);
++k2;
}
}
A.topRows(n1).noalias() = A1.leftCols(k1) * B1.topRows(k1);
A.bottomRows(n2).noalias() = A2.leftCols(k2) * B2.topRows(k2);
}
else
{
Map<MatrixXr,Aligned> tmp(m_workspace.data(),n,n);
tmp.noalias() = A*B;
A = tmp;
}
}
// The divide algorithm is done "in place", we are always working on subsets of the same matrix. The divide methods takes as argument the
// place of the submatrix we are currently working on.
//@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU;
//@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU;
// lastCol + 1 - firstCol is the size of the submatrix.
//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W)
//@param firstColW : Same as firstRowW, but for columns.
//@param shift : Each time one takes the left submatrix, one must add 1 to the shift, because we actually want the last column of the U submatrix
// to become the first column (*coeff) and to shift all the other columns to the right. More details can be found in the reference paper.
template<typename MatrixType>
void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift)
{
// requires rows = cols + 1;
using std::pow;
using std::sqrt;
using std::abs;
const Index n = lastCol - firstCol + 1;
const Index k = n/2;
const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
RealScalar alphaK;
RealScalar betaK;
RealScalar r0;
RealScalar lambda, phi, c0, s0;
VectorType l, f;
// We use the other algorithm which is more efficient for small
// matrices.
if (n < m_algoswap)
{
// FIXME this line involves temporaries
JacobiSVD<MatrixXr> b(m_computed.block(firstCol, firstCol, n + 1, n), ComputeFullU | (m_compV ? ComputeFullV : 0));
if (m_compU)
m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() = b.matrixU();
else
{
m_naiveU.row(0).segment(firstCol, n + 1).real() = b.matrixU().row(0);
m_naiveU.row(1).segment(firstCol, n + 1).real() = b.matrixU().row(n);
}
if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n).real() = b.matrixV();
m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();
m_computed.diagonal().segment(firstCol + shift, n) = b.singularValues().head(n);
return;
}
// We use the divide and conquer algorithm
alphaK = m_computed(firstCol + k, firstCol + k);
betaK = m_computed(firstCol + k + 1, firstCol + k);
// The divide must be done in this order to get correct results: divide() changes the data inside the submatrices,
// and the divide of the right submatrix reads one column of the left submatrix. That's why we need to treat the
// right submatrix before the left one.
divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);
divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);
if (m_compU)
{
lambda = m_naiveU(firstCol + k, firstCol + k);
phi = m_naiveU(firstCol + k + 1, lastCol + 1);
}
else
{
lambda = m_naiveU(1, firstCol + k);
phi = m_naiveU(0, lastCol + 1);
}
r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda)) + abs(betaK * phi) * abs(betaK * phi));
if (m_compU)
{
l = m_naiveU.row(firstCol + k).segment(firstCol, k);
f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1);
}
else
{
l = m_naiveU.row(1).segment(firstCol, k);
f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1);
}
if (m_compV) m_naiveV(firstRowW+k, firstColW) = Literal(1);
if (r0<considerZero)
{
c0 = Literal(1);
s0 = Literal(0);
}
else
{
c0 = alphaK * lambda / r0;
s0 = betaK * phi / r0;
}
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(m_naiveU.allFinite());
assert(m_naiveV.allFinite());
assert(m_computed.allFinite());
#endif
if (m_compU)
{
MatrixXr q1 (m_naiveU.col(firstCol + k).segment(firstCol, k + 1));
// we shift Q1 to the right
for (Index i = firstCol + k - 1; i >= firstCol; i--)
m_naiveU.col(i + 1).segment(firstCol, k + 1) = m_naiveU.col(i).segment(firstCol, k + 1);
// we put q1 at the left, scaled by a factor c0
m_naiveU.col(firstCol).segment( firstCol, k + 1) = (q1 * c0);
// last column = q1 * - s0
m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) = (q1 * ( - s0));
// first column = q2 * s0
m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) = m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) * s0;
// q2 *= c0
m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0;
}
else
{
RealScalar q1 = m_naiveU(0, firstCol + k);
// we shift Q1 to the right
for (Index i = firstCol + k - 1; i >= firstCol; i--)
m_naiveU(0, i + 1) = m_naiveU(0, i);
// we put q1 at the left, scaled by a factor c0
m_naiveU(0, firstCol) = (q1 * c0);
// last column = q1 * - s0
m_naiveU(0, lastCol + 1) = (q1 * ( - s0));
// first column = q2 * s0
m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0;
// q2 *= c0
m_naiveU(1, lastCol + 1) *= c0;
m_naiveU.row(1).segment(firstCol + 1, k).setZero();
m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero();
}
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(m_naiveU.allFinite());
assert(m_naiveV.allFinite());
assert(m_computed.allFinite());
#endif
m_computed(firstCol + shift, firstCol + shift) = r0;
m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) = alphaK * l.transpose().real();
m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) = betaK * f.transpose().real();
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
ArrayXr tmp1 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();
#endif
// Second part: try to deflate singular values in combined matrix
deflation(firstCol, lastCol, k, firstRowW, firstColW, shift);
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
ArrayXr tmp2 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();
std::cout << "\n\nj1 = " << tmp1.transpose().format(bdcsvdfmt) << "\n";
std::cout << "j2 = " << tmp2.transpose().format(bdcsvdfmt) << "\n\n";
std::cout << "err: " << ((tmp1-tmp2).abs()>1e-12*tmp2.abs()).transpose() << "\n";
static int count = 0;
std::cout << "# " << ++count << "\n\n";
assert((tmp1-tmp2).matrix().norm() < 1e-14*tmp2.matrix().norm());
// assert(count<681);
// assert(((tmp1-tmp2).abs()<1e-13*tmp2.abs()).all());
#endif
// Third part: compute SVD of combined matrix
MatrixXr UofSVD, VofSVD;
VectorType singVals;
computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD);
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(UofSVD.allFinite());
assert(VofSVD.allFinite());
#endif
if (m_compU)
structured_update(m_naiveU.block(firstCol, firstCol, n + 1, n + 1), UofSVD, (n+2)/2);
else
{
Map<Matrix<RealScalar,2,Dynamic>,Aligned> tmp(m_workspace.data(),2,n+1);
tmp.noalias() = m_naiveU.middleCols(firstCol, n+1) * UofSVD;
m_naiveU.middleCols(firstCol, n + 1) = tmp;
}
if (m_compV) structured_update(m_naiveV.block(firstRowW, firstColW, n, n), VofSVD, (n+1)/2);
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(m_naiveU.allFinite());
assert(m_naiveV.allFinite());
assert(m_computed.allFinite());
#endif
m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero();
m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals;
}// end divide
// Compute SVD of m_computed.block(firstCol, firstCol, n + 1, n); this block only has non-zeros in
// the first column and on the diagonal and has undergone deflation, so the diagonal is in increasing
// order except for possibly the (0,0) entry. The computed SVD is stored in U, singVals and V, except
// that if m_compV is false, then V is not computed. Singular values are sorted in decreasing order.
//
// TODO Opportunities for optimization: better root finding algo, better stopping criterion, better
// handling of round-off errors, be consistent in ordering
// For instance, to solve the secular equation using FMM, see http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
template <typename MatrixType>
void BDCSVD<MatrixType>::computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V)
{
const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
using std::abs;
ArrayRef col0 = m_computed.col(firstCol).segment(firstCol, n);
m_workspace.head(n) = m_computed.block(firstCol, firstCol, n, n).diagonal();
ArrayRef diag = m_workspace.head(n);
diag(0) = Literal(0);
// Allocate space for singular values and vectors
singVals.resize(n);
U.resize(n+1, n+1);
if (m_compV) V.resize(n, n);
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
if (col0.hasNaN() || diag.hasNaN())
std::cout << "\n\nHAS NAN\n\n";
#endif
// Many singular values might have been deflated, the zero ones have been moved to the end,
// but others are interleaved and we must ignore them at this stage.
// To this end, let's compute a permutation skipping them:
Index actual_n = n;
while(actual_n>1 && diag(actual_n-1)==Literal(0)) --actual_n;
Index m = 0; // size of the deflated problem
for(Index k=0;k<actual_n;++k)
if(abs(col0(k))>considerZero)
m_workspaceI(m++) = k;
Map<ArrayXi> perm(m_workspaceI.data(),m);
Map<ArrayXr> shifts(m_workspace.data()+1*n, n);
Map<ArrayXr> mus(m_workspace.data()+2*n, n);
Map<ArrayXr> zhat(m_workspace.data()+3*n, n);
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "computeSVDofM using:\n";
std::cout << " z: " << col0.transpose() << "\n";
std::cout << " d: " << diag.transpose() << "\n";
#endif
// Compute singVals, shifts, and mus
computeSingVals(col0, diag, perm, singVals, shifts, mus);
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << " j: " << (m_computed.block(firstCol, firstCol, n, n)).jacobiSvd().singularValues().transpose().reverse() << "\n\n";
std::cout << " sing-val: " << singVals.transpose() << "\n";
std::cout << " mu: " << mus.transpose() << "\n";
std::cout << " shift: " << shifts.transpose() << "\n";
{
Index actual_n = n;
while(actual_n>1 && abs(col0(actual_n-1))<considerZero) --actual_n;
std::cout << "\n\n mus: " << mus.head(actual_n).transpose() << "\n\n";
std::cout << " check1 (expect0) : " << ((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n).transpose() << "\n\n";
std::cout << " check2 (>0) : " << ((singVals.array()-diag) / singVals.array()).head(actual_n).transpose() << "\n\n";
std::cout << " check3 (>0) : " << ((diag.segment(1,actual_n-1)-singVals.head(actual_n-1).array()) / singVals.head(actual_n-1).array()).transpose() << "\n\n\n";
std::cout << " check4 (>0) : " << ((singVals.segment(1,actual_n-1)-singVals.head(actual_n-1))).transpose() << "\n\n\n";
}
#endif
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(singVals.allFinite());
assert(mus.allFinite());
assert(shifts.allFinite());
#endif
// Compute zhat
perturbCol0(col0, diag, perm, singVals, shifts, mus, zhat);
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << " zhat: " << zhat.transpose() << "\n";
#endif
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(zhat.allFinite());
#endif
computeSingVecs(zhat, diag, perm, singVals, shifts, mus, U, V);
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "U^T U: " << (U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() << "\n";
std::cout << "V^T V: " << (V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() << "\n";
#endif
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(U.allFinite());
assert(V.allFinite());
assert((U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() < 1e-14 * n);
assert((V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() < 1e-14 * n);
assert(m_naiveU.allFinite());
assert(m_naiveV.allFinite());
assert(m_computed.allFinite());
#endif
// Because of deflation, the singular values might not be completely sorted.
// Fortunately, reordering them is an O(n) problem
for(Index i=0; i<actual_n-1; ++i)
{
if(singVals(i)>singVals(i+1))
{
using std::swap;
swap(singVals(i),singVals(i+1));
U.col(i).swap(U.col(i+1));
if(m_compV) V.col(i).swap(V.col(i+1));
}
}
// Reverse the order so that the singular values are in decreasing order
// Because of deflation, the zero singular values are already at the end
singVals.head(actual_n).reverseInPlace();
U.leftCols(actual_n).rowwise().reverseInPlace();
if (m_compV) V.leftCols(actual_n).rowwise().reverseInPlace();
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
JacobiSVD<MatrixXr> jsvd(m_computed.block(firstCol, firstCol, n, n) );
std::cout << " * j: " << jsvd.singularValues().transpose() << "\n\n";
std::cout << " * sing-val: " << singVals.transpose() << "\n";
// std::cout << " * err: " << ((jsvd.singularValues()-singVals)>1e-13*singVals.norm()).transpose() << "\n";
#endif
}
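// The secular function evaluated by secularEq() below can be written, with sigma = shift + mu,
//   f(sigma) = 1 + sum_j z_j^2 / ((d_j - sigma) * (d_j + sigma)),
// where z = col0, d = diag and the sum runs over the non-deflated indices stored in perm.
// The singular values of the combined matrix are the roots of f (see the Gu--Eisenstat report).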
template <typename MatrixType>
typename BDCSVD<MatrixType>::RealScalar BDCSVD<MatrixType>::secularEq(RealScalar mu, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift)
{
Index m = perm.size();
RealScalar res = Literal(1);
for(Index i=0; i<m; ++i)
{
Index j = perm(i);
// The following expression could be rewritten to involve only a single division,
// but this would make the expression more sensitive to overflow.
res += (col0(j) / (diagShifted(j) - mu)) * (col0(j) / (diag(j) + shift + mu));
}
return res;
}
template <typename MatrixType>
void BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm,
VectorType& singVals, ArrayRef shifts, ArrayRef mus)
{
using std::abs;
using std::swap;
using std::sqrt;
Index n = col0.size();
Index actual_n = n;
// Note that here actual_n is computed based on col0(i)==0 instead of diag(i)==0 as above
// because 1) we have diag(i)==0 => col0(i)==0 and 2) if col0(i)==0, then diag(i) is already a singular value.
while(actual_n>1 && col0(actual_n-1)==Literal(0)) --actual_n;
for (Index k = 0; k < n; ++k)
{
if (col0(k) == Literal(0) || actual_n==1)
{
// if col0(k) == 0, then entry is deflated, so singular value is on diagonal
// if actual_n==1, then the deflated problem is already diagonalized
singVals(k) = k==0 ? col0(0) : diag(k);
mus(k) = Literal(0);
shifts(k) = k==0 ? col0(0) : diag(k);
continue;
}
// otherwise, use secular equation to find singular value
RealScalar left = diag(k);
RealScalar right; // was: = (k != actual_n-1) ? diag(k+1) : (diag(actual_n-1) + col0.matrix().norm());
if(k==actual_n-1)
right = (diag(actual_n-1) + col0.matrix().norm());
else
{
// Skip deflated singular values,
// recall that at this stage we assume that z[j]!=0 and all entries for which z[j]==0 have been put aside.
// This should be equivalent to using perm[]
Index l = k+1;
while(col0(l)==Literal(0)) { ++l; eigen_internal_assert(l<actual_n); }
right = diag(l);
}
// first decide whether it's closer to the left end or the right end
RealScalar mid = left + (right-left) / Literal(2);
RealScalar fMid = secularEq(mid, col0, diag, perm, diag, Literal(0));
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << right-left << "\n";
std::cout << "fMid = " << fMid << " " << secularEq(mid-left, col0, diag, perm, diag-left, left) << " " << secularEq(mid-right, col0, diag, perm, diag-right, right) << "\n";
std::cout << " = " << secularEq(0.1*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.2*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.3*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.4*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.49*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.5*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.51*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.6*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.7*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.8*(left+right), col0, diag, perm, diag, 0)
<< " " << secularEq(0.9*(left+right), col0, diag, perm, diag, 0) << "\n";
#endif
RealScalar shift = (k == actual_n-1 || fMid > Literal(0)) ? left : right;
// measure everything relative to shift
Map<ArrayXr> diagShifted(m_workspace.data()+4*n, n);
diagShifted = diag - shift;
if(k!=actual_n-1)
{
// check that after the shift, f(mid) is still negative:
RealScalar midShifted = (right - left) / RealScalar(2);
if(shift==right)
midShifted = -midShifted;
RealScalar fMidShifted = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
if(fMidShifted>0)
{
// fMid was erroneous, fix it:
shift = fMidShifted > Literal(0) ? left : right;
diagShifted = diag - shift;
}
}
// initial guess
RealScalar muPrev, muCur;
if (shift == left)
{
muPrev = (right - left) * RealScalar(0.1);
if (k == actual_n-1) muCur = right - left;
else muCur = (right - left) * RealScalar(0.5);
}
else
{
muPrev = -(right - left) * RealScalar(0.1);
muCur = -(right - left) * RealScalar(0.5);
}
RealScalar fPrev = secularEq(muPrev, col0, diag, perm, diagShifted, shift);
RealScalar fCur = secularEq(muCur, col0, diag, perm, diagShifted, shift);
if (abs(fPrev) < abs(fCur))
{
swap(fPrev, fCur);
swap(muPrev, muCur);
}
// rational interpolation: fit a function of the form a / mu + b through the two previous
// iterates and use its zero to compute the next iterate
bool useBisection = fPrev*fCur>Literal(0);
while (fCur!=Literal(0) && abs(muCur - muPrev) > Literal(8) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(muCur), abs(muPrev)) && abs(fCur - fPrev)>NumTraits<RealScalar>::epsilon() && !useBisection)
{
++m_numIters;
// Find a and b such that the function f(mu) = a / mu + b matches the current and previous samples.
RealScalar a = (fCur - fPrev) / (Literal(1)/muCur - Literal(1)/muPrev);
RealScalar b = fCur - a / muCur;
// And find mu such that f(mu)==0:
RealScalar muZero = -a/b;
RealScalar fZero = secularEq(muZero, col0, diag, perm, diagShifted, shift);
muPrev = muCur;
fPrev = fCur;
muCur = muZero;
fCur = fZero;
if (shift == left && (muCur < Literal(0) || muCur > right - left)) useBisection = true;
if (shift == right && (muCur < -(right - left) || muCur > Literal(0))) useBisection = true;
if (abs(fCur)>abs(fPrev)) useBisection = true;
}
// fall back on bisection method if rational interpolation did not work
if (useBisection)
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "useBisection for k = " << k << ", actual_n = " << actual_n << "\n";
#endif
RealScalar leftShifted, rightShifted;
if (shift == left)
{
// to avoid overflow, we must have mu > max(real_min, |z(k)|/sqrt(real_max)),
// the factor 2 is to be more conservative
leftShifted = numext::maxi<RealScalar>( (std::numeric_limits<RealScalar>::min)(), Literal(2) * abs(col0(k)) / sqrt((std::numeric_limits<RealScalar>::max)()) );
// check that we did it right:
eigen_internal_assert( (numext::isfinite)( (col0(k)/leftShifted)*(col0(k)/(diag(k)+shift+leftShifted)) ) );
// I don't understand why the case k==0 would be special there:
// if (k == 0) rightShifted = right - left; else
rightShifted = (k==actual_n-1) ? right : ((right - left) * RealScalar(0.51)); // theoretically we can take 0.5, but let's be safe
}
else
{
leftShifted = -(right - left) * RealScalar(0.51);
if(k+1<n)
rightShifted = -numext::maxi<RealScalar>( (std::numeric_limits<RealScalar>::min)(), abs(col0(k+1)) / sqrt((std::numeric_limits<RealScalar>::max)()) );
else
rightShifted = -(std::numeric_limits<RealScalar>::min)();
}
RealScalar fLeft = secularEq(leftShifted, col0, diag, perm, diagShifted, shift);
eigen_internal_assert(fLeft<Literal(0));
#if defined EIGEN_INTERNAL_DEBUGGING || defined EIGEN_BDCSVD_DEBUG_VERBOSE
RealScalar fRight = secularEq(rightShifted, col0, diag, perm, diagShifted, shift);
#endif
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
if(!(fLeft * fRight<0))
{
std::cout << "fLeft: " << leftShifted << " - " << diagShifted.head(10).transpose() << "\n ; " << bool(left==shift) << " " << (left-shift) << "\n";
std::cout << k << " : " << fLeft << " * " << fRight << " == " << fLeft * fRight << " ; " << left << " - " << right << " -> " << leftShifted << " " << rightShifted << " shift=" << shift << "\n";
}
#endif
eigen_internal_assert(fLeft * fRight < Literal(0));
if(fLeft<Literal(0))
{
while (rightShifted - leftShifted > Literal(2) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(leftShifted), abs(rightShifted)))
{
RealScalar midShifted = (leftShifted + rightShifted) / Literal(2);
fMid = secularEq(midShifted, col0, diag, perm, diagShifted, shift);
eigen_internal_assert((numext::isfinite)(fMid));
if (fLeft * fMid < Literal(0))
{
rightShifted = midShifted;
}
else
{
leftShifted = midShifted;
fLeft = fMid;
}
}
muCur = (leftShifted + rightShifted) / Literal(2);
}
else
{
        // We have a problem as shifting on the left or right gives either a positive or a negative value
        // at the middle of [left,right]...
        // Instead of aborting or entering an infinite loop,
// let's just use the middle as the estimated zero-crossing:
muCur = (right - left) * RealScalar(0.5);
if(shift == right)
muCur = -muCur;
}
}
singVals[k] = shift + muCur;
shifts[k] = shift;
mus[k] = muCur;
// perturb singular value slightly if it equals diagonal entry to avoid division by zero later
    // (deflation is supposed to prevent this from happening)
    // - this does not seem to be necessary anymore -
// if (singVals[k] == left) singVals[k] *= 1 + NumTraits<RealScalar>::epsilon();
// if (singVals[k] == right) singVals[k] *= 1 - NumTraits<RealScalar>::epsilon();
}
}
// zhat is perturbation of col0 for which singular vectors can be computed stably (see Section 3.1)
template <typename MatrixType>
void BDCSVD<MatrixType>::perturbCol0
(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals,
const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat)
{
using std::sqrt;
Index n = col0.size();
Index m = perm.size();
if(m==0)
{
zhat.setZero();
return;
}
Index last = perm(m-1);
// The offset makes it possible to skip deflated entries while computing zhat
for (Index k = 0; k < n; ++k)
{
if (col0(k) == Literal(0)) // deflated
zhat(k) = Literal(0);
else
{
// see equation (3.6)
RealScalar dk = diag(k);
RealScalar prod = (singVals(last) + dk) * (mus(last) + (shifts(last) - dk));
for(Index l = 0; l<m; ++l)
{
Index i = perm(l);
if(i!=k)
{
Index j = i<k ? i : perm(l-1);
prod *= ((singVals(j)+dk) / ((diag(i)+dk))) * ((mus(j)+(shifts(j)-dk)) / ((diag(i)-dk)));
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
if(i!=k && numext::abs(((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) - 1) > 0.9 )
std::cout << " " << ((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) << " == (" << (singVals(j)+dk) << " * " << (mus(j)+(shifts(j)-dk))
<< ") / (" << (diag(i)+dk) << " * " << (diag(i)-dk) << ")\n";
#endif
}
}
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "zhat(" << k << ") = sqrt( " << prod << ") ; " << (singVals(last) + dk) << " * " << mus(last) + shifts(last) << " - " << dk << "\n";
#endif
RealScalar tmp = sqrt(prod);
zhat(k) = col0(k) > Literal(0) ? RealScalar(tmp) : RealScalar(-tmp);
}
}
}
// compute singular vectors
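// For each retained singular value sigma_k = shifts(k) + mus(k), the (unnormalized) entries
// computed below are, restricted to the non-deflated indices listed in perm,
//   U(i,k) ~ zhat(i) / ((d_i - sigma_k) * (d_i + sigma_k))
//   V(i,k) ~ d_i * zhat(i) / ((d_i - sigma_k) * (d_i + sigma_k)),  with V(0,k) = -1,
// after which each column is normalized.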
template <typename MatrixType>
void BDCSVD<MatrixType>::computeSingVecs
(const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals,
const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V)
{
Index n = zhat.size();
Index m = perm.size();
for (Index k = 0; k < n; ++k)
{
if (zhat(k) == Literal(0))
{
U.col(k) = VectorType::Unit(n+1, k);
if (m_compV) V.col(k) = VectorType::Unit(n, k);
}
else
{
U.col(k).setZero();
for(Index l=0;l<m;++l)
{
Index i = perm(l);
U(i,k) = zhat(i)/(((diag(i) - shifts(k)) - mus(k)) )/( (diag(i) + singVals[k]));
}
U(n,k) = Literal(0);
U.col(k).normalize();
if (m_compV)
{
V.col(k).setZero();
for(Index l=1;l<m;++l)
{
Index i = perm(l);
V(i,k) = diag(i) * zhat(i) / (((diag(i) - shifts(k)) - mus(k)) )/( (diag(i) + singVals[k]));
}
V(0,k) = Literal(-1);
V.col(k).normalize();
}
}
}
U.col(n) = VectorType::Unit(n+1, n);
}
// page 12-13
// i >= 1, di is almost null and zi is non-null.
// We apply a rotation on the left of M to zero out zi
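// Writing c = M(0,0) and s = M(i,0) = z_i for the current block: r = hypot(c, s) becomes the new
// M(0,0), z_i and the negligible d_i are explicitly set to zero, and the corresponding Givens
// rotation J(c/r, -s/r) is applied to the columns of m_naiveU so that U stays consistent.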
template <typename MatrixType>
void BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index size)
{
using std::abs;
using std::sqrt;
using std::pow;
Index start = firstCol + shift;
RealScalar c = m_computed(start, start);
RealScalar s = m_computed(start+i, start);
RealScalar r = numext::hypot(c,s);
if (r == Literal(0))
{
m_computed(start+i, start+i) = Literal(0);
return;
}
m_computed(start,start) = r;
m_computed(start+i, start) = Literal(0);
m_computed(start+i, start+i) = Literal(0);
JacobiRotation<RealScalar> J(c/r,-s/r);
if (m_compU) m_naiveU.middleRows(firstCol, size+1).applyOnTheRight(firstCol, firstCol+i, J);
else m_naiveU.applyOnTheRight(firstCol, firstCol+i, J);
}// end deflation 43
// page 13
// i,j >= 1, i!=j and |di - dj| < epsilon * norm2(M)
// We apply two rotations to have zj = 0;
// TODO deflation44 is still broken and not properly tested
template <typename MatrixType>
void BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size)
{
using std::abs;
using std::sqrt;
using std::conj;
using std::pow;
RealScalar c = m_computed(firstColm+i, firstColm);
RealScalar s = m_computed(firstColm+j, firstColm);
RealScalar r = sqrt(numext::abs2(c) + numext::abs2(s));
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "deflation 4.4: " << i << "," << j << " -> " << c << " " << s << " " << r << " ; "
<< m_computed(firstColm + i-1, firstColm) << " "
<< m_computed(firstColm + i, firstColm) << " "
<< m_computed(firstColm + i+1, firstColm) << " "
<< m_computed(firstColm + i+2, firstColm) << "\n";
std::cout << m_computed(firstColm + i-1, firstColm + i-1) << " "
<< m_computed(firstColm + i, firstColm+i) << " "
<< m_computed(firstColm + i+1, firstColm+i+1) << " "
<< m_computed(firstColm + i+2, firstColm+i+2) << "\n";
#endif
if (r==Literal(0))
{
m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
return;
}
c/=r;
s/=r;
m_computed(firstColm + i, firstColm) = r;
m_computed(firstColm + j, firstColm + j) = m_computed(firstColm + i, firstColm + i);
m_computed(firstColm + j, firstColm) = Literal(0);
JacobiRotation<RealScalar> J(c,-s);
if (m_compU) m_naiveU.middleRows(firstColu, size+1).applyOnTheRight(firstColu + i, firstColu + j, J);
else m_naiveU.applyOnTheRight(firstColu+i, firstColu+j, J);
if (m_compV) m_naiveV.middleRows(firstRowW, size).applyOnTheRight(firstColW + i, firstColW + j, J);
}// end deflation 44
// acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive]
template <typename MatrixType>
void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift)
{
using std::sqrt;
using std::abs;
const Index length = lastCol + 1 - firstCol;
Block<MatrixXr,Dynamic,1> col0(m_computed, firstCol+shift, firstCol+shift, length, 1);
Diagonal<MatrixXr> fulldiag(m_computed);
VectorBlock<Diagonal<MatrixXr>,Dynamic> diag(fulldiag, firstCol+shift, length);
const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();
RealScalar maxDiag = diag.tail((std::max)(Index(1),length-1)).cwiseAbs().maxCoeff();
RealScalar epsilon_strict = numext::maxi<RealScalar>(considerZero,NumTraits<RealScalar>::epsilon() * maxDiag);
RealScalar epsilon_coarse = Literal(8) * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(col0.cwiseAbs().maxCoeff(), maxDiag);
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(m_naiveU.allFinite());
assert(m_naiveV.allFinite());
assert(m_computed.allFinite());
#endif
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "\ndeflate:" << diag.head(k+1).transpose() << " | " << diag.segment(k+1,length-k-1).transpose() << "\n";
#endif
//condition 4.1
if (diag(0) < epsilon_coarse)
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "deflation 4.1, because " << diag(0) << " < " << epsilon_coarse << "\n";
#endif
diag(0) = epsilon_coarse;
}
//condition 4.2
for (Index i=1;i<length;++i)
if (abs(col0(i)) < epsilon_strict)
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "deflation 4.2, set z(" << i << ") to zero because " << abs(col0(i)) << " < " << epsilon_strict << " (diag(" << i << ")=" << diag(i) << ")\n";
#endif
col0(i) = Literal(0);
}
//condition 4.3
for (Index i=1;i<length; i++)
if (diag(i) < epsilon_coarse)
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "deflation 4.3, cancel z(" << i << ")=" << col0(i) << " because diag(" << i << ")=" << diag(i) << " < " << epsilon_coarse << "\n";
#endif
deflation43(firstCol, shift, i, length);
}
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(m_naiveU.allFinite());
assert(m_naiveV.allFinite());
assert(m_computed.allFinite());
#endif
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "to be sorted: " << diag.transpose() << "\n\n";
#endif
{
// Check for total deflation
// If we have a total deflation, then we have to consider col0(0)==diag(0) as a singular value during sorting
bool total_deflation = (col0.tail(length-1).array()<considerZero).all();
// Sort the diagonal entries, since diag(1:k-1) and diag(k:length) are already sorted, let's do a sorted merge.
// First, compute the respective permutation.
Index *permutation = m_workspaceI.data();
{
permutation[0] = 0;
Index p = 1;
// Move deflated diagonal entries at the end.
for(Index i=1; i<length; ++i)
if(abs(diag(i))<considerZero)
permutation[p++] = i;
Index i=1, j=k+1;
for( ; p < length; ++p)
{
if (i > k) permutation[p] = j++;
else if (j >= length) permutation[p] = i++;
else if (diag(i) < diag(j)) permutation[p] = j++;
else permutation[p] = i++;
}
}
// If we have a total deflation, then we have to insert diag(0) at the right place
if(total_deflation)
{
for(Index i=1; i<length; ++i)
{
Index pi = permutation[i];
if(abs(diag(pi))<considerZero || diag(0)<diag(pi))
permutation[i-1] = permutation[i];
else
{
permutation[i-1] = 0;
break;
}
}
}
// Current index of each col, and current column of each index
Index *realInd = m_workspaceI.data()+length;
Index *realCol = m_workspaceI.data()+2*length;
for(int pos = 0; pos< length; pos++)
{
realCol[pos] = pos;
realInd[pos] = pos;
}
for(Index i = total_deflation?0:1; i < length; i++)
{
const Index pi = permutation[length - (total_deflation ? i+1 : i)];
const Index J = realCol[pi];
using std::swap;
// swap diagonal and first column entries:
swap(diag(i), diag(J));
if(i!=0 && J!=0) swap(col0(i), col0(J));
// change columns
if (m_compU) m_naiveU.col(firstCol+i).segment(firstCol, length + 1).swap(m_naiveU.col(firstCol+J).segment(firstCol, length + 1));
else m_naiveU.col(firstCol+i).segment(0, 2) .swap(m_naiveU.col(firstCol+J).segment(0, 2));
if (m_compV) m_naiveV.col(firstColW + i).segment(firstRowW, length).swap(m_naiveV.col(firstColW + J).segment(firstRowW, length));
//update real pos
const Index realI = realInd[i];
realCol[realI] = J;
realCol[pi] = i;
realInd[J] = realI;
realInd[i] = pi;
}
}
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "sorted: " << diag.transpose().format(bdcsvdfmt) << "\n";
std::cout << " : " << col0.transpose() << "\n\n";
#endif
//condition 4.4
{
Index i = length-1;
while(i>0 && (abs(diag(i))<considerZero || abs(col0(i))<considerZero)) --i;
for(; i>1;--i)
if( (diag(i) - diag(i-1)) < NumTraits<RealScalar>::epsilon()*maxDiag )
{
#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE
std::cout << "deflation 4.4 with i = " << i << " because " << (diag(i) - diag(i-1)) << " < " << NumTraits<RealScalar>::epsilon()*diag(i) << "\n";
#endif
eigen_internal_assert(abs(diag(i) - diag(i-1))<epsilon_coarse && " diagonal entries are not properly sorted");
deflation44(firstCol, firstCol + shift, firstRowW, firstColW, i-1, i, length);
}
}
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
for(Index j=2;j<length;++j)
assert(diag(j-1)<=diag(j) || abs(diag(j))<considerZero);
#endif
#ifdef EIGEN_BDCSVD_SANITY_CHECKS
assert(m_naiveU.allFinite());
assert(m_naiveV.allFinite());
assert(m_computed.allFinite());
#endif
}//end deflation
#ifndef __CUDACC__
/** \svd_module
*
* \return the singular value decomposition of \c *this computed by Divide & Conquer algorithm
*
* \sa class BDCSVD
*/
template<typename Derived>
BDCSVD<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::bdcSvd(unsigned int computationOptions) const
{
return BDCSVD<PlainObject>(*this, computationOptions);
}
#endif
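// Illustrative usage sketch (thin divide-and-conquer SVD followed by a least-squares solve):
//
//   #include <Eigen/Dense>
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(1000, 300);
//   Eigen::VectorXd b = Eigen::VectorXd::Random(1000);
//   Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
//   Eigen::VectorXd x = svd.solve(b);                    // minimizes ||A*x - b||
//   // equivalently: A.bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(b)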
} // end namespace Eigen
#endif
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SVD/JacobiSVD.h
|
.h
| 32,949
| 805
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2013-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_JACOBISVD_H
#define EIGEN_JACOBISVD_H
namespace Eigen {
namespace internal {
// forward declaration (needed by ICC)
// the empty body is required by MSVC
template<typename MatrixType, int QRPreconditioner,
bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
struct svd_precondition_2x2_block_to_be_real {};
/*** QR preconditioners (R-SVD)
***
*** Their role is to reduce the problem of computing the SVD to the case of a square matrix.
*** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for
*** JacobiSVD which by itself is only able to work on square matrices.
***/
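// Sketch of the reduction: for a tall m-by-n matrix A (m > n; the wide case is handled by working on
// the adjoint), a QR factorization A = Q * [R; 0] with R square n-by-n reduces the problem to the SVD
// of R. Indeed, R = U_r * S * V^* yields A = (Q * [U_r; 0]) * S * V^*, so the expensive Jacobi
// iterations only ever run on an n-by-n matrix.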
enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols };
template<typename MatrixType, int QRPreconditioner, int Case>
struct qr_preconditioner_should_do_anything
{
enum { a = MatrixType::RowsAtCompileTime != Dynamic &&
MatrixType::ColsAtCompileTime != Dynamic &&
MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime,
b = MatrixType::RowsAtCompileTime != Dynamic &&
MatrixType::ColsAtCompileTime != Dynamic &&
MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime,
ret = !( (QRPreconditioner == NoQRPreconditioner) ||
(Case == PreconditionIfMoreColsThanRows && bool(a)) ||
(Case == PreconditionIfMoreRowsThanCols && bool(b)) )
};
};
template<typename MatrixType, int QRPreconditioner, int Case,
bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret
> struct qr_preconditioner_impl {};
template<typename MatrixType, int QRPreconditioner, int Case>
class qr_preconditioner_impl<MatrixType, QRPreconditioner, Case, false>
{
public:
void allocate(const JacobiSVD<MatrixType, QRPreconditioner>&) {}
bool run(JacobiSVD<MatrixType, QRPreconditioner>&, const MatrixType&)
{
return false;
}
};
/*** preconditioner using FullPivHouseholderQR ***/
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
enum
{
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
};
typedef Matrix<Scalar, 1, RowsAtCompileTime, RowMajor, 1, MaxRowsAtCompileTime> WorkspaceType;
void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
}
bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.rows() > matrix.cols())
{
m_qr.compute(matrix);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace);
if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
return true;
}
return false;
}
private:
typedef FullPivHouseholderQR<MatrixType> QRType;
QRType m_qr;
WorkspaceType m_workspace;
};
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
enum
{
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
TrOptions = RowsAtCompileTime==1 ? (MatrixType::Options & ~(RowMajor))
: ColsAtCompileTime==1 ? (MatrixType::Options | RowMajor)
: MatrixType::Options
};
typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, TrOptions, MaxColsAtCompileTime, MaxRowsAtCompileTime>
TransposeTypeWithSameStorageOrder;
void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.cols(), svd.rows());
}
m_adjoint.resize(svd.cols(), svd.rows());
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
}
bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.cols() > matrix.rows())
{
m_adjoint = matrix.adjoint();
m_qr.compute(m_adjoint);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace);
if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
return true;
}
else return false;
}
private:
typedef FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
/*** preconditioner using ColPivHouseholderQR ***/
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
{
public:
void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
}
bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.rows() > matrix.cols())
{
m_qr.compute(matrix);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
else if(svd.m_computeThinU)
{
svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
}
if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
return true;
}
return false;
}
private:
typedef ColPivHouseholderQR<MatrixType> QRType;
QRType m_qr;
typename internal::plain_col_type<MatrixType>::type m_workspace;
};
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
enum
{
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
TrOptions = RowsAtCompileTime==1 ? (MatrixType::Options & ~(RowMajor))
: ColsAtCompileTime==1 ? (MatrixType::Options | RowMajor)
: MatrixType::Options
};
typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, TrOptions, MaxColsAtCompileTime, MaxRowsAtCompileTime>
TransposeTypeWithSameStorageOrder;
void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.cols(), svd.rows());
}
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
m_adjoint.resize(svd.cols(), svd.rows());
}
bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.cols() > matrix.rows())
{
m_adjoint = matrix.adjoint();
m_qr.compute(m_adjoint);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
else if(svd.m_computeThinV)
{
svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
}
if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
return true;
}
else return false;
}
private:
typedef ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
/*** preconditioner using HouseholderQR ***/
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
{
public:
void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
}
bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.rows() > matrix.cols())
{
m_qr.compute(matrix);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
else if(svd.m_computeThinU)
{
svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
}
if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());
return true;
}
return false;
}
private:
typedef HouseholderQR<MatrixType> QRType;
QRType m_qr;
typename internal::plain_col_type<MatrixType>::type m_workspace;
};
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
enum
{
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
Options = MatrixType::Options
};
typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
TransposeTypeWithSameStorageOrder;
void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.cols(), svd.rows());
}
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
m_adjoint.resize(svd.cols(), svd.rows());
}
bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.cols() > matrix.rows())
{
m_adjoint = matrix.adjoint();
m_qr.compute(m_adjoint);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
else if(svd.m_computeThinV)
{
svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
}
if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());
return true;
}
else return false;
}
private:
typedef HouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
/*** 2x2 SVD implementation
***
*** JacobiSVD consists in performing a series of 2x2 SVD subproblems
***/
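// For each index pair (p,q), the 2x2 block B = [[a_pp, a_pq], [a_qp, a_qq]] is first made real
// (in the complex case, svd_precondition_2x2_block_to_be_real multiplies the involved rows and
// columns by unit-modulus scalars), then real_2x2_jacobi_svd computes rotations J_left and J_right
// such that J_left * B * J_right is diagonal; these rotations are accumulated into U and V.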
template<typename MatrixType, int QRPreconditioner>
struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>
{
typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
typedef typename MatrixType::RealScalar RealScalar;
static bool run(typename SVD::WorkMatrixType&, SVD&, Index, Index, RealScalar&) { return true; }
};
template<typename MatrixType, int QRPreconditioner>
struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
{
typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
static bool run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q, RealScalar& maxDiagEntry)
{
using std::sqrt;
using std::abs;
Scalar z;
JacobiRotation<Scalar> rot;
RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
const RealScalar precision = NumTraits<Scalar>::epsilon();
if(n==0)
{
// make sure first column is zero
work_matrix.coeffRef(p,p) = work_matrix.coeffRef(q,p) = Scalar(0);
if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero)
{
// work_matrix.coeff(p,q) can be zero if work_matrix.coeff(q,p) is not zero but small enough to underflow when computing n
z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
work_matrix.row(p) *= z;
if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
}
if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero)
{
z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
work_matrix.row(q) *= z;
if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
}
// otherwise the second row is already zero, so we have nothing to do.
}
else
{
rot.c() = conj(work_matrix.coeff(p,p)) / n;
rot.s() = work_matrix.coeff(q,p) / n;
work_matrix.applyOnTheLeft(p,q,rot);
if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero)
{
z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
work_matrix.col(q) *= z;
if(svd.computeV()) svd.m_matrixV.col(q) *= z;
}
if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero)
{
z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
work_matrix.row(q) *= z;
if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
}
}
// update largest diagonal entry
maxDiagEntry = numext::maxi<RealScalar>(maxDiagEntry,numext::maxi<RealScalar>(abs(work_matrix.coeff(p,p)), abs(work_matrix.coeff(q,q))));
// and check whether the 2x2 block is already diagonal
RealScalar threshold = numext::maxi<RealScalar>(considerAsZero, precision * maxDiagEntry);
return abs(work_matrix.coeff(p,q))>threshold || abs(work_matrix.coeff(q,p)) > threshold;
}
};
template<typename _MatrixType, int QRPreconditioner>
struct traits<JacobiSVD<_MatrixType,QRPreconditioner> >
{
typedef _MatrixType MatrixType;
};
} // end namespace internal
/** \ingroup SVD_Module
*
*
* \class JacobiSVD
*
* \brief Two-sided Jacobi SVD decomposition of a rectangular matrix
*
* \tparam _MatrixType the type of the matrix of which we are computing the SVD decomposition
  * \tparam QRPreconditioner this optional parameter allows specifying the type of QR decomposition that will be used internally
* for the R-SVD step for non-square matrices. See discussion of possible values below.
*
* SVD decomposition consists in decomposing any n-by-p matrix \a A as a product
* \f[ A = U S V^* \f]
* where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
* the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
* and right \em singular \em vectors of \a A respectively.
*
* Singular values are always sorted in decreasing order.
*
* This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly.
*
* You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
* smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
* singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
* and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
*
* Here's an example demonstrating basic usage:
* \include JacobiSVD_basic.cpp
* Output: \verbinclude JacobiSVD_basic.out
*
* This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than
* bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and
* \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms.
* In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension.
*
* If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
* terminate in finite (and reasonable) time.
*
* The possible values for QRPreconditioner are:
* \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.
* \li FullPivHouseholderQRPreconditioner, is the safest and slowest. It uses full-pivoting QR.
* Contrary to other QRs, it doesn't allow computing thin unitaries.
  * \li HouseholderQRPreconditioner is the fastest, but less safe and accurate than the pivoting variants. It uses non-pivoting QR.
  *     This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization
  *     is inherently non-pivoting). However, the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterative
  *     process is more reliable than the optimized bidiagonal SVD iterations.
  * \li NoQRPreconditioner allows skipping the QR preconditioner entirely. This is useful if you know that you will only be computing
* JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in
* faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking
* if QR preconditioning is needed before applying it anyway.
*
* \sa MatrixBase::jacobiSvd()
*/
template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
: public SVDBase<JacobiSVD<_MatrixType,QRPreconditioner> >
{
typedef SVDBase<JacobiSVD> Base;
public:
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
MatrixOptions = MatrixType::Options
};
typedef typename Base::MatrixUType MatrixUType;
typedef typename Base::MatrixVType MatrixVType;
typedef typename Base::SingularValuesType SingularValuesType;
typedef typename internal::plain_row_type<MatrixType>::type RowType;
typedef typename internal::plain_col_type<MatrixType>::type ColType;
typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
WorkMatrixType;
/** \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via JacobiSVD::compute(const MatrixType&).
*/
JacobiSVD()
{}
/** \brief Default Constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem size.
* \sa JacobiSVD()
*/
JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
{
allocate(rows, cols, computationOptions);
}
/** \brief Constructor performing the decomposition of given matrix.
*
* \param matrix the matrix to decompose
   * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
   *                           By default, none is computed. This is a bit-field; the possible bits are #ComputeFullU, #ComputeThinU,
* #ComputeFullV, #ComputeThinV.
*
* Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
* available with the (non-default) FullPivHouseholderQR preconditioner.
*/
explicit JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
{
compute(matrix, computationOptions);
}
/** \brief Method performing the decomposition of given matrix using custom options.
*
* \param matrix the matrix to decompose
   * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
   *                           By default, none is computed. This is a bit-field; the possible bits are #ComputeFullU, #ComputeThinU,
* #ComputeFullV, #ComputeThinV.
*
* Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
* available with the (non-default) FullPivHouseholderQR preconditioner.
*/
JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
/** \brief Method performing the decomposition of given matrix using current options.
*
* \param matrix the matrix to decompose
*
* This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
*/
JacobiSVD& compute(const MatrixType& matrix)
{
return compute(matrix, m_computationOptions);
}
using Base::computeU;
using Base::computeV;
using Base::rows;
using Base::cols;
using Base::rank;
private:
void allocate(Index rows, Index cols, unsigned int computationOptions);
protected:
using Base::m_matrixU;
using Base::m_matrixV;
using Base::m_singularValues;
using Base::m_isInitialized;
using Base::m_isAllocated;
using Base::m_usePrescribedThreshold;
using Base::m_computeFullU;
using Base::m_computeThinU;
using Base::m_computeFullV;
using Base::m_computeThinV;
using Base::m_computationOptions;
using Base::m_nonzeroSingularValues;
using Base::m_rows;
using Base::m_cols;
using Base::m_diagSize;
using Base::m_prescribedThreshold;
WorkMatrixType m_workMatrix;
template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
friend struct internal::svd_precondition_2x2_block_to_be_real;
template<typename __MatrixType, int _QRPreconditioner, int _Case, bool _DoAnything>
friend struct internal::qr_preconditioner_impl;
internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
MatrixType m_scaledMatrix;
};
template<typename MatrixType, int QRPreconditioner>
void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, unsigned int computationOptions)
{
eigen_assert(rows >= 0 && cols >= 0);
if (m_isAllocated &&
rows == m_rows &&
cols == m_cols &&
computationOptions == m_computationOptions)
{
return;
}
m_rows = rows;
m_cols = cols;
m_isInitialized = false;
m_isAllocated = true;
m_computationOptions = computationOptions;
m_computeFullU = (computationOptions & ComputeFullU) != 0;
m_computeThinU = (computationOptions & ComputeThinU) != 0;
m_computeFullV = (computationOptions & ComputeFullV) != 0;
m_computeThinV = (computationOptions & ComputeThinV) != 0;
eigen_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U");
eigen_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V");
eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
"JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns.");
if (QRPreconditioner == FullPivHouseholderQRPreconditioner)
{
eigen_assert(!(m_computeThinU || m_computeThinV) &&
"JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
"Use the ColPivHouseholderQR preconditioner instead.");
}
m_diagSize = (std::min)(m_rows, m_cols);
m_singularValues.resize(m_diagSize);
if(RowsAtCompileTime==Dynamic)
m_matrixU.resize(m_rows, m_computeFullU ? m_rows
: m_computeThinU ? m_diagSize
: 0);
if(ColsAtCompileTime==Dynamic)
m_matrixV.resize(m_cols, m_computeFullV ? m_cols
: m_computeThinV ? m_diagSize
: 0);
m_workMatrix.resize(m_diagSize, m_diagSize);
if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this);
if(m_rows!=m_cols) m_scaledMatrix.resize(rows,cols);
}
template<typename MatrixType, int QRPreconditioner>
JacobiSVD<MatrixType, QRPreconditioner>&
JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
using std::abs;
allocate(matrix.rows(), matrix.cols(), computationOptions);
// currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
// only worsening the precision of U and V as we accumulate more rotations
const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();
// limit for denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)
const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
// Scaling factor to reduce over/under-flows
RealScalar scale = matrix.cwiseAbs().maxCoeff();
if(scale==RealScalar(0)) scale = RealScalar(1);
/*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
if(m_rows!=m_cols)
{
m_scaledMatrix = matrix / scale;
m_qr_precond_morecols.run(*this, m_scaledMatrix);
m_qr_precond_morerows.run(*this, m_scaledMatrix);
}
else
{
m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize) / scale;
if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows);
if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize);
if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols);
if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize);
}
/*** step 2. The main Jacobi SVD iteration. ***/
RealScalar maxDiagEntry = m_workMatrix.cwiseAbs().diagonal().maxCoeff();
bool finished = false;
while(!finished)
{
finished = true;
// do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix
for(Index p = 1; p < m_diagSize; ++p)
{
for(Index q = 0; q < p; ++q)
{
// if this 2x2 sub-matrix is not diagonal already...
// notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
// keep us iterating forever. Similarly, small denormal numbers are considered zero.
RealScalar threshold = numext::maxi<RealScalar>(considerAsZero, precision * maxDiagEntry);
if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold)
{
finished = false;
// perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
// the complex to real operation returns true if the updated 2x2 block is not already diagonal
if(internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q, maxDiagEntry))
{
JacobiRotation<RealScalar> j_left, j_right;
internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
// accumulate resulting Jacobi rotations
m_workMatrix.applyOnTheLeft(p,q,j_left);
if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose());
m_workMatrix.applyOnTheRight(p,q,j_right);
if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right);
// keep track of the largest diagonal coefficient
maxDiagEntry = numext::maxi<RealScalar>(maxDiagEntry,numext::maxi<RealScalar>(abs(m_workMatrix.coeff(p,p)), abs(m_workMatrix.coeff(q,q))));
}
}
}
}
}
/*** step 3. The work matrix is now diagonal, so ensure it's positive so its diagonal entries are the singular values ***/
for(Index i = 0; i < m_diagSize; ++i)
{
    // For a complex matrix, some diagonal coefficients might not have been
    // treated by svd_precondition_2x2_block_to_be_real, and the imaginary part
    // of some diagonal entry might not be zero.
if(NumTraits<Scalar>::IsComplex && abs(numext::imag(m_workMatrix.coeff(i,i)))>considerAsZero)
{
RealScalar a = abs(m_workMatrix.coeff(i,i));
m_singularValues.coeffRef(i) = abs(a);
if(computeU()) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
}
else
{
// m_workMatrix.coeff(i,i) is already real, no difficulty:
RealScalar a = numext::real(m_workMatrix.coeff(i,i));
m_singularValues.coeffRef(i) = abs(a);
if(computeU() && (a<RealScalar(0))) m_matrixU.col(i) = -m_matrixU.col(i);
}
}
m_singularValues *= scale;
/*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/
m_nonzeroSingularValues = m_diagSize;
for(Index i = 0; i < m_diagSize; i++)
{
Index pos;
RealScalar maxRemainingSingularValue = m_singularValues.tail(m_diagSize-i).maxCoeff(&pos);
if(maxRemainingSingularValue == RealScalar(0))
{
m_nonzeroSingularValues = i;
break;
}
if(pos)
{
pos += i;
std::swap(m_singularValues.coeffRef(i), m_singularValues.coeffRef(pos));
if(computeU()) m_matrixU.col(pos).swap(m_matrixU.col(i));
if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i));
}
}
m_isInitialized = true;
return *this;
}
/** \svd_module
*
* \return the singular value decomposition of \c *this computed by two-sided
* Jacobi transformations.
*
* \sa class JacobiSVD
*/
template<typename Derived>
JacobiSVD<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
{
return JacobiSVD<PlainObject>(*this, computationOptions);
}
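// Illustrative usage sketch (thin two-sided Jacobi SVD and least-squares solve):
//
//   #include <Eigen/Dense>
//   Eigen::MatrixXf A = Eigen::MatrixXf::Random(5, 3);
//   Eigen::VectorXf b = Eigen::VectorXf::Random(5);
//   Eigen::JacobiSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
//   Eigen::VectorXf sv = svd.singularValues();           // sorted in decreasing order
//   Eigen::VectorXf x  = svd.solve(b);                   // minimizes ||A*x - b||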
} // end namespace Eigen
#endif // EIGEN_JACOBISVD_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SVD/UpperBidiagonalization.h
|
.h
| 15,957
| 415
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2013-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_BIDIAGONALIZATION_H
#define EIGEN_BIDIAGONALIZATION_H
namespace Eigen {
namespace internal {
// UpperBidiagonalization will probably be replaced by a Bidiagonalization class; we don't want to make it a stable API.
// At the same time, it's useful to keep for now as it's about the only thing that is testing the BandMatrix class.
template<typename _MatrixType> class UpperBidiagonalization
{
public:
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
ColsAtCompileTimeMinusOne = internal::decrement_size<ColsAtCompileTime>::ret
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef Matrix<Scalar, 1, ColsAtCompileTime> RowVectorType;
typedef Matrix<Scalar, RowsAtCompileTime, 1> ColVectorType;
typedef BandMatrix<RealScalar, ColsAtCompileTime, ColsAtCompileTime, 1, 0, RowMajor> BidiagonalType;
typedef Matrix<Scalar, ColsAtCompileTime, 1> DiagVectorType;
typedef Matrix<Scalar, ColsAtCompileTimeMinusOne, 1> SuperDiagVectorType;
typedef HouseholderSequence<
const MatrixType,
const typename internal::remove_all<typename Diagonal<const MatrixType,0>::ConjugateReturnType>::type
> HouseholderUSequenceType;
typedef HouseholderSequence<
const typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type,
Diagonal<const MatrixType,1>,
OnTheRight
> HouseholderVSequenceType;
/**
* \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via Bidiagonalization::compute(const MatrixType&).
*/
UpperBidiagonalization() : m_householder(), m_bidiagonal(), m_isInitialized(false) {}
explicit UpperBidiagonalization(const MatrixType& matrix)
: m_householder(matrix.rows(), matrix.cols()),
m_bidiagonal(matrix.cols(), matrix.cols()),
m_isInitialized(false)
{
compute(matrix);
}
UpperBidiagonalization& compute(const MatrixType& matrix);
UpperBidiagonalization& computeUnblocked(const MatrixType& matrix);
const MatrixType& householder() const { return m_householder; }
const BidiagonalType& bidiagonal() const { return m_bidiagonal; }
const HouseholderUSequenceType householderU() const
{
eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
return HouseholderUSequenceType(m_householder, m_householder.diagonal().conjugate());
}
const HouseholderVSequenceType householderV() // const here gives nasty errors and i'm lazy
{
eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
return HouseholderVSequenceType(m_householder.conjugate(), m_householder.const_derived().template diagonal<1>())
.setLength(m_householder.cols()-1)
.setShift(1);
}
protected:
MatrixType m_householder;
BidiagonalType m_bidiagonal;
bool m_isInitialized;
};
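// Illustrative usage sketch (this class lives in namespace internal and is not a stable API):
// decompose a tall matrix A as A = U * B * V^*, with B upper bidiagonal.
//
//   #include <Eigen/SVD>
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);                // requires rows >= cols
//   Eigen::internal::UpperBidiagonalization<Eigen::MatrixXd> ubd(A);
//   Eigen::MatrixXd B = Eigen::MatrixXd::Zero(6, 4);
//   B.topRows(4) = ubd.bidiagonal().toDenseMatrix();                  // 4x4 upper bidiagonal factor
//   Eigen::MatrixXd check = ubd.householderU() * B * ubd.householderV().adjoint();  // approximately A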
// Standard upper bidiagonalization without fancy optimizations
// This version should be faster for small matrix size
template<typename MatrixType>
void upperbidiagonalization_inplace_unblocked(MatrixType& mat,
typename MatrixType::RealScalar *diagonal,
typename MatrixType::RealScalar *upper_diagonal,
typename MatrixType::Scalar* tempData = 0)
{
typedef typename MatrixType::Scalar Scalar;
Index rows = mat.rows();
Index cols = mat.cols();
typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixType::MaxRowsAtCompileTime,1> TempType;
TempType tempVector;
if(tempData==0)
{
tempVector.resize(rows);
tempData = tempVector.data();
}
for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k)
{
Index remainingRows = rows - k;
Index remainingCols = cols - k - 1;
// construct left householder transform in-place in A
mat.col(k).tail(remainingRows)
.makeHouseholderInPlace(mat.coeffRef(k,k), diagonal[k]);
// apply householder transform to remaining part of A on the left
mat.bottomRightCorner(remainingRows, remainingCols)
.applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), mat.coeff(k,k), tempData);
if(k == cols-1) break;
// construct right householder transform in-place in mat
mat.row(k).tail(remainingCols)
.makeHouseholderInPlace(mat.coeffRef(k,k+1), upper_diagonal[k]);
    // apply householder transform to remaining part of mat on the right
mat.bottomRightCorner(remainingRows-1, remainingCols)
.applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).transpose(), mat.coeff(k,k+1), tempData);
}
}
/** \internal
* Helper routine for the block reduction to upper bidiagonal form.
*
* Let's partition the matrix A:
*
* | A00 A01 |
* A = | |
* | A10 A11 |
*
  * This function reduces to bidiagonal form the left \c rows x \a blockSize vertical panel [A00/A10]
  * and the \a blockSize x \c cols horizontal panel [A00 A01] of the matrix \a A. The bottom-right block A11
  * is updated using matrix-matrix products:
  *   A11 -= V * Y^T + X * U^T
  * where V and U contain the left and right Householder vectors, stored in A10 and A01
  * respectively, and the update matrices X and Y are computed during the reduction.
*
*/
template<typename MatrixType>
void upperbidiagonalization_blocked_helper(MatrixType& A,
typename MatrixType::RealScalar *diagonal,
typename MatrixType::RealScalar *upper_diagonal,
Index bs,
Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic,
traits<MatrixType>::Flags & RowMajorBit> > X,
Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic,
traits<MatrixType>::Flags & RowMajorBit> > Y)
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename NumTraits<RealScalar>::Literal Literal;
enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
typedef InnerStride<int(StorageOrder) == int(ColMajor) ? 1 : Dynamic> ColInnerStride;
typedef InnerStride<int(StorageOrder) == int(ColMajor) ? Dynamic : 1> RowInnerStride;
typedef Ref<Matrix<Scalar, Dynamic, 1>, 0, ColInnerStride> SubColumnType;
typedef Ref<Matrix<Scalar, 1, Dynamic>, 0, RowInnerStride> SubRowType;
typedef Ref<Matrix<Scalar, Dynamic, Dynamic, StorageOrder > > SubMatType;
Index brows = A.rows();
Index bcols = A.cols();
Scalar tau_u, tau_u_prev(0), tau_v;
for(Index k = 0; k < bs; ++k)
{
Index remainingRows = brows - k;
Index remainingCols = bcols - k - 1;
SubMatType X_k1( X.block(k,0, remainingRows,k) );
SubMatType V_k1( A.block(k,0, remainingRows,k) );
// 1 - update the k-th column of A
SubColumnType v_k = A.col(k).tail(remainingRows);
v_k -= V_k1 * Y.row(k).head(k).adjoint();
if(k) v_k -= X_k1 * A.col(k).head(k);
// 2 - construct left Householder transform in-place
v_k.makeHouseholderInPlace(tau_v, diagonal[k]);
if(k+1<bcols)
{
SubMatType Y_k ( Y.block(k+1,0, remainingCols, k+1) );
SubMatType U_k1 ( A.block(0,k+1, k,remainingCols) );
      // this eases the application of Householder transformations
// A(k,k) will store tau_v later
A(k,k) = Scalar(1);
// 3 - Compute y_k^T = tau_v * ( A^T*v_k - Y_k-1*V_k-1^T*v_k - U_k-1*X_k-1^T*v_k )
{
SubColumnType y_k( Y.col(k).tail(remainingCols) );
        // let's use the beginning of column k of Y as a temporary vector
SubColumnType tmp( Y.col(k).head(k) );
y_k.noalias() = A.block(k,k+1, remainingRows,remainingCols).adjoint() * v_k; // bottleneck
tmp.noalias() = V_k1.adjoint() * v_k;
y_k.noalias() -= Y_k.leftCols(k) * tmp;
tmp.noalias() = X_k1.adjoint() * v_k;
y_k.noalias() -= U_k1.adjoint() * tmp;
y_k *= numext::conj(tau_v);
}
// 4 - update k-th row of A (it will become u_k)
SubRowType u_k( A.row(k).tail(remainingCols) );
u_k = u_k.conjugate();
{
u_k -= Y_k * A.row(k).head(k+1).adjoint();
if(k) u_k -= U_k1.adjoint() * X.row(k).head(k).adjoint();
}
// 5 - construct right Householder transform in-place
u_k.makeHouseholderInPlace(tau_u, upper_diagonal[k]);
// this eases the application of Householder transformations
// A(k,k+1) will store tau_u later
A(k,k+1) = Scalar(1);
// 6 - Compute x_k = tau_u * ( A*u_k - X_k-1*U_k-1^T*u_k - V_k*Y_k^T*u_k )
{
SubColumnType x_k ( X.col(k).tail(remainingRows-1) );
        // let's use the beginning of column k of X as temporary vectors
        // note that tmp0 and tmp1 overlap
SubColumnType tmp0 ( X.col(k).head(k) ),
tmp1 ( X.col(k).head(k+1) );
x_k.noalias() = A.block(k+1,k+1, remainingRows-1,remainingCols) * u_k.transpose(); // bottleneck
tmp0.noalias() = U_k1 * u_k.transpose();
x_k.noalias() -= X_k1.bottomRows(remainingRows-1) * tmp0;
tmp1.noalias() = Y_k.adjoint() * u_k.transpose();
x_k.noalias() -= A.block(k+1,0, remainingRows-1,k+1) * tmp1;
x_k *= numext::conj(tau_u);
tau_u = numext::conj(tau_u);
u_k = u_k.conjugate();
}
if(k>0) A.coeffRef(k-1,k) = tau_u_prev;
tau_u_prev = tau_u;
}
else
A.coeffRef(k-1,k) = tau_u_prev;
A.coeffRef(k,k) = tau_v;
}
if(bs<bcols)
A.coeffRef(bs-1,bs) = tau_u_prev;
  // update the bottom-right block A11
if(bcols>bs && brows>bs)
{
SubMatType A11( A.bottomRightCorner(brows-bs,bcols-bs) );
SubMatType A10( A.block(bs,0, brows-bs,bs) );
SubMatType A01( A.block(0,bs, bs,bcols-bs) );
Scalar tmp = A01(bs-1,0);
A01(bs-1,0) = Literal(1);
A11.noalias() -= A10 * Y.topLeftCorner(bcols,bs).bottomRows(bcols-bs).adjoint();
A11.noalias() -= X.topLeftCorner(brows,bs).bottomRows(brows-bs) * A01;
A01(bs-1,0) = tmp;
}
}
/** \internal
*
* Implementation of a block-bidiagonal reduction.
* It is based on the following paper:
* The Design of a Parallel Dense Linear Algebra Software Library: Reduction to Hessenberg, Tridiagonal, and Bidiagonal Form.
* by Jaeyoung Choi, Jack J. Dongarra, David W. Walker. (1995)
* section 3.3
*/
template<typename MatrixType, typename BidiagType>
void upperbidiagonalization_inplace_blocked(MatrixType& A, BidiagType& bidiagonal,
Index maxBlockSize=32,
typename MatrixType::Scalar* /*tempData*/ = 0)
{
typedef typename MatrixType::Scalar Scalar;
typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
Index rows = A.rows();
Index cols = A.cols();
Index size = (std::min)(rows, cols);
// X and Y are work space
enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
Matrix<Scalar,
MatrixType::RowsAtCompileTime,
Dynamic,
StorageOrder,
MatrixType::MaxRowsAtCompileTime> X(rows,maxBlockSize);
Matrix<Scalar,
MatrixType::ColsAtCompileTime,
Dynamic,
StorageOrder,
MatrixType::MaxColsAtCompileTime> Y(cols,maxBlockSize);
Index blockSize = (std::min)(maxBlockSize,size);
Index k = 0;
for(k = 0; k < size; k += blockSize)
{
Index bs = (std::min)(size-k,blockSize); // actual size of the block
Index brows = rows - k; // rows of the block
Index bcols = cols - k; // columns of the block
// partition the matrix A:
//
// | A00 A01 A02 |
// | |
// A = | A10 A11 A12 |
// | |
// | A20 A21 A22 |
//
// where A11 is a bs x bs diagonal block,
// and let:
// | A11 A12 |
// B = | |
// | A21 A22 |
BlockType B = A.block(k,k,brows,bcols);
    // This stage performs the bidiagonalization of A11, A21, A12, and the update of A22.
    // Finally, the algorithm continues on the updated A22.
    //
    // However, if B is too small, or A22 is empty, then let's use an unblocked strategy
if(k+bs==cols || bcols<48) // somewhat arbitrary threshold
{
upperbidiagonalization_inplace_unblocked(B,
&(bidiagonal.template diagonal<0>().coeffRef(k)),
&(bidiagonal.template diagonal<1>().coeffRef(k)),
X.data()
);
break; // We're done
}
else
{
upperbidiagonalization_blocked_helper<BlockType>( B,
&(bidiagonal.template diagonal<0>().coeffRef(k)),
&(bidiagonal.template diagonal<1>().coeffRef(k)),
bs,
X.topLeftCorner(brows,bs),
Y.topLeftCorner(bcols,bs)
);
}
}
}
template<typename _MatrixType>
UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::computeUnblocked(const _MatrixType& matrix)
{
Index rows = matrix.rows();
Index cols = matrix.cols();
EIGEN_ONLY_USED_FOR_DEBUG(cols);
  eigen_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols.");
m_householder = matrix;
ColVectorType temp(rows);
upperbidiagonalization_inplace_unblocked(m_householder,
&(m_bidiagonal.template diagonal<0>().coeffRef(0)),
&(m_bidiagonal.template diagonal<1>().coeffRef(0)),
temp.data());
m_isInitialized = true;
return *this;
}
template<typename _MatrixType>
UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix)
{
Index rows = matrix.rows();
Index cols = matrix.cols();
EIGEN_ONLY_USED_FOR_DEBUG(rows);
EIGEN_ONLY_USED_FOR_DEBUG(cols);
  eigen_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols.");
m_householder = matrix;
upperbidiagonalization_inplace_blocked(m_householder, m_bidiagonal);
m_isInitialized = true;
return *this;
}
#if 0
/** \return the Householder QR decomposition of \c *this.
*
* \sa class Bidiagonalization
*/
template<typename Derived>
const UpperBidiagonalization<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::bidiagonalization() const
{
return UpperBidiagonalization<PlainObject>(eval());
}
#endif
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_BIDIAGONALIZATION_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/MetisSupport/MetisSupport.h
|
.h
| 4,588
| 138
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef METIS_SUPPORT_H
#define METIS_SUPPORT_H
namespace Eigen {
/**
* Get the fill-reducing ordering from the METIS package
*
* If A is the original matrix and Ap is the permuted matrix,
* the fill-reducing permutation is defined as follows :
* Row (column) i of A is the matperm(i) row (column) of Ap.
* WARNING: As computed by METIS, this corresponds to the vector iperm (instead of perm)
*/
template <typename StorageIndex>
class MetisOrdering
{
public:
typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> PermutationType;
typedef Matrix<StorageIndex,Dynamic,1> IndexVector;
template <typename MatrixType>
void get_symmetrized_graph(const MatrixType& A)
{
Index m = A.cols();
eigen_assert((A.rows() == A.cols()) && "ONLY FOR SQUARED MATRICES");
// Get the transpose of the input matrix
MatrixType At = A.transpose();
// Get the number of nonzeros elements in each row/col of At+A
Index TotNz = 0;
IndexVector visited(m);
visited.setConstant(-1);
for (StorageIndex j = 0; j < m; j++)
{
      // Compute the union structure of A(j,:) and At(j,:)
visited(j) = j; // Do not include the diagonal element
// Get the nonzeros in row/column j of A
for (typename MatrixType::InnerIterator it(A, j); it; ++it)
{
Index idx = it.index(); // Get the row index (for column major) or column index (for row major)
if (visited(idx) != j )
{
visited(idx) = j;
++TotNz;
}
}
//Get the nonzeros in row/column j of At
for (typename MatrixType::InnerIterator it(At, j); it; ++it)
{
Index idx = it.index();
if(visited(idx) != j)
{
visited(idx) = j;
++TotNz;
}
}
}
// Reserve place for A + At
m_indexPtr.resize(m+1);
m_innerIndices.resize(TotNz);
// Now compute the real adjacency list of each column/row
visited.setConstant(-1);
StorageIndex CurNz = 0;
for (StorageIndex j = 0; j < m; j++)
{
m_indexPtr(j) = CurNz;
visited(j) = j; // Do not include the diagonal element
// Add the pattern of row/column j of A to A+At
for (typename MatrixType::InnerIterator it(A,j); it; ++it)
{
StorageIndex idx = it.index(); // Get the row index (for column major) or column index (for row major)
if (visited(idx) != j )
{
visited(idx) = j;
m_innerIndices(CurNz) = idx;
CurNz++;
}
}
//Add the pattern of row/column j of At to A+At
for (typename MatrixType::InnerIterator it(At, j); it; ++it)
{
StorageIndex idx = it.index();
if(visited(idx) != j)
{
visited(idx) = j;
m_innerIndices(CurNz) = idx;
++CurNz;
}
}
}
m_indexPtr(m) = CurNz;
}
template <typename MatrixType>
void operator() (const MatrixType& A, PermutationType& matperm)
{
StorageIndex m = internal::convert_index<StorageIndex>(A.cols()); // must be StorageIndex, because it is passed by address to METIS
IndexVector perm(m),iperm(m);
// First, symmetrize the matrix graph.
get_symmetrized_graph(A);
int output_error;
// Call the fill-reducing routine from METIS
output_error = METIS_NodeND(&m, m_indexPtr.data(), m_innerIndices.data(), NULL, NULL, perm.data(), iperm.data());
if(output_error != METIS_OK)
{
//FIXME The ordering interface should define a class of possible errors
std::cerr << "ERROR WHILE CALLING THE METIS PACKAGE \n";
return;
}
// Get the fill-reducing permutation
//NOTE: If Ap is the permuted matrix then perm and iperm vectors are defined as follows
// Row (column) i of Ap is the perm(i) row(column) of A, and row (column) i of A is the iperm(i) row(column) of Ap
matperm.resize(m);
for (int j = 0; j < m; j++)
matperm.indices()(iperm(j)) = j;
}
protected:
  IndexVector m_indexPtr; // Pointer to the adjacency list of each row/column
IndexVector m_innerIndices; // Adjacency list
};
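// Illustrative usage sketch (assumes METIS is installed and linked, a filled square
// Eigen::SparseMatrix<double> A, and a right-hand side Eigen::VectorXd b of matching size):
//
//   #include <Eigen/SparseLU>
//   #include <Eigen/MetisSupport>
//   typedef Eigen::SparseMatrix<double> SpMat;
//   Eigen::SparseLU<SpMat, Eigen::MetisOrdering<int> > solver;
//   solver.compute(A);                       // METIS_NodeND supplies the fill-reducing ordering
//   Eigen::VectorXd x = solver.solve(b);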
} // end namespace Eigen
#endif
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparseTranspose.h
|
.h
| 3,175
| 93
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSETRANSPOSE_H
#define EIGEN_SPARSETRANSPOSE_H
namespace Eigen {
namespace internal {
template<typename MatrixType,int CompressedAccess=int(MatrixType::Flags&CompressedAccessBit)>
class SparseTransposeImpl
: public SparseMatrixBase<Transpose<MatrixType> >
{};
template<typename MatrixType>
class SparseTransposeImpl<MatrixType,CompressedAccessBit>
: public SparseCompressedBase<Transpose<MatrixType> >
{
typedef SparseCompressedBase<Transpose<MatrixType> > Base;
public:
using Base::derived;
typedef typename Base::Scalar Scalar;
typedef typename Base::StorageIndex StorageIndex;
inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }
inline const Scalar* valuePtr() const { return derived().nestedExpression().valuePtr(); }
inline const StorageIndex* innerIndexPtr() const { return derived().nestedExpression().innerIndexPtr(); }
inline const StorageIndex* outerIndexPtr() const { return derived().nestedExpression().outerIndexPtr(); }
inline const StorageIndex* innerNonZeroPtr() const { return derived().nestedExpression().innerNonZeroPtr(); }
inline Scalar* valuePtr() { return derived().nestedExpression().valuePtr(); }
inline StorageIndex* innerIndexPtr() { return derived().nestedExpression().innerIndexPtr(); }
inline StorageIndex* outerIndexPtr() { return derived().nestedExpression().outerIndexPtr(); }
inline StorageIndex* innerNonZeroPtr() { return derived().nestedExpression().innerNonZeroPtr(); }
};
}
template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
: public internal::SparseTransposeImpl<MatrixType>
{
protected:
typedef internal::SparseTransposeImpl<MatrixType> Base;
};
namespace internal {
template<typename ArgType>
struct unary_evaluator<Transpose<ArgType>, IteratorBased>
: public evaluator_base<Transpose<ArgType> >
{
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
public:
typedef Transpose<ArgType> XprType;
inline Index nonZerosEstimate() const {
return m_argImpl.nonZerosEstimate();
}
class InnerIterator : public EvalIterator
{
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
: EvalIterator(unaryOp.m_argImpl,outer)
{}
Index row() const { return EvalIterator::col(); }
Index col() const { return EvalIterator::row(); }
};
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType& op) :m_argImpl(op.nestedExpression()) {}
protected:
evaluator<ArgType> m_argImpl;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSETRANSPOSE_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparseSelfAdjointView.h
|
.h
| 25,721
| 657
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
#define EIGEN_SPARSE_SELFADJOINTVIEW_H
namespace Eigen {
/** \ingroup SparseCore_Module
* \class SparseSelfAdjointView
*
* \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
*
  * \param MatrixType the type of the sparse matrix storing the coefficients
* \param Mode can be either \c #Lower or \c #Upper
*
  * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
  * with given sparse storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView()
* and most of the time this is the only way that it is used.
*
* \sa SparseMatrixBase::selfadjointView()
*/
namespace internal {
template<typename MatrixType, unsigned int Mode>
struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};
template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);
template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);
}
template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
: public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
{
public:
enum {
Mode = _Mode,
TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ? Upper : 0),
RowsAtCompileTime = internal::traits<SparseSelfAdjointView>::RowsAtCompileTime,
ColsAtCompileTime = internal::traits<SparseSelfAdjointView>::ColsAtCompileTime
};
typedef EigenBase<SparseSelfAdjointView> Base;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
{
eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices");
}
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
/** \internal \returns a reference to the nested matrix */
const _MatrixTypeNested& matrix() const { return m_matrix; }
typename internal::remove_reference<MatrixTypeNested>::type& matrix() { return m_matrix; }
/** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs.
*
* Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product.
* Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product.
*/
template<typename OtherDerived>
Product<SparseSelfAdjointView, OtherDerived>
operator*(const SparseMatrixBase<OtherDerived>& rhs) const
{
return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
}
/** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs.
*
* Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product.
* Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product.
*/
template<typename OtherDerived> friend
Product<OtherDerived, SparseSelfAdjointView>
operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
{
return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
}
/** Efficient sparse self-adjoint matrix times dense vector/matrix product */
template<typename OtherDerived>
Product<SparseSelfAdjointView,OtherDerived>
operator*(const MatrixBase<OtherDerived>& rhs) const
{
return Product<SparseSelfAdjointView,OtherDerived>(*this, rhs.derived());
}
/** Efficient dense vector/matrix times sparse self-adjoint matrix product */
template<typename OtherDerived> friend
Product<OtherDerived,SparseSelfAdjointView>
operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
{
return Product<OtherDerived,SparseSelfAdjointView>(lhs.derived(), rhs);
}
/** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
* \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
*
* \returns a reference to \c *this
*
* To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
* call this function with u.adjoint().
*/
template<typename DerivedU>
SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
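    // Hedged sketch of a rank update (the names `S`, `U`, `n`, `k` are illustrative only):
    //   Eigen::SparseMatrix<double> U(n,k);   // sparse factor
    //   Eigen::SparseMatrix<double> S(n,n);   // only the Lower triangle is stored/updated
    //   S.selfadjointView<Eigen::Lower>().rankUpdate(U, 1.0);   // S += 1.0 * U * U^T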
/** \returns an expression of P H P^-1 */
// TODO implement twists in a more evaluator friendly fashion
SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
{
return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm);
}
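    // Sketch of applying a symmetric permutation (names `S`, `perm`, `res`, `n` are illustrative):
    //   Eigen::PermutationMatrix<Eigen::Dynamic,Eigen::Dynamic,int> perm(n);
    //   Eigen::SparseMatrix<double> res(n,n);
    //   res.selfadjointView<Eigen::Lower>() = S.selfadjointView<Eigen::Lower>().twistedBy(perm);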
template<typename SrcMatrixType,int SrcMode>
SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcMode>& permutedMatrix)
{
internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix);
return *this;
}
SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
{
PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
return *this = src.twistedBy(pnull);
}
template<typename SrcMatrixType,unsigned int SrcMode>
SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
{
PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
return *this = src.twistedBy(pnull);
}
void resize(Index rows, Index cols)
{
EIGEN_ONLY_USED_FOR_DEBUG(rows);
EIGEN_ONLY_USED_FOR_DEBUG(cols);
eigen_assert(rows == this->rows() && cols == this->cols()
&& "SparseSelfadjointView::resize() does not actually allow to resize.");
}
protected:
MatrixTypeNested m_matrix;
//mutable VectorI m_countPerRow;
//mutable VectorI m_countPerCol;
private:
template<typename Dest> void evalTo(Dest &) const;
};
/***************************************************************************
* Implementation of SparseMatrixBase methods
***************************************************************************/
template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView() const
{
return SparseSelfAdjointView<const Derived, UpLo>(derived());
}
template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView()
{
return SparseSelfAdjointView<Derived, UpLo>(derived());
}
/***************************************************************************
* Implementation of SparseSelfAdjointView methods
***************************************************************************/
template<typename MatrixType, unsigned int Mode>
template<typename DerivedU>
SparseSelfAdjointView<MatrixType,Mode>&
SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
{
SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
if(alpha==Scalar(0))
m_matrix = tmp.template triangularView<Mode>();
else
m_matrix += alpha * tmp.template triangularView<Mode>();
return *this;
}
namespace internal {
// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
// in the future selfadjoint-ness should be defined by the expression traits
// such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
template<typename MatrixType, unsigned int Mode>
struct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >
{
typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
typedef SparseSelfAdjointShape Shape;
};
struct SparseSelfAdjoint2Sparse {};
template<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; };
template<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; };
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
{
typedef typename DstXprType::StorageIndex StorageIndex;
typedef internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> AssignOpType;
template<typename DestScalar,int StorageOrder>
static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignOpType&/*func*/)
{
internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
}
  // FIXME: the handling of += and -= in sparse matrices should be cleaned up so that the next two overloads could be reduced to:
template<typename DestScalar,int StorageOrder,typename AssignFunc>
static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignFunc& func)
{
SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
run(tmp, src, AssignOpType());
call_assignment_no_alias_no_transpose(dst, tmp, func);
}
template<typename DestScalar,int StorageOrder>
static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
{
SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
run(tmp, src, AssignOpType());
dst += tmp;
}
template<typename DestScalar,int StorageOrder>
static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
{
SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
run(tmp, src, AssignOpType());
dst -= tmp;
}
template<typename DestScalar>
static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const AssignOpType&/*func*/)
{
// TODO directly evaluate into dst;
SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());
internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
dst = tmp;
}
};
} // end namespace internal
/***************************************************************************
* Implementation of sparse self-adjoint time dense matrix
***************************************************************************/
namespace internal {
template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
EIGEN_ONLY_USED_FOR_DEBUG(alpha);
typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
typedef typename LhsEval::InnerIterator LhsIterator;
typedef typename SparseLhsType::Scalar LhsScalar;
enum {
LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
ProcessFirstHalf =
((Mode&(Upper|Lower))==(Upper|Lower))
|| ( (Mode&Upper) && !LhsIsRowMajor)
|| ( (Mode&Lower) && LhsIsRowMajor),
ProcessSecondHalf = !ProcessFirstHalf
};
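  // Only one triangular half of the selfadjoint lhs is stored, so each stored off-diagonal
  // coefficient contributes twice: once to the row of its inner index and once, conjugated,
  // to the row of its outer index. The diagonal coefficient is handled separately because it
  // is visited either first or last along the stored direction.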
SparseLhsTypeNested lhs_nested(lhs);
LhsEval lhsEval(lhs_nested);
// work on one column at once
for (Index k=0; k<rhs.cols(); ++k)
{
for (Index j=0; j<lhs.outerSize(); ++j)
{
LhsIterator i(lhsEval,j);
// handle diagonal coeff
if (ProcessSecondHalf)
{
while (i && i.index()<j) ++i;
if(i && i.index()==j)
{
res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
++i;
}
}
// premultiplied rhs for scatters
typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));
// accumulator for partial scalar product
typename DenseResType::Scalar res_j(0);
for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
{
LhsScalar lhs_ij = i.value();
if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
res_j += lhs_ij * rhs.coeff(i.index(),k);
res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
}
res.coeffRef(j,k) += alpha * res_j;
// handle diagonal coeff
if (ProcessFirstHalf && i && (i.index()==j))
res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
}
}
}
template<typename LhsView, typename Rhs, int ProductType>
struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
: generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
{
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
{
typedef typename LhsView::_MatrixTypeNested Lhs;
typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(lhsView.matrix());
RhsNested rhsNested(rhs);
internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
}
};
template<typename Lhs, typename RhsView, int ProductType>
struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
: generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
{
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
{
typedef typename RhsView::_MatrixTypeNested Rhs;
typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhsView.matrix());
// transpose everything
Transpose<Dest> dstT(dst);
internal::sparse_selfadjoint_time_dense_product<RhsView::TransposeMode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
}
};
// NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
// TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore
template<typename LhsView, typename Rhs, int ProductTag>
struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape>
: public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>
{
typedef Product<LhsView, Rhs, DefaultProduct> XprType;
typedef typename XprType::PlainObject PlainObject;
typedef evaluator<PlainObject> Base;
product_evaluator(const XprType& xpr)
: m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());
}
protected:
typename Rhs::PlainObject m_lhs;
PlainObject m_result;
};
template<typename Lhs, typename RhsView, int ProductTag>
struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape>
: public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>
{
typedef Product<Lhs, RhsView, DefaultProduct> XprType;
typedef typename XprType::PlainObject PlainObject;
typedef evaluator<PlainObject> Base;
product_evaluator(const XprType& xpr)
: m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), m_rhs);
}
protected:
typename Lhs::PlainObject m_rhs;
PlainObject m_result;
};
} // namespace internal
/***************************************************************************
* Implementation of symmetric copies and permutations
***************************************************************************/
namespace internal {
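// permute_symm_to_fullsymm: expands a selfadjoint matrix, of which only the triangular half
// selected by Mode is stored in 'mat', into a fully populated sparse matrix 'dest', optionally
// applying the symmetric permutation 'perm'. A first pass counts the entries per outer vector,
// a second pass writes inner indices and values (mirrored entries are conjugated).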
template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::Scalar Scalar;
typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
typedef evaluator<MatrixType> MatEval;
typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
MatEval matEval(mat);
Dest& dest(_dest.derived());
enum {
StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
};
Index size = mat.rows();
VectorI count;
count.resize(size);
count.setZero();
dest.resize(size,size);
for(Index j = 0; j<size; ++j)
{
Index jp = perm ? perm[j] : j;
for(MatIterator it(matEval,j); it; ++it)
{
Index i = it.index();
Index r = it.row();
Index c = it.col();
Index ip = perm ? perm[i] : i;
if(Mode==int(Upper|Lower))
count[StorageOrderMatch ? jp : ip]++;
else if(r==c)
count[ip]++;
else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
{
count[ip]++;
count[jp]++;
}
}
}
Index nnz = count.sum();
// reserve space
dest.resizeNonZeros(nnz);
dest.outerIndexPtr()[0] = 0;
for(Index j=0; j<size; ++j)
dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
for(Index j=0; j<size; ++j)
count[j] = dest.outerIndexPtr()[j];
// copy data
for(StorageIndex j = 0; j<size; ++j)
{
for(MatIterator it(matEval,j); it; ++it)
{
StorageIndex i = internal::convert_index<StorageIndex>(it.index());
Index r = it.row();
Index c = it.col();
StorageIndex jp = perm ? perm[j] : j;
StorageIndex ip = perm ? perm[i] : i;
if(Mode==int(Upper|Lower))
{
Index k = count[StorageOrderMatch ? jp : ip]++;
dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
dest.valuePtr()[k] = it.value();
}
else if(r==c)
{
Index k = count[ip]++;
dest.innerIndexPtr()[k] = ip;
dest.valuePtr()[k] = it.value();
}
else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
{
if(!StorageOrderMatch)
std::swap(ip,jp);
Index k = count[jp]++;
dest.innerIndexPtr()[k] = ip;
dest.valuePtr()[k] = it.value();
k = count[ip]++;
dest.innerIndexPtr()[k] = jp;
dest.valuePtr()[k] = numext::conj(it.value());
}
}
}
}
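// permute_symm_to_symm: copies the triangular half SrcMode of 'mat' into the triangular half
// DstMode of 'dest' while applying the symmetric permutation 'perm' on the fly; entries that
// end up mirrored into the opposite half are conjugated so the selfadjoint matrix is preserved.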
template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::Scalar Scalar;
SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
typedef evaluator<MatrixType> MatEval;
typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
enum {
SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
StorageOrderMatch = int(SrcOrder) == int(DstOrder),
DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
};
MatEval matEval(mat);
Index size = mat.rows();
VectorI count(size);
count.setZero();
dest.resize(size,size);
for(StorageIndex j = 0; j<size; ++j)
{
StorageIndex jp = perm ? perm[j] : j;
for(MatIterator it(matEval,j); it; ++it)
{
StorageIndex i = it.index();
if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
continue;
StorageIndex ip = perm ? perm[i] : i;
count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
}
}
dest.outerIndexPtr()[0] = 0;
for(Index j=0; j<size; ++j)
dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
dest.resizeNonZeros(dest.outerIndexPtr()[size]);
for(Index j=0; j<size; ++j)
count[j] = dest.outerIndexPtr()[j];
for(StorageIndex j = 0; j<size; ++j)
{
for(MatIterator it(matEval,j); it; ++it)
{
StorageIndex i = it.index();
if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
continue;
StorageIndex jp = perm ? perm[j] : j;
StorageIndex ip = perm? perm[i] : i;
Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
if(!StorageOrderMatch) std::swap(ip,jp);
if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
dest.valuePtr()[k] = numext::conj(it.value());
else
dest.valuePtr()[k] = it.value();
}
}
}
}
// TODO implement twists in a more evaluator friendly fashion
namespace internal {
template<typename MatrixType, int Mode>
struct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {
};
}
template<typename MatrixType,int Mode>
class SparseSymmetricPermutationProduct
: public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
{
public:
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::StorageIndex StorageIndex;
enum {
RowsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::RowsAtCompileTime,
ColsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::ColsAtCompileTime
};
protected:
typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> Perm;
public:
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type NestedExpression;
SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
: m_matrix(mat), m_perm(perm)
{}
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
const NestedExpression& matrix() const { return m_matrix; }
const Perm& perm() const { return m_perm; }
protected:
MatrixTypeNested m_matrix;
const Perm& m_perm;
};
namespace internal {
template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>
{
typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;
typedef typename DstXprType::StorageIndex DstIndex;
template<int Options>
static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
{
// internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
dst = tmp;
}
template<typename DestType,unsigned int DestMode>
static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
{
internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparseCompressedBase.h
|
.h
| 12,720
| 342
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_COMPRESSED_BASE_H
#define EIGEN_SPARSE_COMPRESSED_BASE_H
namespace Eigen {
template<typename Derived> class SparseCompressedBase;
namespace internal {
template<typename Derived>
struct traits<SparseCompressedBase<Derived> > : traits<Derived>
{};
} // end namespace internal
/** \ingroup SparseCore_Module
* \class SparseCompressedBase
* \brief Common base class for sparse [compressed]-{row|column}-storage format.
*
* This class defines the common interface for all derived classes implementing the compressed sparse storage format, such as:
* - SparseMatrix
* - Ref<SparseMatrixType,Options>
* - Map<SparseMatrixType>
*
*/
template<typename Derived>
class SparseCompressedBase
: public SparseMatrixBase<Derived>
{
public:
typedef SparseMatrixBase<Derived> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseCompressedBase)
using Base::operator=;
using Base::IsRowMajor;
class InnerIterator;
class ReverseInnerIterator;
protected:
typedef typename Base::IndexVector IndexVector;
Eigen::Map<IndexVector> innerNonZeros() { return Eigen::Map<IndexVector>(innerNonZeroPtr(), isCompressed()?0:derived().outerSize()); }
const Eigen::Map<const IndexVector> innerNonZeros() const { return Eigen::Map<const IndexVector>(innerNonZeroPtr(), isCompressed()?0:derived().outerSize()); }
public:
/** \returns the number of non zero coefficients */
inline Index nonZeros() const
{
if(Derived::IsVectorAtCompileTime && outerIndexPtr()==0)
return derived().nonZeros();
else if(isCompressed())
return outerIndexPtr()[derived().outerSize()]-outerIndexPtr()[0];
else if(derived().outerSize()==0)
return 0;
else
return innerNonZeros().sum();
}
/** \returns a const pointer to the array of values.
* This function is aimed at interoperability with other libraries.
* \sa innerIndexPtr(), outerIndexPtr() */
inline const Scalar* valuePtr() const { return derived().valuePtr(); }
/** \returns a non-const pointer to the array of values.
* This function is aimed at interoperability with other libraries.
* \sa innerIndexPtr(), outerIndexPtr() */
inline Scalar* valuePtr() { return derived().valuePtr(); }
/** \returns a const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
inline const StorageIndex* innerIndexPtr() const { return derived().innerIndexPtr(); }
/** \returns a non-const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
inline StorageIndex* innerIndexPtr() { return derived().innerIndexPtr(); }
/** \returns a const pointer to the array of the starting positions of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 for SparseVector
* \sa valuePtr(), innerIndexPtr() */
inline const StorageIndex* outerIndexPtr() const { return derived().outerIndexPtr(); }
/** \returns a non-const pointer to the array of the starting positions of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 for SparseVector
* \sa valuePtr(), innerIndexPtr() */
inline StorageIndex* outerIndexPtr() { return derived().outerIndexPtr(); }
/** \returns a const pointer to the array of the number of non zeros of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 in compressed mode */
inline const StorageIndex* innerNonZeroPtr() const { return derived().innerNonZeroPtr(); }
/** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 in compressed mode */
inline StorageIndex* innerNonZeroPtr() { return derived().innerNonZeroPtr(); }
/** \returns whether \c *this is in compressed form. */
inline bool isCompressed() const { return innerNonZeroPtr()==0; }
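    // Hedged illustration of the raw compressed (CSC) layout exposed by the pointers above,
    // for a 3x3 column-major matrix with nonzeros (0,0)=1, (2,0)=2, (1,1)=3, (2,2)=4:
    //   valuePtr()      -> { 1, 2, 3, 4 }
    //   innerIndexPtr() -> { 0, 2, 1, 2 }   // row index of each stored value
    //   outerIndexPtr() -> { 0, 2, 3, 4 }   // column j occupies [outer[j], outer[j+1])
    //   isCompressed()  -> true, and innerNonZeroPtr() -> 0 (null) in compressed mode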
/** \returns a read-only view of the stored coefficients as a 1D array expression.
*
* \warning this method is for \b compressed \b storage \b only, and it will trigger an assertion otherwise.
*
* \sa valuePtr(), isCompressed() */
const Map<const Array<Scalar,Dynamic,1> > coeffs() const { eigen_assert(isCompressed()); return Array<Scalar,Dynamic,1>::Map(valuePtr(),nonZeros()); }
/** \returns a read-write view of the stored coefficients as a 1D array expression
*
* \warning this method is for \b compressed \b storage \b only, and it will trigger an assertion otherwise.
*
* Here is an example:
* \include SparseMatrix_coeffs.cpp
* and the output is:
* \include SparseMatrix_coeffs.out
*
* \sa valuePtr(), isCompressed() */
Map<Array<Scalar,Dynamic,1> > coeffs() { eigen_assert(isCompressed()); return Array<Scalar,Dynamic,1>::Map(valuePtr(),nonZeros()); }
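    // Sketch: since coeffs() maps the stored values as a 1D array expression, element-wise
    // updates of all nonzeros are one-liners (compressed storage assumed, `A` is illustrative):
    //   A.coeffs() += 1;                 // add 1 to every stored coefficient
    //   A.coeffs() = A.coeffs().abs();   // absolute value of every stored coefficient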
protected:
/** Default constructor. Do nothing. */
SparseCompressedBase() {}
private:
template<typename OtherDerived> explicit SparseCompressedBase(const SparseCompressedBase<OtherDerived>&);
};
template<typename Derived>
class SparseCompressedBase<Derived>::InnerIterator
{
public:
InnerIterator()
: m_values(0), m_indices(0), m_outer(0), m_id(0), m_end(0)
{}
InnerIterator(const InnerIterator& other)
: m_values(other.m_values), m_indices(other.m_indices), m_outer(other.m_outer), m_id(other.m_id), m_end(other.m_end)
{}
InnerIterator& operator=(const InnerIterator& other)
{
m_values = other.m_values;
m_indices = other.m_indices;
const_cast<OuterType&>(m_outer).setValue(other.m_outer.value());
m_id = other.m_id;
m_end = other.m_end;
return *this;
}
InnerIterator(const SparseCompressedBase& mat, Index outer)
: m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer)
{
if(Derived::IsVectorAtCompileTime && mat.outerIndexPtr()==0)
{
m_id = 0;
m_end = mat.nonZeros();
}
else
{
m_id = mat.outerIndexPtr()[outer];
if(mat.isCompressed())
m_end = mat.outerIndexPtr()[outer+1];
else
m_end = m_id + mat.innerNonZeroPtr()[outer];
}
}
explicit InnerIterator(const SparseCompressedBase& mat)
: m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(0), m_id(0), m_end(mat.nonZeros())
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
}
explicit InnerIterator(const internal::CompressedStorage<Scalar,StorageIndex>& data)
: m_values(data.valuePtr()), m_indices(data.indexPtr()), m_outer(0), m_id(0), m_end(data.size())
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
}
inline InnerIterator& operator++() { m_id++; return *this; }
inline const Scalar& value() const { return m_values[m_id]; }
inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
inline StorageIndex index() const { return m_indices[m_id]; }
inline Index outer() const { return m_outer.value(); }
inline Index row() const { return IsRowMajor ? m_outer.value() : index(); }
inline Index col() const { return IsRowMajor ? index() : m_outer.value(); }
inline operator bool() const { return (m_id < m_end); }
protected:
const Scalar* m_values;
const StorageIndex* m_indices;
typedef internal::variable_if_dynamic<Index,Derived::IsVectorAtCompileTime?0:Dynamic> OuterType;
const OuterType m_outer;
Index m_id;
Index m_end;
private:
// If you get here, then you're not using the right InnerIterator type, e.g.:
// SparseMatrix<double,RowMajor> A;
// SparseMatrix<double>::InnerIterator it(A,0);
template<typename T> InnerIterator(const SparseMatrixBase<T>&, Index outer);
};
template<typename Derived>
class SparseCompressedBase<Derived>::ReverseInnerIterator
{
public:
ReverseInnerIterator(const SparseCompressedBase& mat, Index outer)
: m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer)
{
if(Derived::IsVectorAtCompileTime && mat.outerIndexPtr()==0)
{
m_start = 0;
m_id = mat.nonZeros();
}
else
{
m_start = mat.outerIndexPtr()[outer];
if(mat.isCompressed())
m_id = mat.outerIndexPtr()[outer+1];
else
m_id = m_start + mat.innerNonZeroPtr()[outer];
}
}
explicit ReverseInnerIterator(const SparseCompressedBase& mat)
: m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(0), m_start(0), m_id(mat.nonZeros())
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
}
explicit ReverseInnerIterator(const internal::CompressedStorage<Scalar,StorageIndex>& data)
: m_values(data.valuePtr()), m_indices(data.indexPtr()), m_outer(0), m_start(0), m_id(data.size())
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
}
inline ReverseInnerIterator& operator--() { --m_id; return *this; }
inline const Scalar& value() const { return m_values[m_id-1]; }
inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
inline StorageIndex index() const { return m_indices[m_id-1]; }
inline Index outer() const { return m_outer.value(); }
inline Index row() const { return IsRowMajor ? m_outer.value() : index(); }
inline Index col() const { return IsRowMajor ? index() : m_outer.value(); }
inline operator bool() const { return (m_id > m_start); }
protected:
const Scalar* m_values;
const StorageIndex* m_indices;
typedef internal::variable_if_dynamic<Index,Derived::IsVectorAtCompileTime?0:Dynamic> OuterType;
const OuterType m_outer;
Index m_start;
Index m_id;
};
namespace internal {
template<typename Derived>
struct evaluator<SparseCompressedBase<Derived> >
: evaluator_base<Derived>
{
typedef typename Derived::Scalar Scalar;
typedef typename Derived::InnerIterator InnerIterator;
enum {
CoeffReadCost = NumTraits<Scalar>::ReadCost,
Flags = Derived::Flags
};
evaluator() : m_matrix(0), m_zero(0)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
explicit evaluator(const Derived &mat) : m_matrix(&mat), m_zero(0)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_matrix->nonZeros();
}
operator Derived&() { return m_matrix->const_cast_derived(); }
operator const Derived&() const { return *m_matrix; }
typedef typename DenseCoeffsBase<Derived,ReadOnlyAccessors>::CoeffReturnType CoeffReturnType;
const Scalar& coeff(Index row, Index col) const
{
Index p = find(row,col);
if(p==Dynamic)
return m_zero;
else
return m_matrix->const_cast_derived().valuePtr()[p];
}
Scalar& coeffRef(Index row, Index col)
{
Index p = find(row,col);
eigen_assert(p!=Dynamic && "written coefficient does not exist");
return m_matrix->const_cast_derived().valuePtr()[p];
}
protected:
Index find(Index row, Index col) const
{
eigen_internal_assert(row>=0 && row<m_matrix->rows() && col>=0 && col<m_matrix->cols());
const Index outer = Derived::IsRowMajor ? row : col;
const Index inner = Derived::IsRowMajor ? col : row;
Index start = m_matrix->outerIndexPtr()[outer];
Index end = m_matrix->isCompressed() ? m_matrix->outerIndexPtr()[outer+1] : m_matrix->outerIndexPtr()[outer] + m_matrix->innerNonZeroPtr()[outer];
      eigen_assert(end>=start && "you are using a non-finalized sparse matrix, or the written coefficient does not exist");
const Index p = std::lower_bound(m_matrix->innerIndexPtr()+start, m_matrix->innerIndexPtr()+end,inner) - m_matrix->innerIndexPtr();
return ((p<end) && (m_matrix->innerIndexPtr()[p]==inner)) ? p : Dynamic;
}
const Derived *m_matrix;
const Scalar m_zero;
};
}
} // end namespace Eigen
#endif // EIGEN_SPARSE_COMPRESSED_BASE_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
|
.h
| 25,840
| 727
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H
#define EIGEN_SPARSE_CWISE_BINARY_OP_H
namespace Eigen {
// Here we have to handle 3 cases:
// 1 - sparse op dense
// 2 - dense op sparse
// 3 - sparse op sparse
// We also need to implement a 4th iterator for:
// 4 - dense op dense
// Finally, we also need to distinguish between the product and other operations:
//                          configuration   returned mode
//   1 - sparse op dense    product         sparse
//                          generic         dense
//   2 - dense op sparse    product         sparse
//                          generic         dense
//   3 - sparse op sparse   product         sparse
//                          generic         sparse
//   4 - dense op dense     product         dense
//                          generic         dense
//
// TODO to ease compiler job, we could specialize product/quotient with a scalar
// and fallback to cwise-unary evaluator using bind1st_op and bind2nd_op.
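//
// For instance (hedged sketch; `S`, `S2` are sparse and `D` is a dense matrix of matching size):
//   S.cwiseProduct(D)   // "product" case 1: visits only the stored coefficients, result is sparse
//   S + D               // "generic" case 1: every coefficient may be nonzero, result is dense
//   S + S2              // "generic" case 3: result remains sparse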
template<typename BinaryOp, typename Lhs, typename Rhs>
class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>
: public SparseMatrixBase<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
public:
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
typedef SparseMatrixBase<Derived> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
CwiseBinaryOpImpl()
{
EIGEN_STATIC_ASSERT((
(!internal::is_same<typename internal::traits<Lhs>::StorageKind,
typename internal::traits<Rhs>::StorageKind>::value)
|| ((internal::evaluator<Lhs>::Flags&RowMajorBit) == (internal::evaluator<Rhs>::Flags&RowMajorBit))),
THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH);
}
};
namespace internal {
// Generic "sparse OP sparse"
template<typename XprType> struct binary_sparse_evaluator;
template<typename BinaryOp, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IteratorBased, IteratorBased>
: evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
protected:
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
typedef typename evaluator<Rhs>::InnerIterator RhsIterator;
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
typedef typename traits<XprType>::Scalar Scalar;
typedef typename XprType::StorageIndex StorageIndex;
public:
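  // The iterator below walks the union of the two sorted nonzero index lists: when both sides
  // store an index the functor sees both values, otherwise the missing side contributes an
  // explicit Scalar(0); m_id == -1 marks the end of the joint traversal.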
class InnerIterator
{
public:
EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
: m_lhsIter(aEval.m_lhsImpl,outer), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor)
{
this->operator++();
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index()))
{
m_id = m_lhsIter.index();
m_value = m_functor(m_lhsIter.value(), m_rhsIter.value());
++m_lhsIter;
++m_rhsIter;
}
else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index())))
{
m_id = m_lhsIter.index();
m_value = m_functor(m_lhsIter.value(), Scalar(0));
++m_lhsIter;
}
else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index())))
{
m_id = m_rhsIter.index();
m_value = m_functor(Scalar(0), m_rhsIter.value());
++m_rhsIter;
}
else
{
m_value = 0; // this is to avoid a compilation warning
m_id = -1;
}
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); }
EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }
protected:
LhsIterator m_lhsIter;
RhsIterator m_rhsIter;
const BinaryOp& m_functor;
Scalar m_value;
StorageIndex m_id;
};
enum {
CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
Flags = XprType::Flags
};
explicit binary_evaluator(const XprType& xpr)
: m_functor(xpr.functor()),
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_lhsImpl.nonZerosEstimate() + m_rhsImpl.nonZerosEstimate();
}
protected:
const BinaryOp m_functor;
evaluator<Lhs> m_lhsImpl;
evaluator<Rhs> m_rhsImpl;
};
// dense op sparse
template<typename BinaryOp, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IteratorBased>
: evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
protected:
typedef typename evaluator<Rhs>::InnerIterator RhsIterator;
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
typedef typename traits<XprType>::Scalar Scalar;
typedef typename XprType::StorageIndex StorageIndex;
public:
class InnerIterator
{
enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
public:
EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
: m_lhsEval(aEval.m_lhsImpl), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor), m_value(0), m_id(-1), m_innerSize(aEval.m_expr.rhs().innerSize())
{
this->operator++();
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
++m_id;
if(m_id<m_innerSize)
{
Scalar lhsVal = m_lhsEval.coeff(IsRowMajor?m_rhsIter.outer():m_id,
IsRowMajor?m_id:m_rhsIter.outer());
if(m_rhsIter && m_rhsIter.index()==m_id)
{
m_value = m_functor(lhsVal, m_rhsIter.value());
++m_rhsIter;
}
else
m_value = m_functor(lhsVal, Scalar(0));
}
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const { eigen_internal_assert(m_id<m_innerSize); return m_value; }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
EIGEN_STRONG_INLINE Index outer() const { return m_rhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return IsRowMajor ? m_rhsIter.outer() : m_id; }
EIGEN_STRONG_INLINE Index col() const { return IsRowMajor ? m_id : m_rhsIter.outer(); }
EIGEN_STRONG_INLINE operator bool() const { return m_id<m_innerSize; }
protected:
const evaluator<Lhs> &m_lhsEval;
RhsIterator m_rhsIter;
const BinaryOp& m_functor;
Scalar m_value;
StorageIndex m_id;
StorageIndex m_innerSize;
};
enum {
CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
// Expose storage order of the sparse expression
Flags = (XprType::Flags & ~RowMajorBit) | (int(Rhs::Flags)&RowMajorBit)
};
explicit binary_evaluator(const XprType& xpr)
: m_functor(xpr.functor()),
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs()),
m_expr(xpr)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_expr.size();
}
protected:
const BinaryOp m_functor;
evaluator<Lhs> m_lhsImpl;
evaluator<Rhs> m_rhsImpl;
const XprType &m_expr;
};
// sparse op dense
template<typename BinaryOp, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IteratorBased, IndexBased>
: evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
protected:
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
typedef typename traits<XprType>::Scalar Scalar;
typedef typename XprType::StorageIndex StorageIndex;
public:
class InnerIterator
{
enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
public:
EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
: m_lhsIter(aEval.m_lhsImpl,outer), m_rhsEval(aEval.m_rhsImpl), m_functor(aEval.m_functor), m_value(0), m_id(-1), m_innerSize(aEval.m_expr.lhs().innerSize())
{
this->operator++();
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
++m_id;
if(m_id<m_innerSize)
{
Scalar rhsVal = m_rhsEval.coeff(IsRowMajor?m_lhsIter.outer():m_id,
IsRowMajor?m_id:m_lhsIter.outer());
if(m_lhsIter && m_lhsIter.index()==m_id)
{
m_value = m_functor(m_lhsIter.value(), rhsVal);
++m_lhsIter;
}
else
m_value = m_functor(Scalar(0),rhsVal);
}
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const { eigen_internal_assert(m_id<m_innerSize); return m_value; }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return IsRowMajor ? m_lhsIter.outer() : m_id; }
EIGEN_STRONG_INLINE Index col() const { return IsRowMajor ? m_id : m_lhsIter.outer(); }
EIGEN_STRONG_INLINE operator bool() const { return m_id<m_innerSize; }
protected:
LhsIterator m_lhsIter;
const evaluator<Rhs> &m_rhsEval;
const BinaryOp& m_functor;
Scalar m_value;
StorageIndex m_id;
StorageIndex m_innerSize;
};
enum {
CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
// Expose storage order of the sparse expression
Flags = (XprType::Flags & ~RowMajorBit) | (int(Lhs::Flags)&RowMajorBit)
};
explicit binary_evaluator(const XprType& xpr)
: m_functor(xpr.functor()),
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs()),
m_expr(xpr)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_expr.size();
}
protected:
const BinaryOp m_functor;
evaluator<Lhs> m_lhsImpl;
evaluator<Rhs> m_rhsImpl;
const XprType &m_expr;
};
template<typename T,
typename LhsKind = typename evaluator_traits<typename T::Lhs>::Kind,
typename RhsKind = typename evaluator_traits<typename T::Rhs>::Kind,
typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct sparse_conjunction_evaluator;
// "sparse .* sparse"
template<typename T1, typename T2, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IteratorBased, IteratorBased>
: sparse_conjunction_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >
{
typedef CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> XprType;
typedef sparse_conjunction_evaluator<XprType> Base;
explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "dense .* sparse"
template<typename T1, typename T2, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IndexBased, IteratorBased>
: sparse_conjunction_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >
{
typedef CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> XprType;
typedef sparse_conjunction_evaluator<XprType> Base;
explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "sparse .* dense"
template<typename T1, typename T2, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IteratorBased, IndexBased>
: sparse_conjunction_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >
{
typedef CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> XprType;
typedef sparse_conjunction_evaluator<XprType> Base;
explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "sparse ./ dense"
template<typename T1, typename T2, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_quotient_op<T1,T2>, Lhs, Rhs>, IteratorBased, IndexBased>
: sparse_conjunction_evaluator<CwiseBinaryOp<scalar_quotient_op<T1,T2>, Lhs, Rhs> >
{
typedef CwiseBinaryOp<scalar_quotient_op<T1,T2>, Lhs, Rhs> XprType;
typedef sparse_conjunction_evaluator<XprType> Base;
explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "sparse && sparse"
template<typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs>, IteratorBased, IteratorBased>
: sparse_conjunction_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> >
{
typedef CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> XprType;
typedef sparse_conjunction_evaluator<XprType> Base;
explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "dense && sparse"
template<typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs>, IndexBased, IteratorBased>
: sparse_conjunction_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> >
{
typedef CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> XprType;
typedef sparse_conjunction_evaluator<XprType> Base;
explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "sparse && dense"
template<typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs>, IteratorBased, IndexBased>
: sparse_conjunction_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> >
{
typedef CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> XprType;
typedef sparse_conjunction_evaluator<XprType> Base;
explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "sparse ^ sparse"
template<typename XprType>
struct sparse_conjunction_evaluator<XprType, IteratorBased, IteratorBased>
: evaluator_base<XprType>
{
protected:
typedef typename XprType::Functor BinaryOp;
typedef typename XprType::Lhs LhsArg;
typedef typename XprType::Rhs RhsArg;
typedef typename evaluator<LhsArg>::InnerIterator LhsIterator;
typedef typename evaluator<RhsArg>::InnerIterator RhsIterator;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename traits<XprType>::Scalar Scalar;
public:
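  // Conjunction (e.g. coefficient-wise product): only indices present in both operands can
  // yield a nonzero, so the iterator advances the lagging side until the indices match
  // (set intersection of the two nonzero patterns).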
class InnerIterator
{
public:
EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer)
: m_lhsIter(aEval.m_lhsImpl,outer), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor)
{
while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
{
if (m_lhsIter.index() < m_rhsIter.index())
++m_lhsIter;
else
++m_rhsIter;
}
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
++m_lhsIter;
++m_rhsIter;
while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
{
if (m_lhsIter.index() < m_rhsIter.index())
++m_lhsIter;
else
++m_rhsIter;
}
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); }
EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); }
protected:
LhsIterator m_lhsIter;
RhsIterator m_rhsIter;
const BinaryOp& m_functor;
};
enum {
CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
Flags = XprType::Flags
};
explicit sparse_conjunction_evaluator(const XprType& xpr)
: m_functor(xpr.functor()),
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return (std::min)(m_lhsImpl.nonZerosEstimate(), m_rhsImpl.nonZerosEstimate());
}
protected:
const BinaryOp m_functor;
evaluator<LhsArg> m_lhsImpl;
evaluator<RhsArg> m_rhsImpl;
};
// "dense ^ sparse"
template<typename XprType>
struct sparse_conjunction_evaluator<XprType, IndexBased, IteratorBased>
: evaluator_base<XprType>
{
protected:
typedef typename XprType::Functor BinaryOp;
typedef typename XprType::Lhs LhsArg;
typedef typename XprType::Rhs RhsArg;
typedef evaluator<LhsArg> LhsEvaluator;
typedef typename evaluator<RhsArg>::InnerIterator RhsIterator;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename traits<XprType>::Scalar Scalar;
public:
class InnerIterator
{
enum { IsRowMajor = (int(RhsArg::Flags)&RowMajorBit)==RowMajorBit };
public:
EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer)
: m_lhsEval(aEval.m_lhsImpl), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor), m_outer(outer)
{}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
++m_rhsIter;
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const
{ return m_functor(m_lhsEval.coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_rhsIter.index(); }
EIGEN_STRONG_INLINE Index outer() const { return m_rhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); }
EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
protected:
const LhsEvaluator &m_lhsEval;
RhsIterator m_rhsIter;
const BinaryOp& m_functor;
const Index m_outer;
};
enum {
CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
// Expose storage order of the sparse expression
Flags = (XprType::Flags & ~RowMajorBit) | (int(RhsArg::Flags)&RowMajorBit)
};
explicit sparse_conjunction_evaluator(const XprType& xpr)
: m_functor(xpr.functor()),
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_rhsImpl.nonZerosEstimate();
}
protected:
const BinaryOp m_functor;
evaluator<LhsArg> m_lhsImpl;
evaluator<RhsArg> m_rhsImpl;
};
// "sparse ^ dense"
template<typename XprType>
struct sparse_conjunction_evaluator<XprType, IteratorBased, IndexBased>
: evaluator_base<XprType>
{
protected:
typedef typename XprType::Functor BinaryOp;
typedef typename XprType::Lhs LhsArg;
typedef typename XprType::Rhs RhsArg;
typedef typename evaluator<LhsArg>::InnerIterator LhsIterator;
typedef evaluator<RhsArg> RhsEvaluator;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename traits<XprType>::Scalar Scalar;
public:
class InnerIterator
{
enum { IsRowMajor = (int(LhsArg::Flags)&RowMajorBit)==RowMajorBit };
public:
EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer)
: m_lhsIter(aEval.m_lhsImpl,outer), m_rhsEval(aEval.m_rhsImpl), m_functor(aEval.m_functor), m_outer(outer)
{}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
++m_lhsIter;
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const
{ return m_functor(m_lhsIter.value(),
m_rhsEval.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); }
EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
protected:
LhsIterator m_lhsIter;
const evaluator<RhsArg> &m_rhsEval;
const BinaryOp& m_functor;
const Index m_outer;
};
enum {
CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
// Expose storage order of the sparse expression
Flags = (XprType::Flags & ~RowMajorBit) | (int(LhsArg::Flags)&RowMajorBit)
};
explicit sparse_conjunction_evaluator(const XprType& xpr)
: m_functor(xpr.functor()),
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_lhsImpl.nonZerosEstimate();
}
protected:
const BinaryOp m_functor;
evaluator<LhsArg> m_lhsImpl;
evaluator<RhsArg> m_rhsImpl;
};
}
/***************************************************************************
* Implementation of SparseMatrixBase and SparseCwise functions/operators
***************************************************************************/
template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator+=(const EigenBase<OtherDerived> &other)
{
call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
{
  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived &
SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)
{
return derived() = derived() - other.derived();
}
template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived &
SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& other)
{
return derived() = derived() + other.derived();
}
template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator+=(const DiagonalBase<OtherDerived>& other)
{
call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator-=(const DiagonalBase<OtherDerived>& other)
{
call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE const typename SparseMatrixBase<Derived>::template CwiseProductDenseReturnType<OtherDerived>::Type
SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) const
{
return typename CwiseProductDenseReturnType<OtherDerived>::Type(derived(), other.derived());
}
template<typename DenseDerived, typename SparseDerived>
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>
operator+(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b)
{
return CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
}
template<typename SparseDerived, typename DenseDerived>
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
operator+(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b)
{
return CwiseBinaryOp<internal::scalar_sum_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
}
template<typename DenseDerived, typename SparseDerived>
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>
operator-(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b)
{
return CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
}
template<typename SparseDerived, typename DenseDerived>
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
operator-(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b)
{
return CwiseBinaryOp<internal::scalar_difference_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
}
} // end namespace Eigen
#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparseFuzzy.h
|
.h
| 1,107
| 30
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_FUZZY_H
#define EIGEN_SPARSE_FUZZY_H
namespace Eigen {
template<typename Derived>
template<typename OtherDerived>
bool SparseMatrixBase<Derived>::isApprox(const SparseMatrixBase<OtherDerived>& other, const RealScalar &prec) const
{
const typename internal::nested_eval<Derived,2,PlainObject>::type actualA(derived());
typename internal::conditional<bool(IsRowMajor)==bool(OtherDerived::IsRowMajor),
const typename internal::nested_eval<OtherDerived,2,PlainObject>::type,
const PlainObject>::type actualB(other.derived());
return (actualA - actualB).squaredNorm() <= prec * prec * numext::mini(actualA.squaredNorm(), actualB.squaredNorm());
}
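// Usage sketch (illustrative addition, not part of the upstream file): isApprox() compares two
// sparse expressions with a relative tolerance, following the formula implemented above.
// The matrices A and B below are hypothetical placeholders.
// \code
// Eigen::SparseMatrix<double> A(100, 100), B(100, 100);
// // ... fill A and B, e.g. via setFromTriplets() ...
// bool close = A.isApprox(B, 1e-12);   // true iff ||A-B||^2 <= (1e-12)^2 * min(||A||^2, ||B||^2)
// \endcode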
} // end namespace Eigen
#endif // EIGEN_SPARSE_FUZZY_H
| Unknown
2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/SparseCore/SparseDot.h | .h | 3,080 | 99 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_DOT_H
#define EIGEN_SPARSE_DOT_H
namespace Eigen {
template<typename Derived>
template<typename OtherDerived>
typename internal::traits<Derived>::Scalar
SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
eigen_assert(size() == other.size());
  eigen_assert(other.size()>0 && "you are using an uninitialized vector");
internal::evaluator<Derived> thisEval(derived());
typename internal::evaluator<Derived>::InnerIterator i(thisEval, 0);
Scalar res(0);
while (i)
{
res += numext::conj(i.value()) * other.coeff(i.index());
++i;
}
return res;
}
template<typename Derived>
template<typename OtherDerived>
typename internal::traits<Derived>::Scalar
SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
eigen_assert(size() == other.size());
internal::evaluator<Derived> thisEval(derived());
typename internal::evaluator<Derived>::InnerIterator i(thisEval, 0);
internal::evaluator<OtherDerived> otherEval(other.derived());
typename internal::evaluator<OtherDerived>::InnerIterator j(otherEval, 0);
Scalar res(0);
while (i && j)
{
if (i.index()==j.index())
{
res += numext::conj(i.value()) * j.value();
++i; ++j;
}
else if (i.index()<j.index())
++i;
else
++j;
}
return res;
}
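// Usage sketch (illustrative addition, not part of the upstream file): dot() requires two
// vectors of matching size and scalar type; the loop above intersects the non-zero index sets.
// The vectors u and v below are hypothetical placeholders.
// \code
// Eigen::SparseVector<double> u(1000), v(1000);
// u.insert(3) = 2.0;
// v.insert(3) = 5.0;  v.insert(7) = 1.0;
// double d = u.dot(v);   // 10.0: only the shared index 3 contributes
// \endcode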
template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::squaredNorm() const
{
return numext::real((*this).cwiseAbs2().sum());
}
template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::norm() const
{
using std::sqrt;
return sqrt(squaredNorm());
}
template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::blueNorm() const
{
return internal::blueNorm_impl(*this);
}
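// Usage sketch (illustrative addition, not part of the upstream file): squaredNorm(), norm()
// and blueNorm() all reduce over the explicitly stored coefficients only. The vector v below
// is a hypothetical placeholder.
// \code
// Eigen::SparseVector<double> v(3);
// v.insert(0) = 3.0;  v.insert(2) = 4.0;
// double n = v.norm();       // 5.0
// double b = v.blueNorm();   // same value, with overflow/underflow-safe accumulation
// \endcode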
} // end namespace Eigen
#endif // EIGEN_SPARSE_DOT_H
| Unknown
2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/SparseCore/SparseCwiseUnaryOp.h | .h | 4,737 | 151 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H
#define EIGEN_SPARSE_CWISE_UNARY_OP_H
namespace Eigen {
namespace internal {
template<typename UnaryOp, typename ArgType>
struct unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>
: public evaluator_base<CwiseUnaryOp<UnaryOp,ArgType> >
{
public:
typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;
class InnerIterator;
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_argImpl.nonZerosEstimate();
}
protected:
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
const UnaryOp m_functor;
evaluator<ArgType> m_argImpl;
};
template<typename UnaryOp, typename ArgType>
class unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::InnerIterator
: public unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator
{
protected:
typedef typename XprType::Scalar Scalar;
typedef typename unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator Base;
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
: Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor)
{}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{ Base::operator++(); return *this; }
EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); }
protected:
const UnaryOp m_functor;
private:
Scalar& valueRef();
};
template<typename ViewOp, typename ArgType>
struct unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>
: public evaluator_base<CwiseUnaryView<ViewOp,ArgType> >
{
public:
typedef CwiseUnaryView<ViewOp, ArgType> XprType;
class InnerIterator;
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<ViewOp>::Cost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<ViewOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
protected:
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
const ViewOp m_functor;
evaluator<ArgType> m_argImpl;
};
template<typename ViewOp, typename ArgType>
class unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::InnerIterator
: public unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator
{
protected:
typedef typename XprType::Scalar Scalar;
typedef typename unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator Base;
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
: Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor)
{}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{ Base::operator++(); return *this; }
EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); }
EIGEN_STRONG_INLINE Scalar& valueRef() { return m_functor(Base::valueRef()); }
protected:
const ViewOp m_functor;
};
} // end namespace internal
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
SparseMatrixBase<Derived>::operator*=(const Scalar& other)
{
typedef typename internal::evaluator<Derived>::InnerIterator EvalIterator;
internal::evaluator<Derived> thisEval(derived());
for (Index j=0; j<outerSize(); ++j)
for (EvalIterator i(thisEval,j); i; ++i)
i.valueRef() *= other;
return derived();
}
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
SparseMatrixBase<Derived>::operator/=(const Scalar& other)
{
typedef typename internal::evaluator<Derived>::InnerIterator EvalIterator;
internal::evaluator<Derived> thisEval(derived());
for (Index j=0; j<outerSize(); ++j)
for (EvalIterator i(thisEval,j); i; ++i)
i.valueRef() /= other;
return derived();
}
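// Usage sketch (illustrative addition, not part of the upstream file): operator*= and
// operator/= above visit the stored non-zeros only, so the sparsity pattern is preserved.
// The matrix M below is a hypothetical placeholder.
// \code
// Eigen::SparseMatrix<float> M(8, 8);
// M.insert(2, 3) = 6.0f;
// M *= 2.0f;   // the stored coefficient becomes 12, implicit zeros stay implicit
// M /= 4.0f;   // the stored coefficient becomes 3
// \endcode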
} // end namespace Eigen
#endif // EIGEN_SPARSE_CWISE_UNARY_OP_H
| Unknown
2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/SparseCore/SparseUtil.h | .h | 6,602 | 179 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEUTIL_H
#define EIGEN_SPARSEUTIL_H
namespace Eigen {
#ifdef NDEBUG
#define EIGEN_DBG_SPARSE(X)
#else
#define EIGEN_DBG_SPARSE(X) X
#endif
#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \
template<typename OtherDerived> \
EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SparseMatrixBase<OtherDerived>& other) \
{ \
return Base::operator Op(other.derived()); \
} \
EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \
{ \
return Base::operator Op(other); \
}
#define EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \
template<typename Other> \
EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \
{ \
return Base::operator Op(scalar); \
}
#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =)
#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \
EIGEN_GENERIC_PUBLIC_INTERFACE(Derived)
const int CoherentAccessPattern = 0x1;
const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;
const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;
const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class SparseMatrix;
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class DynamicSparseMatrix;
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class SparseVector;
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class MappedSparseMatrix;
template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView;
template<typename Lhs, typename Rhs> class SparseDiagonalProduct;
template<typename MatrixType> class SparseView;
template<typename Lhs, typename Rhs> class SparseSparseProduct;
template<typename Lhs, typename Rhs> class SparseTimeDenseProduct;
template<typename Lhs, typename Rhs> class DenseTimeSparseProduct;
template<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProduct;
template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType;
template<typename Lhs, typename Rhs,
int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct DenseSparseProductReturnType;
template<typename Lhs, typename Rhs,
int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct SparseDenseProductReturnType;
template<typename MatrixType,int UpLo> class SparseSymmetricPermutationProduct;
namespace internal {
template<typename T,int Rows,int Cols,int Flags> struct sparse_eval;
template<typename T> struct eval<T,Sparse>
: sparse_eval<T, traits<T>::RowsAtCompileTime,traits<T>::ColsAtCompileTime,traits<T>::Flags>
{};
template<typename T,int Cols,int Flags> struct sparse_eval<T,1,Cols,Flags> {
typedef typename traits<T>::Scalar _Scalar;
typedef typename traits<T>::StorageIndex _StorageIndex;
public:
typedef SparseVector<_Scalar, RowMajor, _StorageIndex> type;
};
template<typename T,int Rows,int Flags> struct sparse_eval<T,Rows,1,Flags> {
typedef typename traits<T>::Scalar _Scalar;
typedef typename traits<T>::StorageIndex _StorageIndex;
public:
typedef SparseVector<_Scalar, ColMajor, _StorageIndex> type;
};
// TODO this seems almost identical to plain_matrix_type<T, Sparse>
template<typename T,int Rows,int Cols,int Flags> struct sparse_eval {
typedef typename traits<T>::Scalar _Scalar;
typedef typename traits<T>::StorageIndex _StorageIndex;
enum { _Options = ((Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
public:
typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type;
};
template<typename T,int Flags> struct sparse_eval<T,1,1,Flags> {
typedef typename traits<T>::Scalar _Scalar;
public:
typedef Matrix<_Scalar, 1, 1> type;
};
template<typename T> struct plain_matrix_type<T,Sparse>
{
typedef typename traits<T>::Scalar _Scalar;
typedef typename traits<T>::StorageIndex _StorageIndex;
enum { _Options = ((evaluator<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
public:
typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type;
};
template<typename T>
struct plain_object_eval<T,Sparse>
: sparse_eval<T, traits<T>::RowsAtCompileTime,traits<T>::ColsAtCompileTime, evaluator<T>::Flags>
{};
template<typename Decomposition, typename RhsType>
struct solve_traits<Decomposition,RhsType,Sparse>
{
typedef typename sparse_eval<RhsType, RhsType::RowsAtCompileTime, RhsType::ColsAtCompileTime,traits<RhsType>::Flags>::type PlainObject;
};
template<typename Derived>
struct generic_xpr_base<Derived, MatrixXpr, Sparse>
{
typedef SparseMatrixBase<Derived> type;
};
struct SparseTriangularShape { static std::string debugName() { return "SparseTriangularShape"; } };
struct SparseSelfAdjointShape { static std::string debugName() { return "SparseSelfAdjointShape"; } };
template<> struct glue_shapes<SparseShape,SelfAdjointShape> { typedef SparseSelfAdjointShape type; };
template<> struct glue_shapes<SparseShape,TriangularShape > { typedef SparseTriangularShape type; };
} // end namespace internal
/** \ingroup SparseCore_Module
*
* \class Triplet
*
* \brief A small structure to hold a non zero as a triplet (i,j,value).
*
* \sa SparseMatrix::setFromTriplets()
*/
template<typename Scalar, typename StorageIndex=typename SparseMatrix<Scalar>::StorageIndex >
class Triplet
{
public:
Triplet() : m_row(0), m_col(0), m_value(0) {}
Triplet(const StorageIndex& i, const StorageIndex& j, const Scalar& v = Scalar(0))
: m_row(i), m_col(j), m_value(v)
{}
/** \returns the row index of the element */
const StorageIndex& row() const { return m_row; }
/** \returns the column index of the element */
const StorageIndex& col() const { return m_col; }
/** \returns the value of the element */
const Scalar& value() const { return m_value; }
protected:
StorageIndex m_row, m_col;
Scalar m_value;
};
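// Usage sketch (illustrative addition, not part of the upstream file): Triplet is typically
// used to assemble a SparseMatrix through setFromTriplets(), as the \sa reference above
// suggests. Sizes and values below are hypothetical.
// \code
// #include <vector>
// std::vector<Eigen::Triplet<double> > entries;
// entries.push_back(Eigen::Triplet<double>(0, 0, 4.0));
// entries.push_back(Eigen::Triplet<double>(1, 2, -1.0));
// Eigen::SparseMatrix<double> A(3, 3);
// A.setFromTriplets(entries.begin(), entries.end());   // duplicate (i,j) pairs are summed
// \endcode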
} // end namespace Eigen
#endif // EIGEN_SPARSEUTIL_H
| Unknown
2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/SparseCore/SparseColEtree.h | .h | 6,485 | 207 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
* NOTE: This file is the modified version of sp_coletree.c file in SuperLU
* -- SuperLU routine (version 3.1) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* August 1, 2008
*
* Copyright (c) 1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
* EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program for any
* purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is
* granted, provided the above notices are retained, and a notice that
* the code was modified is included with the above copyright notice.
*/
#ifndef SPARSE_COLETREE_H
#define SPARSE_COLETREE_H
namespace Eigen {
namespace internal {
/** Find the root of the tree/set containing vertex i, using path halving */
template<typename Index, typename IndexVector>
Index etree_find (Index i, IndexVector& pp)
{
Index p = pp(i); // Parent
Index gp = pp(p); // Grand parent
while (gp != p)
{
pp(i) = gp; // Parent pointer on find path is changed to former grand parent
i = gp;
p = pp(i);
gp = pp(p);
}
return p;
}
/** Compute the column elimination tree of a sparse matrix
* \param mat The matrix in column-major format.
* \param parent The elimination tree
* \param firstRowElt The column index of the first element in each row
* \param perm The permutation to apply to the column of \b mat
*/
template <typename MatrixType, typename IndexVector>
int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::StorageIndex *perm=0)
{
typedef typename MatrixType::StorageIndex StorageIndex;
StorageIndex nc = convert_index<StorageIndex>(mat.cols()); // Number of columns
StorageIndex m = convert_index<StorageIndex>(mat.rows());
StorageIndex diagSize = (std::min)(nc,m);
IndexVector root(nc); // root of subtree of etree
root.setZero();
IndexVector pp(nc); // disjoint sets
pp.setZero(); // Initialize disjoint sets
parent.resize(mat.cols());
//Compute first nonzero column in each row
firstRowElt.resize(m);
firstRowElt.setConstant(nc);
firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1);
bool found_diag;
for (StorageIndex col = 0; col < nc; col++)
{
StorageIndex pcol = col;
if(perm) pcol = perm[col];
for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it)
{
Index row = it.row();
firstRowElt(row) = (std::min)(firstRowElt(row), col);
}
}
/* Compute etree by Liu's algorithm for symmetric matrices,
except use (firstRowElt[r],c) in place of an edge (r,c) of A.
Thus each row clique in A'*A is replaced by a star
centered at its first vertex, which has the same fill. */
StorageIndex rset, cset, rroot;
for (StorageIndex col = 0; col < nc; col++)
{
found_diag = col>=m;
pp(col) = col;
cset = col;
root(cset) = col;
parent(col) = nc;
/* The diagonal element is treated here even if it does not exist in the matrix
* hence the loop is executed once more */
StorageIndex pcol = col;
if(perm) pcol = perm[col];
for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it)
{ // A sequence of interleaved find and union is performed
Index i = col;
if(it) i = it.index();
if (i == col) found_diag = true;
StorageIndex row = firstRowElt(i);
if (row >= col) continue;
rset = internal::etree_find(row, pp); // Find the name of the set containing row
rroot = root(rset);
if (rroot != col)
{
parent(rroot) = col;
pp(cset) = rset;
cset = rset;
root(cset) = col;
}
}
}
return 0;
}
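// Usage sketch (illustrative addition, not part of the upstream file): coletree() fills
// 'parent' with the column elimination tree of a column-major sparse matrix; it is an
// internal helper used by the sparse LU/QR factorizations. The matrix A below is a
// hypothetical placeholder.
// \code
// typedef Eigen::SparseMatrix<double> SpMat;
// typedef Eigen::Matrix<SpMat::StorageIndex, Eigen::Dynamic, 1> IndexVector;
// SpMat A(5, 5);                      // ... filled elsewhere ...
// IndexVector parent, firstRowElt;    // resized by coletree() itself
// Eigen::internal::coletree(A, parent, firstRowElt);
// \endcode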
/**
* Depth-first search from vertex n. No recursion.
* This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France.
*/
template <typename IndexVector>
void nr_etdfs (typename IndexVector::Scalar n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, typename IndexVector::Scalar postnum)
{
typedef typename IndexVector::Scalar StorageIndex;
StorageIndex current = n, first, next;
while (postnum != n)
{
    // Fetch the first kid of the current node
first = first_kid(current);
// no kid for the current node
if (first == -1)
{
// Numbering this node because it has no kid
post(current) = postnum++;
// looking for the next kid
next = next_kid(current);
while (next == -1)
{
// No more kids : back to the parent node
current = parent(current);
// numbering the parent node
post(current) = postnum++;
// Get the next kid
next = next_kid(current);
}
// stopping criterion
if (postnum == n+1) return;
// Updating current node
current = next;
}
else
{
current = first;
}
}
}
/**
* \brief Post order a tree
* \param n the number of nodes
* \param parent Input tree
* \param post postordered tree
*/
template <typename IndexVector>
void treePostorder(typename IndexVector::Scalar n, IndexVector& parent, IndexVector& post)
{
typedef typename IndexVector::Scalar StorageIndex;
IndexVector first_kid, next_kid; // Linked list of children
StorageIndex postnum;
// Allocate storage for working arrays and results
first_kid.resize(n+1);
next_kid.setZero(n+1);
post.setZero(n+1);
// Set up structure describing children
first_kid.setConstant(-1);
for (StorageIndex v = n-1; v >= 0; v--)
{
StorageIndex dad = parent(v);
next_kid(v) = first_kid(dad);
first_kid(dad) = v;
}
// Depth-first search from dummy root vertex #n
postnum = 0;
internal::nr_etdfs(n, parent, first_kid, next_kid, post, postnum);
}
} // end namespace internal
} // end namespace Eigen
#endif // SPARSE_COLETREE_H
| Unknown
2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h | .h | 13,178 | 353 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
#define EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
namespace Eigen {
namespace internal {
template<typename Lhs, typename Rhs, typename ResultType>
static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, bool sortedInsertion = false)
{
typedef typename remove_all<Lhs>::type::Scalar LhsScalar;
typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
typedef typename remove_all<ResultType>::type::Scalar ResScalar;
// make sure to call innerSize/outerSize since we fake the storage order.
Index rows = lhs.innerSize();
Index cols = rhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
ei_declare_aligned_stack_constructed_variable(bool, mask, rows, 0);
ei_declare_aligned_stack_constructed_variable(ResScalar, values, rows, 0);
ei_declare_aligned_stack_constructed_variable(Index, indices, rows, 0);
std::memset(mask,0,sizeof(bool)*rows);
evaluator<Lhs> lhsEval(lhs);
evaluator<Rhs> rhsEval(rhs);
// estimate the number of non zero entries
  // given a rhs column containing Y non-zeros, we assume that the corresponding Y columns
  // of the lhs differ on average by one non-zero, so the number of non-zeros of the
  // product of one rhs column with the lhs is X+Y, where X is the average number of
  // non-zeros per column of the lhs.
// Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();
res.setZero();
res.reserve(Index(estimated_nnz_prod));
// we compute each column of the result, one after the other
for (Index j=0; j<cols; ++j)
{
res.startVec(j);
Index nnz = 0;
for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
{
RhsScalar y = rhsIt.value();
Index k = rhsIt.index();
for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
{
Index i = lhsIt.index();
LhsScalar x = lhsIt.value();
if(!mask[i])
{
mask[i] = true;
values[i] = x * y;
indices[nnz] = i;
++nnz;
}
else
values[i] += x * y;
}
}
if(!sortedInsertion)
{
// unordered insertion
for(Index k=0; k<nnz; ++k)
{
Index i = indices[k];
res.insertBackByOuterInnerUnordered(j,i) = values[i];
mask[i] = false;
}
}
else
{
// alternative ordered insertion code:
const Index t200 = rows/11; // 11 == (log2(200)*1.39)
const Index t = (rows*100)/139;
// FIXME reserve nnz non zeros
// FIXME implement faster sorting algorithms for very small nnz
// if the result is sparse enough => use a quick sort
// otherwise => loop through the entire vector
        // To avoid performing an expensive log2 when the
        // result is clearly very sparse, we use a linear bound up to 200.
if((nnz<200 && nnz<t200) || nnz * numext::log2(int(nnz)) < t)
{
if(nnz>1) std::sort(indices,indices+nnz);
for(Index k=0; k<nnz; ++k)
{
Index i = indices[k];
res.insertBackByOuterInner(j,i) = values[i];
mask[i] = false;
}
}
else
{
// dense path
for(Index i=0; i<rows; ++i)
{
if(mask[i])
{
mask[i] = false;
res.insertBackByOuterInner(j,i) = values[i];
}
}
}
}
}
res.finalize();
}
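// Usage sketch (illustrative addition, not part of the upstream file): this kernel backs the
// default sparse*sparse product, which keeps every numerically computed entry (no pruning).
// The matrices A, B and C below are hypothetical placeholders.
// \code
// Eigen::SparseMatrix<double> A(100, 50), B(50, 80);
// // ... fill A and B ...
// Eigen::SparseMatrix<double> C = A * B;   // reserves about nnz(A)+nnz(B) entries up front
// \endcode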
} // end namespace internal
namespace internal {
template<typename Lhs, typename Rhs, typename ResultType,
int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
int ResStorageOrder = (traits<ResultType>::Flags&RowMajorBit) ? RowMajor : ColMajor>
struct conservative_sparse_sparse_product_selector;
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
{
typedef typename remove_all<Lhs>::type LhsCleaned;
typedef typename LhsCleaned::Scalar Scalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrixAux;
typedef typename sparse_eval<ColMajorMatrixAux,ResultType::RowsAtCompileTime,ResultType::ColsAtCompileTime,ColMajorMatrixAux::Flags>::type ColMajorMatrix;
// If the result is tall and thin (in the extreme case a column vector)
// then it is faster to sort the coefficients inplace instead of transposing twice.
// FIXME, the following heuristic is probably not very good.
if(lhs.rows()>rhs.cols())
{
ColMajorMatrix resCol(lhs.rows(),rhs.cols());
// perform sorted insertion
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol, true);
res = resCol.markAsRValue();
}
else
{
ColMajorMatrixAux resCol(lhs.rows(),rhs.cols());
      // resort to transposition to sort the entries
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrixAux>(lhs, rhs, resCol, false);
RowMajorMatrix resRow(resCol);
res = resRow.markAsRValue();
}
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename Rhs::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRhs;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRes;
RowMajorRhs rhsRow = rhs;
RowMajorRes resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<RowMajorRhs,Lhs,RowMajorRes>(rhsRow, lhs, resRow);
res = resRow;
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename Lhs::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorLhs;
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRes;
RowMajorLhs lhsRow = lhs;
RowMajorRes resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorLhs,RowMajorRes>(rhs, lhsRow, resRow);
res = resRow;
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
res = resRow;
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
{
typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
res = resCol;
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorLhs;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRes;
ColMajorLhs lhsCol = lhs;
ColMajorRes resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<ColMajorLhs,Rhs,ColMajorRes>(lhsCol, rhs, resCol);
res = resCol;
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRhs;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRes;
ColMajorRhs rhsCol = rhs;
ColMajorRes resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorRhs,ColMajorRes>(lhs, rhsCol, resCol);
res = resCol;
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
RowMajorMatrix resRow(lhs.rows(),rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
// sort the non zeros:
ColMajorMatrix resCol(resRow);
res = resCol;
}
};
} // end namespace internal
namespace internal {
template<typename Lhs, typename Rhs, typename ResultType>
static void sparse_sparse_to_dense_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef typename remove_all<Lhs>::type::Scalar LhsScalar;
typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
Index cols = rhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
evaluator<Lhs> lhsEval(lhs);
evaluator<Rhs> rhsEval(rhs);
for (Index j=0; j<cols; ++j)
{
for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
{
RhsScalar y = rhsIt.value();
Index k = rhsIt.index();
for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
{
Index i = lhsIt.index();
LhsScalar x = lhsIt.value();
res.coeffRef(i,j) += x * y;
}
}
}
}
} // end namespace internal
namespace internal {
template<typename Lhs, typename Rhs, typename ResultType,
int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor>
struct sparse_sparse_to_dense_product_selector;
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
internal::sparse_sparse_to_dense_product_impl<Lhs,Rhs,ResultType>(lhs, rhs, res);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorLhs;
ColMajorLhs lhsCol(lhs);
internal::sparse_sparse_to_dense_product_impl<ColMajorLhs,Rhs,ResultType>(lhsCol, rhs, res);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRhs;
ColMajorRhs rhsCol(rhs);
internal::sparse_sparse_to_dense_product_impl<Lhs,ColMajorRhs,ResultType>(lhs, rhsCol, res);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor>
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
Transpose<ResultType> trRes(res);
internal::sparse_sparse_to_dense_product_impl<Rhs,Lhs,Transpose<ResultType> >(rhs, lhs, trRes);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
| Unknown
2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/SparseCore/SparseSparseProductWithPruning.h | .h | 8,704 | 199 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
#define EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
namespace Eigen {
namespace internal {
// perform a pseudo in-place sparse * sparse product assuming all matrices are col major
template<typename Lhs, typename Rhs, typename ResultType>
static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, const typename ResultType::RealScalar& tolerance)
{
// return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);
typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
typedef typename remove_all<ResultType>::type::Scalar ResScalar;
typedef typename remove_all<Lhs>::type::StorageIndex StorageIndex;
// make sure to call innerSize/outerSize since we fake the storage order.
Index rows = lhs.innerSize();
Index cols = rhs.outerSize();
//Index size = lhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
// allocate a temporary buffer
AmbiVector<ResScalar,StorageIndex> tempVector(rows);
// mimics a resizeByInnerOuter:
if(ResultType::IsRowMajor)
res.resize(cols, rows);
else
res.resize(rows, cols);
evaluator<Lhs> lhsEval(lhs);
evaluator<Rhs> rhsEval(rhs);
// estimate the number of non zero entries
  // given a rhs column containing Y non-zeros, we assume that the corresponding Y columns
  // of the lhs differ on average by one non-zero, so the number of non-zeros of the
  // product of one rhs column with the lhs is X+Y, where X is the average number of
  // non-zeros per column of the lhs.
// Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();
res.reserve(estimated_nnz_prod);
double ratioColRes = double(estimated_nnz_prod)/(double(lhs.rows())*double(rhs.cols()));
for (Index j=0; j<cols; ++j)
{
// FIXME:
//double ratioColRes = (double(rhs.innerVector(j).nonZeros()) + double(lhs.nonZeros())/double(lhs.cols()))/double(lhs.rows());
// let's do a more accurate determination of the nnz ratio for the current column j of res
tempVector.init(ratioColRes);
tempVector.setZero();
for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
{
// FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
tempVector.restart();
RhsScalar x = rhsIt.value();
for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, rhsIt.index()); lhsIt; ++lhsIt)
{
tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
}
}
res.startVec(j);
for (typename AmbiVector<ResScalar,StorageIndex>::Iterator it(tempVector,tolerance); it; ++it)
res.insertBackByOuterInner(j,it.index()) = it.value();
}
res.finalize();
}
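// Usage sketch (illustrative addition, not part of the upstream file): the pruning kernel above
// is reached through the pruned() expression on a sparse*sparse product. The matrices below
// are hypothetical placeholders.
// \code
// Eigen::SparseMatrix<double> A(100, 50), B(50, 80);
// // ... fill A and B ...
// Eigen::SparseMatrix<double> C  = (A * B).pruned();           // drop exact zeros from the result
// Eigen::SparseMatrix<double> C2 = (A * B).pruned(1.0, 1e-9);  // drop entries much smaller than 1.0
// \endcode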
template<typename Lhs, typename Rhs, typename ResultType,
int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
struct sparse_sparse_product_with_pruning_selector;
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typename remove_all<ResultType>::type _res(res.rows(), res.cols());
internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res, tolerance);
res.swap(_res);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
// we need a col-major matrix to hold the result
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> SparseTemporaryType;
SparseTemporaryType _res(res.rows(), res.cols());
internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance);
res = _res;
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
// let's transpose the product to get a column x column product
typename remove_all<ResultType>::type _res(res.rows(), res.cols());
internal::sparse_sparse_product_with_pruning_impl<Rhs,Lhs,ResultType>(rhs, lhs, _res, tolerance);
res.swap(_res);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
ColMajorMatrixLhs colLhs(lhs);
ColMajorMatrixRhs colRhs(rhs);
internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,ColMajorMatrixRhs,ResultType>(colLhs, colRhs, res, tolerance);
// let's transpose the product to get a column x column product
// typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
// SparseTemporaryType _res(res.cols(), res.rows());
// sparse_sparse_product_with_pruning_impl<Rhs,Lhs,SparseTemporaryType>(rhs, lhs, _res);
// res = _res.transpose();
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename Lhs::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixLhs;
RowMajorMatrixLhs rowLhs(lhs);
    sparse_sparse_product_with_pruning_selector<RowMajorMatrixLhs,Rhs,ResultType,RowMajor,RowMajor>::run(rowLhs,rhs,res,tolerance);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename Rhs::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixRhs;
RowMajorMatrixRhs rowRhs(rhs);
    sparse_sparse_product_with_pruning_selector<Lhs,RowMajorMatrixRhs,ResultType,RowMajor,RowMajor,RowMajor>::run(lhs,rowRhs,res,tolerance);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
ColMajorMatrixRhs colRhs(rhs);
internal::sparse_sparse_product_with_pruning_impl<Lhs,ColMajorMatrixRhs,ResultType>(lhs, colRhs, res, tolerance);
}
};
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>
{
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
ColMajorMatrixLhs colLhs(lhs);
internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,Rhs,ResultType>(colLhs, rhs, res, tolerance);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
| Unknown
2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/SparseCore/SparseMatrixBase.h | .h | 17,923 | 406 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEMATRIXBASE_H
#define EIGEN_SPARSEMATRIXBASE_H
namespace Eigen {
/** \ingroup SparseCore_Module
*
* \class SparseMatrixBase
*
* \brief Base class of any sparse matrices or sparse expressions
*
* \tparam Derived is the derived type, e.g. a sparse matrix type, or an expression, etc.
*
* This class can be extended with the help of the plugin mechanism described on the page
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
*/
template<typename Derived> class SparseMatrixBase
: public EigenBase<Derived>
{
public:
typedef typename internal::traits<Derived>::Scalar Scalar;
    /** The numeric type of the expression's coefficients, e.g. float, double, int or std::complex<float>, etc.
*
* It is an alias for the Scalar type */
typedef Scalar value_type;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename internal::traits<Derived>::StorageKind StorageKind;
/** The integer type used to \b store indices within a SparseMatrix.
     * For a \c SparseMatrix<Scalar,Options,IndexType> it is an alias of the third template parameter \c IndexType. */
typedef typename internal::traits<Derived>::StorageIndex StorageIndex;
typedef typename internal::add_const_on_value_type_if_arithmetic<
typename internal::packet_traits<Scalar>::type
>::type PacketReturnType;
typedef SparseMatrixBase StorageBaseType;
typedef Matrix<StorageIndex,Dynamic,1> IndexVector;
typedef Matrix<Scalar,Dynamic,1> ScalarVector;
template<typename OtherDerived>
Derived& operator=(const EigenBase<OtherDerived> &other);
enum {
RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
/**< The number of rows at compile-time. This is just a copy of the value provided
* by the \a Derived type. If a value is not known at compile-time,
* it is set to the \a Dynamic constant.
* \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
/**< The number of columns at compile-time. This is just a copy of the value provided
* by the \a Derived type. If a value is not known at compile-time,
* it is set to the \a Dynamic constant.
* \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
internal::traits<Derived>::ColsAtCompileTime>::ret),
/**< This is equal to the number of coefficients, i.e. the number of
* rows times the number of columns, or to \a Dynamic if this is not
* known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
MaxRowsAtCompileTime = RowsAtCompileTime,
MaxColsAtCompileTime = ColsAtCompileTime,
MaxSizeAtCompileTime = (internal::size_at_compile_time<MaxRowsAtCompileTime,
MaxColsAtCompileTime>::ret),
IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1,
/**< This is set to true if either the number of rows or the number of
* columns is known at compile-time to be equal to 1. Indeed, in that case,
* we are dealing with a column-vector (if there is only one column) or with
* a row-vector (if there is only one row). */
Flags = internal::traits<Derived>::Flags,
/**< This stores expression \ref flags flags which may or may not be inherited by new expressions
* constructed from this one. See the \ref flags "list of flags".
*/
IsRowMajor = Flags&RowMajorBit ? 1 : 0,
InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)
: int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
#ifndef EIGEN_PARSED_BY_DOXYGEN
_HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC
#endif
};
/** \internal the return type of MatrixBase::adjoint() */
typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Eigen::Transpose<const Derived> >,
Transpose<const Derived>
>::type AdjointReturnType;
typedef Transpose<Derived> TransposeReturnType;
typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType;
// FIXME storage order do not match evaluator storage order
typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor, StorageIndex> PlainObject;
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** This is the "real scalar" type; if the \a Scalar type is already real numbers
* (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
* \a Scalar is \a std::complex<T> then RealScalar is \a T.
*
* \sa class NumTraits
*/
typedef typename NumTraits<Scalar>::Real RealScalar;
/** \internal the return type of coeff()
*/
typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType;
/** \internal Represents a matrix with all coefficients equal to one another*/
typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Matrix<Scalar,Dynamic,Dynamic> > ConstantReturnType;
/** type of the equivalent dense matrix */
typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> DenseMatrixType;
/** type of the equivalent square matrix */
typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
inline Derived& derived() { return *static_cast<Derived*>(this); }
inline Derived& const_cast_derived() const
{ return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }
typedef EigenBase<Derived> Base;
#endif // not EIGEN_PARSED_BY_DOXYGEN
#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase
#ifdef EIGEN_PARSED_BY_DOXYGEN
#define EIGEN_DOC_UNARY_ADDONS(METHOD,OP) /** <p>This method does not change the sparsity of \c *this: the OP is applied to explicitly stored coefficients only. \sa SparseCompressedBase::coeffs() </p> */
#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /** <p> \warning This method returns a read-only expression for any sparse matrices. \sa \ref TutorialSparse_SubMatrices "Sparse block operations" </p> */
#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND) /** <p> \warning This method returns a read-write expression for COND sparse matrices only. Otherwise, the returned expression is read-only. \sa \ref TutorialSparse_SubMatrices "Sparse block operations" </p> */
#else
#define EIGEN_DOC_UNARY_ADDONS(X,Y)
#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND)
#endif
# include "../plugins/CommonCwiseUnaryOps.h"
# include "../plugins/CommonCwiseBinaryOps.h"
# include "../plugins/MatrixCwiseUnaryOps.h"
# include "../plugins/MatrixCwiseBinaryOps.h"
# include "../plugins/BlockMethods.h"
# ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN
# include EIGEN_SPARSEMATRIXBASE_PLUGIN
# endif
#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
#undef EIGEN_DOC_UNARY_ADDONS
#undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
#undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF
/** \returns the number of rows. \sa cols() */
inline Index rows() const { return derived().rows(); }
/** \returns the number of columns. \sa rows() */
inline Index cols() const { return derived().cols(); }
/** \returns the number of coefficients, which is \a rows()*cols().
* \sa rows(), cols(). */
inline Index size() const { return rows() * cols(); }
/** \returns true if either the number of rows or the number of columns is equal to 1.
* In other words, this function returns
* \code rows()==1 || cols()==1 \endcode
* \sa rows(), cols(), IsVectorAtCompileTime. */
inline bool isVector() const { return rows()==1 || cols()==1; }
/** \returns the size of the storage major dimension,
      * i.e., the number of columns for a column-major matrix, and the number of rows otherwise */
Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
/** \returns the size of the inner dimension according to the storage order,
      * i.e., the number of rows for a column-major matrix, and the number of columns otherwise */
Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
bool isRValue() const { return m_isRValue; }
Derived& markAsRValue() { m_isRValue = true; return derived(); }
SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ }
template<typename OtherDerived>
Derived& operator=(const ReturnByValue<OtherDerived>& other);
template<typename OtherDerived>
inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other);
inline Derived& operator=(const Derived& other);
protected:
template<typename OtherDerived>
inline Derived& assign(const OtherDerived& other);
template<typename OtherDerived>
inline void assignGeneric(const OtherDerived& other);
public:
friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)
{
typedef typename Derived::Nested Nested;
typedef typename internal::remove_all<Nested>::type NestedCleaned;
if (Flags&RowMajorBit)
{
Nested nm(m.derived());
internal::evaluator<NestedCleaned> thisEval(nm);
for (Index row=0; row<nm.outerSize(); ++row)
{
Index col = 0;
for (typename internal::evaluator<NestedCleaned>::InnerIterator it(thisEval, row); it; ++it)
{
for ( ; col<it.index(); ++col)
s << "0 ";
s << it.value() << " ";
++col;
}
for ( ; col<m.cols(); ++col)
s << "0 ";
s << std::endl;
}
}
else
{
Nested nm(m.derived());
internal::evaluator<NestedCleaned> thisEval(nm);
if (m.cols() == 1) {
Index row = 0;
for (typename internal::evaluator<NestedCleaned>::InnerIterator it(thisEval, 0); it; ++it)
{
for ( ; row<it.index(); ++row)
s << "0" << std::endl;
s << it.value() << std::endl;
++row;
}
for ( ; row<m.rows(); ++row)
s << "0" << std::endl;
}
else
{
SparseMatrix<Scalar, RowMajorBit, StorageIndex> trans = m;
s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit, StorageIndex> >&>(trans);
}
}
return s;
}
template<typename OtherDerived>
Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);
template<typename OtherDerived>
Derived& operator-=(const SparseMatrixBase<OtherDerived>& other);
template<typename OtherDerived>
Derived& operator+=(const DiagonalBase<OtherDerived>& other);
template<typename OtherDerived>
Derived& operator-=(const DiagonalBase<OtherDerived>& other);
template<typename OtherDerived>
Derived& operator+=(const EigenBase<OtherDerived> &other);
template<typename OtherDerived>
Derived& operator-=(const EigenBase<OtherDerived> &other);
Derived& operator*=(const Scalar& other);
Derived& operator/=(const Scalar& other);
template<typename OtherDerived> struct CwiseProductDenseReturnType {
typedef CwiseBinaryOp<internal::scalar_product_op<typename ScalarBinaryOpTraits<
typename internal::traits<Derived>::Scalar,
typename internal::traits<OtherDerived>::Scalar
>::ReturnType>,
const Derived,
const OtherDerived
> Type;
};
template<typename OtherDerived>
EIGEN_STRONG_INLINE const typename CwiseProductDenseReturnType<OtherDerived>::Type
cwiseProduct(const MatrixBase<OtherDerived> &other) const;
// sparse * diagonal
template<typename OtherDerived>
const Product<Derived,OtherDerived>
operator*(const DiagonalBase<OtherDerived> &other) const
{ return Product<Derived,OtherDerived>(derived(), other.derived()); }
// diagonal * sparse
template<typename OtherDerived> friend
const Product<OtherDerived,Derived>
operator*(const DiagonalBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
{ return Product<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
// sparse * sparse
template<typename OtherDerived>
const Product<Derived,OtherDerived,AliasFreeProduct>
operator*(const SparseMatrixBase<OtherDerived> &other) const;
// sparse * dense
template<typename OtherDerived>
const Product<Derived,OtherDerived>
operator*(const MatrixBase<OtherDerived> &other) const
{ return Product<Derived,OtherDerived>(derived(), other.derived()); }
// dense * sparse
template<typename OtherDerived> friend
const Product<OtherDerived,Derived>
operator*(const MatrixBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
{ return Product<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
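    // Usage sketch (illustrative addition, not part of the upstream file): the operator*
    // overloads above cover sparse*sparse, sparse*dense, dense*sparse and the diagonal
    // variants; the result kind follows the operands. A, v and the results are hypothetical.
    // \code
    // Eigen::SparseMatrix<double> A(10, 10);
    // Eigen::VectorXd v = Eigen::VectorXd::Ones(10);
    // Eigen::VectorXd y = A * v;                           // sparse * dense  -> dense result
    // Eigen::SparseMatrix<double> B = A * A;               // sparse * sparse -> sparse result
    // Eigen::SparseMatrix<double> C = v.asDiagonal() * A;  // diagonal * sparse -> sparse result
    // \endcode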
/** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */
SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
{
return SparseSymmetricPermutationProduct<Derived,Upper|Lower>(derived(), perm);
}
template<typename OtherDerived>
Derived& operator*=(const SparseMatrixBase<OtherDerived>& other);
template<int Mode>
inline const TriangularView<const Derived, Mode> triangularView() const;
template<unsigned int UpLo> struct SelfAdjointViewReturnType { typedef SparseSelfAdjointView<Derived, UpLo> Type; };
template<unsigned int UpLo> struct ConstSelfAdjointViewReturnType { typedef const SparseSelfAdjointView<const Derived, UpLo> Type; };
template<unsigned int UpLo> inline
typename ConstSelfAdjointViewReturnType<UpLo>::Type selfadjointView() const;
template<unsigned int UpLo> inline
typename SelfAdjointViewReturnType<UpLo>::Type selfadjointView();
template<typename OtherDerived> Scalar dot(const MatrixBase<OtherDerived>& other) const;
template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
RealScalar squaredNorm() const;
RealScalar norm() const;
RealScalar blueNorm() const;
TransposeReturnType transpose() { return TransposeReturnType(derived()); }
const ConstTransposeReturnType transpose() const { return ConstTransposeReturnType(derived()); }
const AdjointReturnType adjoint() const { return AdjointReturnType(transpose()); }
// inner-vector
typedef Block<Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> InnerVectorReturnType;
typedef Block<const Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> ConstInnerVectorReturnType;
InnerVectorReturnType innerVector(Index outer);
const ConstInnerVectorReturnType innerVector(Index outer) const;
// set of inner-vectors
typedef Block<Derived,Dynamic,Dynamic,true> InnerVectorsReturnType;
typedef Block<const Derived,Dynamic,Dynamic,true> ConstInnerVectorsReturnType;
InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize);
const ConstInnerVectorsReturnType innerVectors(Index outerStart, Index outerSize) const;
DenseMatrixType toDense() const
{
return DenseMatrixType(derived());
}
template<typename OtherDerived>
bool isApprox(const SparseMatrixBase<OtherDerived>& other,
const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
template<typename OtherDerived>
bool isApprox(const MatrixBase<OtherDerived>& other,
const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
{ return toDense().isApprox(other,prec); }
/** \returns the matrix or vector obtained by evaluating this expression.
*
* Notice that in the case of a plain matrix or vector (not an expression) this function just returns
* a const reference, in order to avoid a useless copy.
*/
inline const typename internal::eval<Derived>::type eval() const
{ return typename internal::eval<Derived>::type(derived()); }
Scalar sum() const;
inline const SparseView<Derived>
pruned(const Scalar& reference = Scalar(0), const RealScalar& epsilon = NumTraits<Scalar>::dummy_precision()) const;
protected:
bool m_isRValue;
static inline StorageIndex convert_index(const Index idx) {
return internal::convert_index<StorageIndex>(idx);
}
private:
template<typename Dest> void evalTo(Dest &) const;
};
} // end namespace Eigen
#endif // EIGEN_SPARSEMATRIXBASE_H
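Added note (not part of the Eigen header above): a minimal, hypothetical usage sketch of a few SparseMatrixBase members declared in this file (sparse*dense product, squaredNorm(), toDense(), sum()), assuming the Eigen 3.3 headers are available.

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(3, 3);
  A.insert(0, 0) = 2.0;
  A.insert(1, 1) = 3.0;
  A.insert(2, 0) = 1.0;
  A.makeCompressed();

  Eigen::MatrixXd D = Eigen::MatrixXd::Identity(3, 3);
  Eigen::MatrixXd P = A * D;   // sparse * dense yields a dense result
  std::cout << "product:\n"     << P               << "\n";
  std::cout << "squaredNorm: "  << A.squaredNorm() << "\n";
  std::cout << "dense form:\n"  << A.toDense()     << "\n";
  std::cout << "sum: "          << A.sum()         << "\n";
  return 0;
}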
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparseSolverBase.h
|
.h
| 4,424
| 125
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSESOLVERBASE_H
#define EIGEN_SPARSESOLVERBASE_H
namespace Eigen {
namespace internal {
/** \internal
* Helper functions to solve with a sparse right-hand-side and result.
* The rhs is decomposed into small vertical panels which are solved through dense temporaries.
*/
template<typename Decomposition, typename Rhs, typename Dest>
typename enable_if<Rhs::ColsAtCompileTime!=1 && Dest::ColsAtCompileTime!=1>::type
solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest)
{
EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
typedef typename Dest::Scalar DestScalar;
// we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
static const Index NbColsAtOnce = 4;
Index rhsCols = rhs.cols();
Index size = rhs.rows();
// the temporary matrices do not need more columns than NbColsAtOnce:
Index tmpCols = (std::min)(rhsCols, NbColsAtOnce);
Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,tmpCols);
Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmpX(size,tmpCols);
for(Index k=0; k<rhsCols; k+=NbColsAtOnce)
{
Index actualCols = std::min<Index>(rhsCols-k, NbColsAtOnce);
tmp.leftCols(actualCols) = rhs.middleCols(k,actualCols);
tmpX.leftCols(actualCols) = dec.solve(tmp.leftCols(actualCols));
dest.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView();
}
}
// Overload for vector as rhs
template<typename Decomposition, typename Rhs, typename Dest>
typename enable_if<Rhs::ColsAtCompileTime==1 || Dest::ColsAtCompileTime==1>::type
solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest)
{
typedef typename Dest::Scalar DestScalar;
Index size = rhs.rows();
Eigen::Matrix<DestScalar,Dynamic,1> rhs_dense(rhs);
Eigen::Matrix<DestScalar,Dynamic,1> dest_dense(size);
dest_dense = dec.solve(rhs_dense);
dest = dest_dense.sparseView();
}
} // end namespace internal
/** \class SparseSolverBase
* \ingroup SparseCore_Module
* \brief A base class for sparse solvers
*
* \tparam Derived the actual type of the solver.
*
*/
template<typename Derived>
class SparseSolverBase : internal::noncopyable
{
public:
/** Default constructor */
SparseSolverBase()
: m_isInitialized(false)
{}
~SparseSolverBase()
{}
Derived& derived() { return *static_cast<Derived*>(this); }
const Derived& derived() const { return *static_cast<const Derived*>(this); }
/** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A.
*
* \sa compute()
*/
template<typename Rhs>
inline const Solve<Derived, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "Solver is not initialized.");
eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
return Solve<Derived, Rhs>(derived(), b.derived());
}
/** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A.
*
* \sa compute()
*/
template<typename Rhs>
inline const Solve<Derived, Rhs>
solve(const SparseMatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "Solver is not initialized.");
eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
return Solve<Derived, Rhs>(derived(), b.derived());
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal default implementation of solving with a sparse rhs */
template<typename Rhs,typename Dest>
void _solve_impl(const SparseMatrixBase<Rhs> &b, SparseMatrixBase<Dest> &dest) const
{
internal::solve_sparse_through_dense_panels(derived(), b.derived(), dest.derived());
}
#endif // EIGEN_PARSED_BY_DOXYGEN
protected:
mutable bool m_isInitialized;
};
} // end namespace Eigen
#endif // EIGEN_SPARSESOLVERBASE_H
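Added note (not part of the Eigen header above): a minimal sketch of the compute()/solve() workflow exposed through SparseSolverBase, here using SimplicialLLT as one concrete solver; the matrix and right-hand-side values are made up for illustration.

#include <Eigen/SparseCholesky>
#include <iostream>

int main()
{
  // A small symmetric positive-definite sparse matrix and a right-hand side.
  Eigen::SparseMatrix<double> A(2, 2);
  A.insert(0, 0) = 4.0;
  A.insert(1, 1) = 3.0;
  A.makeCompressed();
  Eigen::VectorXd b(2);
  b << 8.0, 9.0;

  Eigen::SimplicialLLT<Eigen::SparseMatrix<double> > solver;
  solver.compute(A);                       // factorize once
  if (solver.info() != Eigen::Success) return 1;
  Eigen::VectorXd x = solver.solve(b);     // x should be (2, 3)
  std::cout << x.transpose() << "\n";
  return 0;
}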
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparseVector.h
|
.h
| 14,831
| 479
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEVECTOR_H
#define EIGEN_SPARSEVECTOR_H
namespace Eigen {
/** \ingroup SparseCore_Module
* \class SparseVector
*
* \brief a sparse vector class
*
* \tparam _Scalar the scalar type, i.e. the type of the coefficients
*
* See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
*
* This class can be extended with the help of the plugin mechanism described on the page
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
*/
namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<SparseVector<_Scalar, _Options, _StorageIndex> >
{
typedef _Scalar Scalar;
typedef _StorageIndex StorageIndex;
typedef Sparse StorageKind;
typedef MatrixXpr XprKind;
enum {
IsColVector = (_Options & RowMajorBit) ? 0 : 1,
RowsAtCompileTime = IsColVector ? Dynamic : 1,
ColsAtCompileTime = IsColVector ? 1 : Dynamic,
MaxRowsAtCompileTime = RowsAtCompileTime,
MaxColsAtCompileTime = ColsAtCompileTime,
Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit) | CompressedAccessBit,
SupportedAccessPatterns = InnerRandomAccessPattern
};
};
// Sparse-Vector-Assignment kinds:
enum {
SVA_RuntimeSwitch,
SVA_Inner,
SVA_Outer
};
template< typename Dest, typename Src,
int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch
: Src::InnerSizeAtCompileTime==1 ? SVA_Outer
: SVA_Inner>
struct sparse_vector_assign_selector;
}
template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseVector
: public SparseCompressedBase<SparseVector<_Scalar, _Options, _StorageIndex> >
{
typedef SparseCompressedBase<SparseVector> Base;
using Base::convert_index;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
enum { IsColVector = internal::traits<SparseVector>::IsColVector };
enum {
Options = _Options
};
EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return m_data.valuePtr(); }
EIGEN_STRONG_INLINE Scalar* valuePtr() { return m_data.valuePtr(); }
EIGEN_STRONG_INLINE const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
EIGEN_STRONG_INLINE StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
inline const StorageIndex* outerIndexPtr() const { return 0; }
inline StorageIndex* outerIndexPtr() { return 0; }
inline const StorageIndex* innerNonZeroPtr() const { return 0; }
inline StorageIndex* innerNonZeroPtr() { return 0; }
/** \internal */
inline Storage& data() { return m_data; }
/** \internal */
inline const Storage& data() const { return m_data; }
inline Scalar coeff(Index row, Index col) const
{
eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
return coeff(IsColVector ? row : col);
}
inline Scalar coeff(Index i) const
{
eigen_assert(i>=0 && i<m_size);
return m_data.at(StorageIndex(i));
}
inline Scalar& coeffRef(Index row, Index col)
{
eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
return coeffRef(IsColVector ? row : col);
}
/** \returns a reference to the coefficient value at given index \a i
 * This operation involves a log(rho*size) binary search. If the coefficient does not
* exist yet, then a sorted insertion into a sequential buffer is performed.
*
* This insertion might be very costly if the number of nonzeros above \a i is large.
*/
inline Scalar& coeffRef(Index i)
{
eigen_assert(i>=0 && i<m_size);
return m_data.atWithInsertion(StorageIndex(i));
}
public:
typedef typename Base::InnerIterator InnerIterator;
typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
inline void setZero() { m_data.clear(); }
/** \returns the number of non zero coefficients */
inline Index nonZeros() const { return m_data.size(); }
inline void startVec(Index outer)
{
EIGEN_UNUSED_VARIABLE(outer);
eigen_assert(outer==0);
}
inline Scalar& insertBackByOuterInner(Index outer, Index inner)
{
EIGEN_UNUSED_VARIABLE(outer);
eigen_assert(outer==0);
return insertBack(inner);
}
inline Scalar& insertBack(Index i)
{
m_data.append(0, i);
return m_data.value(m_data.size()-1);
}
Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
{
EIGEN_UNUSED_VARIABLE(outer);
eigen_assert(outer==0);
return insertBackUnordered(inner);
}
inline Scalar& insertBackUnordered(Index i)
{
m_data.append(0, i);
return m_data.value(m_data.size()-1);
}
inline Scalar& insert(Index row, Index col)
{
eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
Index inner = IsColVector ? row : col;
Index outer = IsColVector ? col : row;
EIGEN_ONLY_USED_FOR_DEBUG(outer);
eigen_assert(outer==0);
return insert(inner);
}
Scalar& insert(Index i)
{
eigen_assert(i>=0 && i<m_size);
Index startId = 0;
Index p = Index(m_data.size()) - 1;
// TODO smart realloc
m_data.resize(p+2,1);
while ( (p >= startId) && (m_data.index(p) > i) )
{
m_data.index(p+1) = m_data.index(p);
m_data.value(p+1) = m_data.value(p);
--p;
}
m_data.index(p+1) = convert_index(i);
m_data.value(p+1) = 0;
return m_data.value(p+1);
}
/**
*/
inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }
inline void finalize() {}
/** \copydoc SparseMatrix::prune(const Scalar&,const RealScalar&) */
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
m_data.prune(reference,epsilon);
}
/** Resizes the sparse vector to \a rows x \a cols
*
* This method is provided for compatibility with matrices.
* For a column vector, \a cols must be equal to 1.
* For a row vector, \a rows must be equal to 1.
*
* \sa resize(Index)
*/
void resize(Index rows, Index cols)
{
eigen_assert((IsColVector ? cols : rows)==1 && "Outer dimension must equal 1");
resize(IsColVector ? rows : cols);
}
/** Resizes the sparse vector to \a newSize
* This method deletes all entries, thus leaving an empty sparse vector
*
* \sa conservativeResize(), setZero() */
void resize(Index newSize)
{
m_size = newSize;
m_data.clear();
}
/** Resizes the sparse vector to \a newSize, while leaving old values untouched.
*
 * If the size of the vector is decreased, then the storage of the out-of-bounds coefficients is kept and reserved.
* Call .data().squeeze() to free extra memory.
*
* \sa reserve(), setZero()
*/
void conservativeResize(Index newSize)
{
if (newSize < m_size)
{
Index i = 0;
while (i<m_data.size() && m_data.index(i)<newSize) ++i;
m_data.resize(i);
}
m_size = newSize;
}
void resizeNonZeros(Index size) { m_data.resize(size); }
inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); }
explicit inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); }
inline SparseVector(Index rows, Index cols) : m_size(0) { check_template_parameters(); resize(rows,cols); }
template<typename OtherDerived>
inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
: m_size(0)
{
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
check_template_parameters();
*this = other.derived();
}
inline SparseVector(const SparseVector& other)
: Base(other), m_size(0)
{
check_template_parameters();
*this = other.derived();
}
/** Swaps the values of \c *this and \a other.
 * Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only.
* \sa SparseMatrixBase::swap()
*/
inline void swap(SparseVector& other)
{
std::swap(m_size, other.m_size);
m_data.swap(other.m_data);
}
template<int OtherOptions>
inline void swap(SparseMatrix<Scalar,OtherOptions,StorageIndex>& other)
{
eigen_assert(other.outerSize()==1);
std::swap(m_size, other.m_innerSize);
m_data.swap(other.m_data);
}
inline SparseVector& operator=(const SparseVector& other)
{
if (other.isRValue())
{
swap(other.const_cast_derived());
}
else
{
resize(other.size());
m_data = other.m_data;
}
return *this;
}
template<typename OtherDerived>
inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
{
SparseVector tmp(other.size());
internal::sparse_vector_assign_selector<SparseVector,OtherDerived>::run(tmp,other.derived());
this->swap(tmp);
return *this;
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename Lhs, typename Rhs>
inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
{
return Base::operator=(product);
}
#endif
friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
{
for (Index i=0; i<m.nonZeros(); ++i)
s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
s << std::endl;
return s;
}
/** Destructor */
inline ~SparseVector() {}
/** Overloaded for performance */
Scalar sum() const;
public:
/** \internal \deprecated use setZero() and reserve() */
EIGEN_DEPRECATED void startFill(Index reserve)
{
setZero();
m_data.reserve(reserve);
}
/** \internal \deprecated use insertBack(Index,Index) */
EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
{
eigen_assert(r==0 || c==0);
return fill(IsColVector ? r : c);
}
/** \internal \deprecated use insertBack(Index) */
EIGEN_DEPRECATED Scalar& fill(Index i)
{
m_data.append(0, i);
return m_data.value(m_data.size()-1);
}
/** \internal \deprecated use insert(Index,Index) */
EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
{
eigen_assert(r==0 || c==0);
return fillrand(IsColVector ? r : c);
}
/** \internal \deprecated use insert(Index) */
EIGEN_DEPRECATED Scalar& fillrand(Index i)
{
return insert(i);
}
/** \internal \deprecated use finalize() */
EIGEN_DEPRECATED void endFill() {}
 // These two functions were here in the 3.1 release, so let's keep them in case some code relies on them.
/** \internal \deprecated use data() */
EIGEN_DEPRECATED Storage& _data() { return m_data; }
/** \internal \deprecated use data() */
EIGEN_DEPRECATED const Storage& _data() const { return m_data; }
# ifdef EIGEN_SPARSEVECTOR_PLUGIN
# include EIGEN_SPARSEVECTOR_PLUGIN
# endif
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
}
Storage m_data;
Index m_size;
};
namespace internal {
template<typename _Scalar, int _Options, typename _Index>
struct evaluator<SparseVector<_Scalar,_Options,_Index> >
: evaluator_base<SparseVector<_Scalar,_Options,_Index> >
{
typedef SparseVector<_Scalar,_Options,_Index> SparseVectorType;
typedef evaluator_base<SparseVectorType> Base;
typedef typename SparseVectorType::InnerIterator InnerIterator;
typedef typename SparseVectorType::ReverseInnerIterator ReverseInnerIterator;
enum {
CoeffReadCost = NumTraits<_Scalar>::ReadCost,
Flags = SparseVectorType::Flags
};
evaluator() : Base() {}
explicit evaluator(const SparseVectorType &mat) : m_matrix(&mat)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_matrix->nonZeros();
}
operator SparseVectorType&() { return m_matrix->const_cast_derived(); }
operator const SparseVectorType&() const { return *m_matrix; }
const SparseVectorType *m_matrix;
};
template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Inner> {
static void run(Dest& dst, const Src& src) {
eigen_internal_assert(src.innerSize()==src.size());
typedef internal::evaluator<Src> SrcEvaluatorType;
SrcEvaluatorType srcEval(src);
for(typename SrcEvaluatorType::InnerIterator it(srcEval, 0); it; ++it)
dst.insert(it.index()) = it.value();
}
};
template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Outer> {
static void run(Dest& dst, const Src& src) {
eigen_internal_assert(src.outerSize()==src.size());
typedef internal::evaluator<Src> SrcEvaluatorType;
SrcEvaluatorType srcEval(src);
for(Index i=0; i<src.size(); ++i)
{
typename SrcEvaluatorType::InnerIterator it(srcEval, i);
if(it)
dst.insert(i) = it.value();
}
}
};
template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_RuntimeSwitch> {
static void run(Dest& dst, const Src& src) {
if(src.outerSize()==1) sparse_vector_assign_selector<Dest,Src,SVA_Inner>::run(dst, src);
else sparse_vector_assign_selector<Dest,Src,SVA_Outer>::run(dst, src);
}
};
}
} // end namespace Eigen
#endif // EIGEN_SPARSEVECTOR_H
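Added note (not part of the Eigen header above): a small, hypothetical sketch of the SparseVector API documented above (reserve(), insert(Index), coeffRef(Index), and iteration over the stored coefficients).

#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::SparseVector<double> v(8);
  v.reserve(3);
  v.insert(1)    = 2.5;    // sorted insertion, see insert(Index) above
  v.insert(5)    = -1.0;
  v.coeffRef(5) += 0.5;    // binary search, then in-place update
  std::cout << "nonZeros: " << v.nonZeros() << "  sum: " << v.sum() << "\n";
  for (Eigen::SparseVector<double>::InnerIterator it(v, 0); it; ++it)
    std::cout << "(" << it.index() << ", " << it.value() << ") ";
  std::cout << "\n";
  return 0;
}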
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparseProduct.h
|
.h
| 7,049
| 170
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEPRODUCT_H
#define EIGEN_SPARSEPRODUCT_H
namespace Eigen {
/** \returns an expression of the product of two sparse matrices.
* By default a conservative product preserving the symbolic non zeros is performed.
* The automatic pruning of the small values can be achieved by calling the pruned() function
* in which case a totally different product algorithm is employed:
* \code
 * C = (A*B).pruned(); // suppress numerical zeros (exact)
* C = (A*B).pruned(ref);
* C = (A*B).pruned(ref,epsilon);
* \endcode
* where \c ref is a meaningful non zero reference value.
* */
template<typename Derived>
template<typename OtherDerived>
inline const Product<Derived,OtherDerived,AliasFreeProduct>
SparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other) const
{
return Product<Derived,OtherDerived,AliasFreeProduct>(derived(), other.derived());
}
namespace internal {
// sparse * sparse
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
{
template<typename Dest>
static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
{
evalTo(dst, lhs, rhs, typename evaluator_traits<Dest>::Shape());
}
// dense += sparse * sparse
template<typename Dest,typename ActualLhs>
static void addTo(Dest& dst, const ActualLhs& lhs, const Rhs& rhs, typename enable_if<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>::type* = 0)
{
typedef typename nested_eval<ActualLhs,Dynamic>::type LhsNested;
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhs);
internal::sparse_sparse_to_dense_product_selector<typename remove_all<LhsNested>::type,
typename remove_all<RhsNested>::type, Dest>::run(lhsNested,rhsNested,dst);
}
// dense -= sparse * sparse
template<typename Dest>
static void subTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, typename enable_if<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>::type* = 0)
{
addTo(dst, -lhs, rhs);
}
protected:
// sparse = sparse * sparse
template<typename Dest>
static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, SparseShape)
{
typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhs);
internal::conservative_sparse_sparse_product_selector<typename remove_all<LhsNested>::type,
typename remove_all<RhsNested>::type, Dest>::run(lhsNested,rhsNested,dst);
}
// dense = sparse * sparse
template<typename Dest>
static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, DenseShape)
{
dst.setZero();
addTo(dst, lhs, rhs);
}
};
// sparse * sparse-triangular
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, SparseTriangularShape, ProductType>
: public generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
{};
// sparse-triangular * sparse
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, SparseShape, ProductType>
: public generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
{};
// dense = sparse-product (can be sparse*sparse, sparse*perm, etc.)
template< typename DstXprType, typename Lhs, typename Rhs>
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
{
typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
generic_product_impl<Lhs, Rhs>::evalTo(dst,src.lhs(),src.rhs());
}
};
// dense += sparse-product (can be sparse*sparse, sparse*perm, etc.)
template< typename DstXprType, typename Lhs, typename Rhs>
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::add_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
{
typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
{
generic_product_impl<Lhs, Rhs>::addTo(dst,src.lhs(),src.rhs());
}
};
// dense -= sparse-product (can be sparse*sparse, sparse*perm, etc.)
template< typename DstXprType, typename Lhs, typename Rhs>
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::sub_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
{
typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
{
generic_product_impl<Lhs, Rhs>::subTo(dst,src.lhs(),src.rhs());
}
};
template<typename Lhs, typename Rhs, int Options>
struct unary_evaluator<SparseView<Product<Lhs, Rhs, Options> >, IteratorBased>
: public evaluator<typename Product<Lhs, Rhs, DefaultProduct>::PlainObject>
{
typedef SparseView<Product<Lhs, Rhs, Options> > XprType;
typedef typename XprType::PlainObject PlainObject;
typedef evaluator<PlainObject> Base;
explicit unary_evaluator(const XprType& xpr)
: m_result(xpr.rows(), xpr.cols())
{
using std::abs;
::new (static_cast<Base*>(this)) Base(m_result);
typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(xpr.nestedExpression().lhs());
RhsNested rhsNested(xpr.nestedExpression().rhs());
internal::sparse_sparse_product_with_pruning_selector<typename remove_all<LhsNested>::type,
typename remove_all<RhsNested>::type, PlainObject>::run(lhsNested,rhsNested,m_result,
abs(xpr.reference())*xpr.epsilon());
}
protected:
PlainObject m_result;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSEPRODUCT_H
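Added note (not part of the Eigen header above): a brief sketch of the two product modes documented at the top of this file, the default conservative sparse*sparse product versus the pruned() variant; the values are arbitrary.

#include <Eigen/SparseCore>

int main()
{
  Eigen::SparseMatrix<double> A(3, 3), B(3, 3), C;
  A.insert(0, 0) = 1.0;  A.insert(1, 2) = -1.0;
  B.insert(0, 1) = 2.0;  B.insert(2, 1) = 1.0;

  C = A * B;             // conservative product: keeps symbolic non-zeros
  C = (A * B).pruned();  // prunes exact numerical zeros via a different kernel
  return 0;
}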
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparsePermutation.h
|
.h
| 7,329
| 179
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_PERMUTATION_H
#define EIGEN_SPARSE_PERMUTATION_H
// This file implements sparse * permutation products
namespace Eigen {
namespace internal {
template<typename ExpressionType, int Side, bool Transposed>
struct permutation_matrix_product<ExpressionType, Side, Transposed, SparseShape>
{
typedef typename nested_eval<ExpressionType, 1>::type MatrixType;
typedef typename remove_all<MatrixType>::type MatrixTypeCleaned;
typedef typename MatrixTypeCleaned::Scalar Scalar;
typedef typename MatrixTypeCleaned::StorageIndex StorageIndex;
enum {
SrcStorageOrder = MatrixTypeCleaned::Flags&RowMajorBit ? RowMajor : ColMajor,
MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight
};
typedef typename internal::conditional<MoveOuter,
SparseMatrix<Scalar,SrcStorageOrder,StorageIndex>,
SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,StorageIndex> >::type ReturnType;
template<typename Dest,typename PermutationType>
static inline void run(Dest& dst, const PermutationType& perm, const ExpressionType& xpr)
{
MatrixType mat(xpr);
if(MoveOuter)
{
SparseMatrix<Scalar,SrcStorageOrder,StorageIndex> tmp(mat.rows(), mat.cols());
Matrix<StorageIndex,Dynamic,1> sizes(mat.outerSize());
for(Index j=0; j<mat.outerSize(); ++j)
{
Index jp = perm.indices().coeff(j);
sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = StorageIndex(mat.innerVector(((Side==OnTheRight) ^ Transposed) ? jp : j).nonZeros());
}
tmp.reserve(sizes);
for(Index j=0; j<mat.outerSize(); ++j)
{
Index jp = perm.indices().coeff(j);
Index jsrc = ((Side==OnTheRight) ^ Transposed) ? jp : j;
Index jdst = ((Side==OnTheLeft) ^ Transposed) ? jp : j;
for(typename MatrixTypeCleaned::InnerIterator it(mat,jsrc); it; ++it)
tmp.insertByOuterInner(jdst,it.index()) = it.value();
}
dst = tmp;
}
else
{
SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,StorageIndex> tmp(mat.rows(), mat.cols());
Matrix<StorageIndex,Dynamic,1> sizes(tmp.outerSize());
sizes.setZero();
PermutationMatrix<Dynamic,Dynamic,StorageIndex> perm_cpy;
if((Side==OnTheLeft) ^ Transposed)
perm_cpy = perm;
else
perm_cpy = perm.transpose();
for(Index j=0; j<mat.outerSize(); ++j)
for(typename MatrixTypeCleaned::InnerIterator it(mat,j); it; ++it)
sizes[perm_cpy.indices().coeff(it.index())]++;
tmp.reserve(sizes);
for(Index j=0; j<mat.outerSize(); ++j)
for(typename MatrixTypeCleaned::InnerIterator it(mat,j); it; ++it)
tmp.insertByOuterInner(perm_cpy.indices().coeff(it.index()),j) = it.value();
dst = tmp;
}
}
};
}
namespace internal {
template <int ProductTag> struct product_promote_storage_type<Sparse, PermutationStorage, ProductTag> { typedef Sparse ret; };
template <int ProductTag> struct product_promote_storage_type<PermutationStorage, Sparse, ProductTag> { typedef Sparse ret; };
// TODO, the following two overloads are only needed to define the right temporary type through
// typename traits<permutation_sparse_matrix_product<Rhs,Lhs,OnTheRight,false> >::ReturnType
// whereas it should be correctly handled by traits<Product<> >::PlainObject
template<typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, AliasFreeProduct>, ProductTag, PermutationShape, SparseShape>
: public evaluator<typename permutation_matrix_product<Rhs,OnTheLeft,false,SparseShape>::ReturnType>
{
typedef Product<Lhs, Rhs, AliasFreeProduct> XprType;
typedef typename permutation_matrix_product<Rhs,OnTheLeft,false,SparseShape>::ReturnType PlainObject;
typedef evaluator<PlainObject> Base;
enum {
Flags = Base::Flags | EvalBeforeNestingBit
};
explicit product_evaluator(const XprType& xpr)
: m_result(xpr.rows(), xpr.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
generic_product_impl<Lhs, Rhs, PermutationShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());
}
protected:
PlainObject m_result;
};
template<typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, AliasFreeProduct>, ProductTag, SparseShape, PermutationShape >
: public evaluator<typename permutation_matrix_product<Lhs,OnTheRight,false,SparseShape>::ReturnType>
{
typedef Product<Lhs, Rhs, AliasFreeProduct> XprType;
typedef typename permutation_matrix_product<Lhs,OnTheRight,false,SparseShape>::ReturnType PlainObject;
typedef evaluator<PlainObject> Base;
enum {
Flags = Base::Flags | EvalBeforeNestingBit
};
explicit product_evaluator(const XprType& xpr)
: m_result(xpr.rows(), xpr.cols())
{
::new (static_cast<Base*>(this)) Base(m_result);
generic_product_impl<Lhs, Rhs, SparseShape, PermutationShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());
}
protected:
PlainObject m_result;
};
} // end namespace internal
/** \returns the matrix with the permutation applied to the columns
*/
template<typename SparseDerived, typename PermDerived>
inline const Product<SparseDerived, PermDerived, AliasFreeProduct>
operator*(const SparseMatrixBase<SparseDerived>& matrix, const PermutationBase<PermDerived>& perm)
{ return Product<SparseDerived, PermDerived, AliasFreeProduct>(matrix.derived(), perm.derived()); }
/** \returns the matrix with the permutation applied to the rows
*/
template<typename SparseDerived, typename PermDerived>
inline const Product<PermDerived, SparseDerived, AliasFreeProduct>
operator*( const PermutationBase<PermDerived>& perm, const SparseMatrixBase<SparseDerived>& matrix)
{ return Product<PermDerived, SparseDerived, AliasFreeProduct>(perm.derived(), matrix.derived()); }
/** \returns the matrix with the inverse permutation applied to the columns.
*/
template<typename SparseDerived, typename PermutationType>
inline const Product<SparseDerived, Inverse<PermutationType>, AliasFreeProduct>
operator*(const SparseMatrixBase<SparseDerived>& matrix, const InverseImpl<PermutationType, PermutationStorage>& tperm)
{
return Product<SparseDerived, Inverse<PermutationType>, AliasFreeProduct>(matrix.derived(), tperm.derived());
}
/** \returns the matrix with the inverse permutation applied to the rows.
*/
template<typename SparseDerived, typename PermutationType>
inline const Product<Inverse<PermutationType>, SparseDerived, AliasFreeProduct>
operator*(const InverseImpl<PermutationType,PermutationStorage>& tperm, const SparseMatrixBase<SparseDerived>& matrix)
{
return Product<Inverse<PermutationType>, SparseDerived, AliasFreeProduct>(tperm.derived(), matrix.derived());
}
} // end namespace Eigen
#endif // EIGEN_SPARSE_PERMUTATION_H
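Added note (not part of the Eigen header above): a sketch of the permutation products declared just above; P*A permutes rows, A*P permutes columns, and the Inverse overloads undo them. The 3x3 values are arbitrary.

#include <Eigen/SparseCore>
#include <iostream>
#include <utility>

int main()
{
  Eigen::SparseMatrix<double> A(3, 3);
  A.insert(0, 0) = 1.0;
  A.insert(1, 1) = 2.0;
  A.insert(2, 2) = 3.0;

  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic> P(3);
  P.setIdentity();
  std::swap(P.indices()(0), P.indices()(2));   // swap rows/columns 0 and 2

  Eigen::SparseMatrix<double> rowPermuted = P * A;             // permute rows
  Eigen::SparseMatrix<double> colPermuted = A * P;             // permute columns
  Eigen::SparseMatrix<double> restored    = P.inverse() * rowPermuted;
  std::cout << restored.toDense() << "\n";
  return 0;
}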
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/CompressedStorage.h
|
.h
| 8,164
| 259
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COMPRESSED_STORAGE_H
#define EIGEN_COMPRESSED_STORAGE_H
namespace Eigen {
namespace internal {
/** \internal
* Stores a sparse set of values as a list of values and a list of indices.
*
*/
template<typename _Scalar,typename _StorageIndex>
class CompressedStorage
{
public:
typedef _Scalar Scalar;
typedef _StorageIndex StorageIndex;
protected:
typedef typename NumTraits<Scalar>::Real RealScalar;
public:
CompressedStorage()
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
{}
explicit CompressedStorage(Index size)
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
{
resize(size);
}
CompressedStorage(const CompressedStorage& other)
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
{
*this = other;
}
CompressedStorage& operator=(const CompressedStorage& other)
{
resize(other.size());
if(other.size()>0)
{
internal::smart_copy(other.m_values, other.m_values + m_size, m_values);
internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices);
}
return *this;
}
void swap(CompressedStorage& other)
{
std::swap(m_values, other.m_values);
std::swap(m_indices, other.m_indices);
std::swap(m_size, other.m_size);
std::swap(m_allocatedSize, other.m_allocatedSize);
}
~CompressedStorage()
{
delete[] m_values;
delete[] m_indices;
}
void reserve(Index size)
{
Index newAllocatedSize = m_size + size;
if (newAllocatedSize > m_allocatedSize)
reallocate(newAllocatedSize);
}
void squeeze()
{
if (m_allocatedSize>m_size)
reallocate(m_size);
}
void resize(Index size, double reserveSizeFactor = 0)
{
if (m_allocatedSize<size)
{
Index realloc_size = (std::min<Index>)(NumTraits<StorageIndex>::highest(), size + Index(reserveSizeFactor*double(size)));
if(realloc_size<size)
internal::throw_std_bad_alloc();
reallocate(realloc_size);
}
m_size = size;
}
void append(const Scalar& v, Index i)
{
Index id = m_size;
resize(m_size+1, 1);
m_values[id] = v;
m_indices[id] = internal::convert_index<StorageIndex>(i);
}
inline Index size() const { return m_size; }
inline Index allocatedSize() const { return m_allocatedSize; }
inline void clear() { m_size = 0; }
const Scalar* valuePtr() const { return m_values; }
Scalar* valuePtr() { return m_values; }
const StorageIndex* indexPtr() const { return m_indices; }
StorageIndex* indexPtr() { return m_indices; }
inline Scalar& value(Index i) { eigen_internal_assert(m_values!=0); return m_values[i]; }
inline const Scalar& value(Index i) const { eigen_internal_assert(m_values!=0); return m_values[i]; }
inline StorageIndex& index(Index i) { eigen_internal_assert(m_indices!=0); return m_indices[i]; }
inline const StorageIndex& index(Index i) const { eigen_internal_assert(m_indices!=0); return m_indices[i]; }
/** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
inline Index searchLowerIndex(Index key) const
{
return searchLowerIndex(0, m_size, key);
}
/** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
inline Index searchLowerIndex(Index start, Index end, Index key) const
{
while(end>start)
{
Index mid = (end+start)>>1;
if (m_indices[mid]<key)
start = mid+1;
else
end = mid;
}
return start;
}
/** \returns the stored value at index \a key
* If the value does not exist, then the value \a defaultValue is returned without any insertion. */
inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const
{
if (m_size==0)
return defaultValue;
else if (key==m_indices[m_size-1])
return m_values[m_size-1];
// ^^ optimization: let's first check if it is the last coefficient
// (very common in high level algorithms)
const Index id = searchLowerIndex(0,m_size-1,key);
return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
}
/** Like at(), but the search is performed in the range [start,end) */
inline Scalar atInRange(Index start, Index end, Index key, const Scalar &defaultValue = Scalar(0)) const
{
if (start>=end)
return defaultValue;
else if (end>start && key==m_indices[end-1])
return m_values[end-1];
// ^^ optimization: let's first check if it is the last coefficient
// (very common in high level algorithms)
const Index id = searchLowerIndex(start,end-1,key);
return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
}
/** \returns a reference to the value at index \a key
* If the value does not exist, then the value \a defaultValue is inserted
* such that the keys are sorted. */
inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0))
{
Index id = searchLowerIndex(0,m_size,key);
if (id>=m_size || m_indices[id]!=key)
{
if (m_allocatedSize<m_size+1)
{
m_allocatedSize = 2*(m_size+1);
internal::scoped_array<Scalar> newValues(m_allocatedSize);
internal::scoped_array<StorageIndex> newIndices(m_allocatedSize);
// copy first chunk
internal::smart_copy(m_values, m_values +id, newValues.ptr());
internal::smart_copy(m_indices, m_indices+id, newIndices.ptr());
// copy the rest
if(m_size>id)
{
internal::smart_copy(m_values +id, m_values +m_size, newValues.ptr() +id+1);
internal::smart_copy(m_indices+id, m_indices+m_size, newIndices.ptr()+id+1);
}
std::swap(m_values,newValues.ptr());
std::swap(m_indices,newIndices.ptr());
}
else if(m_size>id)
{
internal::smart_memmove(m_values +id, m_values +m_size, m_values +id+1);
internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1);
}
m_size++;
m_indices[id] = internal::convert_index<StorageIndex>(key);
m_values[id] = defaultValue;
}
return m_values[id];
}
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
Index k = 0;
Index n = size();
for (Index i=0; i<n; ++i)
{
if (!internal::isMuchSmallerThan(value(i), reference, epsilon))
{
value(k) = value(i);
index(k) = index(i);
++k;
}
}
resize(k,0);
}
protected:
inline void reallocate(Index size)
{
#ifdef EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN
EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN
#endif
eigen_internal_assert(size!=m_allocatedSize);
internal::scoped_array<Scalar> newValues(size);
internal::scoped_array<StorageIndex> newIndices(size);
Index copySize = (std::min)(size, m_size);
if (copySize>0) {
internal::smart_copy(m_values, m_values+copySize, newValues.ptr());
internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr());
}
std::swap(m_values,newValues.ptr());
std::swap(m_indices,newIndices.ptr());
m_allocatedSize = size;
}
protected:
Scalar* m_values;
StorageIndex* m_indices;
Index m_size;
Index m_allocatedSize;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_COMPRESSED_STORAGE_H
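Added note (not part of the Eigen header above): searchLowerIndex() above is the classical lower-bound binary search over the index array. A standalone sketch (hypothetical, outside Eigen) showing the same loop agreeing with std::lower_bound:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
  std::vector<int> indices = {1, 4, 4, 7, 10};   // sorted inner indices
  const int key = 5;

  // Same loop body as CompressedStorage::searchLowerIndex(start, end, key).
  std::size_t start = 0, end = indices.size();
  while (end > start)
  {
    const std::size_t mid = (end + start) >> 1;
    if (indices[mid] < key) start = mid + 1;
    else                    end   = mid;
  }

  assert(start == static_cast<std::size_t>(
         std::lower_bound(indices.begin(), indices.end(), key) - indices.begin()));
  return 0;
}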
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparseRedux.h
|
.h
| 1,699
| 50
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEREDUX_H
#define EIGEN_SPARSEREDUX_H
namespace Eigen {
template<typename Derived>
typename internal::traits<Derived>::Scalar
SparseMatrixBase<Derived>::sum() const
{
eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
Scalar res(0);
internal::evaluator<Derived> thisEval(derived());
for (Index j=0; j<outerSize(); ++j)
for (typename internal::evaluator<Derived>::InnerIterator iter(thisEval,j); iter; ++iter)
res += iter.value();
return res;
}
template<typename _Scalar, int _Options, typename _Index>
typename internal::traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar
SparseMatrix<_Scalar,_Options,_Index>::sum() const
{
eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
if(this->isCompressed())
return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
else
return Base::sum();
}
template<typename _Scalar, int _Options, typename _Index>
typename internal::traits<SparseVector<_Scalar,_Options, _Index> >::Scalar
SparseVector<_Scalar,_Options,_Index>::sum() const
{
eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
}
} // end namespace Eigen
#endif // EIGEN_SPARSEREDUX_H
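Added note (not part of the Eigen header above): the sum() overloads defined in this file add up only the stored coefficients; a tiny usage sketch with made-up values:

#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(2, 2);
  A.insert(0, 0) = 1.5;
  A.insert(1, 0) = 2.5;
  Eigen::SparseVector<double> v(4);
  v.insert(2) = 3.0;

  std::cout << A.sum() << "\n";   // 4.0
  std::cout << v.sum() << "\n";   // 3.0
  return 0;
}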
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparseMatrix.h
|
.h
| 52,401
| 1,405
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H
namespace Eigen {
/** \ingroup SparseCore_Module
*
* \class SparseMatrix
*
 * \brief A versatile sparse matrix representation
*
 * This class implements a more versatile variant of the common \em compressed row/column storage format.
 * Each column's (resp. row's) non zeros are stored as a pair of value and associated row (resp. column) index.
* All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
 * space in between the nonzeros of two successive columns (resp. rows) such that insertion of a new non-zero
 * can be done with limited memory reallocation and copies.
*
* A call to the function makeCompressed() turns the matrix into the standard \em compressed format
 * compatible with many libraries.
*
 * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
*
* \tparam _Scalar the scalar type, i.e. the type of the coefficients
* \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
* is ColMajor or RowMajor. The default is 0 which means column-major.
* \tparam _StorageIndex the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
*
* \warning In %Eigen 3.2, the undocumented type \c SparseMatrix::Index was improperly defined as the storage index type (e.g., int),
* whereas it is now (starting from %Eigen 3.3) deprecated and always defined as Eigen::Index.
 * Code making use of \c SparseMatrix::Index might thus likely have to be changed to use \c SparseMatrix::StorageIndex instead.
*
* This class can be extended with the help of the plugin mechanism described on the page
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
*/
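// Added illustrative sketch (not part of the original header): a typical assembly pattern
// for the SparseMatrix documented above, using a triplet list followed by setFromTriplets()
// and makeCompressed(); the sizes and values below are arbitrary.
//
//   std::vector<Eigen::Triplet<double> > triplets;
//   triplets.push_back(Eigen::Triplet<double>(0, 0, 4.0));
//   triplets.push_back(Eigen::Triplet<double>(1, 2, -1.0));
//   Eigen::SparseMatrix<double> A(3, 3);
//   A.setFromTriplets(triplets.begin(), triplets.end());
//   A.makeCompressed();   // switch to the standard compressed (CSC/CSR) layout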
namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
typedef _Scalar Scalar;
typedef _StorageIndex StorageIndex;
typedef Sparse StorageKind;
typedef MatrixXpr XprKind;
enum {
RowsAtCompileTime = Dynamic,
ColsAtCompileTime = Dynamic,
MaxRowsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic,
Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
SupportedAccessPatterns = InnerRandomAccessPattern
};
};
template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
typedef _Scalar Scalar;
typedef Dense StorageKind;
typedef _StorageIndex StorageIndex;
typedef MatrixXpr XprKind;
enum {
RowsAtCompileTime = Dynamic,
ColsAtCompileTime = 1,
MaxRowsAtCompileTime = Dynamic,
MaxColsAtCompileTime = 1,
Flags = LvalueBit
};
};
template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
: public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
enum {
Flags = 0
};
};
} // end namespace internal
template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseMatrix
: public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
typedef SparseCompressedBase<SparseMatrix> Base;
using Base::convert_index;
friend class SparseVector<_Scalar,0,_StorageIndex>;
public:
using Base::isCompressed;
using Base::nonZeros;
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
using Base::operator+=;
using Base::operator-=;
typedef MappedSparseMatrix<Scalar,Flags> Map;
typedef Diagonal<SparseMatrix> DiagonalReturnType;
typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
typedef typename Base::InnerIterator InnerIterator;
typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
using Base::IsRowMajor;
typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
enum {
Options = _Options
};
typedef typename Base::IndexVector IndexVector;
typedef typename Base::ScalarVector ScalarVector;
protected:
typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
Index m_outerSize;
Index m_innerSize;
StorageIndex* m_outerIndex;
StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
Storage m_data;
public:
/** \returns the number of rows of the matrix */
inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
/** \returns the number of columns of the matrix */
inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
    /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
inline Index innerSize() const { return m_innerSize; }
    /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
inline Index outerSize() const { return m_outerSize; }
/** \returns a const pointer to the array of values.
* This function is aimed at interoperability with other libraries.
* \sa innerIndexPtr(), outerIndexPtr() */
inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
/** \returns a non-const pointer to the array of values.
* This function is aimed at interoperability with other libraries.
* \sa innerIndexPtr(), outerIndexPtr() */
inline Scalar* valuePtr() { return m_data.valuePtr(); }
/** \returns a const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
/** \returns a non-const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
/** \returns a const pointer to the array of the starting positions of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), innerIndexPtr() */
inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
/** \returns a non-const pointer to the array of the starting positions of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), innerIndexPtr() */
inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
/** \returns a const pointer to the array of the number of non zeros of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 in compressed mode */
inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
/** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 in compressed mode */
inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
/** \internal */
inline Storage& data() { return m_data; }
/** \internal */
inline const Storage& data() const { return m_data; }
/** \returns the value of the matrix at position \a i, \a j
* This function returns Scalar(0) if the element is an explicit \em zero */
inline Scalar coeff(Index row, Index col) const
{
eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
}
/** \returns a non-const reference to the value of the matrix at position \a i, \a j
*
* If the element does not exist then it is inserted via the insert(Index,Index) function
* which itself turns the matrix into a non compressed form if that was not the case.
*
      * This is an O(log(nnz_j)) operation (binary search) plus the cost of insert(Index,Index)
* function if the element does not already exist.
*/
inline Scalar& coeffRef(Index row, Index col)
{
eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
Index start = m_outerIndex[outer];
Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
if(end<=start)
return insert(row,col);
const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
if((p<end) && (m_data.index(p)==inner))
return m_data.value(p);
else
return insert(row,col);
}
/** \returns a reference to a novel non zero coefficient with coordinates \a row x \a col.
* The non zero coefficient must \b not already exist.
*
* If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
* mode while reserving room for 2 x this->innerSize() non zeros if reserve(Index) has not been called earlier.
* In this case, the insertion procedure is optimized for a \e sequential insertion mode where elements are assumed to be
* inserted by increasing outer-indices.
*
* If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to first
* call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector.
*
* Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1)
* if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
*
*/
Scalar& insert(Index row, Index col);
public:
    /** Removes all non zeros but keeps the allocated memory
*
      * This function does not free the currently allocated memory. To release as much memory as possible,
* call \code mat.data().squeeze(); \endcode after resizing it.
*
* \sa resize(Index,Index), data()
*/
inline void setZero()
{
m_data.clear();
memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
if(m_innerNonZeros)
memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
}
/** Preallocates \a reserveSize non zeros.
*
* Precondition: the matrix must be in compressed mode. */
inline void reserve(Index reserveSize)
{
eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
m_data.reserve(reserveSize);
}
#ifdef EIGEN_PARSED_BY_DOXYGEN
/** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
*
      * This function turns the matrix into non-compressed mode.
*
* The type \c SizesType must expose the following interface:
\code
typedef value_type;
const value_type& operator[](i) const;
\endcode
* for \c i in the [0,this->outerSize()[ range.
* Typical choices include std::vector<int>, Eigen::VectorXi, Eigen::VectorXi::Constant, etc.
*/
template<class SizesType>
inline void reserve(const SizesType& reserveSizes);
#else
template<class SizesType>
inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
#if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
typename
#endif
SizesType::value_type())
{
EIGEN_UNUSED_VARIABLE(enableif);
reserveInnerVectors(reserveSizes);
}
#endif // EIGEN_PARSED_BY_DOXYGEN
protected:
template<class SizesType>
inline void reserveInnerVectors(const SizesType& reserveSizes)
{
if(isCompressed())
{
Index totalReserveSize = 0;
// turn the matrix into non-compressed mode
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
if (!m_innerNonZeros) internal::throw_std_bad_alloc();
        // temporarily use m_innerNonZeros to hold the new starting points.
StorageIndex* newOuterIndex = m_innerNonZeros;
StorageIndex count = 0;
for(Index j=0; j<m_outerSize; ++j)
{
newOuterIndex[j] = count;
count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
totalReserveSize += reserveSizes[j];
}
m_data.reserve(totalReserveSize);
StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
for(Index j=m_outerSize-1; j>=0; --j)
{
StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
for(Index i=innerNNZ-1; i>=0; --i)
{
m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
}
previousOuterIndex = m_outerIndex[j];
m_outerIndex[j] = newOuterIndex[j];
m_innerNonZeros[j] = innerNNZ;
}
if(m_outerSize>0)
m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
m_data.resize(m_outerIndex[m_outerSize]);
}
else
{
StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
if (!newOuterIndex) internal::throw_std_bad_alloc();
StorageIndex count = 0;
for(Index j=0; j<m_outerSize; ++j)
{
newOuterIndex[j] = count;
StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
count += toReserve + m_innerNonZeros[j];
}
newOuterIndex[m_outerSize] = count;
m_data.resize(count);
for(Index j=m_outerSize-1; j>=0; --j)
{
Index offset = newOuterIndex[j] - m_outerIndex[j];
if(offset>0)
{
StorageIndex innerNNZ = m_innerNonZeros[j];
for(Index i=innerNNZ-1; i>=0; --i)
{
m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
}
}
}
std::swap(m_outerIndex, newOuterIndex);
std::free(newOuterIndex);
}
}
public:
//--- low level purely coherent filling ---
/** \internal
* \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
* - the nonzero does not already exist
* - the new coefficient is the last one according to the storage order
*
      * Before filling a given inner vector you must call the startVec(Index) function.
*
* After an insertion session, you should call the finalize() function.
*
* \sa insert, insertBackByOuterInner, startVec */
inline Scalar& insertBack(Index row, Index col)
{
return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
}
/** \internal
* \sa insertBack, startVec */
inline Scalar& insertBackByOuterInner(Index outer, Index inner)
{
eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
Index p = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
m_data.append(Scalar(0), inner);
return m_data.value(p);
}
/** \internal
* \warning use it only if you know what you are doing */
inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
{
Index p = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
m_data.append(Scalar(0), inner);
return m_data.value(p);
}
/** \internal
* \sa insertBack, insertBackByOuterInner */
inline void startVec(Index outer)
{
eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
m_outerIndex[outer+1] = m_outerIndex[outer];
}
/** \internal
* Must be called after inserting a set of non zero entries using the low level compressed API.
*/
inline void finalize()
{
if(isCompressed())
{
StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
Index i = m_outerSize;
// find the last filled column
while (i>=0 && m_outerIndex[i]==0)
--i;
++i;
while (i<=m_outerSize)
{
m_outerIndex[i] = size;
++i;
}
}
}
//---
template<typename InputIterators>
void setFromTriplets(const InputIterators& begin, const InputIterators& end);
template<typename InputIterators,typename DupFunctor>
void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
template<typename DupFunctor>
void collapseDuplicates(DupFunctor dup_func = DupFunctor());
//---
/** \internal
* same as insert(Index,Index) except that the indices are given relative to the storage order */
Scalar& insertByOuterInner(Index j, Index i)
{
return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
}
/** Turns the matrix into the \em compressed format.
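      *
      * A minimal usage sketch (illustrative only): random insertions via insert() switch the matrix
      * to uncompressed mode, and makeCompressed() packs it back into standard compressed storage:
      * \code
      * SparseMatrix<double> A(10,10);
      * A.insert(2,3) = 1.5;
      * A.insert(7,1) = 2.0;
      * A.makeCompressed();
      * \endcode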
*/
void makeCompressed()
{
if(isCompressed())
return;
eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
Index oldStart = m_outerIndex[1];
m_outerIndex[1] = m_innerNonZeros[0];
for(Index j=1; j<m_outerSize; ++j)
{
Index nextOldStart = m_outerIndex[j+1];
Index offset = oldStart - m_outerIndex[j];
if(offset>0)
{
for(Index k=0; k<m_innerNonZeros[j]; ++k)
{
m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
}
}
m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
oldStart = nextOldStart;
}
std::free(m_innerNonZeros);
m_innerNonZeros = 0;
m_data.resize(m_outerIndex[m_outerSize]);
m_data.squeeze();
}
    /** Turns the matrix into uncompressed mode */
void uncompress()
{
if(m_innerNonZeros != 0)
return;
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
for (Index i = 0; i < m_outerSize; i++)
{
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
}
}
    /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
prune(default_prunning_func(reference,epsilon));
}
/** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
* The functor type \a KeepFunc must implement the following function:
* \code
* bool operator() (const Index& row, const Index& col, const Scalar& value) const;
* \endcode
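      *
      * For instance, a brief sketch (illustrative only; \c A is assumed to be a SparseMatrix<double>)
      * keeping only the strictly positive entries with a C++11 lambda:
      * \code
      * A.prune([](const Index&, const Index&, const double& value) { return value > 0.; });
      * \endcode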
* \sa prune(Scalar,RealScalar)
*/
template<typename KeepFunc>
void prune(const KeepFunc& keep = KeepFunc())
{
// TODO optimize the uncompressed mode to avoid moving and allocating the data twice
makeCompressed();
StorageIndex k = 0;
for(Index j=0; j<m_outerSize; ++j)
{
Index previousStart = m_outerIndex[j];
m_outerIndex[j] = k;
Index end = m_outerIndex[j+1];
for(Index i=previousStart; i<end; ++i)
{
if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
{
m_data.value(k) = m_data.value(i);
m_data.index(k) = m_data.index(i);
++k;
}
}
}
m_outerIndex[m_outerSize] = k;
m_data.resize(k,0);
}
/** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
*
      * If the sizes of the matrix are decreased, then the matrix is turned into \b uncompressed mode
      * and the storage of the out-of-bounds coefficients is kept and reserved.
* Call makeCompressed() to pack the entries and squeeze extra memory.
*
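      * For example, a brief sketch (illustrative only; \c A is assumed to be a SparseMatrix<double>):
      * \code
      * A.conservativeResize(A.rows()+1, A.cols());  // appends an empty row, existing entries are kept
      * \endcode
      *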
* \sa reserve(), setZero(), makeCompressed()
*/
void conservativeResize(Index rows, Index cols)
{
// No change
if (this->rows() == rows && this->cols() == cols) return;
// If one dimension is null, then there is nothing to be preserved
if(rows==0 || cols==0) return resize(rows,cols);
Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
// Deals with inner non zeros
if (m_innerNonZeros)
{
// Resize m_innerNonZeros
StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
if (!newInnerNonZeros) internal::throw_std_bad_alloc();
m_innerNonZeros = newInnerNonZeros;
for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
m_innerNonZeros[i] = 0;
}
else if (innerChange < 0)
{
// Inner size decreased: allocate a new m_innerNonZeros
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
if (!m_innerNonZeros) internal::throw_std_bad_alloc();
for(Index i = 0; i < m_outerSize; i++)
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
}
// Change the m_innerNonZeros in case of a decrease of inner size
if (m_innerNonZeros && innerChange < 0)
{
for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
{
StorageIndex &n = m_innerNonZeros[i];
StorageIndex start = m_outerIndex[i];
while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
}
}
m_innerSize = newInnerSize;
// Re-allocate outer index structure if necessary
if (outerChange == 0)
return;
StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
if (!newOuterIndex) internal::throw_std_bad_alloc();
m_outerIndex = newOuterIndex;
if (outerChange > 0)
{
StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
m_outerIndex[i] = last;
}
m_outerSize += outerChange;
}
/** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
*
      * This function does not free the currently allocated memory. To release as much memory as possible,
* call \code mat.data().squeeze(); \endcode after resizing it.
*
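      * For example (illustrative only, with hypothetical dimensions \c m and \c n): \code A.resize(m, n); \endcode
      * leaves \c A as an empty \c m \c x \c n matrix; all previously stored entries are discarded.
      *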
* \sa reserve(), setZero()
*/
void resize(Index rows, Index cols)
{
const Index outerSize = IsRowMajor ? rows : cols;
m_innerSize = IsRowMajor ? cols : rows;
m_data.clear();
if (m_outerSize != outerSize || m_outerSize==0)
{
std::free(m_outerIndex);
m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
if (!m_outerIndex) internal::throw_std_bad_alloc();
m_outerSize = outerSize;
}
if(m_innerNonZeros)
{
std::free(m_innerNonZeros);
m_innerNonZeros = 0;
}
memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
}
/** \internal
* Resize the nonzero vector to \a size */
void resizeNonZeros(Index size)
{
m_data.resize(size);
}
/** \returns a const expression of the diagonal coefficients. */
const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
/** \returns a read-write expression of the diagonal coefficients.
* \warning If the diagonal entries are written, then all diagonal
* entries \b must already exist, otherwise an assertion will be raised.
*/
DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
/** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
inline SparseMatrix()
: m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
resize(0, 0);
}
/** Constructs a \a rows \c x \a cols empty matrix */
inline SparseMatrix(Index rows, Index cols)
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
resize(rows, cols);
}
/** Constructs a sparse matrix from the sparse expression \a other */
template<typename OtherDerived>
inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
check_template_parameters();
const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
if (needToTranspose)
*this = other.derived();
else
{
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
internal::call_assignment_no_alias(*this, other.derived());
}
}
/** Constructs a sparse matrix from the sparse selfadjoint view \a other */
template<typename OtherDerived, unsigned int UpLo>
inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
Base::operator=(other);
}
/** Copy constructor (it performs a deep copy) */
inline SparseMatrix(const SparseMatrix& other)
: Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
*this = other.derived();
}
/** \brief Copy constructor with in-place evaluation */
template<typename OtherDerived>
SparseMatrix(const ReturnByValue<OtherDerived>& other)
: Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
initAssignment(other);
other.evalTo(*this);
}
    /** \brief Constructs a sparse matrix from the diagonal expression \a other (in-place evaluation) */
template<typename OtherDerived>
explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
: Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
*this = other.derived();
}
/** Swaps the content of two sparse matrices of the same type.
* This is a fast operation that simply swaps the underlying pointers and parameters. */
inline void swap(SparseMatrix& other)
{
//EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
std::swap(m_outerIndex, other.m_outerIndex);
std::swap(m_innerSize, other.m_innerSize);
std::swap(m_outerSize, other.m_outerSize);
std::swap(m_innerNonZeros, other.m_innerNonZeros);
m_data.swap(other.m_data);
}
/** Sets *this to the identity matrix.
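      * For instance, a brief sketch (illustrative only; \c n is a hypothetical dimension):
      * \code
      * SparseMatrix<double> I(n,n);
      * I.setIdentity();   // n explicitly stored ones on the diagonal
      * \endcode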
      * This function also turns the matrix into compressed mode, and drops any reserved memory. */
inline void setIdentity()
{
eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
this->m_data.resize(rows());
Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
std::free(m_innerNonZeros);
m_innerNonZeros = 0;
}
inline SparseMatrix& operator=(const SparseMatrix& other)
{
if (other.isRValue())
{
swap(other.const_cast_derived());
}
else if(this!=&other)
{
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
initAssignment(other);
if(other.isCompressed())
{
internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
m_data = other.m_data;
}
else
{
Base::operator=(other);
}
}
return *this;
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
{ return Base::operator=(other.derived()); }
#endif // EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
{
EIGEN_DBG_SPARSE(
s << "Nonzero entries:\n";
if(m.isCompressed())
{
for (Index i=0; i<m.nonZeros(); ++i)
s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
}
else
{
for (Index i=0; i<m.outerSize(); ++i)
{
Index p = m.m_outerIndex[i];
Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
Index k=p;
for (; k<pe; ++k) {
s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
}
for (; k<m.m_outerIndex[i+1]; ++k) {
s << "(_,_) ";
}
}
}
s << std::endl;
s << std::endl;
s << "Outer pointers:\n";
for (Index i=0; i<m.outerSize(); ++i) {
s << m.m_outerIndex[i] << " ";
}
s << " $" << std::endl;
if(!m.isCompressed())
{
s << "Inner non zeros:\n";
for (Index i=0; i<m.outerSize(); ++i) {
s << m.m_innerNonZeros[i] << " ";
}
s << " $" << std::endl;
}
s << std::endl;
);
s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
return s;
}
/** Destructor */
inline ~SparseMatrix()
{
std::free(m_outerIndex);
std::free(m_innerNonZeros);
}
/** Overloaded for performance */
Scalar sum() const;
# ifdef EIGEN_SPARSEMATRIX_PLUGIN
# include EIGEN_SPARSEMATRIX_PLUGIN
# endif
protected:
template<typename Other>
void initAssignment(const Other& other)
{
resize(other.rows(), other.cols());
if(m_innerNonZeros)
{
std::free(m_innerNonZeros);
m_innerNonZeros = 0;
}
}
/** \internal
* \sa insert(Index,Index) */
EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
/** \internal
      * A vector object equal to 0 everywhere except for the value \a v at position \a i */
class SingletonVector
{
StorageIndex m_index;
StorageIndex m_value;
public:
typedef StorageIndex value_type;
SingletonVector(Index i, Index v)
: m_index(convert_index(i)), m_value(convert_index(v))
{}
StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
};
/** \internal
* \sa insert(Index,Index) */
EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
public:
/** \internal
* \sa insert(Index,Index) */
EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
{
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
eigen_assert(!isCompressed());
eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
m_data.index(p) = convert_index(inner);
return (m_data.value(p) = Scalar(0));
}
private:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
}
struct default_prunning_func {
default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
inline bool operator() (const Index&, const Index&, const Scalar& value) const
{
return !internal::isMuchSmallerThan(value, reference, epsilon);
}
Scalar reference;
RealScalar epsilon;
};
};
namespace internal {
template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
{
enum { IsRowMajor = SparseMatrixType::IsRowMajor };
typedef typename SparseMatrixType::Scalar Scalar;
typedef typename SparseMatrixType::StorageIndex StorageIndex;
SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
if(begin!=end)
{
// pass 1: count the nnz per inner-vector
typename SparseMatrixType::IndexVector wi(trMat.outerSize());
wi.setZero();
for(InputIterator it(begin); it!=end; ++it)
{
eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
wi(IsRowMajor ? it->col() : it->row())++;
}
// pass 2: insert all the elements into trMat
trMat.reserve(wi);
for(InputIterator it(begin); it!=end; ++it)
trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
// pass 3:
trMat.collapseDuplicates(dup_func);
}
// pass 4: transposed copy -> implicit sorting
mat = trMat;
}
}
/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
*
* A \em triplet is a tuple (i,j,value) defining a non-zero element.
  * The input list of triplets does not have to be sorted, and it can contain duplicated elements.
  * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
  * This is an \em O(n) operation, with \em n the number of triplet elements.
  * The initial contents of \c *this are destroyed.
* The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
* or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
*
* The \a InputIterators value_type must provide the following interface:
* \code
* Scalar value() const; // the value
  * StorageIndex row() const;    // the row index i
  * StorageIndex col() const;    // the column index j
* \endcode
* See for instance the Eigen::Triplet template class.
*
* Here is a typical usage example:
* \code
typedef Triplet<double> T;
std::vector<T> tripletList;
    tripletList.reserve(estimation_of_entries);
for(...)
{
// ...
tripletList.push_back(T(i,j,v_ij));
}
SparseMatrixType m(rows,cols);
m.setFromTriplets(tripletList.begin(), tripletList.end());
// m is ready to go!
* \endcode
*
* \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
* an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
  * be explicitly stored into a std::vector, for instance.
*/
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
}
/** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied:
* \code
* value = dup_func(OldValue, NewValue)
* \endcode
* Here is a C++11 example keeping the latest entry only:
* \code
* mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
* \endcode
*/
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators,typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
}
/** \internal */
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
{
eigen_assert(!isCompressed());
// TODO, in practice we should be able to use m_innerNonZeros for that task
IndexVector wi(innerSize());
wi.fill(-1);
StorageIndex count = 0;
// for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
for(Index j=0; j<outerSize(); ++j)
{
StorageIndex start = count;
Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
{
Index i = m_data.index(k);
if(wi(i)>=start)
{
        // we have already met this entry => accumulate it
m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
}
else
{
m_data.value(count) = m_data.value(k);
m_data.index(count) = m_data.index(k);
wi(i) = count;
++count;
}
}
m_outerIndex[j] = start;
}
m_outerIndex[m_outerSize] = count;
// turn the matrix into compressed form
std::free(m_innerNonZeros);
m_innerNonZeros = 0;
m_data.resize(m_outerIndex[m_outerSize]);
}
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
if (needToTranspose)
{
#ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
#endif
// two passes algorithm:
// 1 - compute the number of coeffs per dest inner vector
// 2 - do the actual copy/eval
// Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
typedef internal::evaluator<_OtherCopy> OtherCopyEval;
OtherCopy otherCopy(other.derived());
OtherCopyEval otherCopyEval(otherCopy);
SparseMatrix dest(other.rows(),other.cols());
Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
// pass 1
// FIXME the above copy could be merged with that pass
for (Index j=0; j<otherCopy.outerSize(); ++j)
for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
++dest.m_outerIndex[it.index()];
// prefix sum
StorageIndex count = 0;
IndexVector positions(dest.outerSize());
for (Index j=0; j<dest.outerSize(); ++j)
{
StorageIndex tmp = dest.m_outerIndex[j];
dest.m_outerIndex[j] = count;
positions[j] = count;
count += tmp;
}
dest.m_outerIndex[dest.outerSize()] = count;
// alloc
dest.m_data.resize(count);
// pass 2
for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
{
for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
{
Index pos = positions[it.index()]++;
dest.m_data.index(pos) = j;
dest.m_data.value(pos) = it.value();
}
}
this->swap(dest);
return *this;
}
else
{
if(other.isRValue())
{
initAssignment(other.derived());
}
// there is no special optimization
return Base::operator=(other.derived());
}
}
template<typename _Scalar, int _Options, typename _StorageIndex>
typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
{
eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
if(isCompressed())
{
if(nonZeros()==0)
{
// reserve space if not already done
if(m_data.allocatedSize()==0)
m_data.reserve(2*m_innerSize);
// turn the matrix into non-compressed mode
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
if(!m_innerNonZeros) internal::throw_std_bad_alloc();
memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
// pack all inner-vectors to the end of the pre-allocated space
// and allocate the entire free-space to the first inner-vector
StorageIndex end = convert_index(m_data.allocatedSize());
for(Index j=1; j<=m_outerSize; ++j)
m_outerIndex[j] = end;
}
else
{
// turn the matrix into non-compressed mode
m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
if(!m_innerNonZeros) internal::throw_std_bad_alloc();
for(Index j=0; j<m_outerSize; ++j)
m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
}
}
// check whether we can do a fast "push back" insertion
Index data_end = m_data.allocatedSize();
// First case: we are filling a new inner vector which is packed at the end.
// We assume that all remaining inner-vectors are also empty and packed to the end.
if(m_outerIndex[outer]==data_end)
{
eigen_internal_assert(m_innerNonZeros[outer]==0);
// pack previous empty inner-vectors to end of the used-space
// and allocate the entire free-space to the current inner-vector.
StorageIndex p = convert_index(m_data.size());
Index j = outer;
while(j>=0 && m_innerNonZeros[j]==0)
m_outerIndex[j--] = p;
// push back the new element
++m_innerNonZeros[outer];
m_data.append(Scalar(0), inner);
// check for reallocation
if(data_end != m_data.allocatedSize())
{
// m_data has been reallocated
// -> move remaining inner-vectors back to the end of the free-space
// so that the entire free-space is allocated to the current inner-vector.
eigen_internal_assert(data_end < m_data.allocatedSize());
StorageIndex new_end = convert_index(m_data.allocatedSize());
for(Index k=outer+1; k<=m_outerSize; ++k)
if(m_outerIndex[k]==data_end)
m_outerIndex[k] = new_end;
}
return m_data.value(p);
}
// Second case: the next inner-vector is packed to the end
  // and the current inner-vector's end matches the used space.
if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
{
eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
// add space for the new element
++m_innerNonZeros[outer];
m_data.resize(m_data.size()+1);
// check for reallocation
if(data_end != m_data.allocatedSize())
{
// m_data has been reallocated
// -> move remaining inner-vectors back to the end of the free-space
// so that the entire free-space is allocated to the current inner-vector.
eigen_internal_assert(data_end < m_data.allocatedSize());
StorageIndex new_end = convert_index(m_data.allocatedSize());
for(Index k=outer+1; k<=m_outerSize; ++k)
if(m_outerIndex[k]==data_end)
m_outerIndex[k] = new_end;
}
// and insert it at the right position (sorted insertion)
Index startId = m_outerIndex[outer];
Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
while ( (p > startId) && (m_data.index(p-1) > inner) )
{
m_data.index(p) = m_data.index(p-1);
m_data.value(p) = m_data.value(p-1);
--p;
}
m_data.index(p) = convert_index(inner);
return (m_data.value(p) = 0);
}
if(m_data.size() != m_data.allocatedSize())
{
    // make sure the matrix is compatible with random uncompressed insertion:
m_data.resize(m_data.allocatedSize());
this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
}
return insertUncompressed(row,col);
}
template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
{
eigen_assert(!isCompressed());
const Index outer = IsRowMajor ? row : col;
const StorageIndex inner = convert_index(IsRowMajor ? col : row);
Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
StorageIndex innerNNZ = m_innerNonZeros[outer];
if(innerNNZ>=room)
{
// this inner vector is full, we need to reallocate the whole buffer :(
reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
}
Index startId = m_outerIndex[outer];
Index p = startId + m_innerNonZeros[outer];
while ( (p > startId) && (m_data.index(p-1) > inner) )
{
m_data.index(p) = m_data.index(p-1);
m_data.value(p) = m_data.value(p-1);
--p;
}
eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
m_innerNonZeros[outer]++;
m_data.index(p) = inner;
return (m_data.value(p) = Scalar(0));
}
template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
{
eigen_assert(isCompressed());
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
Index previousOuter = outer;
if (m_outerIndex[outer+1]==0)
{
// we start a new inner vector
while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
{
m_outerIndex[previousOuter] = convert_index(m_data.size());
--previousOuter;
}
m_outerIndex[outer+1] = m_outerIndex[outer];
}
// here we have to handle the tricky case where the outerIndex array
  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
// the 2nd inner vector...
bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
&& (std::size_t(m_outerIndex[outer+1]) == m_data.size());
std::size_t startId = m_outerIndex[outer];
// FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
std::size_t p = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
double reallocRatio = 1;
if (m_data.allocatedSize()<=m_data.size())
{
// if there is no preallocated memory, let's reserve a minimum of 32 elements
if (m_data.size()==0)
{
m_data.reserve(32);
}
else
{
// we need to reallocate the data, to reduce multiple reallocations
// we use a smart resize algorithm based on the current filling ratio
      // in addition, we use double to avoid integer overflows
double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
// furthermore we bound the realloc ratio to:
      // 1) reduce multiple minor reallocs when the matrix is almost filled
      // 2) avoid allocating too much memory when the matrix is almost empty
reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
}
}
m_data.resize(m_data.size()+1,reallocRatio);
if (!isLastVec)
{
if (previousOuter==-1)
{
// oops wrong guess.
// let's correct the outer offsets
for (Index k=0; k<=(outer+1); ++k)
m_outerIndex[k] = 0;
Index k=outer+1;
while(m_outerIndex[k]==0)
m_outerIndex[k++] = 1;
while (k<=m_outerSize && m_outerIndex[k]!=0)
m_outerIndex[k++]++;
p = 0;
--k;
k = m_outerIndex[k]-1;
while (k>0)
{
m_data.index(k) = m_data.index(k-1);
m_data.value(k) = m_data.value(k-1);
k--;
}
}
else
{
// we are not inserting into the last inner vec
// update outer indices:
Index j = outer+2;
while (j<=m_outerSize && m_outerIndex[j]!=0)
m_outerIndex[j++]++;
--j;
// shift data of last vecs:
Index k = m_outerIndex[j]-1;
while (k>=Index(p))
{
m_data.index(k) = m_data.index(k-1);
m_data.value(k) = m_data.value(k-1);
k--;
}
}
}
while ( (p > startId) && (m_data.index(p-1) > inner) )
{
m_data.index(p) = m_data.index(p-1);
m_data.value(p) = m_data.value(p-1);
--p;
}
m_data.index(p) = inner;
return (m_data.value(p) = Scalar(0));
}
namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
: evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
{
typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
evaluator() : Base() {}
explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
};
}
} // end namespace Eigen
#endif // EIGEN_SPARSEMATRIX_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparseRef.h
|
.h
| 15,492
| 398
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_REF_H
#define EIGEN_SPARSE_REF_H
namespace Eigen {
enum {
StandardCompressedFormat = 2 /**< used by Ref<SparseMatrix> to specify whether the input storage must be in standard compressed form */
};
namespace internal {
template<typename Derived> class SparseRefBase;
template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
struct traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
: public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >
{
typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
enum {
Options = _Options,
Flags = traits<PlainObjectType>::Flags | CompressedAccessBit | NestByRefBit
};
template<typename Derived> struct match {
enum {
StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
MatchAtCompileTime = (Derived::Flags&CompressedAccessBit) && StorageOrderMatch
};
typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
};
};
template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
struct traits<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
: public traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
{
enum {
Flags = (traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >::Flags | CompressedAccessBit | NestByRefBit) & ~LvalueBit
};
};
template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
struct traits<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
: public traits<SparseVector<MatScalar,MatOptions,MatIndex> >
{
typedef SparseVector<MatScalar,MatOptions,MatIndex> PlainObjectType;
enum {
Options = _Options,
Flags = traits<PlainObjectType>::Flags | CompressedAccessBit | NestByRefBit
};
template<typename Derived> struct match {
enum {
MatchAtCompileTime = (Derived::Flags&CompressedAccessBit) && Derived::IsVectorAtCompileTime
};
typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
};
};
template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
struct traits<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
: public traits<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
{
enum {
Flags = (traits<SparseVector<MatScalar,MatOptions,MatIndex> >::Flags | CompressedAccessBit | NestByRefBit) & ~LvalueBit
};
};
template<typename Derived>
struct traits<SparseRefBase<Derived> > : public traits<Derived> {};
template<typename Derived> class SparseRefBase
: public SparseMapBase<Derived>
{
public:
typedef SparseMapBase<Derived> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseRefBase)
SparseRefBase()
: Base(RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime, 0, 0, 0, 0, 0)
{}
protected:
template<typename Expression>
void construct(Expression& expr)
{
if(expr.outerIndexPtr()==0)
::new (static_cast<Base*>(this)) Base(expr.size(), expr.nonZeros(), expr.innerIndexPtr(), expr.valuePtr());
else
::new (static_cast<Base*>(this)) Base(expr.rows(), expr.cols(), expr.nonZeros(), expr.outerIndexPtr(), expr.innerIndexPtr(), expr.valuePtr(), expr.innerNonZeroPtr());
}
};
} // namespace internal
/**
* \ingroup SparseCore_Module
*
* \brief A sparse matrix expression referencing an existing sparse expression
*
  * \tparam SparseMatrixType the equivalent sparse matrix type of the referenced data; it must be a template instance of class SparseMatrix.
  * \tparam Options specifies whether a standard compressed format is required; \c Options can be either \c #StandardCompressedFormat or \c 0.
* The default is \c 0.
*
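  * A minimal usage sketch (illustrative only; \c accumulate_nnz is a hypothetical function name):
  * \code
  * Index accumulate_nnz(const Ref<const SparseMatrix<double> >& S) { return S.nonZeros(); }
  * SparseMatrix<double> A(10,10);
  * accumulate_nnz(A);        // references A directly, no copy
  * accumulate_nnz(2.0 * A);  // the expression is evaluated into a compressed temporary first
  * \endcode
  *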
* \sa class Ref
*/
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType >
: public internal::SparseRefBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType > >
#else
template<typename SparseMatrixType, int Options>
class Ref<SparseMatrixType, Options>
: public SparseMapBase<Derived,WriteAccessors> // yes, that's weird to use Derived here, but that works!
#endif
{
typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
typedef internal::traits<Ref> Traits;
template<int OtherOptions>
inline Ref(const SparseMatrix<MatScalar,OtherOptions,MatIndex>& expr);
template<int OtherOptions>
inline Ref(const MappedSparseMatrix<MatScalar,OtherOptions,MatIndex>& expr);
public:
typedef internal::SparseRefBase<Ref> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<int OtherOptions>
inline Ref(SparseMatrix<MatScalar,OtherOptions,MatIndex>& expr)
{
EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseMatrix<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) );
Base::construct(expr.derived());
}
template<int OtherOptions>
inline Ref(MappedSparseMatrix<MatScalar,OtherOptions,MatIndex>& expr)
{
EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseMatrix<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) );
Base::construct(expr.derived());
}
template<typename Derived>
inline Ref(const SparseCompressedBase<Derived>& expr)
#else
/** Implicit constructor from any sparse expression (2D matrix or 1D vector) */
template<typename Derived>
inline Ref(SparseCompressedBase<Derived>& expr)
#endif
{
EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) );
Base::construct(expr.const_cast_derived());
}
};
// this is the const ref version
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
: public internal::SparseRefBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
{
typedef SparseMatrix<MatScalar,MatOptions,MatIndex> TPlainObjectType;
typedef internal::traits<Ref> Traits;
public:
typedef internal::SparseRefBase<Ref> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)
template<typename Derived>
inline Ref(const SparseMatrixBase<Derived>& expr) : m_hasCopy(false)
{
construct(expr.derived(), typename Traits::template match<Derived>::type());
}
inline Ref(const Ref& other) : Base(other), m_hasCopy(false) {
// copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy
}
template<typename OtherRef>
inline Ref(const RefBase<OtherRef>& other) : m_hasCopy(false) {
construct(other.derived(), typename Traits::template match<OtherRef>::type());
}
~Ref() {
if(m_hasCopy) {
TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
obj->~TPlainObjectType();
}
}
protected:
template<typename Expression>
void construct(const Expression& expr,internal::true_type)
{
if((Options & int(StandardCompressedFormat)) && (!expr.isCompressed()))
{
TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
::new (obj) TPlainObjectType(expr);
m_hasCopy = true;
Base::construct(*obj);
}
else
{
Base::construct(expr);
}
}
template<typename Expression>
void construct(const Expression& expr, internal::false_type)
{
TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
::new (obj) TPlainObjectType(expr);
m_hasCopy = true;
Base::construct(*obj);
}
protected:
char m_object_bytes[sizeof(TPlainObjectType)];
bool m_hasCopy;
};
/**
* \ingroup SparseCore_Module
*
* \brief A sparse vector expression referencing an existing sparse vector expression
*
* \tparam SparseVectorType the equivalent sparse vector type of the referenced data, it must be a template instance of class SparseVector.
*
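  * A minimal sketch (illustrative only):
  * \code
  * SparseVector<double> v(10);
  * v.insert(3) = 1.0;
  * Ref<SparseVector<double> > vr(v);   // vr references v's data, no copy
  * vr *= 2.0;                          // modifies v in place
  * \endcode
  *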
* \sa class Ref
*/
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType >
: public internal::SparseRefBase<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType > >
#else
template<typename SparseVectorType>
class Ref<SparseVectorType>
: public SparseMapBase<Derived,WriteAccessors>
#endif
{
typedef SparseVector<MatScalar,MatOptions,MatIndex> PlainObjectType;
typedef internal::traits<Ref> Traits;
template<int OtherOptions>
inline Ref(const SparseVector<MatScalar,OtherOptions,MatIndex>& expr);
public:
typedef internal::SparseRefBase<Ref> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<int OtherOptions>
inline Ref(SparseVector<MatScalar,OtherOptions,MatIndex>& expr)
{
EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseVector<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
Base::construct(expr.derived());
}
template<typename Derived>
inline Ref(const SparseCompressedBase<Derived>& expr)
#else
/** Implicit constructor from any 1D sparse vector expression */
template<typename Derived>
inline Ref(SparseCompressedBase<Derived>& expr)
#endif
{
EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
Base::construct(expr.const_cast_derived());
}
};
// this is the const ref version
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType>
: public internal::SparseRefBase<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
{
typedef SparseVector<MatScalar,MatOptions,MatIndex> TPlainObjectType;
typedef internal::traits<Ref> Traits;
public:
typedef internal::SparseRefBase<Ref> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)
template<typename Derived>
inline Ref(const SparseMatrixBase<Derived>& expr) : m_hasCopy(false)
{
construct(expr.derived(), typename Traits::template match<Derived>::type());
}
inline Ref(const Ref& other) : Base(other), m_hasCopy(false) {
// copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy
}
template<typename OtherRef>
inline Ref(const RefBase<OtherRef>& other) : m_hasCopy(false) {
construct(other.derived(), typename Traits::template match<OtherRef>::type());
}
~Ref() {
if(m_hasCopy) {
TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
obj->~TPlainObjectType();
}
}
protected:
template<typename Expression>
void construct(const Expression& expr,internal::true_type)
{
Base::construct(expr);
}
template<typename Expression>
void construct(const Expression& expr, internal::false_type)
{
TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
::new (obj) TPlainObjectType(expr);
m_hasCopy = true;
Base::construct(*obj);
}
protected:
char m_object_bytes[sizeof(TPlainObjectType)];
bool m_hasCopy;
};
namespace internal {
// FIXME shall we introduce a general evaluator_ref that we can specialize for any sparse object once, and thus remove this copy-paste boilerplate...
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct evaluator<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
: evaluator<SparseCompressedBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
{
typedef evaluator<SparseCompressedBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
typedef Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
evaluator() : Base() {}
explicit evaluator(const XprType &mat) : Base(mat) {}
};
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct evaluator<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
: evaluator<SparseCompressedBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
{
typedef evaluator<SparseCompressedBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
typedef Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
evaluator() : Base() {}
explicit evaluator(const XprType &mat) : Base(mat) {}
};
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct evaluator<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
: evaluator<SparseCompressedBase<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
{
typedef evaluator<SparseCompressedBase<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
typedef Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
evaluator() : Base() {}
explicit evaluator(const XprType &mat) : Base(mat) {}
};
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct evaluator<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
: evaluator<SparseCompressedBase<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
{
typedef evaluator<SparseCompressedBase<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
typedef Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
evaluator() : Base() {}
explicit evaluator(const XprType &mat) : Base(mat) {}
};
}
} // end namespace Eigen
#endif // EIGEN_SPARSE_REF_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparseMap.h
|
.h
| 12,589
| 306
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_MAP_H
#define EIGEN_SPARSE_MAP_H
namespace Eigen {
namespace internal {
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct traits<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
: public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >
{
typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
typedef traits<PlainObjectType> TraitsBase;
enum {
Flags = TraitsBase::Flags & (~NestByRefBit)
};
};
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct traits<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
: public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >
{
typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
typedef traits<PlainObjectType> TraitsBase;
enum {
Flags = TraitsBase::Flags & (~ (NestByRefBit | LvalueBit))
};
};
} // end namespace internal
template<typename Derived,
int Level = internal::accessors_level<Derived>::has_write_access ? WriteAccessors : ReadOnlyAccessors
> class SparseMapBase;
/** \ingroup SparseCore_Module
* class SparseMapBase
* \brief Common base class for Map and Ref instance of sparse matrix and vector.
*/
template<typename Derived>
class SparseMapBase<Derived,ReadOnlyAccessors>
: public SparseCompressedBase<Derived>
{
public:
typedef SparseCompressedBase<Derived> Base;
typedef typename Base::Scalar Scalar;
typedef typename Base::StorageIndex StorageIndex;
enum { IsRowMajor = Base::IsRowMajor };
using Base::operator=;
protected:
typedef typename internal::conditional<
bool(internal::is_lvalue<Derived>::value),
Scalar *, const Scalar *>::type ScalarPointer;
typedef typename internal::conditional<
bool(internal::is_lvalue<Derived>::value),
StorageIndex *, const StorageIndex *>::type IndexPointer;
Index m_outerSize;
Index m_innerSize;
Array<StorageIndex,2,1> m_zero_nnz;
IndexPointer m_outerIndex;
IndexPointer m_innerIndices;
ScalarPointer m_values;
IndexPointer m_innerNonZeros;
public:
/** \copydoc SparseMatrixBase::rows() */
inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
/** \copydoc SparseMatrixBase::cols() */
inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
/** \copydoc SparseMatrixBase::innerSize() */
inline Index innerSize() const { return m_innerSize; }
/** \copydoc SparseMatrixBase::outerSize() */
inline Index outerSize() const { return m_outerSize; }
/** \copydoc SparseCompressedBase::nonZeros */
inline Index nonZeros() const { return m_zero_nnz[1]; }
/** \copydoc SparseCompressedBase::isCompressed */
bool isCompressed() const { return m_innerNonZeros==0; }
//----------------------------------------
// direct access interface
/** \copydoc SparseMatrix::valuePtr */
inline const Scalar* valuePtr() const { return m_values; }
/** \copydoc SparseMatrix::innerIndexPtr */
inline const StorageIndex* innerIndexPtr() const { return m_innerIndices; }
/** \copydoc SparseMatrix::outerIndexPtr */
inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
/** \copydoc SparseMatrix::innerNonZeroPtr */
inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
//----------------------------------------
/** \copydoc SparseMatrix::coeff */
inline Scalar coeff(Index row, Index col) const
{
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
Index start = m_outerIndex[outer];
Index end = isCompressed() ? m_outerIndex[outer+1] : start + m_innerNonZeros[outer];
if (start==end)
return Scalar(0);
else if (end>0 && inner==m_innerIndices[end-1])
return m_values[end-1];
// ^^ optimization: let's first check if it is the last coefficient
// (very common in high level algorithms)
const StorageIndex* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
const Index id = r-&m_innerIndices[0];
return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
}
inline SparseMapBase(Index rows, Index cols, Index nnz, IndexPointer outerIndexPtr, IndexPointer innerIndexPtr,
ScalarPointer valuePtr, IndexPointer innerNonZerosPtr = 0)
: m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_zero_nnz(0,internal::convert_index<StorageIndex>(nnz)), m_outerIndex(outerIndexPtr),
m_innerIndices(innerIndexPtr), m_values(valuePtr), m_innerNonZeros(innerNonZerosPtr)
{}
// for vectors
inline SparseMapBase(Index size, Index nnz, IndexPointer innerIndexPtr, ScalarPointer valuePtr)
: m_outerSize(1), m_innerSize(size), m_zero_nnz(0,internal::convert_index<StorageIndex>(nnz)), m_outerIndex(m_zero_nnz.data()),
m_innerIndices(innerIndexPtr), m_values(valuePtr), m_innerNonZeros(0)
{}
/** Empty destructor */
inline ~SparseMapBase() {}
protected:
inline SparseMapBase() {}
};
/** \ingroup SparseCore_Module
* class SparseMapBase
* \brief Common base class for writable Map and Ref instance of sparse matrix and vector.
*/
template<typename Derived>
class SparseMapBase<Derived,WriteAccessors>
: public SparseMapBase<Derived,ReadOnlyAccessors>
{
typedef MapBase<Derived, ReadOnlyAccessors> ReadOnlyMapBase;
public:
typedef SparseMapBase<Derived, ReadOnlyAccessors> Base;
typedef typename Base::Scalar Scalar;
typedef typename Base::StorageIndex StorageIndex;
enum { IsRowMajor = Base::IsRowMajor };
using Base::operator=;
public:
//----------------------------------------
// direct access interface
using Base::valuePtr;
using Base::innerIndexPtr;
using Base::outerIndexPtr;
using Base::innerNonZeroPtr;
/** \copydoc SparseMatrix::valuePtr */
inline Scalar* valuePtr() { return Base::m_values; }
/** \copydoc SparseMatrix::innerIndexPtr */
inline StorageIndex* innerIndexPtr() { return Base::m_innerIndices; }
/** \copydoc SparseMatrix::outerIndexPtr */
inline StorageIndex* outerIndexPtr() { return Base::m_outerIndex; }
/** \copydoc SparseMatrix::innerNonZeroPtr */
inline StorageIndex* innerNonZeroPtr() { return Base::m_innerNonZeros; }
//----------------------------------------
/** \copydoc SparseMatrix::coeffRef */
inline Scalar& coeffRef(Index row, Index col)
{
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
Index start = Base::m_outerIndex[outer];
Index end = Base::isCompressed() ? Base::m_outerIndex[outer+1] : start + Base::m_innerNonZeros[outer];
eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
StorageIndex* r = std::lower_bound(&Base::m_innerIndices[start],&Base::m_innerIndices[end],inner);
const Index id = r - &Base::m_innerIndices[0];
eigen_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
return const_cast<Scalar*>(Base::m_values)[id];
}
inline SparseMapBase(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr,
Scalar* valuePtr, StorageIndex* innerNonZerosPtr = 0)
: Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
{}
// for vectors
inline SparseMapBase(Index size, Index nnz, StorageIndex* innerIndexPtr, Scalar* valuePtr)
: Base(size, nnz, innerIndexPtr, valuePtr)
{}
/** Empty destructor */
inline ~SparseMapBase() {}
protected:
inline SparseMapBase() {}
};
/** \ingroup SparseCore_Module
*
* \brief Specialization of class Map for SparseMatrix-like storage.
*
* \tparam SparseMatrixType the equivalent sparse matrix type of the referenced data, it must be a template instance of class SparseMatrix.
*
* \sa class Map, class SparseMatrix, class Ref<SparseMatrixType,Options>
*/
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
: public SparseMapBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
#else
template<typename SparseMatrixType>
class Map<SparseMatrixType>
: public SparseMapBase<Derived,WriteAccessors>
#endif
{
public:
typedef SparseMapBase<Map> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(Map)
enum { IsRowMajor = Base::IsRowMajor };
public:
/** Constructs a read-write Map to a sparse matrix of size \a rows x \a cols, containing \a nnz non-zero coefficients,
      * stored in a sparse format defined by the pointers \a outerIndexPtr, \a innerIndexPtr, and \a valuePtr.
* If the optional parameter \a innerNonZerosPtr is the null pointer, then a standard compressed format is assumed.
*
* This constructor is available only if \c SparseMatrixType is non-const.
*
* More details on the expected storage schemes are given in the \ref TutorialSparse "manual pages".
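      *
      * A minimal sketch mapping raw compressed-column arrays (illustrative only; the arrays describe
      * a 3 x 3 column-major matrix with 2 nonzeros and must outlive the map):
      * \code
      * int    outer[4]  = {0, 1, 1, 2};
      * int    inner[2]  = {0, 2};
      * double values[2] = {3.0, -1.0};
      * Map<SparseMatrix<double> > M(3, 3, 2, outer, inner, values);
      * \endcode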
*/
inline Map(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr,
StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZerosPtr = 0)
: Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
{}
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** Empty destructor */
inline ~Map() {}
};
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
: public SparseMapBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
{
public:
typedef SparseMapBase<Map> Base;
EIGEN_SPARSE_PUBLIC_INTERFACE(Map)
enum { IsRowMajor = Base::IsRowMajor };
public:
#endif
/** This is the const version of the above constructor.
*
* This constructor is available only if \c SparseMatrixType is const, e.g.:
* \code Map<const SparseMatrix<double> > \endcode
*/
inline Map(Index rows, Index cols, Index nnz, const StorageIndex* outerIndexPtr,
const StorageIndex* innerIndexPtr, const Scalar* valuePtr, const StorageIndex* innerNonZerosPtr = 0)
: Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
{}
/** Empty destructor */
inline ~Map() {}
};
namespace internal {
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct evaluator<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
: evaluator<SparseCompressedBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
{
typedef evaluator<SparseCompressedBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
typedef Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
evaluator() : Base() {}
explicit evaluator(const XprType &mat) : Base(mat) {}
};
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct evaluator<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
: evaluator<SparseCompressedBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
{
typedef evaluator<SparseCompressedBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
typedef Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
evaluator() : Base() {}
explicit evaluator(const XprType &mat) : Base(mat) {}
};
}
} // end namespace Eigen
#endif // EIGEN_SPARSE_MAP_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/MappedSparseMatrix.h
|
.h
| 2,191
| 68
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MAPPED_SPARSEMATRIX_H
#define EIGEN_MAPPED_SPARSEMATRIX_H
namespace Eigen {
/** \deprecated Use Map<SparseMatrix<> >
* \class MappedSparseMatrix
*
* \brief Sparse matrix
*
* \param _Scalar the scalar type, i.e. the type of the coefficients
*
* See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
*
*/
namespace internal {
template<typename _Scalar, int _Flags, typename _StorageIndex>
struct traits<MappedSparseMatrix<_Scalar, _Flags, _StorageIndex> > : traits<SparseMatrix<_Scalar, _Flags, _StorageIndex> >
{};
} // end namespace internal
template<typename _Scalar, int _Flags, typename _StorageIndex>
class MappedSparseMatrix
: public Map<SparseMatrix<_Scalar, _Flags, _StorageIndex> >
{
typedef Map<SparseMatrix<_Scalar, _Flags, _StorageIndex> > Base;
public:
typedef typename Base::StorageIndex StorageIndex;
typedef typename Base::Scalar Scalar;
inline MappedSparseMatrix(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZeroPtr = 0)
: Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZeroPtr)
{}
/** Empty destructor */
inline ~MappedSparseMatrix() {}
};
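// Migration sketch (illustrative): the deprecated class above is only a thin
// wrapper, so existing code of the form
//
//   Eigen::MappedSparseMatrix<double> m(rows, cols, nnz, outer, inner, vals);
//
// can be rewritten with the non-deprecated spelling and the same arguments:
//
//   Eigen::Map<Eigen::SparseMatrix<double> > m(rows, cols, nnz, outer, inner, vals);
//
// where rows, cols, nnz and the three pointers refer to the caller's own
// compressed storage, as in the example further above.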
namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<MappedSparseMatrix<_Scalar,_Options,_StorageIndex> >
: evaluator<SparseCompressedBase<MappedSparseMatrix<_Scalar,_Options,_StorageIndex> > >
{
typedef MappedSparseMatrix<_Scalar,_Options,_StorageIndex> XprType;
typedef evaluator<SparseCompressedBase<XprType> > Base;
evaluator() : Base() {}
explicit evaluator(const XprType &mat) : Base(mat) {}
};
}
} // end namespace Eigen
#endif // EIGEN_MAPPED_SPARSEMATRIX_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparseView.h
|
.h
| 8,127
| 255
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Daniel Lowengrub <lowdanie@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEVIEW_H
#define EIGEN_SPARSEVIEW_H
namespace Eigen {
namespace internal {
template<typename MatrixType>
struct traits<SparseView<MatrixType> > : traits<MatrixType>
{
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Sparse StorageKind;
enum {
Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
};
};
} // end namespace internal
/** \ingroup SparseCore_Module
* \class SparseView
*
* \brief Expression of a dense or sparse matrix with zero or too small values removed
*
* \tparam MatrixType the type of the object of which we are removing the small entries
*
  * This class represents an expression of a given dense or sparse matrix from
  * which entries smaller than \c reference * \c epsilon have been removed.
* It is the return type of MatrixBase::sparseView() and SparseMatrixBase::pruned()
* and most of the time this is the only way it is used.
*
* \sa MatrixBase::sparseView(), SparseMatrixBase::pruned()
*/
template<typename MatrixType>
class SparseView : public SparseMatrixBase<SparseView<MatrixType> >
{
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
typedef SparseMatrixBase<SparseView > Base;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)
typedef typename internal::remove_all<MatrixType>::type NestedExpression;
explicit SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0),
const RealScalar &epsilon = NumTraits<Scalar>::dummy_precision())
: m_matrix(mat), m_reference(reference), m_epsilon(epsilon) {}
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
inline Index innerSize() const { return m_matrix.innerSize(); }
inline Index outerSize() const { return m_matrix.outerSize(); }
/** \returns the nested expression */
const typename internal::remove_all<MatrixTypeNested>::type&
nestedExpression() const { return m_matrix; }
Scalar reference() const { return m_reference; }
RealScalar epsilon() const { return m_epsilon; }
protected:
MatrixTypeNested m_matrix;
Scalar m_reference;
RealScalar m_epsilon;
};
namespace internal {
// TODO find a way to unify the two following variants
// This is tricky because implementing an inner iterator on top of an IndexBased evaluator is
// not easy because the evaluators do not expose the sizes of the underlying expression.
template<typename ArgType>
struct unary_evaluator<SparseView<ArgType>, IteratorBased>
: public evaluator_base<SparseView<ArgType> >
{
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
public:
typedef SparseView<ArgType> XprType;
class InnerIterator : public EvalIterator
{
protected:
typedef typename XprType::Scalar Scalar;
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer)
: EvalIterator(sve.m_argImpl,outer), m_view(sve.m_view)
{
incrementToNonZero();
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
EvalIterator::operator++();
incrementToNonZero();
return *this;
}
using EvalIterator::value;
protected:
const XprType &m_view;
private:
void incrementToNonZero()
{
while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.reference(), m_view.epsilon()))
{
EvalIterator::operator++();
}
}
};
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {}
protected:
evaluator<ArgType> m_argImpl;
const XprType &m_view;
};
template<typename ArgType>
struct unary_evaluator<SparseView<ArgType>, IndexBased>
: public evaluator_base<SparseView<ArgType> >
{
public:
typedef SparseView<ArgType> XprType;
protected:
enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit };
typedef typename XprType::Scalar Scalar;
typedef typename XprType::StorageIndex StorageIndex;
public:
class InnerIterator
{
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer)
: m_sve(sve), m_inner(0), m_outer(outer), m_end(sve.m_view.innerSize())
{
incrementToNonZero();
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
m_inner++;
incrementToNonZero();
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const
{
return (IsRowMajor) ? m_sve.m_argImpl.coeff(m_outer, m_inner)
: m_sve.m_argImpl.coeff(m_inner, m_outer);
}
EIGEN_STRONG_INLINE StorageIndex index() const { return m_inner; }
inline Index row() const { return IsRowMajor ? m_outer : index(); }
inline Index col() const { return IsRowMajor ? index() : m_outer; }
EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }
protected:
const unary_evaluator &m_sve;
Index m_inner;
const Index m_outer;
const Index m_end;
private:
void incrementToNonZero()
{
while((bool(*this)) && internal::isMuchSmallerThan(value(), m_sve.m_view.reference(), m_sve.m_view.epsilon()))
{
m_inner++;
}
}
};
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {}
protected:
evaluator<ArgType> m_argImpl;
const XprType &m_view;
};
} // end namespace internal
/** \ingroup SparseCore_Module
*
* \returns a sparse expression of the dense expression \c *this with values smaller than
* \a reference * \a epsilon removed.
*
* This method is typically used when prototyping to convert a quickly assembled dense Matrix \c D to a SparseMatrix \c S:
* \code
* MatrixXd D(n,m);
* SparseMatrix<double> S;
* S = D.sparseView(); // suppress numerical zeros (exact)
* S = D.sparseView(reference);
* S = D.sparseView(reference,epsilon);
* \endcode
  * where \a reference is a meaningful non-zero reference value,
* and \a epsilon is a tolerance factor defaulting to NumTraits<Scalar>::dummy_precision().
*
* \sa SparseMatrixBase::pruned(), class SparseView */
template<typename Derived>
const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& reference,
const typename NumTraits<Scalar>::Real& epsilon) const
{
return SparseView<Derived>(derived(), reference, epsilon);
}
/** \returns an expression of \c *this with values smaller than
* \a reference * \a epsilon removed.
*
* This method is typically used in conjunction with the product of two sparse matrices
* to automatically prune the smallest values as follows:
* \code
* C = (A*B).pruned(); // suppress numerical zeros (exact)
* C = (A*B).pruned(ref);
* C = (A*B).pruned(ref,epsilon);
* \endcode
  * where \c ref is a meaningful non-zero reference value.
* */
template<typename Derived>
const SparseView<Derived>
SparseMatrixBase<Derived>::pruned(const Scalar& reference,
const RealScalar& epsilon) const
{
return SparseView<Derived>(derived(), reference, epsilon);
}
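// A small end-to-end sketch of the two entry points above (illustrative; the
// matrix sizes and thresholds are assumptions for the example):
//
//   Eigen::MatrixXd D = Eigen::MatrixXd::Random(100, 100);
//   Eigen::SparseMatrix<double> S = D.sparseView(1.0, 1e-3); // drops entries whose magnitude is below about 1.0*1e-3
//
//   Eigen::SparseMatrix<double> A = S, B = S.transpose();
//   Eigen::SparseMatrix<double> C = (A * B).pruned();        // prune exact numerical zeros of the product on the fly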
} // end namespace Eigen
#endif
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparseDenseProduct.h
|
.h
| 12,487
| 321
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEDENSEPRODUCT_H
#define EIGEN_SPARSEDENSEPRODUCT_H
namespace Eigen {
namespace internal {
template <> struct product_promote_storage_type<Sparse,Dense, OuterProduct> { typedef Sparse ret; };
template <> struct product_promote_storage_type<Dense,Sparse, OuterProduct> { typedef Sparse ret; };
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
typename AlphaType,
int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
struct sparse_time_dense_product_impl;
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
typedef evaluator<Lhs> LhsEval;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
LhsEval lhsEval(lhs);
Index n = lhs.outerSize();
#ifdef EIGEN_HAS_OPENMP
Eigen::initParallel();
Index threads = Eigen::nbThreads();
#endif
for(Index c=0; c<rhs.cols(); ++c)
{
#ifdef EIGEN_HAS_OPENMP
// This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
// It basically represents the minimal amount of work to be done to be worth it.
if(threads>1 && lhsEval.nonZerosEstimate() > 20000)
{
#pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
for(Index i=0; i<n; ++i)
processRow(lhsEval,rhs,res,alpha,i,c);
}
else
#endif
{
for(Index i=0; i<n; ++i)
processRow(lhsEval,rhs,res,alpha,i,c);
}
}
}
static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)
{
typename Res::Scalar tmp(0);
for(LhsInnerIterator it(lhsEval,i); it ;++it)
tmp += it.value() * rhs.coeff(it.index(),col);
res.coeffRef(i,col) += alpha * tmp;
}
};
// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
// -> let's disable it for now as it is conflicting with generic scalar*matrix and matrix*scalar operators
// template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
// struct ScalarBinaryOpTraits<T1, Ref<T2/*, _Options, _StrideType*/> >
// {
// enum {
// Defined = 1
// };
// typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
// };
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index c=0; c<rhs.cols(); ++c)
{
for(Index j=0; j<lhs.outerSize(); ++j)
{
// typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
typename ScalarBinaryOpTraits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res.coeffRef(it.index(),c) += it.value() * rhs_j;
}
}
}
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index j=0; j<lhs.outerSize(); ++j)
{
typename Res::RowXpr res_j(res.row(j));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res_j += (alpha*it.value()) * rhs.row(it.index());
}
}
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index j=0; j<lhs.outerSize(); ++j)
{
typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res.row(it.index()) += (alpha*it.value()) * rhs_j;
}
}
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);
}
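// At the user level these kernels implement the dense-result products of a
// sparse matrix by a dense matrix or vector; a rough sketch (names assumed):
//
//   Eigen::SparseMatrix<double> A(1000, 1000);        // filled elsewhere
//   Eigen::VectorXd x = Eigen::VectorXd::Ones(1000);
//   Eigen::VectorXd y = Eigen::VectorXd::Zero(1000);
//   y.noalias() += A * x;   // ends up in sparse_time_dense_product with alpha = 1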
} // end namespace internal
namespace internal {
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SparseShape,DenseShape,ProductType> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? 1 : Rhs::ColsAtCompileTime>::type LhsNested;
typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==0) ? 1 : Dynamic>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhs);
internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha);
}
};
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType>
: generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
{};
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SparseShape,ProductType> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
template<typename Dst>
static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
{
typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? Dynamic : 1>::type LhsNested;
typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==RowMajorBit) ? 1 : Lhs::RowsAtCompileTime>::type RhsNested;
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhs);
// transpose everything
Transpose<Dst> dstT(dst);
internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
}
};
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType>
: generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
{};
template<typename LhsT, typename RhsT, bool NeedToTranspose>
struct sparse_dense_outer_product_evaluator
{
protected:
typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1;
typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs;
typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType;
// if the actual left-hand side is a dense vector,
// then build a sparse-view so that we can seamlessly iterate over it.
typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
Lhs1, SparseView<Lhs1> >::type ActualLhs;
typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
Lhs1 const&, SparseView<Lhs1> >::type LhsArg;
typedef evaluator<ActualLhs> LhsEval;
typedef evaluator<ActualRhs> RhsEval;
typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator;
typedef typename ProdXprType::Scalar Scalar;
public:
enum {
Flags = NeedToTranspose ? RowMajorBit : 0,
CoeffReadCost = HugeCost
};
class InnerIterator : public LhsIterator
{
public:
InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer)
: LhsIterator(xprEval.m_lhsXprImpl, 0),
m_outer(outer),
m_empty(false),
m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() ))
{}
EIGEN_STRONG_INLINE Index outer() const { return m_outer; }
EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); }
EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; }
EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }
EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }
protected:
Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const
{
return rhs.coeff(outer);
}
Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse())
{
typename RhsEval::InnerIterator it(rhs, outer);
if (it && it.index()==0 && it.value()!=Scalar(0))
return it.value();
m_empty = true;
return Scalar(0);
}
Index m_outer;
bool m_empty;
Scalar m_factor;
};
sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs)
: m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
// transpose case
sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs)
: m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
protected:
const LhsArg m_lhs;
evaluator<ActualLhs> m_lhsXprImpl;
evaluator<ActualRhs> m_rhsXprImpl;
};
// sparse * dense outer product
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape>
: sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor>
{
typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base;
typedef Product<Lhs, Rhs> XprType;
typedef typename XprType::PlainObject PlainObject;
explicit product_evaluator(const XprType& xpr)
: Base(xpr.lhs(), xpr.rhs())
{}
};
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape>
: sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor>
{
typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base;
typedef Product<Lhs, Rhs> XprType;
typedef typename XprType::PlainObject PlainObject;
explicit product_evaluator(const XprType& xpr)
: Base(xpr.lhs(), xpr.rhs())
{}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSEDENSEPRODUCT_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparseBlock.h
|
.h
| 25,592
| 604
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_BLOCK_H
#define EIGEN_SPARSE_BLOCK_H
namespace Eigen {
// Subset of columns or rows
template<typename XprType, int BlockRows, int BlockCols>
class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
: public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
{
typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
typedef Block<XprType, BlockRows, BlockCols, true> BlockType;
public:
enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
protected:
enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
typedef SparseMatrixBase<BlockType> Base;
using Base::convert_index;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
inline BlockImpl(XprType& xpr, Index i)
: m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
{}
inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
{}
EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
Index nonZeros() const
{
typedef internal::evaluator<XprType> EvaluatorType;
EvaluatorType matEval(m_matrix);
Index nnz = 0;
Index end = m_outerStart + m_outerSize.value();
for(Index j=m_outerStart; j<end; ++j)
for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it)
++nnz;
return nnz;
}
inline const Scalar coeff(Index row, Index col) const
{
return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
}
inline const Scalar coeff(Index index) const
{
return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
}
inline const XprType& nestedExpression() const { return m_matrix; }
inline XprType& nestedExpression() { return m_matrix; }
Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
protected:
typename internal::ref_selector<XprType>::non_const_type m_matrix;
Index m_outerStart;
const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
protected:
// Disable assignment with clear error message.
// Note that simply removing operator= yields compilation errors with ICC+MSVC
template<typename T>
BlockImpl& operator=(const T&)
{
EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
return *this;
}
};
/***************************************************************************
* specialization for SparseMatrix
***************************************************************************/
namespace internal {
template<typename SparseMatrixType, int BlockRows, int BlockCols>
class sparse_matrix_block_impl
: public SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> >
{
typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
typedef SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> > Base;
using Base::convert_index;
public:
enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
protected:
typedef typename Base::IndexVector IndexVector;
enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
public:
inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index i)
: m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
{}
inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
{}
template<typename OtherDerived>
inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
{
typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
_NestedMatrixType& matrix = m_matrix;
// This assignment is slow if this vector set is not empty
// and/or it is not at the end of the nonzeros of the underlying matrix.
// 1 - eval to a temporary to avoid transposition and/or aliasing issues
Ref<const SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, StorageIndex> > tmp(other.derived());
eigen_internal_assert(tmp.outerSize()==m_outerSize.value());
// 2 - let's check whether there is enough allocated memory
Index nnz = tmp.nonZeros();
Index start = m_outerStart==0 ? 0 : m_matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block
Index block_size = end - start; // available room in the current block
Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;
Index free_size = m_matrix.isCompressed()
? Index(matrix.data().allocatedSize()) + block_size
: block_size;
Index tmp_start = tmp.outerIndexPtr()[0];
bool update_trailing_pointers = false;
if(nnz>free_size)
{
// realloc manually to reduce copies
typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);
internal::smart_copy(m_matrix.valuePtr(), m_matrix.valuePtr() + start, newdata.valuePtr());
internal::smart_copy(m_matrix.innerIndexPtr(), m_matrix.innerIndexPtr() + start, newdata.indexPtr());
internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, newdata.valuePtr() + start);
internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, newdata.indexPtr() + start);
internal::smart_copy(matrix.valuePtr()+end, matrix.valuePtr()+end + tail_size, newdata.valuePtr()+start+nnz);
internal::smart_copy(matrix.innerIndexPtr()+end, matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);
newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);
matrix.data().swap(newdata);
update_trailing_pointers = true;
}
else
{
if(m_matrix.isCompressed())
{
// no need to realloc, simply copy the tail at its respective position and insert tmp
matrix.data().resize(start + nnz + tail_size);
internal::smart_memmove(matrix.valuePtr()+end, matrix.valuePtr() + end+tail_size, matrix.valuePtr() + start+nnz);
internal::smart_memmove(matrix.innerIndexPtr()+end, matrix.innerIndexPtr() + end+tail_size, matrix.innerIndexPtr() + start+nnz);
update_trailing_pointers = true;
}
internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, matrix.valuePtr() + start);
internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, matrix.innerIndexPtr() + start);
}
// update outer index pointers and innerNonZeros
if(IsVectorAtCompileTime)
{
if(!m_matrix.isCompressed())
matrix.innerNonZeroPtr()[m_outerStart] = StorageIndex(nnz);
matrix.outerIndexPtr()[m_outerStart] = StorageIndex(start);
}
else
{
StorageIndex p = StorageIndex(start);
for(Index k=0; k<m_outerSize.value(); ++k)
{
StorageIndex nnz_k = internal::convert_index<StorageIndex>(tmp.innerVector(k).nonZeros());
if(!m_matrix.isCompressed())
matrix.innerNonZeroPtr()[m_outerStart+k] = nnz_k;
matrix.outerIndexPtr()[m_outerStart+k] = p;
p += nnz_k;
}
}
if(update_trailing_pointers)
{
StorageIndex offset = internal::convert_index<StorageIndex>(nnz - block_size);
for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
{
matrix.outerIndexPtr()[k] += offset;
}
}
return derived();
}
inline BlockType& operator=(const BlockType& other)
{
return operator=<BlockType>(other);
}
inline const Scalar* valuePtr() const
{ return m_matrix.valuePtr(); }
inline Scalar* valuePtr()
{ return m_matrix.valuePtr(); }
inline const StorageIndex* innerIndexPtr() const
{ return m_matrix.innerIndexPtr(); }
inline StorageIndex* innerIndexPtr()
{ return m_matrix.innerIndexPtr(); }
inline const StorageIndex* outerIndexPtr() const
{ return m_matrix.outerIndexPtr() + m_outerStart; }
inline StorageIndex* outerIndexPtr()
{ return m_matrix.outerIndexPtr() + m_outerStart; }
inline const StorageIndex* innerNonZeroPtr() const
{ return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
inline StorageIndex* innerNonZeroPtr()
{ return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }
inline Scalar& coeffRef(Index row, Index col)
{
return m_matrix.coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
}
inline const Scalar coeff(Index row, Index col) const
{
return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
}
inline const Scalar coeff(Index index) const
{
return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
}
const Scalar& lastCoeff() const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(sparse_matrix_block_impl);
eigen_assert(Base::nonZeros()>0);
if(m_matrix.isCompressed())
return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
else
return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
}
EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
inline const SparseMatrixType& nestedExpression() const { return m_matrix; }
inline SparseMatrixType& nestedExpression() { return m_matrix; }
Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
protected:
typename internal::ref_selector<SparseMatrixType>::non_const_type m_matrix;
Index m_outerStart;
const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
};
} // namespace internal
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
class BlockImpl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
: public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
{
public:
typedef _StorageIndex StorageIndex;
typedef SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
inline BlockImpl(SparseMatrixType& xpr, Index i)
: Base(xpr, i)
{}
inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: Base(xpr, startRow, startCol, blockRows, blockCols)
{}
using Base::operator=;
};
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
class BlockImpl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
: public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
{
public:
typedef _StorageIndex StorageIndex;
typedef const SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
inline BlockImpl(SparseMatrixType& xpr, Index i)
: Base(xpr, i)
{}
inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: Base(xpr, startRow, startCol, blockRows, blockCols)
{}
using Base::operator=;
private:
template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr, Index i);
template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr);
};
//----------
/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
* is col-major (resp. row-major).
*/
template<typename Derived>
typename SparseMatrixBase<Derived>::InnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer)
{ return InnerVectorReturnType(derived(), outer); }
/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
* is col-major (resp. row-major). Read-only.
*/
template<typename Derived>
const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) const
{ return ConstInnerVectorReturnType(derived(), outer); }
/** \returns the \a outerSize consecutive columns (resp. rows) of the matrix \c *this
  * starting at \a outerStart if \c *this is col-major (resp. row-major).
  */
template<typename Derived>
typename SparseMatrixBase<Derived>::InnerVectorsReturnType
SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
{
return Block<Derived,Dynamic,Dynamic,true>(derived(),
IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
}
/** \returns the \a outerSize consecutive columns (resp. rows) of the matrix \c *this
  * starting at \a outerStart if \c *this is col-major (resp. row-major). Read-only.
  */
template<typename Derived>
const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType
SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
{
return Block<const Derived,Dynamic,Dynamic,true>(derived(),
IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
}
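// Illustrative sketch: for a (default, column-major) SparseMatrix, innerVector(j)
// is its j-th column and innerVectors(j, n) the n consecutive columns starting
// at j; for a row-major matrix the same calls address rows instead.
//
//   Eigen::SparseMatrix<double> A(5, 5);                   // filled elsewhere
//   double c2norm = A.innerVector(2).norm();               // norm of column 2
//   Eigen::SparseMatrix<double> B = A.innerVectors(1, 3);  // copy of columns 1, 2, 3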
/** Generic implementation of sparse Block expression.
  * Read-only.
*/
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
: public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
{
typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
typedef SparseMatrixBase<BlockType> Base;
using Base::convert_index;
public:
enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
/** Column or Row constructor
*/
inline BlockImpl(XprType& xpr, Index i)
: m_matrix(xpr),
m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0),
m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0),
m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
m_blockCols(BlockCols==1 ? 1 : xpr.cols())
{}
/** Dynamic-size constructor
*/
inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols))
{}
inline Index rows() const { return m_blockRows.value(); }
inline Index cols() const { return m_blockCols.value(); }
inline Scalar& coeffRef(Index row, Index col)
{
return m_matrix.coeffRef(row + m_startRow.value(), col + m_startCol.value());
}
inline const Scalar coeff(Index row, Index col) const
{
return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
}
inline Scalar& coeffRef(Index index)
{
return m_matrix.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
}
inline const Scalar coeff(Index index) const
{
return m_matrix.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
}
inline const XprType& nestedExpression() const { return m_matrix; }
inline XprType& nestedExpression() { return m_matrix; }
Index startRow() const { return m_startRow.value(); }
Index startCol() const { return m_startCol.value(); }
Index blockRows() const { return m_blockRows.value(); }
Index blockCols() const { return m_blockCols.value(); }
protected:
// friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
friend struct internal::unary_evaluator<Block<XprType,BlockRows,BlockCols,InnerPanel>, internal::IteratorBased, Scalar >;
Index nonZeros() const { return Dynamic; }
typename internal::ref_selector<XprType>::non_const_type m_matrix;
const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
protected:
// Disable assignment with clear error message.
// Note that simply removing operator= yields compilation errors with ICC+MSVC
template<typename T>
BlockImpl& operator=(const T&)
{
EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
return *this;
}
};
namespace internal {
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased >
: public evaluator_base<Block<ArgType,BlockRows,BlockCols,InnerPanel> >
{
class InnerVectorInnerIterator;
class OuterVectorInnerIterator;
public:
typedef Block<ArgType,BlockRows,BlockCols,InnerPanel> XprType;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename XprType::Scalar Scalar;
enum {
IsRowMajor = XprType::IsRowMajor,
OuterVector = (BlockCols==1 && ArgType::IsRowMajor)
| // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
// revert to || as soon as not needed anymore.
(BlockRows==1 && !ArgType::IsRowMajor),
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = XprType::Flags
};
typedef typename internal::conditional<OuterVector,OuterVectorInnerIterator,InnerVectorInnerIterator>::type InnerIterator;
explicit unary_evaluator(const XprType& op)
: m_argImpl(op.nestedExpression()), m_block(op)
{}
inline Index nonZerosEstimate() const {
Index nnz = m_block.nonZeros();
if(nnz<0)
return m_argImpl.nonZerosEstimate() * m_block.size() / m_block.nestedExpression().size();
return nnz;
}
protected:
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
evaluator<ArgType> m_argImpl;
const XprType &m_block;
};
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
: public EvalIterator
{
enum { IsRowMajor = unary_evaluator::IsRowMajor };
const XprType& m_block;
Index m_end;
public:
EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer)
: EvalIterator(aEval.m_argImpl, outer + (IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
m_block(aEval.m_block),
m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
{
while( (EvalIterator::operator bool()) && (EvalIterator::index() < (IsRowMajor ? m_block.startCol() : m_block.startRow())) )
EvalIterator::operator++();
}
inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(IsRowMajor ? m_block.startCol() : m_block.startRow()); }
inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); }
inline Index row() const { return EvalIterator::row() - m_block.startRow(); }
inline Index col() const { return EvalIterator::col() - m_block.startCol(); }
inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; }
};
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
{
enum { IsRowMajor = unary_evaluator::IsRowMajor };
const unary_evaluator& m_eval;
Index m_outerPos;
const Index m_innerIndex;
Index m_end;
EvalIterator m_it;
public:
EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)
: m_eval(aEval),
m_outerPos( (IsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
m_innerIndex(IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
m_it(m_eval.m_argImpl, m_outerPos)
{
EIGEN_UNUSED_VARIABLE(outer);
eigen_assert(outer==0);
while(m_it && m_it.index() < m_innerIndex) ++m_it;
if((!m_it) || (m_it.index()!=m_innerIndex))
++(*this);
}
inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
inline Index outer() const { return 0; }
inline Index row() const { return IsRowMajor ? 0 : index(); }
inline Index col() const { return IsRowMajor ? index() : 0; }
inline Scalar value() const { return m_it.value(); }
inline Scalar& valueRef() { return m_it.valueRef(); }
inline OuterVectorInnerIterator& operator++()
{
// search next non-zero entry
while(++m_outerPos<m_end)
{
// Restart iterator at the next inner-vector:
m_it.~EvalIterator();
::new (&m_it) EvalIterator(m_eval.m_argImpl, m_outerPos);
// search for the key m_innerIndex in the current outer-vector
while(m_it && m_it.index() < m_innerIndex) ++m_it;
if(m_it && m_it.index()==m_innerIndex) break;
}
return *this;
}
inline operator bool() const { return m_outerPos < m_end; }
};
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
struct unary_evaluator<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
: evaluator<SparseCompressedBase<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
{
typedef Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
typedef evaluator<SparseCompressedBase<XprType> > Base;
explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
struct unary_evaluator<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
: evaluator<SparseCompressedBase<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
{
typedef Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
typedef evaluator<SparseCompressedBase<XprType> > Base;
explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSE_BLOCK_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/SparseAssign.h
|
.h
| 8,080
| 217
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEASSIGN_H
#define EIGEN_SPARSEASSIGN_H
namespace Eigen {
template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
{
internal::call_assignment_no_alias(derived(), other.derived());
return derived();
}
template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
{
// TODO use the evaluator mechanism
other.evalTo(derived());
return derived();
}
template<typename Derived>
template<typename OtherDerived>
inline Derived& SparseMatrixBase<Derived>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  // by default, sparse evaluations do not alias, so we can safely bypass the generic call_assignment routine
internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar,typename OtherDerived::Scalar> >
::run(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
template<typename Derived>
inline Derived& SparseMatrixBase<Derived>::operator=(const Derived& other)
{
internal::call_assignment_no_alias(derived(), other.derived());
return derived();
}
namespace internal {
template<>
struct storage_kind_to_evaluator_kind<Sparse> {
typedef IteratorBased Kind;
};
template<>
struct storage_kind_to_shape<Sparse> {
typedef SparseShape Shape;
};
struct Sparse2Sparse {};
struct Sparse2Dense {};
template<> struct AssignmentKind<SparseShape, SparseShape> { typedef Sparse2Sparse Kind; };
template<> struct AssignmentKind<SparseShape, SparseTriangularShape> { typedef Sparse2Sparse Kind; };
template<> struct AssignmentKind<DenseShape, SparseShape> { typedef Sparse2Dense Kind; };
template<> struct AssignmentKind<DenseShape, SparseTriangularShape> { typedef Sparse2Dense Kind; };
template<typename DstXprType, typename SrcXprType>
void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
{
typedef typename DstXprType::Scalar Scalar;
typedef internal::evaluator<DstXprType> DstEvaluatorType;
typedef internal::evaluator<SrcXprType> SrcEvaluatorType;
SrcEvaluatorType srcEvaluator(src);
const bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit);
const Index outerEvaluationSize = (SrcEvaluatorType::Flags&RowMajorBit) ? src.rows() : src.cols();
if ((!transpose) && src.isRValue())
{
// eval without temporary
dst.resize(src.rows(), src.cols());
dst.setZero();
dst.reserve((std::max)(src.rows(),src.cols())*2);
for (Index j=0; j<outerEvaluationSize; ++j)
{
dst.startVec(j);
for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
{
Scalar v = it.value();
dst.insertBackByOuterInner(j,it.index()) = v;
}
}
dst.finalize();
}
else
{
// eval through a temporary
eigen_assert(( ((internal::traits<DstXprType>::SupportedAccessPatterns & OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
(!((DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit)))) &&
"the transpose operation is supposed to be handled in SparseMatrix::operator=");
enum { Flip = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit) };
DstXprType temp(src.rows(), src.cols());
temp.reserve((std::max)(src.rows(),src.cols())*2);
for (Index j=0; j<outerEvaluationSize; ++j)
{
temp.startVec(j);
for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
{
Scalar v = it.value();
temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
}
}
temp.finalize();
dst = temp.markAsRValue();
}
}
// Generic Sparse to Sparse assignment
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse>
{
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
assign_sparse_to_sparse(dst.derived(), src.derived());
}
};
// Generic Sparse to Dense assignment
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense>
{
static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
{
if(internal::is_same<Functor,internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> >::value)
dst.setZero();
internal::evaluator<SrcXprType> srcEval(src);
resize_if_allowed(dst, src, func);
internal::evaluator<DstXprType> dstEval(dst);
const Index outerEvaluationSize = (internal::evaluator<SrcXprType>::Flags&RowMajorBit) ? src.rows() : src.cols();
for (Index j=0; j<outerEvaluationSize; ++j)
for (typename internal::evaluator<SrcXprType>::InnerIterator i(srcEval,j); i; ++i)
func.assignCoeff(dstEval.coeffRef(i.row(),i.col()), i.value());
}
};
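// Sketch of what the Sparse2Dense path above supports (illustrative sizes;
// written from the dispatch rules declared earlier in this file):
//
//   Eigen::SparseMatrix<double> S(3, 3);   // filled elsewhere
//   Eigen::MatrixXd D(3, 3);
//   D = S;    // plain assignment: D is zeroed, then the non-zeros of S are written
//   D += S;   // additive assignment: only the non-zeros of S are accumulated into D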
// Specialization for "dst = dec.solve(rhs)"
// NOTE we need to specialize it for Sparse2Sparse to avoid ambiguous specialization error
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Sparse2Sparse>
{
typedef Solve<DecType,RhsType> SrcXprType;
static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
src.dec()._solve_impl(src.rhs(), dst);
}
};
struct Diagonal2Sparse {};
template<> struct AssignmentKind<SparseShape,DiagonalShape> { typedef Diagonal2Sparse Kind; };
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse>
{
typedef typename DstXprType::StorageIndex StorageIndex;
typedef typename DstXprType::Scalar Scalar;
typedef Array<StorageIndex,Dynamic,1> ArrayXI;
typedef Array<Scalar,Dynamic,1> ArrayXS;
template<int Options>
static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
Index dstRows = src.rows();
Index dstCols = src.cols();
if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
dst.resize(dstRows, dstCols);
Index size = src.diagonal().size();
dst.makeCompressed();
dst.resizeNonZeros(size);
Map<ArrayXI>(dst.innerIndexPtr(), size).setLinSpaced(0,StorageIndex(size)-1);
Map<ArrayXI>(dst.outerIndexPtr(), size+1).setLinSpaced(0,StorageIndex(size));
Map<ArrayXS>(dst.valuePtr(), size) = src.diagonal();
}
template<typename DstDerived>
static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
dst.diagonal() = src.diagonal();
}
static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{ dst.diagonal() += src.diagonal(); }
static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{ dst.diagonal() -= src.diagonal(); }
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSEASSIGN_H
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/Eigen/src/SparseCore/AmbiVector.h
|
.h
| 10,670
| 379
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_AMBIVECTOR_H
#define EIGEN_AMBIVECTOR_H
namespace Eigen {
namespace internal {
/** \internal
* Hybrid sparse/dense vector class designed for intensive read-write operations.
*
* See BasicSparseLLT and SparseProduct for usage examples.
*/
template<typename _Scalar, typename _StorageIndex>
class AmbiVector
{
public:
typedef _Scalar Scalar;
typedef _StorageIndex StorageIndex;
typedef typename NumTraits<Scalar>::Real RealScalar;
explicit AmbiVector(Index size)
: m_buffer(0), m_zero(0), m_size(0), m_end(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
{
resize(size);
}
void init(double estimatedDensity);
void init(int mode);
Index nonZeros() const;
/** Specifies a sub-vector to work on */
void setBounds(Index start, Index end) { m_start = convert_index(start); m_end = convert_index(end); }
void setZero();
void restart();
Scalar& coeffRef(Index i);
Scalar& coeff(Index i);
class Iterator;
~AmbiVector() { delete[] m_buffer; }
void resize(Index size)
{
if (m_allocatedSize < size)
reallocate(size);
m_size = convert_index(size);
}
StorageIndex size() const { return m_size; }
protected:
StorageIndex convert_index(Index idx)
{
return internal::convert_index<StorageIndex>(idx);
}
void reallocate(Index size)
{
      // if the size of the vector is not too large, let's allocate a bit more than needed such
      // that we can handle a dense vector even in sparse mode.
delete[] m_buffer;
if (size<1000)
{
Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar);
m_allocatedElements = convert_index((allocSize*sizeof(Scalar))/sizeof(ListEl));
m_buffer = new Scalar[allocSize];
}
else
{
m_allocatedElements = convert_index((size*sizeof(Scalar))/sizeof(ListEl));
m_buffer = new Scalar[size];
}
m_size = convert_index(size);
m_start = 0;
m_end = m_size;
}
void reallocateSparse()
{
Index copyElements = m_allocatedElements;
m_allocatedElements = (std::min)(StorageIndex(m_allocatedElements*1.5),m_size);
Index allocSize = m_allocatedElements * sizeof(ListEl);
allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar);
Scalar* newBuffer = new Scalar[allocSize];
std::memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
delete[] m_buffer;
m_buffer = newBuffer;
}
protected:
// element type of the linked list
struct ListEl
{
StorageIndex next;
StorageIndex index;
Scalar value;
};
// used to store data in both mode
Scalar* m_buffer;
Scalar m_zero;
StorageIndex m_size;
StorageIndex m_start;
StorageIndex m_end;
StorageIndex m_allocatedSize;
StorageIndex m_allocatedElements;
StorageIndex m_mode;
// linked list mode
StorageIndex m_llStart;
StorageIndex m_llCurrent;
StorageIndex m_llSize;
};
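// Internal-usage sketch based on the interface above (AmbiVector is an
// implementation detail, not public API; the names n and consume below are
// placeholders):
//
//   internal::AmbiVector<double,int> acc(n);   // n: length of the accumulator
//   acc.init(0.05);          // estimated density selects sparse (linked-list) mode
//   acc.setZero();
//   acc.restart();
//   acc.coeffRef(3) += 1.5;  // writing to a lower index than a previous write requires restart()
//   acc.coeffRef(7) += 2.0;
//   for (internal::AmbiVector<double,int>::Iterator it(acc); it; ++it)
//     consume(it.index(), it.value());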
/** \returns the number of non-zeros in the current sub-vector */
template<typename _Scalar,typename _StorageIndex>
Index AmbiVector<_Scalar,_StorageIndex>::nonZeros() const
{
if (m_mode==IsSparse)
return m_llSize;
else
return m_end - m_start;
}
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(double estimatedDensity)
{
if (estimatedDensity>0.1)
init(IsDense);
else
init(IsSparse);
}
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(int mode)
{
m_mode = mode;
// This is only necessary in sparse mode, but we set these unconditionally to avoid some maybe-uninitialized warnings
// if (m_mode==IsSparse)
{
m_llSize = 0;
m_llStart = -1;
}
}
/** Must be called whenever we might perform a write access
* with an index smaller than the previous one.
*
* Don't worry, this function is extremely cheap.
*/
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::restart()
{
m_llCurrent = m_llStart;
}
/** Set all coefficients of current subvector to zero */
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::setZero()
{
if (m_mode==IsDense)
{
for (Index i=m_start; i<m_end; ++i)
m_buffer[i] = Scalar(0);
}
else
{
eigen_assert(m_mode==IsSparse);
m_llSize = 0;
m_llStart = -1;
}
}
template<typename _Scalar,typename _StorageIndex>
_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeffRef(Index i)
{
if (m_mode==IsDense)
return m_buffer[i];
else
{
ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
// TODO factorize the following code to reduce code generation
eigen_assert(m_mode==IsSparse);
if (m_llSize==0)
{
// this is the first element
m_llStart = 0;
m_llCurrent = 0;
++m_llSize;
llElements[0].value = Scalar(0);
llElements[0].index = convert_index(i);
llElements[0].next = -1;
return llElements[0].value;
}
else if (i<llElements[m_llStart].index)
{
// this is going to be the new first element of the list
ListEl& el = llElements[m_llSize];
el.value = Scalar(0);
el.index = convert_index(i);
el.next = m_llStart;
m_llStart = m_llSize;
++m_llSize;
m_llCurrent = m_llStart;
return el.value;
}
else
{
StorageIndex nextel = llElements[m_llCurrent].next;
eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
while (nextel >= 0 && llElements[nextel].index<=i)
{
m_llCurrent = nextel;
nextel = llElements[nextel].next;
}
if (llElements[m_llCurrent].index==i)
{
// the coefficient already exists and we found it !
return llElements[m_llCurrent].value;
}
else
{
if (m_llSize>=m_allocatedElements)
{
reallocateSparse();
llElements = reinterpret_cast<ListEl*>(m_buffer);
}
eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
// let's insert a new coefficient
ListEl& el = llElements[m_llSize];
el.value = Scalar(0);
el.index = convert_index(i);
el.next = llElements[m_llCurrent].next;
llElements[m_llCurrent].next = m_llSize;
++m_llSize;
return el.value;
}
}
}
}
template<typename _Scalar,typename _StorageIndex>
_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeff(Index i)
{
if (m_mode==IsDense)
return m_buffer[i];
else
{
ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
eigen_assert(m_mode==IsSparse);
if ((m_llSize==0) || (i<llElements[m_llStart].index))
{
return m_zero;
}
else
{
Index elid = m_llStart;
while (elid >= 0 && llElements[elid].index<i)
elid = llElements[elid].next;
// guard against running off the end of the list, and return the element that was actually found
if (elid >= 0 && llElements[elid].index==i)
return llElements[elid].value;
else
return m_zero;
}
}
}
/** Iterator over the nonzero coefficients */
template<typename _Scalar,typename _StorageIndex>
class AmbiVector<_Scalar,_StorageIndex>::Iterator
{
public:
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
/** Default constructor
* \param vec the vector on which we iterate
* \param epsilon the minimal value used to prune zero coefficients.
* In practice, all coefficients having a magnitude smaller than \a epsilon
* are skipped.
*/
explicit Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)
: m_vector(vec)
{
using std::abs;
m_epsilon = epsilon;
m_isDense = m_vector.m_mode==IsDense;
if (m_isDense)
{
m_currentEl = 0; // this is to avoid a compilation warning
m_cachedValue = 0; // this is to avoid a compilation warning
m_cachedIndex = m_vector.m_start-1;
++(*this);
}
else
{
ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
m_currentEl = m_vector.m_llStart;
while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon)
m_currentEl = llElements[m_currentEl].next;
if (m_currentEl<0)
{
m_cachedValue = 0; // this is to avoid a compilation warning
m_cachedIndex = -1;
}
else
{
m_cachedIndex = llElements[m_currentEl].index;
m_cachedValue = llElements[m_currentEl].value;
}
}
}
StorageIndex index() const { return m_cachedIndex; }
Scalar value() const { return m_cachedValue; }
operator bool() const { return m_cachedIndex>=0; }
Iterator& operator++()
{
using std::abs;
if (m_isDense)
{
do {
++m_cachedIndex;
} while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<=m_epsilon);
if (m_cachedIndex<m_vector.m_end)
m_cachedValue = m_vector.m_buffer[m_cachedIndex];
else
m_cachedIndex=-1;
}
else
{
ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
do {
m_currentEl = llElements[m_currentEl].next;
} while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon);
if (m_currentEl<0)
{
m_cachedIndex = -1;
}
else
{
m_cachedIndex = llElements[m_currentEl].index;
m_cachedValue = llElements[m_currentEl].value;
}
}
return *this;
}
protected:
const AmbiVector& m_vector; // the target vector
StorageIndex m_currentEl; // the current element in sparse/linked-list mode
RealScalar m_epsilon; // epsilon used to prune zero coefficients
StorageIndex m_cachedIndex; // current coordinate
Scalar m_cachedValue; // current value
bool m_isDense; // mode of the vector
};
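// Illustrative sketch (assumed usage, not part of Eigen) of how this Iterator is typically consumed, mirroring the
// copy-back loop in TriangularSolver.h below; the epsilon argument prunes coefficients of negligible magnitude.
// `result` and `col` are placeholders standing for a destination sparse matrix and its current column.
//
//   for (internal::AmbiVector<double,int>::Iterator it(vec, 1e-12); it; ++it)
//     result.insert(it.index(), col) = it.value();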
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_AMBIVECTOR_H
| Unknown | 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/SparseCore/SparseDiagonalProduct.h | .h | 5,808 | 139 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H
#define EIGEN_SPARSE_DIAGONAL_PRODUCT_H
namespace Eigen {
// The product of a diagonal matrix with a sparse matrix can be easily
// implemented using expression template.
// We have to consider two very different cases:
// 1 - diag * row-major sparse
// => each inner vector <=> scalar * sparse vector product
// => so we can reuse CwiseUnaryOp::InnerIterator
// 2 - diag * col-major sparse
// => each inner vector <=> densevector * sparse vector cwise product
// => again, we can reuse specialization of CwiseBinaryOp::InnerIterator
// for that particular case
// The two other cases are symmetric.
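// Illustrative sketch (not part of this header) of user-level code that reaches the two cases described above;
// the matrix sizes and values below are made up.
//
//   Eigen::VectorXd d(3); d << 1, 2, 3;                         // diagonal coefficients
//   Eigen::SparseMatrix<double,Eigen::RowMajor> Sr(3,3); Sr.insert(0,1) = 5.0;
//   Eigen::SparseMatrix<double> Sc(3,3); Sc.insert(2,0) = 7.0;  // col-major is the default
//   Eigen::SparseMatrix<double,Eigen::RowMajor> P1 = d.asDiagonal() * Sr;  // case 1: scalar * sparse inner vectors
//   Eigen::SparseMatrix<double> P2 = d.asDiagonal() * Sc;                  // case 2: dense * sparse cwise products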
namespace internal {
enum {
SDP_AsScalarProduct,
SDP_AsCwiseProduct
};
template<typename SparseXprType, typename DiagonalCoeffType, int SDP_Tag>
struct sparse_diagonal_product_evaluator;
template<typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, ProductTag, DiagonalShape, SparseShape>
: public sparse_diagonal_product_evaluator<Rhs, typename Lhs::DiagonalVectorType, Rhs::Flags&RowMajorBit?SDP_AsScalarProduct:SDP_AsCwiseProduct>
{
typedef Product<Lhs, Rhs, DefaultProduct> XprType;
enum { CoeffReadCost = HugeCost, Flags = Rhs::Flags&RowMajorBit, Alignment = 0 }; // FIXME CoeffReadCost & Flags
typedef sparse_diagonal_product_evaluator<Rhs, typename Lhs::DiagonalVectorType, Rhs::Flags&RowMajorBit?SDP_AsScalarProduct:SDP_AsCwiseProduct> Base;
explicit product_evaluator(const XprType& xpr) : Base(xpr.rhs(), xpr.lhs().diagonal()) {}
};
template<typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, ProductTag, SparseShape, DiagonalShape>
: public sparse_diagonal_product_evaluator<Lhs, Transpose<const typename Rhs::DiagonalVectorType>, Lhs::Flags&RowMajorBit?SDP_AsCwiseProduct:SDP_AsScalarProduct>
{
typedef Product<Lhs, Rhs, DefaultProduct> XprType;
enum { CoeffReadCost = HugeCost, Flags = Lhs::Flags&RowMajorBit, Alignment = 0 }; // FIXME CoeffReadCost & Flags
typedef sparse_diagonal_product_evaluator<Lhs, Transpose<const typename Rhs::DiagonalVectorType>, Lhs::Flags&RowMajorBit?SDP_AsCwiseProduct:SDP_AsScalarProduct> Base;
explicit product_evaluator(const XprType& xpr) : Base(xpr.lhs(), xpr.rhs().diagonal().transpose()) {}
};
template<typename SparseXprType, typename DiagonalCoeffType>
struct sparse_diagonal_product_evaluator<SparseXprType, DiagonalCoeffType, SDP_AsScalarProduct>
{
protected:
typedef typename evaluator<SparseXprType>::InnerIterator SparseXprInnerIterator;
typedef typename SparseXprType::Scalar Scalar;
public:
class InnerIterator : public SparseXprInnerIterator
{
public:
InnerIterator(const sparse_diagonal_product_evaluator &xprEval, Index outer)
: SparseXprInnerIterator(xprEval.m_sparseXprImpl, outer),
m_coeff(xprEval.m_diagCoeffImpl.coeff(outer))
{}
EIGEN_STRONG_INLINE Scalar value() const { return m_coeff * SparseXprInnerIterator::value(); }
protected:
typename DiagonalCoeffType::Scalar m_coeff;
};
sparse_diagonal_product_evaluator(const SparseXprType &sparseXpr, const DiagonalCoeffType &diagCoeff)
: m_sparseXprImpl(sparseXpr), m_diagCoeffImpl(diagCoeff)
{}
Index nonZerosEstimate() const { return m_sparseXprImpl.nonZerosEstimate(); }
protected:
evaluator<SparseXprType> m_sparseXprImpl;
evaluator<DiagonalCoeffType> m_diagCoeffImpl;
};
template<typename SparseXprType, typename DiagCoeffType>
struct sparse_diagonal_product_evaluator<SparseXprType, DiagCoeffType, SDP_AsCwiseProduct>
{
typedef typename SparseXprType::Scalar Scalar;
typedef typename SparseXprType::StorageIndex StorageIndex;
typedef typename nested_eval<DiagCoeffType,SparseXprType::IsRowMajor ? SparseXprType::RowsAtCompileTime
: SparseXprType::ColsAtCompileTime>::type DiagCoeffNested;
class InnerIterator
{
typedef typename evaluator<SparseXprType>::InnerIterator SparseXprIter;
public:
InnerIterator(const sparse_diagonal_product_evaluator &xprEval, Index outer)
: m_sparseIter(xprEval.m_sparseXprEval, outer), m_diagCoeffNested(xprEval.m_diagCoeffNested)
{}
inline Scalar value() const { return m_sparseIter.value() * m_diagCoeffNested.coeff(index()); }
inline StorageIndex index() const { return m_sparseIter.index(); }
inline Index outer() const { return m_sparseIter.outer(); }
inline Index col() const { return SparseXprType::IsRowMajor ? m_sparseIter.index() : m_sparseIter.outer(); }
inline Index row() const { return SparseXprType::IsRowMajor ? m_sparseIter.outer() : m_sparseIter.index(); }
EIGEN_STRONG_INLINE InnerIterator& operator++() { ++m_sparseIter; return *this; }
inline operator bool() const { return m_sparseIter; }
protected:
SparseXprIter m_sparseIter;
DiagCoeffNested m_diagCoeffNested;
};
sparse_diagonal_product_evaluator(const SparseXprType &sparseXpr, const DiagCoeffType &diagCoeff)
: m_sparseXprEval(sparseXpr), m_diagCoeffNested(diagCoeff)
{}
Index nonZerosEstimate() const { return m_sparseXprEval.nonZerosEstimate(); }
protected:
evaluator<SparseXprType> m_sparseXprEval;
DiagCoeffNested m_diagCoeffNested;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H
| Unknown | 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/SparseCore/SparseTriangularView.h | .h | 6,437 | 190 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_TRIANGULARVIEW_H
#define EIGEN_SPARSE_TRIANGULARVIEW_H
namespace Eigen {
/** \ingroup SparseCore_Module
*
* \brief Base class for a triangular part in a \b sparse matrix
*
* This class is an abstract base class of class TriangularView, and objects of type TriangularViewImpl cannot be instantiated.
* It extends class TriangularView with additional methods which are available for sparse expressions only.
*
* \sa class TriangularView, SparseMatrixBase::triangularView()
*/
template<typename MatrixType, unsigned int Mode> class TriangularViewImpl<MatrixType,Mode,Sparse>
: public SparseMatrixBase<TriangularView<MatrixType,Mode> >
{
enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit))
|| ((Mode&Upper) && (MatrixType::Flags&RowMajorBit)),
SkipLast = !SkipFirst,
SkipDiag = (Mode&ZeroDiag) ? 1 : 0,
HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
};
typedef TriangularView<MatrixType,Mode> TriangularViewType;
protected:
// dummy solve function to make TriangularView happy.
void solve() const;
typedef SparseMatrixBase<TriangularViewType> Base;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(TriangularViewType)
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
typedef typename internal::remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void _solve_impl(const RhsType &rhs, DstType &dst) const {
if(!(internal::is_same<RhsType,DstType>::value && internal::extract_data(dst) == internal::extract_data(rhs)))
dst = rhs;
this->solveInPlace(dst);
}
/** Applies the inverse of \c *this to the dense vector or matrix \a other, "in-place" */
template<typename OtherDerived> void solveInPlace(MatrixBase<OtherDerived>& other) const;
/** Applies the inverse of \c *this to the sparse vector or matrix \a other, "in-place" */
template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const;
};
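// Illustrative sketch (not part of this header) of the sparse triangular solve entry points declared above, reached
// through SparseMatrixBase::triangularView(); the values are made up so the result is easy to check by hand.
//
//   Eigen::SparseMatrix<double> A(2,2);
//   A.insert(0,0) = 2.0; A.insert(1,0) = 1.0; A.insert(1,1) = 4.0;  // lower triangular, non-zero diagonal
//   Eigen::VectorXd b(2); b << 2.0, 9.0;
//   A.triangularView<Eigen::Lower>().solveInPlace(b);               // b becomes (1, 2)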
namespace internal {
template<typename ArgType, unsigned int Mode>
struct unary_evaluator<TriangularView<ArgType,Mode>, IteratorBased>
: evaluator_base<TriangularView<ArgType,Mode> >
{
typedef TriangularView<ArgType,Mode> XprType;
protected:
typedef typename XprType::Scalar Scalar;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
enum { SkipFirst = ((Mode&Lower) && !(ArgType::Flags&RowMajorBit))
|| ((Mode&Upper) && (ArgType::Flags&RowMajorBit)),
SkipLast = !SkipFirst,
SkipDiag = (Mode&ZeroDiag) ? 1 : 0,
HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
};
public:
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType &xpr) : m_argImpl(xpr.nestedExpression()), m_arg(xpr.nestedExpression()) {}
inline Index nonZerosEstimate() const {
return m_argImpl.nonZerosEstimate();
}
class InnerIterator : public EvalIterator
{
typedef EvalIterator Base;
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& xprEval, Index outer)
: Base(xprEval.m_argImpl,outer), m_returnOne(false), m_containsDiag(Base::outer()<xprEval.m_arg.innerSize())
{
if(SkipFirst)
{
while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()<outer))
Base::operator++();
if(HasUnitDiag)
m_returnOne = m_containsDiag;
}
else if(HasUnitDiag && ((!Base::operator bool()) || Base::index()>=Base::outer()))
{
if((!SkipFirst) && Base::operator bool())
Base::operator++();
m_returnOne = m_containsDiag;
}
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
if(HasUnitDiag && m_returnOne)
m_returnOne = false;
else
{
Base::operator++();
if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer()))
{
if((!SkipFirst) && Base::operator bool())
Base::operator++();
m_returnOne = m_containsDiag;
}
}
return *this;
}
EIGEN_STRONG_INLINE operator bool() const
{
if(HasUnitDiag && m_returnOne)
return true;
if(SkipFirst) return Base::operator bool();
else
{
if (SkipDiag) return (Base::operator bool() && this->index() < this->outer());
else return (Base::operator bool() && this->index() <= this->outer());
}
}
// inline Index row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); }
// inline Index col() const { return (ArgType::Flags&RowMajorBit ? this->index() : Base::outer()); }
inline StorageIndex index() const
{
if(HasUnitDiag && m_returnOne) return internal::convert_index<StorageIndex>(Base::outer());
else return Base::index();
}
inline Scalar value() const
{
if(HasUnitDiag && m_returnOne) return Scalar(1);
else return Base::value();
}
protected:
bool m_returnOne;
bool m_containsDiag;
private:
Scalar& valueRef();
};
protected:
evaluator<ArgType> m_argImpl;
const ArgType& m_arg;
};
} // end namespace internal
template<typename Derived>
template<int Mode>
inline const TriangularView<const Derived, Mode>
SparseMatrixBase<Derived>::triangularView() const
{
return TriangularView<const Derived, Mode>(derived());
}
} // end namespace Eigen
#endif // EIGEN_SPARSE_TRIANGULARVIEW_H
| Unknown | 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/Eigen/src/SparseCore/TriangularSolver.h | .h | 9,657 | 316 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSETRIANGULARSOLVER_H
#define EIGEN_SPARSETRIANGULARSOLVER_H
namespace Eigen {
namespace internal {
template<typename Lhs, typename Rhs, int Mode,
int UpLo = (Mode & Lower)
? Lower
: (Mode & Upper)
? Upper
: -1,
int StorageOrder = int(traits<Lhs>::Flags) & RowMajorBit>
struct sparse_solve_triangular_selector;
// forward substitution, row-major
template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,RowMajor>
{
typedef typename Rhs::Scalar Scalar;
typedef evaluator<Lhs> LhsEval;
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
static void run(const Lhs& lhs, Rhs& other)
{
LhsEval lhsEval(lhs);
for(Index col=0 ; col<other.cols() ; ++col)
{
for(Index i=0; i<lhs.rows(); ++i)
{
Scalar tmp = other.coeff(i,col);
Scalar lastVal(0);
Index lastIndex = 0;
for(LhsIterator it(lhsEval, i); it; ++it)
{
lastVal = it.value();
lastIndex = it.index();
if(lastIndex==i)
break;
tmp -= lastVal * other.coeff(lastIndex,col);
}
if (Mode & UnitDiag)
other.coeffRef(i,col) = tmp;
else
{
eigen_assert(lastIndex==i);
other.coeffRef(i,col) = tmp/lastVal;
}
}
}
}
};
// backward substitution, row-major
template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
{
typedef typename Rhs::Scalar Scalar;
typedef evaluator<Lhs> LhsEval;
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
static void run(const Lhs& lhs, Rhs& other)
{
LhsEval lhsEval(lhs);
for(Index col=0 ; col<other.cols() ; ++col)
{
for(Index i=lhs.rows()-1 ; i>=0 ; --i)
{
Scalar tmp = other.coeff(i,col);
Scalar l_ii(0);
LhsIterator it(lhsEval, i);
while(it && it.index()<i)
++it;
if(!(Mode & UnitDiag))
{
eigen_assert(it && it.index()==i);
l_ii = it.value();
++it;
}
else if (it && it.index() == i)
++it;
for(; it; ++it)
{
tmp -= it.value() * other.coeff(it.index(),col);
}
if (Mode & UnitDiag) other.coeffRef(i,col) = tmp;
else other.coeffRef(i,col) = tmp/l_ii;
}
}
}
};
// forward substitution, col-major
template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,ColMajor>
{
typedef typename Rhs::Scalar Scalar;
typedef evaluator<Lhs> LhsEval;
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
static void run(const Lhs& lhs, Rhs& other)
{
LhsEval lhsEval(lhs);
for(Index col=0 ; col<other.cols() ; ++col)
{
for(Index i=0; i<lhs.cols(); ++i)
{
Scalar& tmp = other.coeffRef(i,col);
if (tmp!=Scalar(0)) // optimization when other is actually sparse
{
LhsIterator it(lhsEval, i);
while(it && it.index()<i)
++it;
if(!(Mode & UnitDiag))
{
eigen_assert(it && it.index()==i);
tmp /= it.value();
}
if (it && it.index()==i)
++it;
for(; it; ++it)
other.coeffRef(it.index(), col) -= tmp * it.value();
}
}
}
}
};
// backward substitution, col-major
template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
{
typedef typename Rhs::Scalar Scalar;
typedef evaluator<Lhs> LhsEval;
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
static void run(const Lhs& lhs, Rhs& other)
{
LhsEval lhsEval(lhs);
for(Index col=0 ; col<other.cols() ; ++col)
{
for(Index i=lhs.cols()-1; i>=0; --i)
{
Scalar& tmp = other.coeffRef(i,col);
if (tmp!=Scalar(0)) // optimization when other is actually sparse
{
if(!(Mode & UnitDiag))
{
// TODO replace this by a binary search. make sure the binary search is safe for partially sorted elements
LhsIterator it(lhsEval, i);
while(it && it.index()!=i)
++it;
eigen_assert(it && it.index()==i);
other.coeffRef(i,col) /= it.value();
}
LhsIterator it(lhsEval, i);
for(; it && it.index()<i; ++it)
other.coeffRef(it.index(), col) -= tmp * it.value();
}
}
}
}
};
} // end namespace internal
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename ExpressionType,unsigned int Mode>
template<typename OtherDerived>
void TriangularViewImpl<ExpressionType,Mode,Sparse>::solveInPlace(MatrixBase<OtherDerived>& other) const
{
eigen_assert(derived().cols() == derived().rows() && derived().cols() == other.rows());
eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
typedef typename internal::conditional<copy,
typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
OtherCopy otherCopy(other.derived());
internal::sparse_solve_triangular_selector<ExpressionType, typename internal::remove_reference<OtherCopy>::type, Mode>::run(derived().nestedExpression(), otherCopy);
if (copy)
other = otherCopy;
}
#endif
// pure sparse path
namespace internal {
template<typename Lhs, typename Rhs, int Mode,
int UpLo = (Mode & Lower)
? Lower
: (Mode & Upper)
? Upper
: -1,
int StorageOrder = int(Lhs::Flags) & (RowMajorBit)>
struct sparse_solve_triangular_sparse_selector;
// forward substitution, col-major
template<typename Lhs, typename Rhs, int Mode, int UpLo>
struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
{
typedef typename Rhs::Scalar Scalar;
typedef typename promote_index_type<typename traits<Lhs>::StorageIndex,
typename traits<Rhs>::StorageIndex>::type StorageIndex;
static void run(const Lhs& lhs, Rhs& other)
{
const bool IsLower = (UpLo==Lower);
AmbiVector<Scalar,StorageIndex> tempVector(other.rows()*2);
tempVector.setBounds(0,other.rows());
Rhs res(other.rows(), other.cols());
res.reserve(other.nonZeros());
for(Index col=0 ; col<other.cols() ; ++col)
{
// FIXME estimate number of non zeros
tempVector.init(.99/*float(other.col(col).nonZeros())/float(other.rows())*/);
tempVector.setZero();
tempVector.restart();
for (typename Rhs::InnerIterator rhsIt(other, col); rhsIt; ++rhsIt)
{
tempVector.coeffRef(rhsIt.index()) = rhsIt.value();
}
for(Index i=IsLower?0:lhs.cols()-1;
IsLower?i<lhs.cols():i>=0;
i+=IsLower?1:-1)
{
tempVector.restart();
Scalar& ci = tempVector.coeffRef(i);
if (ci!=Scalar(0))
{
// find
typename Lhs::InnerIterator it(lhs, i);
if(!(Mode & UnitDiag))
{
if (IsLower)
{
eigen_assert(it.index()==i);
ci /= it.value();
}
else
ci /= lhs.coeff(i,i);
}
tempVector.restart();
if (IsLower)
{
if (it.index()==i)
++it;
for(; it; ++it)
tempVector.coeffRef(it.index()) -= ci * it.value();
}
else
{
for(; it && it.index()<i; ++it)
tempVector.coeffRef(it.index()) -= ci * it.value();
}
}
}
Index count = 0;
// FIXME compute a reference value to filter zeros
for (typename AmbiVector<Scalar,StorageIndex>::Iterator it(tempVector/*,1e-12*/); it; ++it)
{
++ count;
// std::cerr << "fill " << it.index() << ", " << col << "\n";
// std::cout << it.value() << " ";
// FIXME use insertBack
res.insert(it.index(), col) = it.value();
}
// std::cout << "tempVector.nonZeros() == " << int(count) << " / " << (other.rows()) << "\n";
}
res.finalize();
other = res.markAsRValue();
}
};
} // end namespace internal
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename ExpressionType,unsigned int Mode>
template<typename OtherDerived>
void TriangularViewImpl<ExpressionType,Mode,Sparse>::solveInPlace(SparseMatrixBase<OtherDerived>& other) const
{
eigen_assert(derived().cols() == derived().rows() && derived().cols() == other.rows());
eigen_assert( (!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
// enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
// typedef typename internal::conditional<copy,
// typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
// OtherCopy otherCopy(other.derived());
internal::sparse_solve_triangular_sparse_selector<ExpressionType, OtherDerived, Mode>::run(derived().nestedExpression(), other.derived());
// if (copy)
// other = otherCopy;
}
#endif
} // end namespace Eigen
#endif // EIGEN_SPARSETRIANGULARSOLVER_H
| Unknown | 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/test/redux.cpp | .cpp | 7,906 | 179 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define TEST_ENABLE_TEMPORARY_TRACKING
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
// ^^ see bug 1449
#include "main.h"
template<typename MatrixType> void matrixRedux(const MatrixType& m)
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
Index rows = m.rows();
Index cols = m.cols();
MatrixType m1 = MatrixType::Random(rows, cols);
// For floating-point types the entries of m1 are uniformly distributed in [-1,1], so m1.prod() has a very small
// magnitude. This may lead to test failures if we underflow into denormals. Thus, we scale so that entries are close to 1.
MatrixType m1_for_prod = MatrixType::Ones(rows, cols) + RealScalar(0.2) * m1;
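// For instance (illustrative numbers only, not used by this test): a 50x50 matrix whose entries average 0.5 in
// magnitude has |prod()| around 0.5^2500 ~ 1e-753, which flushes to zero in float and double alike, whereas
// entries in [0.8, 1.2] keep the product within a comfortably representable range.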
VERIFY_IS_MUCH_SMALLER_THAN(MatrixType::Zero(rows, cols).sum(), Scalar(1));
VERIFY_IS_APPROX(MatrixType::Ones(rows, cols).sum(), Scalar(float(rows*cols))); // the float() here to shut up excessive MSVC warning about int->complex conversion being lossy
Scalar s(0), p(1), minc(numext::real(m1.coeff(0))), maxc(numext::real(m1.coeff(0)));
for(int j = 0; j < cols; j++)
for(int i = 0; i < rows; i++)
{
s += m1(i,j);
p *= m1_for_prod(i,j);
minc = (std::min)(numext::real(minc), numext::real(m1(i,j)));
maxc = (std::max)(numext::real(maxc), numext::real(m1(i,j)));
}
const Scalar mean = s/Scalar(RealScalar(rows*cols));
VERIFY_IS_APPROX(m1.sum(), s);
VERIFY_IS_APPROX(m1.mean(), mean);
VERIFY_IS_APPROX(m1_for_prod.prod(), p);
VERIFY_IS_APPROX(m1.real().minCoeff(), numext::real(minc));
VERIFY_IS_APPROX(m1.real().maxCoeff(), numext::real(maxc));
// test slice vectorization assuming assign is ok
Index r0 = internal::random<Index>(0,rows-1);
Index c0 = internal::random<Index>(0,cols-1);
Index r1 = internal::random<Index>(r0+1,rows)-r0;
Index c1 = internal::random<Index>(c0+1,cols)-c0;
VERIFY_IS_APPROX(m1.block(r0,c0,r1,c1).sum(), m1.block(r0,c0,r1,c1).eval().sum());
VERIFY_IS_APPROX(m1.block(r0,c0,r1,c1).mean(), m1.block(r0,c0,r1,c1).eval().mean());
VERIFY_IS_APPROX(m1_for_prod.block(r0,c0,r1,c1).prod(), m1_for_prod.block(r0,c0,r1,c1).eval().prod());
VERIFY_IS_APPROX(m1.block(r0,c0,r1,c1).real().minCoeff(), m1.block(r0,c0,r1,c1).real().eval().minCoeff());
VERIFY_IS_APPROX(m1.block(r0,c0,r1,c1).real().maxCoeff(), m1.block(r0,c0,r1,c1).real().eval().maxCoeff());
// regression for bug 1090
const int R1 = MatrixType::RowsAtCompileTime>=2 ? MatrixType::RowsAtCompileTime/2 : 6;
const int C1 = MatrixType::ColsAtCompileTime>=2 ? MatrixType::ColsAtCompileTime/2 : 6;
if(R1<=rows-r0 && C1<=cols-c0)
{
VERIFY_IS_APPROX( (m1.template block<R1,C1>(r0,c0).sum()), m1.block(r0,c0,R1,C1).sum() );
}
// test empty objects
VERIFY_IS_APPROX(m1.block(r0,c0,0,0).sum(), Scalar(0));
VERIFY_IS_APPROX(m1.block(r0,c0,0,0).prod(), Scalar(1));
// test nesting complex expression
VERIFY_EVALUATION_COUNT( (m1.matrix()*m1.matrix().transpose()).sum(), (MatrixType::IsVectorAtCompileTime && MatrixType::SizeAtCompileTime!=1 ? 0 : 1) );
Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> m2(rows,rows);
m2.setRandom();
VERIFY_EVALUATION_COUNT( ((m1.matrix()*m1.matrix().transpose())+m2).sum(),(MatrixType::IsVectorAtCompileTime && MatrixType::SizeAtCompileTime!=1 ? 0 : 1));
}
template<typename VectorType> void vectorRedux(const VectorType& w)
{
using std::abs;
typedef typename VectorType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
Index size = w.size();
VectorType v = VectorType::Random(size);
VectorType v_for_prod = VectorType::Ones(size) + Scalar(0.2) * v; // see comment above declaration of m1_for_prod
for(int i = 1; i < size; i++)
{
Scalar s(0), p(1);
RealScalar minc(numext::real(v.coeff(0))), maxc(numext::real(v.coeff(0)));
for(int j = 0; j < i; j++)
{
s += v[j];
p *= v_for_prod[j];
minc = (std::min)(minc, numext::real(v[j]));
maxc = (std::max)(maxc, numext::real(v[j]));
}
VERIFY_IS_MUCH_SMALLER_THAN(abs(s - v.head(i).sum()), Scalar(1));
VERIFY_IS_APPROX(p, v_for_prod.head(i).prod());
VERIFY_IS_APPROX(minc, v.real().head(i).minCoeff());
VERIFY_IS_APPROX(maxc, v.real().head(i).maxCoeff());
}
for(int i = 0; i < size-1; i++)
{
Scalar s(0), p(1);
RealScalar minc(numext::real(v.coeff(i))), maxc(numext::real(v.coeff(i)));
for(int j = i; j < size; j++)
{
s += v[j];
p *= v_for_prod[j];
minc = (std::min)(minc, numext::real(v[j]));
maxc = (std::max)(maxc, numext::real(v[j]));
}
VERIFY_IS_MUCH_SMALLER_THAN(abs(s - v.tail(size-i).sum()), Scalar(1));
VERIFY_IS_APPROX(p, v_for_prod.tail(size-i).prod());
VERIFY_IS_APPROX(minc, v.real().tail(size-i).minCoeff());
VERIFY_IS_APPROX(maxc, v.real().tail(size-i).maxCoeff());
}
for(int i = 0; i < size/2; i++)
{
Scalar s(0), p(1);
RealScalar minc(numext::real(v.coeff(i))), maxc(numext::real(v.coeff(i)));
for(int j = i; j < size-i; j++)
{
s += v[j];
p *= v_for_prod[j];
minc = (std::min)(minc, numext::real(v[j]));
maxc = (std::max)(maxc, numext::real(v[j]));
}
VERIFY_IS_MUCH_SMALLER_THAN(abs(s - v.segment(i, size-2*i).sum()), Scalar(1));
VERIFY_IS_APPROX(p, v_for_prod.segment(i, size-2*i).prod());
VERIFY_IS_APPROX(minc, v.real().segment(i, size-2*i).minCoeff());
VERIFY_IS_APPROX(maxc, v.real().segment(i, size-2*i).maxCoeff());
}
// test empty objects
VERIFY_IS_APPROX(v.head(0).sum(), Scalar(0));
VERIFY_IS_APPROX(v.tail(0).prod(), Scalar(1));
VERIFY_RAISES_ASSERT(v.head(0).mean());
VERIFY_RAISES_ASSERT(v.head(0).minCoeff());
VERIFY_RAISES_ASSERT(v.head(0).maxCoeff());
}
void test_redux()
{
// the max size cannot be too large, otherwise reduction operations obviously generate large errors.
int maxsize = (std::min)(100,EIGEN_TEST_MAX_SIZE);
TEST_SET_BUT_UNUSED_VARIABLE(maxsize);
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( matrixRedux(Matrix<float, 1, 1>()) );
CALL_SUBTEST_1( matrixRedux(Array<float, 1, 1>()) );
CALL_SUBTEST_2( matrixRedux(Matrix2f()) );
CALL_SUBTEST_2( matrixRedux(Array2f()) );
CALL_SUBTEST_2( matrixRedux(Array22f()) );
CALL_SUBTEST_3( matrixRedux(Matrix4d()) );
CALL_SUBTEST_3( matrixRedux(Array4d()) );
CALL_SUBTEST_3( matrixRedux(Array44d()) );
CALL_SUBTEST_4( matrixRedux(MatrixXcf(internal::random<int>(1,maxsize), internal::random<int>(1,maxsize))) );
CALL_SUBTEST_4( matrixRedux(ArrayXXcf(internal::random<int>(1,maxsize), internal::random<int>(1,maxsize))) );
CALL_SUBTEST_5( matrixRedux(MatrixXd (internal::random<int>(1,maxsize), internal::random<int>(1,maxsize))) );
CALL_SUBTEST_5( matrixRedux(ArrayXXd (internal::random<int>(1,maxsize), internal::random<int>(1,maxsize))) );
CALL_SUBTEST_6( matrixRedux(MatrixXi (internal::random<int>(1,maxsize), internal::random<int>(1,maxsize))) );
CALL_SUBTEST_6( matrixRedux(ArrayXXi (internal::random<int>(1,maxsize), internal::random<int>(1,maxsize))) );
}
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_7( vectorRedux(Vector4f()) );
CALL_SUBTEST_7( vectorRedux(Array4f()) );
CALL_SUBTEST_5( vectorRedux(VectorXd(internal::random<int>(1,maxsize))) );
CALL_SUBTEST_5( vectorRedux(ArrayXd(internal::random<int>(1,maxsize))) );
CALL_SUBTEST_8( vectorRedux(VectorXf(internal::random<int>(1,maxsize))) );
CALL_SUBTEST_8( vectorRedux(ArrayXf(internal::random<int>(1,maxsize))) );
}
}
| C++ | 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/test/numext.cpp | .cpp | 1,704 | 55 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
template<typename T>
void check_abs() {
typedef typename NumTraits<T>::Real Real;
Real zero(0);
if(NumTraits<T>::IsSigned)
VERIFY_IS_EQUAL(numext::abs(-T(1)), T(1));
VERIFY_IS_EQUAL(numext::abs(T(0)), T(0));
VERIFY_IS_EQUAL(numext::abs(T(1)), T(1));
for(int k=0; k<g_repeat*100; ++k)
{
T x = internal::random<T>();
if(!internal::is_same<T,bool>::value)
x = x/Real(2);
if(NumTraits<T>::IsSigned)
{
VERIFY_IS_EQUAL(numext::abs(x), numext::abs(-x));
VERIFY( numext::abs(-x) >= zero );
}
VERIFY( numext::abs(x) >= zero );
VERIFY_IS_APPROX( numext::abs2(x), numext::abs2(numext::abs(x)) );
}
}
void test_numext() {
CALL_SUBTEST( check_abs<bool>() );
CALL_SUBTEST( check_abs<signed char>() );
CALL_SUBTEST( check_abs<unsigned char>() );
CALL_SUBTEST( check_abs<short>() );
CALL_SUBTEST( check_abs<unsigned short>() );
CALL_SUBTEST( check_abs<int>() );
CALL_SUBTEST( check_abs<unsigned int>() );
CALL_SUBTEST( check_abs<long>() );
CALL_SUBTEST( check_abs<unsigned long>() );
CALL_SUBTEST( check_abs<half>() );
CALL_SUBTEST( check_abs<float>() );
CALL_SUBTEST( check_abs<double>() );
CALL_SUBTEST( check_abs<long double>() );
CALL_SUBTEST( check_abs<std::complex<float> >() );
CALL_SUBTEST( check_abs<std::complex<double> >() );
}
| C++ | 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/test/dontalign.cpp | .cpp | 2,221 | 63 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#if defined EIGEN_TEST_PART_1 || defined EIGEN_TEST_PART_2 || defined EIGEN_TEST_PART_3 || defined EIGEN_TEST_PART_4
#define EIGEN_DONT_ALIGN
#elif defined EIGEN_TEST_PART_5 || defined EIGEN_TEST_PART_6 || defined EIGEN_TEST_PART_7 || defined EIGEN_TEST_PART_8
#define EIGEN_DONT_ALIGN_STATICALLY
#endif
#include "main.h"
#include <Eigen/Dense>
template<typename MatrixType>
void dontalign(const MatrixType& m)
{
typedef typename MatrixType::Scalar Scalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> SquareMatrixType;
Index rows = m.rows();
Index cols = m.cols();
MatrixType a = MatrixType::Random(rows,cols);
SquareMatrixType square = SquareMatrixType::Random(rows,rows);
VectorType v = VectorType::Random(rows);
VERIFY_IS_APPROX(v, square * square.colPivHouseholderQr().solve(v));
square = square.inverse().eval();
a = square * a;
square = square*square;
v = square * v;
v = a.adjoint() * v;
VERIFY(square.determinant() != Scalar(0));
// bug 219: MapAligned() was giving an assert with EIGEN_DONT_ALIGN, because Map Flags were miscomputed
Scalar* array = internal::aligned_new<Scalar>(rows);
v = VectorType::MapAligned(array, rows);
internal::aligned_delete(array, rows);
}
void test_dontalign()
{
#if defined EIGEN_TEST_PART_1 || defined EIGEN_TEST_PART_5
dontalign(Matrix3d());
dontalign(Matrix4f());
#elif defined EIGEN_TEST_PART_2 || defined EIGEN_TEST_PART_6
dontalign(Matrix3cd());
dontalign(Matrix4cf());
#elif defined EIGEN_TEST_PART_3 || defined EIGEN_TEST_PART_7
dontalign(Matrix<float, 32, 32>());
dontalign(Matrix<std::complex<float>, 32, 32>());
#elif defined EIGEN_TEST_PART_4 || defined EIGEN_TEST_PART_8
dontalign(MatrixXd(32, 32));
dontalign(MatrixXcf(32, 32));
#endif
}
| C++ | 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/test/array_cwise.cpp | .cpp | 19,209 | 491 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
template<typename ArrayType> void array(const ArrayType& m)
{
typedef typename ArrayType::Scalar Scalar;
typedef typename ArrayType::RealScalar RealScalar;
typedef Array<Scalar, ArrayType::RowsAtCompileTime, 1> ColVectorType;
typedef Array<Scalar, 1, ArrayType::ColsAtCompileTime> RowVectorType;
Index rows = m.rows();
Index cols = m.cols();
ArrayType m1 = ArrayType::Random(rows, cols),
m2 = ArrayType::Random(rows, cols),
m3(rows, cols);
ArrayType m4 = m1; // copy constructor
VERIFY_IS_APPROX(m1, m4);
ColVectorType cv1 = ColVectorType::Random(rows);
RowVectorType rv1 = RowVectorType::Random(cols);
Scalar s1 = internal::random<Scalar>(),
s2 = internal::random<Scalar>();
// scalar addition
VERIFY_IS_APPROX(m1 + s1, s1 + m1);
VERIFY_IS_APPROX(m1 + s1, ArrayType::Constant(rows,cols,s1) + m1);
VERIFY_IS_APPROX(s1 - m1, (-m1)+s1 );
VERIFY_IS_APPROX(m1 - s1, m1 - ArrayType::Constant(rows,cols,s1));
VERIFY_IS_APPROX(s1 - m1, ArrayType::Constant(rows,cols,s1) - m1);
VERIFY_IS_APPROX((m1*Scalar(2)) - s2, (m1+m1) - ArrayType::Constant(rows,cols,s2) );
m3 = m1;
m3 += s2;
VERIFY_IS_APPROX(m3, m1 + s2);
m3 = m1;
m3 -= s1;
VERIFY_IS_APPROX(m3, m1 - s1);
// scalar operators via Maps
m3 = m1;
ArrayType::Map(m1.data(), m1.rows(), m1.cols()) -= ArrayType::Map(m2.data(), m2.rows(), m2.cols());
VERIFY_IS_APPROX(m1, m3 - m2);
m3 = m1;
ArrayType::Map(m1.data(), m1.rows(), m1.cols()) += ArrayType::Map(m2.data(), m2.rows(), m2.cols());
VERIFY_IS_APPROX(m1, m3 + m2);
m3 = m1;
ArrayType::Map(m1.data(), m1.rows(), m1.cols()) *= ArrayType::Map(m2.data(), m2.rows(), m2.cols());
VERIFY_IS_APPROX(m1, m3 * m2);
m3 = m1;
m2 = ArrayType::Random(rows,cols);
m2 = (m2==0).select(1,m2);
ArrayType::Map(m1.data(), m1.rows(), m1.cols()) /= ArrayType::Map(m2.data(), m2.rows(), m2.cols());
VERIFY_IS_APPROX(m1, m3 / m2);
// reductions
VERIFY_IS_APPROX(m1.abs().colwise().sum().sum(), m1.abs().sum());
VERIFY_IS_APPROX(m1.abs().rowwise().sum().sum(), m1.abs().sum());
using std::abs;
VERIFY_IS_MUCH_SMALLER_THAN(abs(m1.colwise().sum().sum() - m1.sum()), m1.abs().sum());
VERIFY_IS_MUCH_SMALLER_THAN(abs(m1.rowwise().sum().sum() - m1.sum()), m1.abs().sum());
if (!internal::isMuchSmallerThan(abs(m1.sum() - (m1+m2).sum()), m1.abs().sum(), test_precision<Scalar>()))
VERIFY_IS_NOT_APPROX(((m1+m2).rowwise().sum()).sum(), m1.sum());
VERIFY_IS_APPROX(m1.colwise().sum(), m1.colwise().redux(internal::scalar_sum_op<Scalar,Scalar>()));
// vector-wise ops
m3 = m1;
VERIFY_IS_APPROX(m3.colwise() += cv1, m1.colwise() + cv1);
m3 = m1;
VERIFY_IS_APPROX(m3.colwise() -= cv1, m1.colwise() - cv1);
m3 = m1;
VERIFY_IS_APPROX(m3.rowwise() += rv1, m1.rowwise() + rv1);
m3 = m1;
VERIFY_IS_APPROX(m3.rowwise() -= rv1, m1.rowwise() - rv1);
// Conversion from scalar
VERIFY_IS_APPROX((m3 = s1), ArrayType::Constant(rows,cols,s1));
VERIFY_IS_APPROX((m3 = 1), ArrayType::Constant(rows,cols,1));
VERIFY_IS_APPROX((m3.topLeftCorner(rows,cols) = 1), ArrayType::Constant(rows,cols,1));
typedef Array<Scalar,
ArrayType::RowsAtCompileTime==Dynamic?2:ArrayType::RowsAtCompileTime,
ArrayType::ColsAtCompileTime==Dynamic?2:ArrayType::ColsAtCompileTime,
ArrayType::Options> FixedArrayType;
FixedArrayType f1(s1);
VERIFY_IS_APPROX(f1, FixedArrayType::Constant(s1));
FixedArrayType f2(numext::real(s1));
VERIFY_IS_APPROX(f2, FixedArrayType::Constant(numext::real(s1)));
FixedArrayType f3((int)100*numext::real(s1));
VERIFY_IS_APPROX(f3, FixedArrayType::Constant((int)100*numext::real(s1)));
f1.setRandom();
FixedArrayType f4(f1.data());
VERIFY_IS_APPROX(f4, f1);
// pow
VERIFY_IS_APPROX(m1.pow(2), m1.square());
VERIFY_IS_APPROX(pow(m1,2), m1.square());
VERIFY_IS_APPROX(m1.pow(3), m1.cube());
VERIFY_IS_APPROX(pow(m1,3), m1.cube());
VERIFY_IS_APPROX((-m1).pow(3), -m1.cube());
VERIFY_IS_APPROX(pow(2*m1,3), 8*m1.cube());
ArrayType exponents = ArrayType::Constant(rows, cols, RealScalar(2));
VERIFY_IS_APPROX(Eigen::pow(m1,exponents), m1.square());
VERIFY_IS_APPROX(m1.pow(exponents), m1.square());
VERIFY_IS_APPROX(Eigen::pow(2*m1,exponents), 4*m1.square());
VERIFY_IS_APPROX((2*m1).pow(exponents), 4*m1.square());
VERIFY_IS_APPROX(Eigen::pow(m1,2*exponents), m1.square().square());
VERIFY_IS_APPROX(m1.pow(2*exponents), m1.square().square());
VERIFY_IS_APPROX(Eigen::pow(m1(0,0), exponents), ArrayType::Constant(rows,cols,m1(0,0)*m1(0,0)));
// Check possible conflicts with 1D ctor
typedef Array<Scalar, Dynamic, 1> OneDArrayType;
OneDArrayType o1(rows);
VERIFY(o1.size()==rows);
OneDArrayType o4((int)rows);
VERIFY(o4.size()==rows);
}
template<typename ArrayType> void comparisons(const ArrayType& m)
{
using std::abs;
typedef typename ArrayType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
Index rows = m.rows();
Index cols = m.cols();
Index r = internal::random<Index>(0, rows-1),
c = internal::random<Index>(0, cols-1);
ArrayType m1 = ArrayType::Random(rows, cols),
m2 = ArrayType::Random(rows, cols),
m3(rows, cols),
m4 = m1;
m4 = (m4.abs()==Scalar(0)).select(1,m4);
VERIFY(((m1 + Scalar(1)) > m1).all());
VERIFY(((m1 - Scalar(1)) < m1).all());
if (rows*cols>1)
{
m3 = m1;
m3(r,c) += 1;
VERIFY(! (m1 < m3).all() );
VERIFY(! (m1 > m3).all() );
}
VERIFY(!(m1 > m2 && m1 < m2).any());
VERIFY((m1 <= m2 || m1 >= m2).all());
// comparisons array to scalar
VERIFY( (m1 != (m1(r,c)+1) ).any() );
VERIFY( (m1 > (m1(r,c)-1) ).any() );
VERIFY( (m1 < (m1(r,c)+1) ).any() );
VERIFY( (m1 == m1(r,c) ).any() );
// comparisons scalar to array
VERIFY( ( (m1(r,c)+1) != m1).any() );
VERIFY( ( (m1(r,c)-1) < m1).any() );
VERIFY( ( (m1(r,c)+1) > m1).any() );
VERIFY( ( m1(r,c) == m1).any() );
// test Select
VERIFY_IS_APPROX( (m1<m2).select(m1,m2), m1.cwiseMin(m2) );
VERIFY_IS_APPROX( (m1>m2).select(m1,m2), m1.cwiseMax(m2) );
Scalar mid = (m1.cwiseAbs().minCoeff() + m1.cwiseAbs().maxCoeff())/Scalar(2);
for (int j=0; j<cols; ++j)
for (int i=0; i<rows; ++i)
m3(i,j) = abs(m1(i,j))<mid ? 0 : m1(i,j);
VERIFY_IS_APPROX( (m1.abs()<ArrayType::Constant(rows,cols,mid))
.select(ArrayType::Zero(rows,cols),m1), m3);
// shorter versions:
VERIFY_IS_APPROX( (m1.abs()<ArrayType::Constant(rows,cols,mid))
.select(0,m1), m3);
VERIFY_IS_APPROX( (m1.abs()>=ArrayType::Constant(rows,cols,mid))
.select(m1,0), m3);
// even shorter version:
VERIFY_IS_APPROX( (m1.abs()<mid).select(0,m1), m3);
// count
VERIFY(((m1.abs()+1)>RealScalar(0.1)).count() == rows*cols);
// and/or
VERIFY( (m1<RealScalar(0) && m1>RealScalar(0)).count() == 0);
VERIFY( (m1<RealScalar(0) || m1>=RealScalar(0)).count() == rows*cols);
RealScalar a = m1.abs().mean();
VERIFY( (m1<-a || m1>a).count() == (m1.abs()>a).count());
typedef Array<typename ArrayType::Index, Dynamic, 1> ArrayOfIndices;
// TODO allow colwise/rowwise for arrays
VERIFY_IS_APPROX(((m1.abs()+1)>RealScalar(0.1)).colwise().count(), ArrayOfIndices::Constant(cols,rows).transpose());
VERIFY_IS_APPROX(((m1.abs()+1)>RealScalar(0.1)).rowwise().count(), ArrayOfIndices::Constant(rows, cols));
}
template<typename ArrayType> void array_real(const ArrayType& m)
{
using std::abs;
using std::sqrt;
typedef typename ArrayType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
Index rows = m.rows();
Index cols = m.cols();
ArrayType m1 = ArrayType::Random(rows, cols),
m2 = ArrayType::Random(rows, cols),
m3(rows, cols),
m4 = m1;
m4 = (m4.abs()==Scalar(0)).select(1,m4);
Scalar s1 = internal::random<Scalar>();
// these tests are mostly to check possible compilation issues with free-functions.
VERIFY_IS_APPROX(m1.sin(), sin(m1));
VERIFY_IS_APPROX(m1.cos(), cos(m1));
VERIFY_IS_APPROX(m1.tan(), tan(m1));
VERIFY_IS_APPROX(m1.asin(), asin(m1));
VERIFY_IS_APPROX(m1.acos(), acos(m1));
VERIFY_IS_APPROX(m1.atan(), atan(m1));
VERIFY_IS_APPROX(m1.sinh(), sinh(m1));
VERIFY_IS_APPROX(m1.cosh(), cosh(m1));
VERIFY_IS_APPROX(m1.tanh(), tanh(m1));
VERIFY_IS_APPROX(m1.arg(), arg(m1));
VERIFY_IS_APPROX(m1.round(), round(m1));
VERIFY_IS_APPROX(m1.floor(), floor(m1));
VERIFY_IS_APPROX(m1.ceil(), ceil(m1));
VERIFY((m1.isNaN() == (Eigen::isnan)(m1)).all());
VERIFY((m1.isInf() == (Eigen::isinf)(m1)).all());
VERIFY((m1.isFinite() == (Eigen::isfinite)(m1)).all());
VERIFY_IS_APPROX(m1.inverse(), inverse(m1));
VERIFY_IS_APPROX(m1.abs(), abs(m1));
VERIFY_IS_APPROX(m1.abs2(), abs2(m1));
VERIFY_IS_APPROX(m1.square(), square(m1));
VERIFY_IS_APPROX(m1.cube(), cube(m1));
VERIFY_IS_APPROX(cos(m1+RealScalar(3)*m2), cos((m1+RealScalar(3)*m2).eval()));
VERIFY_IS_APPROX(m1.sign(), sign(m1));
// avoid NaNs with abs() so verification doesn't fail
m3 = m1.abs();
VERIFY_IS_APPROX(m3.sqrt(), sqrt(abs(m1)));
VERIFY_IS_APPROX(m3.rsqrt(), Scalar(1)/sqrt(abs(m1)));
VERIFY_IS_APPROX(rsqrt(m3), Scalar(1)/sqrt(abs(m1)));
VERIFY_IS_APPROX(m3.log(), log(m3));
VERIFY_IS_APPROX(m3.log1p(), log1p(m3));
VERIFY_IS_APPROX(m3.log10(), log10(m3));
VERIFY((!(m1>m2) == (m1<=m2)).all());
VERIFY_IS_APPROX(sin(m1.asin()), m1);
VERIFY_IS_APPROX(cos(m1.acos()), m1);
VERIFY_IS_APPROX(tan(m1.atan()), m1);
VERIFY_IS_APPROX(sinh(m1), 0.5*(exp(m1)-exp(-m1)));
VERIFY_IS_APPROX(cosh(m1), 0.5*(exp(m1)+exp(-m1)));
VERIFY_IS_APPROX(tanh(m1), (0.5*(exp(m1)-exp(-m1)))/(0.5*(exp(m1)+exp(-m1))));
VERIFY_IS_APPROX(arg(m1), ((m1<0).template cast<Scalar>())*std::acos(-1.0));
VERIFY((round(m1) <= ceil(m1) && round(m1) >= floor(m1)).all());
VERIFY((Eigen::isnan)((m1*0.0)/0.0).all());
VERIFY((Eigen::isinf)(m4/0.0).all());
VERIFY(((Eigen::isfinite)(m1) && (!(Eigen::isfinite)(m1*0.0/0.0)) && (!(Eigen::isfinite)(m4/0.0))).all());
VERIFY_IS_APPROX(inverse(inverse(m1)),m1);
VERIFY((abs(m1) == m1 || abs(m1) == -m1).all());
VERIFY_IS_APPROX(m3, sqrt(abs2(m1)));
VERIFY_IS_APPROX( m1.sign(), -(-m1).sign() );
VERIFY_IS_APPROX( m1*m1.sign(),m1.abs());
VERIFY_IS_APPROX(m1.sign() * m1.abs(), m1);
VERIFY_IS_APPROX(numext::abs2(numext::real(m1)) + numext::abs2(numext::imag(m1)), numext::abs2(m1));
VERIFY_IS_APPROX(numext::abs2(Eigen::real(m1)) + numext::abs2(Eigen::imag(m1)), numext::abs2(m1));
if(!NumTraits<Scalar>::IsComplex)
VERIFY_IS_APPROX(numext::real(m1), m1);
// shift argument of logarithm so that it is not zero
Scalar smallNumber = NumTraits<Scalar>::dummy_precision();
VERIFY_IS_APPROX((m3 + smallNumber).log() , log(abs(m1) + smallNumber));
VERIFY_IS_APPROX((m3 + smallNumber + 1).log() , log1p(abs(m1) + smallNumber));
VERIFY_IS_APPROX(m1.exp() * m2.exp(), exp(m1+m2));
VERIFY_IS_APPROX(m1.exp(), exp(m1));
VERIFY_IS_APPROX(m1.exp() / m2.exp(),(m1-m2).exp());
VERIFY_IS_APPROX(m3.pow(RealScalar(0.5)), m3.sqrt());
VERIFY_IS_APPROX(pow(m3,RealScalar(0.5)), m3.sqrt());
VERIFY_IS_APPROX(m3.pow(RealScalar(-0.5)), m3.rsqrt());
VERIFY_IS_APPROX(pow(m3,RealScalar(-0.5)), m3.rsqrt());
VERIFY_IS_APPROX(log10(m3), log(m3)/log(10));
// scalar by array division
const RealScalar tiny = sqrt(std::numeric_limits<RealScalar>::epsilon());
s1 += Scalar(tiny);
m1 += ArrayType::Constant(rows,cols,Scalar(tiny));
VERIFY_IS_APPROX(s1/m1, s1 * m1.inverse());
// check inplace transpose
m3 = m1;
m3.transposeInPlace();
VERIFY_IS_APPROX(m3, m1.transpose());
m3.transposeInPlace();
VERIFY_IS_APPROX(m3, m1);
}
template<typename ArrayType> void array_complex(const ArrayType& m)
{
typedef typename ArrayType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
Index rows = m.rows();
Index cols = m.cols();
ArrayType m1 = ArrayType::Random(rows, cols),
m2(rows, cols),
m4 = m1;
m4.real() = (m4.real().abs()==RealScalar(0)).select(RealScalar(1),m4.real());
m4.imag() = (m4.imag().abs()==RealScalar(0)).select(RealScalar(1),m4.imag());
Array<RealScalar, -1, -1> m3(rows, cols);
for (Index i = 0; i < m.rows(); ++i)
for (Index j = 0; j < m.cols(); ++j)
m2(i,j) = sqrt(m1(i,j));
// these tests are mostly to check possible compilation issues with free-functions.
VERIFY_IS_APPROX(m1.sin(), sin(m1));
VERIFY_IS_APPROX(m1.cos(), cos(m1));
VERIFY_IS_APPROX(m1.tan(), tan(m1));
VERIFY_IS_APPROX(m1.sinh(), sinh(m1));
VERIFY_IS_APPROX(m1.cosh(), cosh(m1));
VERIFY_IS_APPROX(m1.tanh(), tanh(m1));
VERIFY_IS_APPROX(m1.arg(), arg(m1));
VERIFY((m1.isNaN() == (Eigen::isnan)(m1)).all());
VERIFY((m1.isInf() == (Eigen::isinf)(m1)).all());
VERIFY((m1.isFinite() == (Eigen::isfinite)(m1)).all());
VERIFY_IS_APPROX(m1.inverse(), inverse(m1));
VERIFY_IS_APPROX(m1.log(), log(m1));
VERIFY_IS_APPROX(m1.log10(), log10(m1));
VERIFY_IS_APPROX(m1.abs(), abs(m1));
VERIFY_IS_APPROX(m1.abs2(), abs2(m1));
VERIFY_IS_APPROX(m1.sqrt(), sqrt(m1));
VERIFY_IS_APPROX(m1.square(), square(m1));
VERIFY_IS_APPROX(m1.cube(), cube(m1));
VERIFY_IS_APPROX(cos(m1+RealScalar(3)*m2), cos((m1+RealScalar(3)*m2).eval()));
VERIFY_IS_APPROX(m1.sign(), sign(m1));
VERIFY_IS_APPROX(m1.exp() * m2.exp(), exp(m1+m2));
VERIFY_IS_APPROX(m1.exp(), exp(m1));
VERIFY_IS_APPROX(m1.exp() / m2.exp(),(m1-m2).exp());
VERIFY_IS_APPROX(sinh(m1), 0.5*(exp(m1)-exp(-m1)));
VERIFY_IS_APPROX(cosh(m1), 0.5*(exp(m1)+exp(-m1)));
VERIFY_IS_APPROX(tanh(m1), (0.5*(exp(m1)-exp(-m1)))/(0.5*(exp(m1)+exp(-m1))));
for (Index i = 0; i < m.rows(); ++i)
for (Index j = 0; j < m.cols(); ++j)
m3(i,j) = std::atan2(m1(i,j).imag(), m1(i,j).real());
VERIFY_IS_APPROX(arg(m1), m3);
std::complex<RealScalar> zero(0.0,0.0);
VERIFY((Eigen::isnan)(m1*zero/zero).all());
#if EIGEN_COMP_MSVC
// msvc complex division is not robust
VERIFY((Eigen::isinf)(m4/RealScalar(0)).all());
#else
#if EIGEN_COMP_CLANG
// clang's complex division is notoriously broken too
if((numext::isinf)(m4(0,0)/RealScalar(0))) {
#endif
VERIFY((Eigen::isinf)(m4/zero).all());
#if EIGEN_COMP_CLANG
}
else
{
VERIFY((Eigen::isinf)(m4.real()/zero.real()).all());
}
#endif
#endif // MSVC
VERIFY(((Eigen::isfinite)(m1) && (!(Eigen::isfinite)(m1*zero/zero)) && (!(Eigen::isfinite)(m1/zero))).all());
VERIFY_IS_APPROX(inverse(inverse(m1)),m1);
VERIFY_IS_APPROX(conj(m1.conjugate()), m1);
VERIFY_IS_APPROX(abs(m1), sqrt(square(m1.real())+square(m1.imag())));
VERIFY_IS_APPROX(abs(m1), sqrt(abs2(m1)));
VERIFY_IS_APPROX(log10(m1), log(m1)/log(10));
VERIFY_IS_APPROX( m1.sign(), -(-m1).sign() );
VERIFY_IS_APPROX( m1.sign() * m1.abs(), m1);
// scalar by array division
Scalar s1 = internal::random<Scalar>();
const RealScalar tiny = std::sqrt(std::numeric_limits<RealScalar>::epsilon());
s1 += Scalar(tiny);
m1 += ArrayType::Constant(rows,cols,Scalar(tiny));
VERIFY_IS_APPROX(s1/m1, s1 * m1.inverse());
// check inplace transpose
m2 = m1;
m2.transposeInPlace();
VERIFY_IS_APPROX(m2, m1.transpose());
m2.transposeInPlace();
VERIFY_IS_APPROX(m2, m1);
}
template<typename ArrayType> void min_max(const ArrayType& m)
{
typedef typename ArrayType::Scalar Scalar;
Index rows = m.rows();
Index cols = m.cols();
ArrayType m1 = ArrayType::Random(rows, cols);
// min/max with array
Scalar maxM1 = m1.maxCoeff();
Scalar minM1 = m1.minCoeff();
VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, minM1), (m1.min)(ArrayType::Constant(rows,cols, minM1)));
VERIFY_IS_APPROX(m1, (m1.min)(ArrayType::Constant(rows,cols, maxM1)));
VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, maxM1), (m1.max)(ArrayType::Constant(rows,cols, maxM1)));
VERIFY_IS_APPROX(m1, (m1.max)(ArrayType::Constant(rows,cols, minM1)));
// min/max with scalar input
VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, minM1), (m1.min)( minM1));
VERIFY_IS_APPROX(m1, (m1.min)( maxM1));
VERIFY_IS_APPROX(ArrayType::Constant(rows,cols, maxM1), (m1.max)( maxM1));
VERIFY_IS_APPROX(m1, (m1.max)( minM1));
}
void test_array_cwise()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( array(Array<float, 1, 1>()) );
CALL_SUBTEST_2( array(Array22f()) );
CALL_SUBTEST_3( array(Array44d()) );
CALL_SUBTEST_4( array(ArrayXXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_5( array(ArrayXXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_6( array(ArrayXXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
}
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( comparisons(Array<float, 1, 1>()) );
CALL_SUBTEST_2( comparisons(Array22f()) );
CALL_SUBTEST_3( comparisons(Array44d()) );
CALL_SUBTEST_5( comparisons(ArrayXXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_6( comparisons(ArrayXXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
}
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( min_max(Array<float, 1, 1>()) );
CALL_SUBTEST_2( min_max(Array22f()) );
CALL_SUBTEST_3( min_max(Array44d()) );
CALL_SUBTEST_5( min_max(ArrayXXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_6( min_max(ArrayXXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
}
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( array_real(Array<float, 1, 1>()) );
CALL_SUBTEST_2( array_real(Array22f()) );
CALL_SUBTEST_3( array_real(Array44d()) );
CALL_SUBTEST_5( array_real(ArrayXXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
}
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_4( array_complex(ArrayXXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
}
VERIFY((internal::is_same< internal::global_math_functions_filtering_base<int>::type, int >::value));
VERIFY((internal::is_same< internal::global_math_functions_filtering_base<float>::type, float >::value));
VERIFY((internal::is_same< internal::global_math_functions_filtering_base<Array2i>::type, ArrayBase<Array2i> >::value));
typedef CwiseUnaryOp<internal::scalar_abs_op<double>, ArrayXd > Xpr;
VERIFY((internal::is_same< internal::global_math_functions_filtering_base<Xpr>::type,
ArrayBase<Xpr>
>::value));
}
| C++ | 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/test/rand.cpp | .cpp | 4,365 | 119 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
typedef long long int64;
template<typename Scalar> Scalar check_in_range(Scalar x, Scalar y)
{
Scalar r = internal::random<Scalar>(x,y);
VERIFY(r>=x);
if(y>=x)
{
VERIFY(r<=y);
}
return r;
}
template<typename Scalar> void check_all_in_range(Scalar x, Scalar y)
{
Array<int,1,Dynamic> mask(y-x+1);
mask.fill(0);
long n = (y-x+1)*32;
for(long k=0; k<n; ++k)
{
mask( check_in_range(x,y)-x )++;
}
for(Index i=0; i<mask.size(); ++i)
if(mask(i)==0)
std::cout << "WARNING: value " << x+i << " not reached." << std::endl;
VERIFY( (mask>0).all() );
}
template<typename Scalar> void check_histogram(Scalar x, Scalar y, int bins)
{
Array<int,1,Dynamic> hist(bins);
hist.fill(0);
int f = 100000;
int n = bins*f;
int64 range = int64(y)-int64(x);
int divisor = int((range+1)/bins);
assert(((range+1)%bins)==0);
for(int k=0; k<n; ++k)
{
Scalar r = check_in_range(x,y);
hist( int((int64(r)-int64(x))/divisor) )++;
}
VERIFY( (((hist.cast<double>()/double(f))-1.0).abs()<0.02).all() );
}
void test_rand()
{
long long_ref = NumTraits<long>::highest()/10;
signed char char_offset = (std::min)(g_repeat,64);
signed char short_offset = (std::min)(g_repeat,16000);
for(int i = 0; i < g_repeat*10000; i++) {
CALL_SUBTEST(check_in_range<float>(10,11));
CALL_SUBTEST(check_in_range<float>(1.24234523,1.24234523));
CALL_SUBTEST(check_in_range<float>(-1,1));
CALL_SUBTEST(check_in_range<float>(-1432.2352,-1432.2352));
CALL_SUBTEST(check_in_range<double>(10,11));
CALL_SUBTEST(check_in_range<double>(1.24234523,1.24234523));
CALL_SUBTEST(check_in_range<double>(-1,1));
CALL_SUBTEST(check_in_range<double>(-1432.2352,-1432.2352));
CALL_SUBTEST(check_in_range<int>(0,-1));
CALL_SUBTEST(check_in_range<short>(0,-1));
CALL_SUBTEST(check_in_range<long>(0,-1));
CALL_SUBTEST(check_in_range<int>(-673456,673456));
CALL_SUBTEST(check_in_range<int>(-RAND_MAX+10,RAND_MAX-10));
CALL_SUBTEST(check_in_range<short>(-24345,24345));
CALL_SUBTEST(check_in_range<long>(-long_ref,long_ref));
}
CALL_SUBTEST(check_all_in_range<signed char>(11,11));
CALL_SUBTEST(check_all_in_range<signed char>(11,11+char_offset));
CALL_SUBTEST(check_all_in_range<signed char>(-5,5));
CALL_SUBTEST(check_all_in_range<signed char>(-11-char_offset,-11));
CALL_SUBTEST(check_all_in_range<signed char>(-126,-126+char_offset));
CALL_SUBTEST(check_all_in_range<signed char>(126-char_offset,126));
CALL_SUBTEST(check_all_in_range<signed char>(-126,126));
CALL_SUBTEST(check_all_in_range<short>(11,11));
CALL_SUBTEST(check_all_in_range<short>(11,11+short_offset));
CALL_SUBTEST(check_all_in_range<short>(-5,5));
CALL_SUBTEST(check_all_in_range<short>(-11-short_offset,-11));
CALL_SUBTEST(check_all_in_range<short>(-24345,-24345+short_offset));
CALL_SUBTEST(check_all_in_range<short>(24345,24345+short_offset));
CALL_SUBTEST(check_all_in_range<int>(11,11));
CALL_SUBTEST(check_all_in_range<int>(11,11+g_repeat));
CALL_SUBTEST(check_all_in_range<int>(-5,5));
CALL_SUBTEST(check_all_in_range<int>(-11-g_repeat,-11));
CALL_SUBTEST(check_all_in_range<int>(-673456,-673456+g_repeat));
CALL_SUBTEST(check_all_in_range<int>(673456,673456+g_repeat));
CALL_SUBTEST(check_all_in_range<long>(11,11));
CALL_SUBTEST(check_all_in_range<long>(11,11+g_repeat));
CALL_SUBTEST(check_all_in_range<long>(-5,5));
CALL_SUBTEST(check_all_in_range<long>(-11-g_repeat,-11));
CALL_SUBTEST(check_all_in_range<long>(-long_ref,-long_ref+g_repeat));
CALL_SUBTEST(check_all_in_range<long>( long_ref, long_ref+g_repeat));
CALL_SUBTEST(check_histogram<int>(-5,5,11));
int bins = 100;
CALL_SUBTEST(check_histogram<int>(-3333,-3333+bins*(3333/bins)-1,bins));
bins = 1000;
CALL_SUBTEST(check_histogram<int>(-RAND_MAX+10,-RAND_MAX+10+bins*(RAND_MAX/bins)-1,bins));
CALL_SUBTEST(check_histogram<int>(-RAND_MAX+10,-int64(RAND_MAX)+10+bins*(2*int64(RAND_MAX)/bins)-1,bins));
}
| C++ | 2D | JaeHyunLee94/mpm2d | external/eigen-3.3.9/test/corners.cpp | .cpp | 6,440 | 118 |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#define COMPARE_CORNER(A,B) \
VERIFY_IS_EQUAL(matrix.A, matrix.B); \
VERIFY_IS_EQUAL(const_matrix.A, const_matrix.B);
template<typename MatrixType> void corners(const MatrixType& m)
{
Index rows = m.rows();
Index cols = m.cols();
Index r = internal::random<Index>(1,rows);
Index c = internal::random<Index>(1,cols);
MatrixType matrix = MatrixType::Random(rows,cols);
const MatrixType const_matrix = MatrixType::Random(rows,cols);
COMPARE_CORNER(topLeftCorner(r,c), block(0,0,r,c));
COMPARE_CORNER(topRightCorner(r,c), block(0,cols-c,r,c));
COMPARE_CORNER(bottomLeftCorner(r,c), block(rows-r,0,r,c));
COMPARE_CORNER(bottomRightCorner(r,c), block(rows-r,cols-c,r,c));
Index sr = internal::random<Index>(1,rows) - 1;
Index nr = internal::random<Index>(1,rows-sr);
Index sc = internal::random<Index>(1,cols) - 1;
Index nc = internal::random<Index>(1,cols-sc);
COMPARE_CORNER(topRows(r), block(0,0,r,cols));
COMPARE_CORNER(middleRows(sr,nr), block(sr,0,nr,cols));
COMPARE_CORNER(bottomRows(r), block(rows-r,0,r,cols));
COMPARE_CORNER(leftCols(c), block(0,0,rows,c));
COMPARE_CORNER(middleCols(sc,nc), block(0,sc,rows,nc));
COMPARE_CORNER(rightCols(c), block(0,cols-c,rows,c));
}
template<typename MatrixType, int CRows, int CCols, int SRows, int SCols> void corners_fixedsize()
{
MatrixType matrix = MatrixType::Random();
const MatrixType const_matrix = MatrixType::Random();
enum {
rows = MatrixType::RowsAtCompileTime,
cols = MatrixType::ColsAtCompileTime,
r = CRows,
c = CCols,
sr = SRows,
sc = SCols
};
VERIFY_IS_EQUAL((matrix.template topLeftCorner<r,c>()), (matrix.template block<r,c>(0,0)));
VERIFY_IS_EQUAL((matrix.template topRightCorner<r,c>()), (matrix.template block<r,c>(0,cols-c)));
VERIFY_IS_EQUAL((matrix.template bottomLeftCorner<r,c>()), (matrix.template block<r,c>(rows-r,0)));
VERIFY_IS_EQUAL((matrix.template bottomRightCorner<r,c>()), (matrix.template block<r,c>(rows-r,cols-c)));
VERIFY_IS_EQUAL((matrix.template topLeftCorner<r,c>()), (matrix.template topLeftCorner<r,Dynamic>(r,c)));
VERIFY_IS_EQUAL((matrix.template topRightCorner<r,c>()), (matrix.template topRightCorner<r,Dynamic>(r,c)));
VERIFY_IS_EQUAL((matrix.template bottomLeftCorner<r,c>()), (matrix.template bottomLeftCorner<r,Dynamic>(r,c)));
VERIFY_IS_EQUAL((matrix.template bottomRightCorner<r,c>()), (matrix.template bottomRightCorner<r,Dynamic>(r,c)));
VERIFY_IS_EQUAL((matrix.template topLeftCorner<r,c>()), (matrix.template topLeftCorner<Dynamic,c>(r,c)));
VERIFY_IS_EQUAL((matrix.template topRightCorner<r,c>()), (matrix.template topRightCorner<Dynamic,c>(r,c)));
VERIFY_IS_EQUAL((matrix.template bottomLeftCorner<r,c>()), (matrix.template bottomLeftCorner<Dynamic,c>(r,c)));
VERIFY_IS_EQUAL((matrix.template bottomRightCorner<r,c>()), (matrix.template bottomRightCorner<Dynamic,c>(r,c)));
VERIFY_IS_EQUAL((matrix.template topRows<r>()), (matrix.template block<r,cols>(0,0)));
VERIFY_IS_EQUAL((matrix.template middleRows<r>(sr)), (matrix.template block<r,cols>(sr,0)));
VERIFY_IS_EQUAL((matrix.template bottomRows<r>()), (matrix.template block<r,cols>(rows-r,0)));
VERIFY_IS_EQUAL((matrix.template leftCols<c>()), (matrix.template block<rows,c>(0,0)));
VERIFY_IS_EQUAL((matrix.template middleCols<c>(sc)), (matrix.template block<rows,c>(0,sc)));
VERIFY_IS_EQUAL((matrix.template rightCols<c>()), (matrix.template block<rows,c>(0,cols-c)));
VERIFY_IS_EQUAL((const_matrix.template topLeftCorner<r,c>()), (const_matrix.template block<r,c>(0,0)));
VERIFY_IS_EQUAL((const_matrix.template topRightCorner<r,c>()), (const_matrix.template block<r,c>(0,cols-c)));
VERIFY_IS_EQUAL((const_matrix.template bottomLeftCorner<r,c>()), (const_matrix.template block<r,c>(rows-r,0)));
VERIFY_IS_EQUAL((const_matrix.template bottomRightCorner<r,c>()), (const_matrix.template block<r,c>(rows-r,cols-c)));
VERIFY_IS_EQUAL((const_matrix.template topLeftCorner<r,c>()), (const_matrix.template topLeftCorner<r,Dynamic>(r,c)));
VERIFY_IS_EQUAL((const_matrix.template topRightCorner<r,c>()), (const_matrix.template topRightCorner<r,Dynamic>(r,c)));
VERIFY_IS_EQUAL((const_matrix.template bottomLeftCorner<r,c>()), (const_matrix.template bottomLeftCorner<r,Dynamic>(r,c)));
VERIFY_IS_EQUAL((const_matrix.template bottomRightCorner<r,c>()), (const_matrix.template bottomRightCorner<r,Dynamic>(r,c)));
VERIFY_IS_EQUAL((const_matrix.template topLeftCorner<r,c>()), (const_matrix.template topLeftCorner<Dynamic,c>(r,c)));
VERIFY_IS_EQUAL((const_matrix.template topRightCorner<r,c>()), (const_matrix.template topRightCorner<Dynamic,c>(r,c)));
VERIFY_IS_EQUAL((const_matrix.template bottomLeftCorner<r,c>()), (const_matrix.template bottomLeftCorner<Dynamic,c>(r,c)));
VERIFY_IS_EQUAL((const_matrix.template bottomRightCorner<r,c>()), (const_matrix.template bottomRightCorner<Dynamic,c>(r,c)));
VERIFY_IS_EQUAL((const_matrix.template topRows<r>()), (const_matrix.template block<r,cols>(0,0)));
VERIFY_IS_EQUAL((const_matrix.template middleRows<r>(sr)), (const_matrix.template block<r,cols>(sr,0)));
VERIFY_IS_EQUAL((const_matrix.template bottomRows<r>()), (const_matrix.template block<r,cols>(rows-r,0)));
VERIFY_IS_EQUAL((const_matrix.template leftCols<c>()), (const_matrix.template block<rows,c>(0,0)));
VERIFY_IS_EQUAL((const_matrix.template middleCols<c>(sc)), (const_matrix.template block<rows,c>(0,sc)));
VERIFY_IS_EQUAL((const_matrix.template rightCols<c>()), (const_matrix.template block<rows,c>(0,cols-c)));
}
void test_corners()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( corners(Matrix<float, 1, 1>()) );
CALL_SUBTEST_2( corners(Matrix4d()) );
CALL_SUBTEST_3( corners(Matrix<int,10,12>()) );
CALL_SUBTEST_4( corners(MatrixXcf(5, 7)) );
CALL_SUBTEST_5( corners(MatrixXf(21, 20)) );
CALL_SUBTEST_1(( corners_fixedsize<Matrix<float, 1, 1>, 1, 1, 0, 0>() ));
CALL_SUBTEST_2(( corners_fixedsize<Matrix4d,2,2,1,1>() ));
CALL_SUBTEST_3(( corners_fixedsize<Matrix<int,10,12>,4,7,5,2>() ));
}
}
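// --- Illustrative sketch (not part of the original test file) ---------------
// The equivalences verified above in plain usage form: every corner/rows/cols
// helper is a named shortcut for block(). corner_usage_sketch is hypothetical.
#include <Eigen/Core>
void corner_usage_sketch()
{
  Eigen::MatrixXd M = Eigen::MatrixXd::Random(6, 5);
  // topRightCorner(2,3) addresses the same coefficients as block(0, cols-3, 2, 3)
  Eigen::MatrixXd a = M.topRightCorner(2, 3);
  Eigen::MatrixXd b = M.block(0, M.cols() - 3, 2, 3);
  // a and b are equal by construction; the test above checks this for all helpers
}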
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/sparse_ref.cpp
|
.cpp
| 6,117
| 140
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
// This unit test cannot be easily written to work with EIGEN_DEFAULT_TO_ROW_MAJOR
#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
#undef EIGEN_DEFAULT_TO_ROW_MAJOR
#endif
static long int nb_temporaries;
inline void on_temporary_creation() {
// here's a great place to set a breakpoint when debugging failures in this test!
nb_temporaries++;
}
#define EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN { on_temporary_creation(); }
#include "main.h"
#include <Eigen/SparseCore>
#define VERIFY_EVALUATION_COUNT(XPR,N) {\
nb_temporaries = 0; \
CALL_SUBTEST( XPR ); \
if(nb_temporaries!=N) std::cerr << "nb_temporaries == " << nb_temporaries << "\n"; \
VERIFY( (#XPR) && nb_temporaries==N ); \
}
template<typename PlainObjectType> void check_const_correctness(const PlainObjectType&)
{
// verify that ref-to-const don't have LvalueBit
typedef typename internal::add_const<PlainObjectType>::type ConstPlainObjectType;
VERIFY( !(internal::traits<Ref<ConstPlainObjectType> >::Flags & LvalueBit) );
VERIFY( !(internal::traits<Ref<ConstPlainObjectType, Aligned> >::Flags & LvalueBit) );
VERIFY( !(Ref<ConstPlainObjectType>::Flags & LvalueBit) );
VERIFY( !(Ref<ConstPlainObjectType, Aligned>::Flags & LvalueBit) );
}
template<typename B>
EIGEN_DONT_INLINE void call_ref_1(Ref<SparseMatrix<float> > a, const B &b) { VERIFY_IS_EQUAL(a.toDense(),b.toDense()); }
template<typename B>
EIGEN_DONT_INLINE void call_ref_2(const Ref<const SparseMatrix<float> >& a, const B &b) { VERIFY_IS_EQUAL(a.toDense(),b.toDense()); }
template<typename B>
EIGEN_DONT_INLINE void call_ref_3(const Ref<const SparseMatrix<float>, StandardCompressedFormat>& a, const B &b) {
VERIFY(a.isCompressed());
VERIFY_IS_EQUAL(a.toDense(),b.toDense());
}
template<typename B>
EIGEN_DONT_INLINE void call_ref_4(Ref<SparseVector<float> > a, const B &b) { VERIFY_IS_EQUAL(a.toDense(),b.toDense()); }
template<typename B>
EIGEN_DONT_INLINE void call_ref_5(const Ref<const SparseVector<float> >& a, const B &b) { VERIFY_IS_EQUAL(a.toDense(),b.toDense()); }
void call_ref()
{
SparseMatrix<float> A = MatrixXf::Random(10,10).sparseView(0.5,1);
SparseMatrix<float,RowMajor> B = MatrixXf::Random(10,10).sparseView(0.5,1);
SparseMatrix<float> C = MatrixXf::Random(10,10).sparseView(0.5,1);
C.reserve(VectorXi::Constant(C.outerSize(), 2));
const SparseMatrix<float>& Ac(A);
Block<SparseMatrix<float> > Ab(A,0,1, 3,3);
const Block<SparseMatrix<float> > Abc(A,0,1,3,3);
SparseVector<float> vc = VectorXf::Random(10).sparseView(0.5,1);
SparseVector<float,RowMajor> vr = VectorXf::Random(10).sparseView(0.5,1);
SparseMatrix<float> AA = A*A;
VERIFY_EVALUATION_COUNT( call_ref_1(A, A), 0);
// VERIFY_EVALUATION_COUNT( call_ref_1(Ac, Ac), 0); // does not compile on purpose
VERIFY_EVALUATION_COUNT( call_ref_2(A, A), 0);
VERIFY_EVALUATION_COUNT( call_ref_3(A, A), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(A.transpose(), A.transpose()), 1);
VERIFY_EVALUATION_COUNT( call_ref_3(A.transpose(), A.transpose()), 1);
VERIFY_EVALUATION_COUNT( call_ref_2(Ac,Ac), 0);
VERIFY_EVALUATION_COUNT( call_ref_3(Ac,Ac), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(A+A,2*Ac), 1);
VERIFY_EVALUATION_COUNT( call_ref_3(A+A,2*Ac), 1);
VERIFY_EVALUATION_COUNT( call_ref_2(B, B), 1);
VERIFY_EVALUATION_COUNT( call_ref_3(B, B), 1);
VERIFY_EVALUATION_COUNT( call_ref_2(B.transpose(), B.transpose()), 0);
VERIFY_EVALUATION_COUNT( call_ref_3(B.transpose(), B.transpose()), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(A*A, AA), 3);
VERIFY_EVALUATION_COUNT( call_ref_3(A*A, AA), 3);
VERIFY(!C.isCompressed());
VERIFY_EVALUATION_COUNT( call_ref_3(C, C), 1);
Ref<SparseMatrix<float> > Ar(A);
VERIFY_IS_APPROX(Ar+Ar, A+A);
VERIFY_EVALUATION_COUNT( call_ref_1(Ar, A), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(Ar, A), 0);
Ref<SparseMatrix<float,RowMajor> > Br(B);
VERIFY_EVALUATION_COUNT( call_ref_1(Br.transpose(), Br.transpose()), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(Br, Br), 1);
VERIFY_EVALUATION_COUNT( call_ref_2(Br.transpose(), Br.transpose()), 0);
Ref<const SparseMatrix<float> > Arc(A);
// VERIFY_EVALUATION_COUNT( call_ref_1(Arc, Arc), 0); // does not compile on purpose
VERIFY_EVALUATION_COUNT( call_ref_2(Arc, Arc), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(A.middleCols(1,3), A.middleCols(1,3)), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(A.col(2), A.col(2)), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(vc, vc), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(vr.transpose(), vr.transpose()), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(vr, vr.transpose()), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(A.block(1,1,3,3), A.block(1,1,3,3)), 1); // should be 0 (allocate starts/nnz only)
VERIFY_EVALUATION_COUNT( call_ref_4(vc, vc), 0);
VERIFY_EVALUATION_COUNT( call_ref_4(vr, vr.transpose()), 0);
VERIFY_EVALUATION_COUNT( call_ref_5(vc, vc), 0);
VERIFY_EVALUATION_COUNT( call_ref_5(vr, vr.transpose()), 0);
VERIFY_EVALUATION_COUNT( call_ref_4(A.col(2), A.col(2)), 0);
VERIFY_EVALUATION_COUNT( call_ref_5(A.col(2), A.col(2)), 0);
// VERIFY_EVALUATION_COUNT( call_ref_4(A.row(2), A.row(2).transpose()), 1); // does not compile on purpose
VERIFY_EVALUATION_COUNT( call_ref_5(A.row(2), A.row(2).transpose()), 1);
}
void test_sparse_ref()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( check_const_correctness(SparseMatrix<float>()) );
CALL_SUBTEST_1( check_const_correctness(SparseMatrix<double,RowMajor>()) );
CALL_SUBTEST_2( call_ref() );
CALL_SUBTEST_3( check_const_correctness(SparseVector<float>()) );
CALL_SUBTEST_3( check_const_correctness(SparseVector<double,RowMajor>()) );
}
}
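// --- Illustrative sketch (not part of the original test file) ---------------
// What the evaluation counts above demonstrate: a compatible SparseMatrix binds
// to Ref<const SparseMatrix<float> > without creating a temporary, whereas an
// expression such as A + A is evaluated into a temporary first. The helper
// names accept_sparse_ref and sparse_ref_sketch are hypothetical.
#include <Eigen/SparseCore>
float accept_sparse_ref(const Eigen::Ref<const Eigen::SparseMatrix<float> >& m)
{
  return m.sum();
}
void sparse_ref_sketch()
{
  Eigen::SparseMatrix<float> A(10, 10);
  A.insert(0, 0) = 1.f;
  A.makeCompressed();
  accept_sparse_ref(A);      // binds directly, no temporary
  accept_sparse_ref(A + A);  // the expression is evaluated into a temporary first
}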
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/sparse_block.cpp
|
.cpp
| 11,867
| 318
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "sparse.h"
template<typename T>
typename Eigen::internal::enable_if<(T::Flags&RowMajorBit)==RowMajorBit, typename T::RowXpr>::type
innervec(T& A, Index i)
{
return A.row(i);
}
template<typename T>
typename Eigen::internal::enable_if<(T::Flags&RowMajorBit)==0, typename T::ColXpr>::type
innervec(T& A, Index i)
{
return A.col(i);
}
template<typename SparseMatrixType> void sparse_block(const SparseMatrixType& ref)
{
const Index rows = ref.rows();
const Index cols = ref.cols();
const Index inner = ref.innerSize();
const Index outer = ref.outerSize();
typedef typename SparseMatrixType::Scalar Scalar;
typedef typename SparseMatrixType::StorageIndex StorageIndex;
double density = (std::max)(8./(rows*cols), 0.01);
typedef Matrix<Scalar,Dynamic,Dynamic,SparseMatrixType::IsRowMajor?RowMajor:ColMajor> DenseMatrix;
typedef Matrix<Scalar,Dynamic,1> DenseVector;
typedef Matrix<Scalar,1,Dynamic> RowDenseVector;
typedef SparseVector<Scalar> SparseVectorType;
Scalar s1 = internal::random<Scalar>();
{
SparseMatrixType m(rows, cols);
DenseMatrix refMat = DenseMatrix::Zero(rows, cols);
initSparse<Scalar>(density, refMat, m);
VERIFY_IS_APPROX(m, refMat);
// test InnerIterators and Block expressions
for (int t=0; t<10; ++t)
{
Index j = internal::random<Index>(0,cols-2);
Index i = internal::random<Index>(0,rows-2);
Index w = internal::random<Index>(1,cols-j);
Index h = internal::random<Index>(1,rows-i);
VERIFY_IS_APPROX(m.block(i,j,h,w), refMat.block(i,j,h,w));
for(Index c=0; c<w; c++)
{
VERIFY_IS_APPROX(m.block(i,j,h,w).col(c), refMat.block(i,j,h,w).col(c));
for(Index r=0; r<h; r++)
{
VERIFY_IS_APPROX(m.block(i,j,h,w).col(c).coeff(r), refMat.block(i,j,h,w).col(c).coeff(r));
VERIFY_IS_APPROX(m.block(i,j,h,w).coeff(r,c), refMat.block(i,j,h,w).coeff(r,c));
}
}
for(Index r=0; r<h; r++)
{
VERIFY_IS_APPROX(m.block(i,j,h,w).row(r), refMat.block(i,j,h,w).row(r));
for(Index c=0; c<w; c++)
{
VERIFY_IS_APPROX(m.block(i,j,h,w).row(r).coeff(c), refMat.block(i,j,h,w).row(r).coeff(c));
VERIFY_IS_APPROX(m.block(i,j,h,w).coeff(r,c), refMat.block(i,j,h,w).coeff(r,c));
}
}
VERIFY_IS_APPROX(m.middleCols(j,w), refMat.middleCols(j,w));
VERIFY_IS_APPROX(m.middleRows(i,h), refMat.middleRows(i,h));
for(Index r=0; r<h; r++)
{
VERIFY_IS_APPROX(m.middleCols(j,w).row(r), refMat.middleCols(j,w).row(r));
VERIFY_IS_APPROX(m.middleRows(i,h).row(r), refMat.middleRows(i,h).row(r));
for(Index c=0; c<w; c++)
{
VERIFY_IS_APPROX(m.col(c).coeff(r), refMat.col(c).coeff(r));
VERIFY_IS_APPROX(m.row(r).coeff(c), refMat.row(r).coeff(c));
VERIFY_IS_APPROX(m.middleCols(j,w).coeff(r,c), refMat.middleCols(j,w).coeff(r,c));
VERIFY_IS_APPROX(m.middleRows(i,h).coeff(r,c), refMat.middleRows(i,h).coeff(r,c));
if(m.middleCols(j,w).coeff(r,c) != Scalar(0))
{
VERIFY_IS_APPROX(m.middleCols(j,w).coeffRef(r,c), refMat.middleCols(j,w).coeff(r,c));
}
if(m.middleRows(i,h).coeff(r,c) != Scalar(0))
{
VERIFY_IS_APPROX(m.middleRows(i,h).coeff(r,c), refMat.middleRows(i,h).coeff(r,c));
}
}
}
for(Index c=0; c<w; c++)
{
VERIFY_IS_APPROX(m.middleCols(j,w).col(c), refMat.middleCols(j,w).col(c));
VERIFY_IS_APPROX(m.middleRows(i,h).col(c), refMat.middleRows(i,h).col(c));
}
}
for(Index c=0; c<cols; c++)
{
VERIFY_IS_APPROX(m.col(c) + m.col(c), (m + m).col(c));
VERIFY_IS_APPROX(m.col(c) + m.col(c), refMat.col(c) + refMat.col(c));
}
for(Index r=0; r<rows; r++)
{
VERIFY_IS_APPROX(m.row(r) + m.row(r), (m + m).row(r));
VERIFY_IS_APPROX(m.row(r) + m.row(r), refMat.row(r) + refMat.row(r));
}
}
// test innerVector()
{
DenseMatrix refMat2 = DenseMatrix::Zero(rows, cols);
SparseMatrixType m2(rows, cols);
initSparse<Scalar>(density, refMat2, m2);
Index j0 = internal::random<Index>(0,outer-1);
Index j1 = internal::random<Index>(0,outer-1);
Index r0 = internal::random<Index>(0,rows-1);
Index c0 = internal::random<Index>(0,cols-1);
VERIFY_IS_APPROX(m2.innerVector(j0), innervec(refMat2,j0));
VERIFY_IS_APPROX(m2.innerVector(j0)+m2.innerVector(j1), innervec(refMat2,j0)+innervec(refMat2,j1));
m2.innerVector(j0) *= Scalar(2);
innervec(refMat2,j0) *= Scalar(2);
VERIFY_IS_APPROX(m2, refMat2);
m2.row(r0) *= Scalar(3);
refMat2.row(r0) *= Scalar(3);
VERIFY_IS_APPROX(m2, refMat2);
m2.col(c0) *= Scalar(4);
refMat2.col(c0) *= Scalar(4);
VERIFY_IS_APPROX(m2, refMat2);
m2.row(r0) /= Scalar(3);
refMat2.row(r0) /= Scalar(3);
VERIFY_IS_APPROX(m2, refMat2);
m2.col(c0) /= Scalar(4);
refMat2.col(c0) /= Scalar(4);
VERIFY_IS_APPROX(m2, refMat2);
SparseVectorType v1;
VERIFY_IS_APPROX(v1 = m2.col(c0) * 4, refMat2.col(c0)*4);
VERIFY_IS_APPROX(v1 = m2.row(r0) * 4, refMat2.row(r0).transpose()*4);
SparseMatrixType m3(rows,cols);
m3.reserve(VectorXi::Constant(outer,int(inner/2)));
for(Index j=0; j<outer; ++j)
for(Index k=0; k<(std::min)(j,inner); ++k)
m3.insertByOuterInner(j,k) = internal::convert_index<StorageIndex>(k+1);
for(Index j=0; j<(std::min)(outer, inner); ++j)
{
VERIFY(j==numext::real(m3.innerVector(j).nonZeros()));
if(j>0)
VERIFY(j==numext::real(m3.innerVector(j).lastCoeff()));
}
m3.makeCompressed();
for(Index j=0; j<(std::min)(outer, inner); ++j)
{
VERIFY(j==numext::real(m3.innerVector(j).nonZeros()));
if(j>0)
VERIFY(j==numext::real(m3.innerVector(j).lastCoeff()));
}
VERIFY(m3.innerVector(j0).nonZeros() == m3.transpose().innerVector(j0).nonZeros());
// m2.innerVector(j0) = 2*m2.innerVector(j1);
// refMat2.col(j0) = 2*refMat2.col(j1);
// VERIFY_IS_APPROX(m2, refMat2);
}
// test innerVectors()
{
DenseMatrix refMat2 = DenseMatrix::Zero(rows, cols);
SparseMatrixType m2(rows, cols);
initSparse<Scalar>(density, refMat2, m2);
if(internal::random<float>(0,1)>0.5f) m2.makeCompressed();
Index j0 = internal::random<Index>(0,outer-2);
Index j1 = internal::random<Index>(0,outer-2);
Index n0 = internal::random<Index>(1,outer-(std::max)(j0,j1));
if(SparseMatrixType::IsRowMajor)
VERIFY_IS_APPROX(m2.innerVectors(j0,n0), refMat2.block(j0,0,n0,cols));
else
VERIFY_IS_APPROX(m2.innerVectors(j0,n0), refMat2.block(0,j0,rows,n0));
if(SparseMatrixType::IsRowMajor)
VERIFY_IS_APPROX(m2.innerVectors(j0,n0)+m2.innerVectors(j1,n0),
refMat2.middleRows(j0,n0)+refMat2.middleRows(j1,n0));
else
VERIFY_IS_APPROX(m2.innerVectors(j0,n0)+m2.innerVectors(j1,n0),
refMat2.block(0,j0,rows,n0)+refMat2.block(0,j1,rows,n0));
VERIFY_IS_APPROX(m2, refMat2);
VERIFY(m2.innerVectors(j0,n0).nonZeros() == m2.transpose().innerVectors(j0,n0).nonZeros());
m2.innerVectors(j0,n0) = m2.innerVectors(j0,n0) + m2.innerVectors(j1,n0);
if(SparseMatrixType::IsRowMajor)
refMat2.middleRows(j0,n0) = (refMat2.middleRows(j0,n0) + refMat2.middleRows(j1,n0)).eval();
else
refMat2.middleCols(j0,n0) = (refMat2.middleCols(j0,n0) + refMat2.middleCols(j1,n0)).eval();
VERIFY_IS_APPROX(m2, refMat2);
}
// test generic blocks
{
DenseMatrix refMat2 = DenseMatrix::Zero(rows, cols);
SparseMatrixType m2(rows, cols);
initSparse<Scalar>(density, refMat2, m2);
Index j0 = internal::random<Index>(0,outer-2);
Index j1 = internal::random<Index>(0,outer-2);
Index n0 = internal::random<Index>(1,outer-(std::max)(j0,j1));
if(SparseMatrixType::IsRowMajor)
VERIFY_IS_APPROX(m2.block(j0,0,n0,cols), refMat2.block(j0,0,n0,cols));
else
VERIFY_IS_APPROX(m2.block(0,j0,rows,n0), refMat2.block(0,j0,rows,n0));
if(SparseMatrixType::IsRowMajor)
VERIFY_IS_APPROX(m2.block(j0,0,n0,cols)+m2.block(j1,0,n0,cols),
refMat2.block(j0,0,n0,cols)+refMat2.block(j1,0,n0,cols));
else
VERIFY_IS_APPROX(m2.block(0,j0,rows,n0)+m2.block(0,j1,rows,n0),
refMat2.block(0,j0,rows,n0)+refMat2.block(0,j1,rows,n0));
Index i = internal::random<Index>(0,m2.outerSize()-1);
if(SparseMatrixType::IsRowMajor) {
m2.innerVector(i) = m2.innerVector(i) * s1;
refMat2.row(i) = refMat2.row(i) * s1;
VERIFY_IS_APPROX(m2,refMat2);
} else {
m2.innerVector(i) = m2.innerVector(i) * s1;
refMat2.col(i) = refMat2.col(i) * s1;
VERIFY_IS_APPROX(m2,refMat2);
}
Index r0 = internal::random<Index>(0,rows-2);
Index c0 = internal::random<Index>(0,cols-2);
Index r1 = internal::random<Index>(1,rows-r0);
Index c1 = internal::random<Index>(1,cols-c0);
VERIFY_IS_APPROX(DenseVector(m2.col(c0)), refMat2.col(c0));
VERIFY_IS_APPROX(m2.col(c0), refMat2.col(c0));
VERIFY_IS_APPROX(RowDenseVector(m2.row(r0)), refMat2.row(r0));
VERIFY_IS_APPROX(m2.row(r0), refMat2.row(r0));
VERIFY_IS_APPROX(m2.block(r0,c0,r1,c1), refMat2.block(r0,c0,r1,c1));
VERIFY_IS_APPROX((2*m2).block(r0,c0,r1,c1), (2*refMat2).block(r0,c0,r1,c1));
if(m2.nonZeros()>0)
{
VERIFY_IS_APPROX(m2, refMat2);
SparseMatrixType m3(rows, cols);
DenseMatrix refMat3(rows, cols); refMat3.setZero();
Index n = internal::random<Index>(1,10);
for(Index k=0; k<n; ++k)
{
Index o1 = internal::random<Index>(0,outer-1);
Index o2 = internal::random<Index>(0,outer-1);
if(SparseMatrixType::IsRowMajor)
{
m3.innerVector(o1) = m2.row(o2);
refMat3.row(o1) = refMat2.row(o2);
}
else
{
m3.innerVector(o1) = m2.col(o2);
refMat3.col(o1) = refMat2.col(o2);
}
if(internal::random<bool>())
m3.makeCompressed();
}
if(m3.nonZeros()>0)
VERIFY_IS_APPROX(m3, refMat3);
}
}
}
void test_sparse_block()
{
for(int i = 0; i < g_repeat; i++) {
int r = Eigen::internal::random<int>(1,200), c = Eigen::internal::random<int>(1,200);
if(Eigen::internal::random<int>(0,4) == 0) {
r = c; // check square matrices in 25% of tries
}
EIGEN_UNUSED_VARIABLE(r+c);
CALL_SUBTEST_1(( sparse_block(SparseMatrix<double>(1, 1)) ));
CALL_SUBTEST_1(( sparse_block(SparseMatrix<double>(8, 8)) ));
CALL_SUBTEST_1(( sparse_block(SparseMatrix<double>(r, c)) ));
CALL_SUBTEST_2(( sparse_block(SparseMatrix<std::complex<double>, ColMajor>(r, c)) ));
CALL_SUBTEST_2(( sparse_block(SparseMatrix<std::complex<double>, RowMajor>(r, c)) ));
CALL_SUBTEST_3(( sparse_block(SparseMatrix<double,ColMajor,long int>(r, c)) ));
CALL_SUBTEST_3(( sparse_block(SparseMatrix<double,RowMajor,long int>(r, c)) ));
r = Eigen::internal::random<int>(1,100);
c = Eigen::internal::random<int>(1,100);
if(Eigen::internal::random<int>(0,4) == 0) {
r = c; // check square matrices in 25% of tries
}
CALL_SUBTEST_4(( sparse_block(SparseMatrix<double,ColMajor,short int>(short(r), short(c))) ));
CALL_SUBTEST_4(( sparse_block(SparseMatrix<double,RowMajor,short int>(short(r), short(c))) ));
}
}
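// --- Illustrative sketch (not part of the original test file) ---------------
// innerVector(i) is the i-th column of a column-major sparse matrix (the i-th
// row of a row-major one) and is writable, as the block tests above rely on.
// inner_vector_sketch is a hypothetical helper name.
#include <Eigen/SparseCore>
void inner_vector_sketch()
{
  Eigen::SparseMatrix<double> S(4, 4);  // column-major by default
  S.insert(1, 2) = 3.0;
  S.makeCompressed();
  S.innerVector(2) *= 2.0;              // scales column 2 in place
  double colsum = S.col(2).sum();       // 6.0 for this example
  (void)colsum;
}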
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/householder.cpp
|
.cpp
| 5,940
| 138
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <Eigen/QR>
template<typename MatrixType> void householder(const MatrixType& m)
{
static bool even = true;
even = !even;
/* this test covers the following files:
Householder.h
*/
Index rows = m.rows();
Index cols = m.cols();
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
typedef Matrix<Scalar, internal::decrement_size<MatrixType::RowsAtCompileTime>::ret, 1> EssentialVectorType;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> SquareMatrixType;
typedef Matrix<Scalar, Dynamic, MatrixType::ColsAtCompileTime> HBlockMatrixType;
typedef Matrix<Scalar, Dynamic, 1> HCoeffsVectorType;
typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, MatrixType::RowsAtCompileTime> TMatrixType;
Matrix<Scalar, EIGEN_SIZE_MAX(MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime), 1> _tmp((std::max)(rows,cols));
Scalar* tmp = &_tmp.coeffRef(0,0);
Scalar beta;
RealScalar alpha;
EssentialVectorType essential;
VectorType v1 = VectorType::Random(rows), v2;
v2 = v1;
v1.makeHouseholder(essential, beta, alpha);
v1.applyHouseholderOnTheLeft(essential,beta,tmp);
VERIFY_IS_APPROX(v1.norm(), v2.norm());
if(rows>=2) VERIFY_IS_MUCH_SMALLER_THAN(v1.tail(rows-1).norm(), v1.norm());
v1 = VectorType::Random(rows);
v2 = v1;
v1.applyHouseholderOnTheLeft(essential,beta,tmp);
VERIFY_IS_APPROX(v1.norm(), v2.norm());
MatrixType m1(rows, cols),
m2(rows, cols);
v1 = VectorType::Random(rows);
if(even) v1.tail(rows-1).setZero();
m1.colwise() = v1;
m2 = m1;
m1.col(0).makeHouseholder(essential, beta, alpha);
m1.applyHouseholderOnTheLeft(essential,beta,tmp);
VERIFY_IS_APPROX(m1.norm(), m2.norm());
if(rows>=2) VERIFY_IS_MUCH_SMALLER_THAN(m1.block(1,0,rows-1,cols).norm(), m1.norm());
VERIFY_IS_MUCH_SMALLER_THAN(numext::imag(m1(0,0)), numext::real(m1(0,0)));
VERIFY_IS_APPROX(numext::real(m1(0,0)), alpha);
v1 = VectorType::Random(rows);
if(even) v1.tail(rows-1).setZero();
SquareMatrixType m3(rows,rows), m4(rows,rows);
m3.rowwise() = v1.transpose();
m4 = m3;
m3.row(0).makeHouseholder(essential, beta, alpha);
m3.applyHouseholderOnTheRight(essential,beta,tmp);
VERIFY_IS_APPROX(m3.norm(), m4.norm());
if(rows>=2) VERIFY_IS_MUCH_SMALLER_THAN(m3.block(0,1,rows,rows-1).norm(), m3.norm());
VERIFY_IS_MUCH_SMALLER_THAN(numext::imag(m3(0,0)), numext::real(m3(0,0)));
VERIFY_IS_APPROX(numext::real(m3(0,0)), alpha);
// test householder sequence on the left with a shift
Index shift = internal::random<Index>(0, std::max<Index>(rows-2,0));
Index brows = rows - shift;
m1.setRandom(rows, cols);
HBlockMatrixType hbm = m1.block(shift,0,brows,cols);
HouseholderQR<HBlockMatrixType> qr(hbm);
m2 = m1;
m2.block(shift,0,brows,cols) = qr.matrixQR();
HCoeffsVectorType hc = qr.hCoeffs().conjugate();
HouseholderSequence<MatrixType, HCoeffsVectorType> hseq(m2, hc);
hseq.setLength(hc.size()).setShift(shift);
VERIFY(hseq.length() == hc.size());
VERIFY(hseq.shift() == shift);
MatrixType m5 = m2;
m5.block(shift,0,brows,cols).template triangularView<StrictlyLower>().setZero();
VERIFY_IS_APPROX(hseq * m5, m1); // test applying hseq directly
m3 = hseq;
VERIFY_IS_APPROX(m3 * m5, m1); // test evaluating hseq to a dense matrix, then applying
SquareMatrixType hseq_mat = hseq;
SquareMatrixType hseq_mat_conj = hseq.conjugate();
SquareMatrixType hseq_mat_adj = hseq.adjoint();
SquareMatrixType hseq_mat_trans = hseq.transpose();
SquareMatrixType m6 = SquareMatrixType::Random(rows, rows);
VERIFY_IS_APPROX(hseq_mat.adjoint(), hseq_mat_adj);
VERIFY_IS_APPROX(hseq_mat.conjugate(), hseq_mat_conj);
VERIFY_IS_APPROX(hseq_mat.transpose(), hseq_mat_trans);
VERIFY_IS_APPROX(hseq_mat * m6, hseq_mat * m6);
VERIFY_IS_APPROX(hseq_mat.adjoint() * m6, hseq_mat_adj * m6);
VERIFY_IS_APPROX(hseq_mat.conjugate() * m6, hseq_mat_conj * m6);
VERIFY_IS_APPROX(hseq_mat.transpose() * m6, hseq_mat_trans * m6);
VERIFY_IS_APPROX(m6 * hseq_mat, m6 * hseq_mat);
VERIFY_IS_APPROX(m6 * hseq_mat.adjoint(), m6 * hseq_mat_adj);
VERIFY_IS_APPROX(m6 * hseq_mat.conjugate(), m6 * hseq_mat_conj);
VERIFY_IS_APPROX(m6 * hseq_mat.transpose(), m6 * hseq_mat_trans);
// test householder sequence on the right with a shift
TMatrixType tm2 = m2.transpose();
HouseholderSequence<TMatrixType, HCoeffsVectorType, OnTheRight> rhseq(tm2, hc);
rhseq.setLength(hc.size()).setShift(shift);
VERIFY_IS_APPROX(rhseq * m5, m1); // test applying rhseq directly
m3 = rhseq;
VERIFY_IS_APPROX(m3 * m5, m1); // test evaluating rhseq to a dense matrix, then applying
}
void test_householder()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( householder(Matrix<double,2,2>()) );
CALL_SUBTEST_2( householder(Matrix<float,2,3>()) );
CALL_SUBTEST_3( householder(Matrix<double,3,5>()) );
CALL_SUBTEST_4( householder(Matrix<float,4,4>()) );
CALL_SUBTEST_5( householder(MatrixXd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE),internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_6( householder(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE),internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_7( householder(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE),internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_8( householder(Matrix<double,1,1>()) );
}
}
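// --- Illustrative sketch (not part of the original test file) ---------------
// The basic makeHouseholder / applyHouseholderOnTheLeft workflow verified
// above: the reflector maps v onto a multiple of the first unit vector while
// preserving its norm. householder_sketch is a hypothetical helper name.
#include <Eigen/Core>
#include <Eigen/Householder>
void householder_sketch()
{
  Eigen::VectorXd v = Eigen::VectorXd::Random(5);
  Eigen::VectorXd essential;   // receives the essential part of the reflector
  double tau, alpha;
  v.makeHouseholder(essential, tau, alpha);
  // applying the reflector to v itself zeroes v(1..4) and keeps v.norm()
  double workspace[1];         // needs one entry per column of the target (here 1)
  v.applyHouseholderOnTheLeft(essential, tau, workspace);
}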
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/sparseqr.cpp
|
.cpp
| 3,815
| 129
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Desire Nuentsa Wakam <desire.nuentsa_wakam@inria.fr>
// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "sparse.h"
#include <Eigen/SparseQR>
template<typename MatrixType,typename DenseMat>
int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows = 300, int maxCols = 150)
{
eigen_assert(maxRows >= maxCols);
typedef typename MatrixType::Scalar Scalar;
int rows = internal::random<int>(1,maxRows);
int cols = internal::random<int>(1,maxCols);
double density = (std::max)(8./(rows*cols), 0.01);
A.resize(rows,cols);
dA.resize(rows,cols);
initSparse<Scalar>(density, dA, A,ForceNonZeroDiag);
A.makeCompressed();
int nop = internal::random<int>(0, internal::random<double>(0,1) > 0.5 ? cols/2 : 0);
for(int k=0; k<nop; ++k)
{
int j0 = internal::random<int>(0,cols-1);
int j1 = internal::random<int>(0,cols-1);
Scalar s = internal::random<Scalar>();
A.col(j0) = s * A.col(j1);
dA.col(j0) = s * dA.col(j1);
}
// if(rows<cols) {
// A.conservativeResize(cols,cols);
// dA.conservativeResize(cols,cols);
// dA.bottomRows(cols-rows).setZero();
// }
return rows;
}
template<typename Scalar> void test_sparseqr_scalar()
{
typedef SparseMatrix<Scalar,ColMajor> MatrixType;
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMat;
typedef Matrix<Scalar,Dynamic,1> DenseVector;
MatrixType A;
DenseMat dA;
DenseVector refX,x,b;
SparseQR<MatrixType, COLAMDOrdering<int> > solver;
generate_sparse_rectangular_problem(A,dA);
b = dA * DenseVector::Random(A.cols());
solver.compute(A);
// Q should be MxM
VERIFY_IS_EQUAL(solver.matrixQ().rows(), A.rows());
VERIFY_IS_EQUAL(solver.matrixQ().cols(), A.rows());
// R should be MxN
VERIFY_IS_EQUAL(solver.matrixR().rows(), A.rows());
VERIFY_IS_EQUAL(solver.matrixR().cols(), A.cols());
// Q and R can be multiplied
DenseMat recoveredA = solver.matrixQ()
* DenseMat(solver.matrixR().template triangularView<Upper>())
* solver.colsPermutation().transpose();
VERIFY_IS_EQUAL(recoveredA.rows(), A.rows());
VERIFY_IS_EQUAL(recoveredA.cols(), A.cols());
// and in the full rank case the original matrix is recovered
if (solver.rank() == A.cols())
{
VERIFY_IS_APPROX(A, recoveredA);
}
if(internal::random<float>(0,1)>0.5f)
solver.factorize(A); // this checks that calling analyzePattern is not needed if the pattern do not change.
if (solver.info() != Success)
{
std::cerr << "sparse QR factorization failed\n";
exit(0);
return;
}
x = solver.solve(b);
if (solver.info() != Success)
{
std::cerr << "sparse QR factorization failed\n";
exit(0);
return;
}
VERIFY_IS_APPROX(A * x, b);
//Compare with a dense QR solver
ColPivHouseholderQR<DenseMat> dqr(dA);
refX = dqr.solve(b);
VERIFY_IS_EQUAL(dqr.rank(), solver.rank());
if(solver.rank()==A.cols()) // full rank
VERIFY_IS_APPROX(x, refX);
// else
// VERIFY((dA * refX - b).norm() * 2 > (A * x - b).norm() );
// Compute explicitly the matrix Q
MatrixType Q, QtQ, idM;
Q = solver.matrixQ();
//Check ||Q' * Q - I ||
QtQ = Q * Q.adjoint();
idM.resize(Q.rows(), Q.rows()); idM.setIdentity();
VERIFY(idM.isApprox(QtQ));
// Q to dense
DenseMat dQ;
dQ = solver.matrixQ();
VERIFY_IS_APPROX(Q, dQ);
}
void test_sparseqr()
{
for(int i=0; i<g_repeat; ++i)
{
CALL_SUBTEST_1(test_sparseqr_scalar<double>());
CALL_SUBTEST_2(test_sparseqr_scalar<std::complex<double> >());
}
}
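// --- Illustrative sketch (not part of the original test file) ---------------
// A minimal least-squares solve with SparseQR, the pattern the test above
// cross-checks against a dense ColPivHouseholderQR. sparseqr_sketch is a
// hypothetical helper name.
#include <Eigen/SparseCore>
#include <Eigen/SparseQR>
void sparseqr_sketch()
{
  Eigen::SparseMatrix<double> A(6, 3);
  A.insert(0, 0) = 1.0; A.insert(1, 1) = 2.0; A.insert(2, 2) = 3.0;
  A.insert(3, 0) = 4.0; A.insert(4, 1) = 5.0; A.insert(5, 2) = 6.0;
  A.makeCompressed();   // SparseQR expects compressed, column-major storage
  Eigen::VectorXd b = Eigen::VectorXd::Ones(6);
  Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr(A);
  if (qr.info() == Eigen::Success)
  {
    Eigen::VectorXd x = qr.solve(b);  // minimizes ||A*x - b||
    (void)x;
  }
}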
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/stddeque_overload.cpp
|
.cpp
| 4,772
| 159
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <Eigen/StdDeque>
#include <Eigen/Geometry>
EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Vector4f)
EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Matrix2f)
EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Matrix4f)
EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Matrix4d)
EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Affine3f)
EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Affine3d)
EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Quaternionf)
EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Quaterniond)
template<typename MatrixType>
void check_stddeque_matrix(const MatrixType& m)
{
typename MatrixType::Index rows = m.rows();
typename MatrixType::Index cols = m.cols();
MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols);
std::deque<MatrixType> v(10, MatrixType::Zero(rows,cols)), w(20, y);
v[5] = x;
w[6] = v[5];
VERIFY_IS_APPROX(w[6], v[5]);
v = w;
for(int i = 0; i < 20; i++)
{
VERIFY_IS_APPROX(w[i], v[i]);
}
v.resize(21);
v[20] = x;
VERIFY_IS_APPROX(v[20], x);
v.resize(22,y);
VERIFY_IS_APPROX(v[21], y);
v.push_back(x);
VERIFY_IS_APPROX(v[22], x);
// do a lot of push_back such that the deque gets internally resized
// (with memory reallocation)
MatrixType* ref = &w[0];
for(int i=0; i<30 || ((ref==&w[0]) && i<300); ++i)
v.push_back(w[i%w.size()]);
for(unsigned int i=23; i<v.size(); ++i)
{
VERIFY(v[i]==w[(i-23)%w.size()]);
}
}
template<typename TransformType>
void check_stddeque_transform(const TransformType&)
{
typedef typename TransformType::MatrixType MatrixType;
TransformType x(MatrixType::Random()), y(MatrixType::Random()), ti=TransformType::Identity();
std::deque<TransformType> v(10,ti), w(20, y);
v[5] = x;
w[6] = v[5];
VERIFY_IS_APPROX(w[6], v[5]);
v = w;
for(int i = 0; i < 20; i++)
{
VERIFY_IS_APPROX(w[i], v[i]);
}
v.resize(21,ti);
v[20] = x;
VERIFY_IS_APPROX(v[20], x);
v.resize(22,y);
VERIFY_IS_APPROX(v[21], y);
v.push_back(x);
VERIFY_IS_APPROX(v[22], x);
// do a lot of push_back such that the deque gets internally resized
// (with memory reallocation)
TransformType* ref = &w[0];
for(int i=0; i<30 || ((ref==&w[0]) && i<300); ++i)
v.push_back(w[i%w.size()]);
for(unsigned int i=23; i<v.size(); ++i)
{
VERIFY(v[i].matrix()==w[(i-23)%w.size()].matrix());
}
}
template<typename QuaternionType>
void check_stddeque_quaternion(const QuaternionType&)
{
typedef typename QuaternionType::Coefficients Coefficients;
QuaternionType x(Coefficients::Random()), y(Coefficients::Random()), qi=QuaternionType::Identity();
std::deque<QuaternionType> v(10,qi), w(20, y);
v[5] = x;
w[6] = v[5];
VERIFY_IS_APPROX(w[6], v[5]);
v = w;
for(int i = 0; i < 20; i++)
{
VERIFY_IS_APPROX(w[i], v[i]);
}
v.resize(21,qi);
v[20] = x;
VERIFY_IS_APPROX(v[20], x);
v.resize(22,y);
VERIFY_IS_APPROX(v[21], y);
v.push_back(x);
VERIFY_IS_APPROX(v[22], x);
// do a lot of push_back such that the deque gets internally resized
// (with memory reallocation)
QuaternionType* ref = &w[0];
for(int i=0; i<30 || ((ref==&w[0]) && i<300); ++i)
v.push_back(w[i%w.size()]);
for(unsigned int i=23; i<v.size(); ++i)
{
VERIFY(v[i].coeffs()==w[(i-23)%w.size()].coeffs());
}
}
void test_stddeque_overload()
{
// some non vectorizable fixed sizes
CALL_SUBTEST_1(check_stddeque_matrix(Vector2f()));
CALL_SUBTEST_1(check_stddeque_matrix(Matrix3f()));
CALL_SUBTEST_2(check_stddeque_matrix(Matrix3d()));
// some vectorizable fixed sizes
CALL_SUBTEST_1(check_stddeque_matrix(Matrix2f()));
CALL_SUBTEST_1(check_stddeque_matrix(Vector4f()));
CALL_SUBTEST_1(check_stddeque_matrix(Matrix4f()));
CALL_SUBTEST_2(check_stddeque_matrix(Matrix4d()));
// some dynamic sizes
CALL_SUBTEST_3(check_stddeque_matrix(MatrixXd(1,1)));
CALL_SUBTEST_3(check_stddeque_matrix(VectorXd(20)));
CALL_SUBTEST_3(check_stddeque_matrix(RowVectorXf(20)));
CALL_SUBTEST_3(check_stddeque_matrix(MatrixXcf(10,10)));
// some Transform
CALL_SUBTEST_4(check_stddeque_transform(Affine2f())); // does not need the specialization (2+1)^2 = 9
CALL_SUBTEST_4(check_stddeque_transform(Affine3f()));
CALL_SUBTEST_4(check_stddeque_transform(Affine3d()));
// some Quaternion
CALL_SUBTEST_5(check_stddeque_quaternion(Quaternionf()));
CALL_SUBTEST_5(check_stddeque_quaternion(Quaterniond()));
}
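// --- Illustrative sketch (not part of the original test file) ---------------
// With the EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Vector4f) line near the top of
// this file in effect, a plain std::deque<Vector4f> is safe for this fixed-size
// vectorizable type; without it, an Eigen::aligned_allocator would be passed
// explicitly (see stddeque.cpp below). deque_overload_sketch is hypothetical.
#include <deque>
#include <Eigen/Core>
void deque_overload_sketch()
{
  std::deque<Eigen::Vector4f> d(3, Eigen::Vector4f::Zero());
  d.push_back(Eigen::Vector4f::Ones());
}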
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/mpl2only.cpp
|
.cpp
| 604
| 23
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_MPL2_ONLY
#include <Eigen/Dense>
#include <Eigen/SparseCore>
#include <Eigen/SparseLU>
#include <Eigen/SparseQR>
#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <Eigen/Eigen>
int main()
{
return 0;
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/umfpack_support.cpp
|
.cpp
| 991
| 33
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Gael Guennebaud <g.gael@free.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_NO_DEBUG_SMALL_PRODUCT_BLOCKS
#include "sparse_solver.h"
#include <Eigen/UmfPackSupport>
template<typename T> void test_umfpack_support_T()
{
UmfPackLU<SparseMatrix<T, ColMajor> > umfpack_colmajor;
UmfPackLU<SparseMatrix<T, RowMajor> > umfpack_rowmajor;
check_sparse_square_solving(umfpack_colmajor);
check_sparse_square_solving(umfpack_rowmajor);
check_sparse_square_determinant(umfpack_colmajor);
check_sparse_square_determinant(umfpack_rowmajor);
}
void test_umfpack_support()
{
CALL_SUBTEST_1(test_umfpack_support_T<double>());
CALL_SUBTEST_2(test_umfpack_support_T<std::complex<double> >());
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/dense_storage.cpp
|
.cpp
| 2,579
| 77
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2013 Hauke Heibel <hauke.heibel@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <Eigen/Core>
template <typename T, int Rows, int Cols>
void dense_storage_copy()
{
static const int Size = ((Rows==Dynamic || Cols==Dynamic) ? Dynamic : Rows*Cols);
typedef DenseStorage<T,Size, Rows,Cols, 0> DenseStorageType;
const int rows = (Rows==Dynamic) ? 4 : Rows;
const int cols = (Cols==Dynamic) ? 3 : Cols;
const int size = rows*cols;
DenseStorageType reference(size, rows, cols);
T* raw_reference = reference.data();
for (int i=0; i<size; ++i)
raw_reference[i] = static_cast<T>(i);
DenseStorageType copied_reference(reference);
const T* raw_copied_reference = copied_reference.data();
for (int i=0; i<size; ++i)
VERIFY_IS_EQUAL(raw_reference[i], raw_copied_reference[i]);
}
template <typename T, int Rows, int Cols>
void dense_storage_assignment()
{
static const int Size = ((Rows==Dynamic || Cols==Dynamic) ? Dynamic : Rows*Cols);
typedef DenseStorage<T,Size, Rows,Cols, 0> DenseStorageType;
const int rows = (Rows==Dynamic) ? 4 : Rows;
const int cols = (Cols==Dynamic) ? 3 : Cols;
const int size = rows*cols;
DenseStorageType reference(size, rows, cols);
T* raw_reference = reference.data();
for (int i=0; i<size; ++i)
raw_reference[i] = static_cast<T>(i);
DenseStorageType copied_reference;
copied_reference = reference;
const T* raw_copied_reference = copied_reference.data();
for (int i=0; i<size; ++i)
VERIFY_IS_EQUAL(raw_reference[i], raw_copied_reference[i]);
}
void test_dense_storage()
{
dense_storage_copy<int,Dynamic,Dynamic>();
dense_storage_copy<int,Dynamic,3>();
dense_storage_copy<int,4,Dynamic>();
dense_storage_copy<int,4,3>();
dense_storage_copy<float,Dynamic,Dynamic>();
dense_storage_copy<float,Dynamic,3>();
dense_storage_copy<float,4,Dynamic>();
dense_storage_copy<float,4,3>();
dense_storage_assignment<int,Dynamic,Dynamic>();
dense_storage_assignment<int,Dynamic,3>();
dense_storage_assignment<int,4,Dynamic>();
dense_storage_assignment<int,4,3>();
dense_storage_assignment<float,Dynamic,Dynamic>();
dense_storage_assignment<float,Dynamic,3>();
dense_storage_assignment<float,4,Dynamic>();
dense_storage_assignment<float,4,3>();
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/pastix_support.cpp
|
.cpp
| 1,894
| 55
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2012 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_NO_DEBUG_SMALL_PRODUCT_BLOCKS
#include "sparse_solver.h"
#include <Eigen/PaStiXSupport>
#include <unsupported/Eigen/SparseExtra>
template<typename T> void test_pastix_T()
{
PastixLLT< SparseMatrix<T, ColMajor>, Eigen::Lower > pastix_llt_lower;
PastixLDLT< SparseMatrix<T, ColMajor>, Eigen::Lower > pastix_ldlt_lower;
PastixLLT< SparseMatrix<T, ColMajor>, Eigen::Upper > pastix_llt_upper;
PastixLDLT< SparseMatrix<T, ColMajor>, Eigen::Upper > pastix_ldlt_upper;
PastixLU< SparseMatrix<T, ColMajor> > pastix_lu;
check_sparse_spd_solving(pastix_llt_lower);
check_sparse_spd_solving(pastix_ldlt_lower);
check_sparse_spd_solving(pastix_llt_upper);
check_sparse_spd_solving(pastix_ldlt_upper);
check_sparse_square_solving(pastix_lu);
// Some compilation check:
pastix_llt_lower.iparm();
pastix_llt_lower.dparm();
pastix_ldlt_lower.iparm();
pastix_ldlt_lower.dparm();
pastix_lu.iparm();
pastix_lu.dparm();
}
// There is no support for selfadjoint matrices with PaStiX.
// Complex symmetric matrices should pass, though
template<typename T> void test_pastix_T_LU()
{
PastixLU< SparseMatrix<T, ColMajor> > pastix_lu;
check_sparse_square_solving(pastix_lu);
}
void test_pastix_support()
{
CALL_SUBTEST_1(test_pastix_T<float>());
CALL_SUBTEST_2(test_pastix_T<double>());
CALL_SUBTEST_3( (test_pastix_T_LU<std::complex<float> >()) );
CALL_SUBTEST_4(test_pastix_T_LU<std::complex<double> >());
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/schur_real.cpp
|
.cpp
| 3,956
| 111
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <limits>
#include <Eigen/Eigenvalues>
template<typename MatrixType> void verifyIsQuasiTriangular(const MatrixType& T)
{
const Index size = T.cols();
typedef typename MatrixType::Scalar Scalar;
  // Check T is upper Hessenberg (zero below the first subdiagonal)
for(int row = 2; row < size; ++row) {
for(int col = 0; col < row - 1; ++col) {
VERIFY(T(row,col) == Scalar(0));
}
}
  // Check that any non-zero on the subdiagonal is followed by a zero and is
  // part of a 2x2 diagonal block with complex conjugate eigenvalues.
for(int row = 1; row < size; ++row) {
if (T(row,row-1) != Scalar(0)) {
VERIFY(row == size-1 || T(row+1,row) == 0);
Scalar tr = T(row-1,row-1) + T(row,row);
Scalar det = T(row-1,row-1) * T(row,row) - T(row-1,row) * T(row,row-1);
VERIFY(4 * det > tr * tr);
}
}
}
template<typename MatrixType> void schur(int size = MatrixType::ColsAtCompileTime)
{
// Test basic functionality: T is quasi-triangular and A = U T U*
for(int counter = 0; counter < g_repeat; ++counter) {
MatrixType A = MatrixType::Random(size, size);
RealSchur<MatrixType> schurOfA(A);
VERIFY_IS_EQUAL(schurOfA.info(), Success);
MatrixType U = schurOfA.matrixU();
MatrixType T = schurOfA.matrixT();
verifyIsQuasiTriangular(T);
VERIFY_IS_APPROX(A, U * T * U.transpose());
}
// Test asserts when not initialized
RealSchur<MatrixType> rsUninitialized;
VERIFY_RAISES_ASSERT(rsUninitialized.matrixT());
VERIFY_RAISES_ASSERT(rsUninitialized.matrixU());
VERIFY_RAISES_ASSERT(rsUninitialized.info());
// Test whether compute() and constructor returns same result
MatrixType A = MatrixType::Random(size, size);
RealSchur<MatrixType> rs1;
rs1.compute(A);
RealSchur<MatrixType> rs2(A);
VERIFY_IS_EQUAL(rs1.info(), Success);
VERIFY_IS_EQUAL(rs2.info(), Success);
VERIFY_IS_EQUAL(rs1.matrixT(), rs2.matrixT());
VERIFY_IS_EQUAL(rs1.matrixU(), rs2.matrixU());
// Test maximum number of iterations
RealSchur<MatrixType> rs3;
rs3.setMaxIterations(RealSchur<MatrixType>::m_maxIterationsPerRow * size).compute(A);
VERIFY_IS_EQUAL(rs3.info(), Success);
VERIFY_IS_EQUAL(rs3.matrixT(), rs1.matrixT());
VERIFY_IS_EQUAL(rs3.matrixU(), rs1.matrixU());
if (size > 2) {
rs3.setMaxIterations(1).compute(A);
VERIFY_IS_EQUAL(rs3.info(), NoConvergence);
VERIFY_IS_EQUAL(rs3.getMaxIterations(), 1);
}
MatrixType Atriangular = A;
Atriangular.template triangularView<StrictlyLower>().setZero();
rs3.setMaxIterations(1).compute(Atriangular); // triangular matrices do not need any iterations
VERIFY_IS_EQUAL(rs3.info(), Success);
VERIFY_IS_APPROX(rs3.matrixT(), Atriangular); // approx because of scaling...
VERIFY_IS_EQUAL(rs3.matrixU(), MatrixType::Identity(size, size));
// Test computation of only T, not U
RealSchur<MatrixType> rsOnlyT(A, false);
VERIFY_IS_EQUAL(rsOnlyT.info(), Success);
VERIFY_IS_EQUAL(rs1.matrixT(), rsOnlyT.matrixT());
VERIFY_RAISES_ASSERT(rsOnlyT.matrixU());
if (size > 2 && size < 20)
{
// Test matrix with NaN
A(0,0) = std::numeric_limits<typename MatrixType::Scalar>::quiet_NaN();
RealSchur<MatrixType> rsNaN(A);
VERIFY_IS_EQUAL(rsNaN.info(), NoConvergence);
}
}
void test_schur_real()
{
CALL_SUBTEST_1(( schur<Matrix4f>() ));
CALL_SUBTEST_2(( schur<MatrixXd>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4)) ));
CALL_SUBTEST_3(( schur<Matrix<float, 1, 1> >() ));
CALL_SUBTEST_4(( schur<Matrix<double, 3, 3, Eigen::RowMajor> >() ));
// Test problem size constructors
CALL_SUBTEST_5(RealSchur<MatrixXf>(10));
}
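// --- Illustrative sketch (not part of the original test file) ---------------
// Typical RealSchur usage mirroring what the test verifies: A = U * T * U^T
// with T quasi-triangular and U orthogonal. schur_sketch is a hypothetical name.
#include <Eigen/Core>
#include <Eigen/Eigenvalues>
void schur_sketch()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
  Eigen::RealSchur<Eigen::MatrixXd> schurOfA(A);
  if (schurOfA.info() == Eigen::Success)
  {
    Eigen::MatrixXd U = schurOfA.matrixU();
    Eigen::MatrixXd T = schurOfA.matrixT();
    // A is recovered, up to rounding, as U * T * U.transpose()
  }
}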
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/stddeque.cpp
|
.cpp
| 4,252
| 131
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <Eigen/StdDeque>
#include <Eigen/Geometry>
template<typename MatrixType>
void check_stddeque_matrix(const MatrixType& m)
{
Index rows = m.rows();
Index cols = m.cols();
MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols);
std::deque<MatrixType,Eigen::aligned_allocator<MatrixType> > v(10, MatrixType::Zero(rows,cols)), w(20, y);
v.front() = x;
w.front() = w.back();
VERIFY_IS_APPROX(w.front(), w.back());
v = w;
typename std::deque<MatrixType,Eigen::aligned_allocator<MatrixType> >::iterator vi = v.begin();
typename std::deque<MatrixType,Eigen::aligned_allocator<MatrixType> >::iterator wi = w.begin();
for(int i = 0; i < 20; i++)
{
VERIFY_IS_APPROX(*vi, *wi);
++vi;
++wi;
}
v.resize(21,MatrixType::Zero(rows,cols));
v.back() = x;
VERIFY_IS_APPROX(v.back(), x);
v.resize(22,y);
VERIFY_IS_APPROX(v.back(), y);
v.push_back(x);
VERIFY_IS_APPROX(v.back(), x);
}
template<typename TransformType>
void check_stddeque_transform(const TransformType&)
{
typedef typename TransformType::MatrixType MatrixType;
TransformType x(MatrixType::Random()), y(MatrixType::Random()), ti=TransformType::Identity();
std::deque<TransformType,Eigen::aligned_allocator<TransformType> > v(10,ti), w(20, y);
v.front() = x;
w.front() = w.back();
VERIFY_IS_APPROX(w.front(), w.back());
v = w;
typename std::deque<TransformType,Eigen::aligned_allocator<TransformType> >::iterator vi = v.begin();
typename std::deque<TransformType,Eigen::aligned_allocator<TransformType> >::iterator wi = w.begin();
for(int i = 0; i < 20; i++)
{
VERIFY_IS_APPROX(*vi, *wi);
++vi;
++wi;
}
v.resize(21,ti);
v.back() = x;
VERIFY_IS_APPROX(v.back(), x);
v.resize(22,y);
VERIFY_IS_APPROX(v.back(), y);
v.push_back(x);
VERIFY_IS_APPROX(v.back(), x);
}
template<typename QuaternionType>
void check_stddeque_quaternion(const QuaternionType&)
{
typedef typename QuaternionType::Coefficients Coefficients;
QuaternionType x(Coefficients::Random()), y(Coefficients::Random()), qi=QuaternionType::Identity();
std::deque<QuaternionType,Eigen::aligned_allocator<QuaternionType> > v(10,qi), w(20, y);
v.front() = x;
w.front() = w.back();
VERIFY_IS_APPROX(w.front(), w.back());
v = w;
typename std::deque<QuaternionType,Eigen::aligned_allocator<QuaternionType> >::iterator vi = v.begin();
typename std::deque<QuaternionType,Eigen::aligned_allocator<QuaternionType> >::iterator wi = w.begin();
for(int i = 0; i < 20; i++)
{
VERIFY_IS_APPROX(*vi, *wi);
++vi;
++wi;
}
v.resize(21,qi);
v.back() = x;
VERIFY_IS_APPROX(v.back(), x);
v.resize(22,y);
VERIFY_IS_APPROX(v.back(), y);
v.push_back(x);
VERIFY_IS_APPROX(v.back(), x);
}
void test_stddeque()
{
// some non vectorizable fixed sizes
CALL_SUBTEST_1(check_stddeque_matrix(Vector2f()));
CALL_SUBTEST_1(check_stddeque_matrix(Matrix3f()));
CALL_SUBTEST_2(check_stddeque_matrix(Matrix3d()));
// some vectorizable fixed sizes
CALL_SUBTEST_1(check_stddeque_matrix(Matrix2f()));
CALL_SUBTEST_1(check_stddeque_matrix(Vector4f()));
CALL_SUBTEST_1(check_stddeque_matrix(Matrix4f()));
CALL_SUBTEST_2(check_stddeque_matrix(Matrix4d()));
// some dynamic sizes
CALL_SUBTEST_3(check_stddeque_matrix(MatrixXd(1,1)));
CALL_SUBTEST_3(check_stddeque_matrix(VectorXd(20)));
CALL_SUBTEST_3(check_stddeque_matrix(RowVectorXf(20)));
CALL_SUBTEST_3(check_stddeque_matrix(MatrixXcf(10,10)));
// some Transform
CALL_SUBTEST_4(check_stddeque_transform(Affine2f()));
CALL_SUBTEST_4(check_stddeque_transform(Affine3f()));
CALL_SUBTEST_4(check_stddeque_transform(Affine3d()));
// some Quaternion
CALL_SUBTEST_5(check_stddeque_quaternion(Quaternionf()));
CALL_SUBTEST_5(check_stddeque_quaternion(Quaterniond()));
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/meta.cpp
|
.cpp
| 4,715
| 98
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
template<typename From, typename To>
bool check_is_convertible(const From&, const To&)
{
return internal::is_convertible<From,To>::value;
}
void test_meta()
{
VERIFY((internal::conditional<(3<4),internal::true_type, internal::false_type>::type::value));
VERIFY(( internal::is_same<float,float>::value));
VERIFY((!internal::is_same<float,double>::value));
VERIFY((!internal::is_same<float,float&>::value));
VERIFY((!internal::is_same<float,const float&>::value));
VERIFY(( internal::is_same<float,internal::remove_all<const float&>::type >::value));
VERIFY(( internal::is_same<float,internal::remove_all<const float*>::type >::value));
VERIFY(( internal::is_same<float,internal::remove_all<const float*&>::type >::value));
VERIFY(( internal::is_same<float,internal::remove_all<float**>::type >::value));
VERIFY(( internal::is_same<float,internal::remove_all<float**&>::type >::value));
VERIFY(( internal::is_same<float,internal::remove_all<float* const *&>::type >::value));
VERIFY(( internal::is_same<float,internal::remove_all<float* const>::type >::value));
// test add_const
VERIFY(( internal::is_same< internal::add_const<float>::type, const float >::value));
VERIFY(( internal::is_same< internal::add_const<float*>::type, float* const>::value));
VERIFY(( internal::is_same< internal::add_const<float const*>::type, float const* const>::value));
VERIFY(( internal::is_same< internal::add_const<float&>::type, float& >::value));
// test remove_const
VERIFY(( internal::is_same< internal::remove_const<float const* const>::type, float const* >::value));
VERIFY(( internal::is_same< internal::remove_const<float const*>::type, float const* >::value));
VERIFY(( internal::is_same< internal::remove_const<float* const>::type, float* >::value));
// test add_const_on_value_type
VERIFY(( internal::is_same< internal::add_const_on_value_type<float&>::type, float const& >::value));
VERIFY(( internal::is_same< internal::add_const_on_value_type<float*>::type, float const* >::value));
VERIFY(( internal::is_same< internal::add_const_on_value_type<float>::type, const float >::value));
VERIFY(( internal::is_same< internal::add_const_on_value_type<const float>::type, const float >::value));
VERIFY(( internal::is_same< internal::add_const_on_value_type<const float* const>::type, const float* const>::value));
VERIFY(( internal::is_same< internal::add_const_on_value_type<float* const>::type, const float* const>::value));
VERIFY(( internal::is_same<float,internal::remove_reference<float&>::type >::value));
VERIFY(( internal::is_same<const float,internal::remove_reference<const float&>::type >::value));
VERIFY(( internal::is_same<float,internal::remove_pointer<float*>::type >::value));
VERIFY(( internal::is_same<const float,internal::remove_pointer<const float*>::type >::value));
VERIFY(( internal::is_same<float,internal::remove_pointer<float* const >::type >::value));
VERIFY(( internal::is_convertible<float,double>::value ));
VERIFY(( internal::is_convertible<int,double>::value ));
VERIFY(( internal::is_convertible<double,int>::value ));
VERIFY((!internal::is_convertible<std::complex<double>,double>::value ));
VERIFY(( internal::is_convertible<Array33f,Matrix3f>::value ));
// VERIFY((!internal::is_convertible<Matrix3f,Matrix3d>::value )); //does not work because the conversion is prevented by a static assertion
VERIFY((!internal::is_convertible<Array33f,int>::value ));
VERIFY((!internal::is_convertible<MatrixXf,float>::value ));
{
float f;
MatrixXf A, B;
VectorXf a, b;
VERIFY(( check_is_convertible(a.dot(b), f) ));
VERIFY(( check_is_convertible(a.transpose()*b, f) ));
VERIFY((!check_is_convertible(A*B, f) ));
VERIFY(( check_is_convertible(A*B, A) ));
}
VERIFY(internal::meta_sqrt<1>::ret == 1);
#define VERIFY_META_SQRT(X) VERIFY(internal::meta_sqrt<X>::ret == int(std::sqrt(double(X))))
VERIFY_META_SQRT(2);
VERIFY_META_SQRT(3);
VERIFY_META_SQRT(4);
VERIFY_META_SQRT(5);
VERIFY_META_SQRT(6);
VERIFY_META_SQRT(8);
VERIFY_META_SQRT(9);
VERIFY_META_SQRT(15);
VERIFY_META_SQRT(16);
VERIFY_META_SQRT(17);
VERIFY_META_SQRT(255);
VERIFY_META_SQRT(256);
VERIFY_META_SQRT(257);
VERIFY_META_SQRT(1023);
VERIFY_META_SQRT(1024);
VERIFY_META_SQRT(1025);
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/bug1213.h
|
.h
| 147
| 9
|
#include <Eigen/Core>
template<typename T, int dim>
bool bug1213_2(const Eigen::Matrix<T,dim,1>& x);
bool bug1213_1(const Eigen::Vector3f& x);
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/visitor.cpp
|
.cpp
| 3,994
| 134
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
template<typename MatrixType> void matrixVisitor(const MatrixType& p)
{
typedef typename MatrixType::Scalar Scalar;
Index rows = p.rows();
Index cols = p.cols();
// construct a random matrix where all coefficients are different
MatrixType m;
m = MatrixType::Random(rows, cols);
for(Index i = 0; i < m.size(); i++)
for(Index i2 = 0; i2 < i; i2++)
while(m(i) == m(i2)) // yes, ==
m(i) = internal::random<Scalar>();
Scalar minc = Scalar(1000), maxc = Scalar(-1000);
Index minrow=0,mincol=0,maxrow=0,maxcol=0;
for(Index j = 0; j < cols; j++)
for(Index i = 0; i < rows; i++)
{
if(m(i,j) < minc)
{
minc = m(i,j);
minrow = i;
mincol = j;
}
if(m(i,j) > maxc)
{
maxc = m(i,j);
maxrow = i;
maxcol = j;
}
}
Index eigen_minrow, eigen_mincol, eigen_maxrow, eigen_maxcol;
Scalar eigen_minc, eigen_maxc;
eigen_minc = m.minCoeff(&eigen_minrow,&eigen_mincol);
eigen_maxc = m.maxCoeff(&eigen_maxrow,&eigen_maxcol);
VERIFY(minrow == eigen_minrow);
VERIFY(maxrow == eigen_maxrow);
VERIFY(mincol == eigen_mincol);
VERIFY(maxcol == eigen_maxcol);
VERIFY_IS_APPROX(minc, eigen_minc);
VERIFY_IS_APPROX(maxc, eigen_maxc);
VERIFY_IS_APPROX(minc, m.minCoeff());
VERIFY_IS_APPROX(maxc, m.maxCoeff());
eigen_maxc = (m.adjoint()*m).maxCoeff(&eigen_maxrow,&eigen_maxcol);
eigen_maxc = (m.adjoint()*m).eval().maxCoeff(&maxrow,&maxcol);
VERIFY(maxrow == eigen_maxrow);
VERIFY(maxcol == eigen_maxcol);
}
template<typename VectorType> void vectorVisitor(const VectorType& w)
{
typedef typename VectorType::Scalar Scalar;
Index size = w.size();
// construct a random vector where all coefficients are different
VectorType v;
v = VectorType::Random(size);
for(Index i = 0; i < size; i++)
for(Index i2 = 0; i2 < i; i2++)
while(v(i) == v(i2)) // yes, ==
v(i) = internal::random<Scalar>();
Scalar minc = v(0), maxc = v(0);
Index minidx=0, maxidx=0;
for(Index i = 0; i < size; i++)
{
if(v(i) < minc)
{
minc = v(i);
minidx = i;
}
if(v(i) > maxc)
{
maxc = v(i);
maxidx = i;
}
}
Index eigen_minidx, eigen_maxidx;
Scalar eigen_minc, eigen_maxc;
eigen_minc = v.minCoeff(&eigen_minidx);
eigen_maxc = v.maxCoeff(&eigen_maxidx);
VERIFY(minidx == eigen_minidx);
VERIFY(maxidx == eigen_maxidx);
VERIFY_IS_APPROX(minc, eigen_minc);
VERIFY_IS_APPROX(maxc, eigen_maxc);
VERIFY_IS_APPROX(minc, v.minCoeff());
VERIFY_IS_APPROX(maxc, v.maxCoeff());
Index idx0 = internal::random<Index>(0,size-1);
Index idx1 = eigen_minidx;
Index idx2 = eigen_maxidx;
VectorType v1(v), v2(v);
v1(idx0) = v1(idx1);
v2(idx0) = v2(idx2);
v1.minCoeff(&eigen_minidx);
v2.maxCoeff(&eigen_maxidx);
VERIFY(eigen_minidx == (std::min)(idx0,idx1));
VERIFY(eigen_maxidx == (std::min)(idx0,idx2));
}
void test_visitor()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( matrixVisitor(Matrix<float, 1, 1>()) );
CALL_SUBTEST_2( matrixVisitor(Matrix2f()) );
CALL_SUBTEST_3( matrixVisitor(Matrix4d()) );
CALL_SUBTEST_4( matrixVisitor(MatrixXd(8, 12)) );
CALL_SUBTEST_5( matrixVisitor(Matrix<double,Dynamic,Dynamic,RowMajor>(20, 20)) );
CALL_SUBTEST_6( matrixVisitor(MatrixXi(8, 12)) );
}
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_7( vectorVisitor(Vector4f()) );
CALL_SUBTEST_7( vectorVisitor(Matrix<int,12,1>()) );
CALL_SUBTEST_8( vectorVisitor(VectorXd(10)) );
CALL_SUBTEST_9( vectorVisitor(RowVectorXd(10)) );
CALL_SUBTEST_10( vectorVisitor(VectorXf(33)) );
}
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/qr_colpivoting.cpp
|
.cpp
| 12,543
| 339
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <Eigen/QR>
#include <Eigen/SVD>
template <typename MatrixType>
void cod() {
Index rows = internal::random<Index>(2, EIGEN_TEST_MAX_SIZE);
Index cols = internal::random<Index>(2, EIGEN_TEST_MAX_SIZE);
Index cols2 = internal::random<Index>(2, EIGEN_TEST_MAX_SIZE);
Index rank = internal::random<Index>(1, (std::min)(rows, cols) - 1);
typedef typename MatrixType::Scalar Scalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime,
MatrixType::RowsAtCompileTime>
MatrixQType;
MatrixType matrix;
createRandomPIMatrixOfRank(rank, rows, cols, matrix);
CompleteOrthogonalDecomposition<MatrixType> cod(matrix);
VERIFY(rank == cod.rank());
VERIFY(cols - cod.rank() == cod.dimensionOfKernel());
VERIFY(!cod.isInjective());
VERIFY(!cod.isInvertible());
VERIFY(!cod.isSurjective());
MatrixQType q = cod.householderQ();
VERIFY_IS_UNITARY(q);
MatrixType z = cod.matrixZ();
VERIFY_IS_UNITARY(z);
MatrixType t;
t.setZero(rows, cols);
t.topLeftCorner(rank, rank) =
cod.matrixT().topLeftCorner(rank, rank).template triangularView<Upper>();
MatrixType c = q * t * z * cod.colsPermutation().inverse();
VERIFY_IS_APPROX(matrix, c);
MatrixType exact_solution = MatrixType::Random(cols, cols2);
MatrixType rhs = matrix * exact_solution;
MatrixType cod_solution = cod.solve(rhs);
VERIFY_IS_APPROX(rhs, matrix * cod_solution);
// Verify that we get the same minimum-norm solution as the SVD.
JacobiSVD<MatrixType> svd(matrix, ComputeThinU | ComputeThinV);
MatrixType svd_solution = svd.solve(rhs);
VERIFY_IS_APPROX(cod_solution, svd_solution);
MatrixType pinv = cod.pseudoInverse();
VERIFY_IS_APPROX(cod_solution, pinv * rhs);
}
template <typename MatrixType, int Cols2>
void cod_fixedsize() {
enum {
Rows = MatrixType::RowsAtCompileTime,
Cols = MatrixType::ColsAtCompileTime
};
typedef typename MatrixType::Scalar Scalar;
int rank = internal::random<int>(1, (std::min)(int(Rows), int(Cols)) - 1);
Matrix<Scalar, Rows, Cols> matrix;
createRandomPIMatrixOfRank(rank, Rows, Cols, matrix);
CompleteOrthogonalDecomposition<Matrix<Scalar, Rows, Cols> > cod(matrix);
VERIFY(rank == cod.rank());
VERIFY(Cols - cod.rank() == cod.dimensionOfKernel());
VERIFY(cod.isInjective() == (rank == Rows));
VERIFY(cod.isSurjective() == (rank == Cols));
VERIFY(cod.isInvertible() == (cod.isInjective() && cod.isSurjective()));
Matrix<Scalar, Cols, Cols2> exact_solution;
exact_solution.setRandom(Cols, Cols2);
Matrix<Scalar, Rows, Cols2> rhs = matrix * exact_solution;
Matrix<Scalar, Cols, Cols2> cod_solution = cod.solve(rhs);
VERIFY_IS_APPROX(rhs, matrix * cod_solution);
// Verify that we get the same minimum-norm solution as the SVD.
JacobiSVD<MatrixType> svd(matrix, ComputeFullU | ComputeFullV);
Matrix<Scalar, Cols, Cols2> svd_solution = svd.solve(rhs);
VERIFY_IS_APPROX(cod_solution, svd_solution);
}
template<typename MatrixType> void qr()
{
using std::sqrt;
Index rows = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE), cols = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE), cols2 = internal::random<Index>(2,EIGEN_TEST_MAX_SIZE);
Index rank = internal::random<Index>(1, (std::min)(rows, cols)-1);
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> MatrixQType;
MatrixType m1;
createRandomPIMatrixOfRank(rank,rows,cols,m1);
ColPivHouseholderQR<MatrixType> qr(m1);
VERIFY_IS_EQUAL(rank, qr.rank());
VERIFY_IS_EQUAL(cols - qr.rank(), qr.dimensionOfKernel());
VERIFY(!qr.isInjective());
VERIFY(!qr.isInvertible());
VERIFY(!qr.isSurjective());
MatrixQType q = qr.householderQ();
VERIFY_IS_UNITARY(q);
MatrixType r = qr.matrixQR().template triangularView<Upper>();
MatrixType c = q * r * qr.colsPermutation().inverse();
VERIFY_IS_APPROX(m1, c);
// Verify that the absolute values of the diagonal elements in R are
// non-increasing until they reach the singularity threshold.
RealScalar threshold =
sqrt(RealScalar(rows)) * numext::abs(r(0, 0)) * NumTraits<Scalar>::epsilon();
for (Index i = 0; i < (std::min)(rows, cols) - 1; ++i) {
RealScalar x = numext::abs(r(i, i));
RealScalar y = numext::abs(r(i + 1, i + 1));
if (x < threshold && y < threshold) continue;
if (!test_isApproxOrLessThan(y, x)) {
for (Index j = 0; j < (std::min)(rows, cols); ++j) {
std::cout << "i = " << j << ", |r_ii| = " << numext::abs(r(j, j)) << std::endl;
}
std::cout << "Failure at i=" << i << ", rank=" << rank
<< ", threshold=" << threshold << std::endl;
}
VERIFY_IS_APPROX_OR_LESS_THAN(y, x);
}
MatrixType m2 = MatrixType::Random(cols,cols2);
MatrixType m3 = m1*m2;
m2 = MatrixType::Random(cols,cols2);
m2 = qr.solve(m3);
VERIFY_IS_APPROX(m3, m1*m2);
{
Index size = rows;
do {
m1 = MatrixType::Random(size,size);
qr.compute(m1);
} while(!qr.isInvertible());
MatrixType m1_inv = qr.inverse();
m3 = m1 * MatrixType::Random(size,cols2);
m2 = qr.solve(m3);
VERIFY_IS_APPROX(m2, m1_inv*m3);
}
}
template<typename MatrixType, int Cols2> void qr_fixedsize()
{
using std::sqrt;
using std::abs;
enum { Rows = MatrixType::RowsAtCompileTime, Cols = MatrixType::ColsAtCompileTime };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
int rank = internal::random<int>(1, (std::min)(int(Rows), int(Cols))-1);
Matrix<Scalar,Rows,Cols> m1;
createRandomPIMatrixOfRank(rank,Rows,Cols,m1);
ColPivHouseholderQR<Matrix<Scalar,Rows,Cols> > qr(m1);
VERIFY_IS_EQUAL(rank, qr.rank());
VERIFY_IS_EQUAL(Cols - qr.rank(), qr.dimensionOfKernel());
VERIFY_IS_EQUAL(qr.isInjective(), (rank == Rows));
VERIFY_IS_EQUAL(qr.isSurjective(), (rank == Cols));
VERIFY_IS_EQUAL(qr.isInvertible(), (qr.isInjective() && qr.isSurjective()));
Matrix<Scalar,Rows,Cols> r = qr.matrixQR().template triangularView<Upper>();
Matrix<Scalar,Rows,Cols> c = qr.householderQ() * r * qr.colsPermutation().inverse();
VERIFY_IS_APPROX(m1, c);
Matrix<Scalar,Cols,Cols2> m2 = Matrix<Scalar,Cols,Cols2>::Random(Cols,Cols2);
Matrix<Scalar,Rows,Cols2> m3 = m1*m2;
m2 = Matrix<Scalar,Cols,Cols2>::Random(Cols,Cols2);
m2 = qr.solve(m3);
VERIFY_IS_APPROX(m3, m1*m2);
// Verify that the absolute values of the diagonal elements in R are
// non-increasing until they reach the singularity threshold.
RealScalar threshold =
sqrt(RealScalar(Rows)) * (std::abs)(r(0, 0)) * NumTraits<Scalar>::epsilon();
for (Index i = 0; i < (std::min)(int(Rows), int(Cols)) - 1; ++i) {
RealScalar x = numext::abs(r(i, i));
RealScalar y = numext::abs(r(i + 1, i + 1));
if (x < threshold && y < threshold) continue;
if (!test_isApproxOrLessThan(y, x)) {
for (Index j = 0; j < (std::min)(int(Rows), int(Cols)); ++j) {
std::cout << "i = " << j << ", |r_ii| = " << numext::abs(r(j, j)) << std::endl;
}
std::cout << "Failure at i=" << i << ", rank=" << rank
<< ", threshold=" << threshold << std::endl;
}
VERIFY_IS_APPROX_OR_LESS_THAN(y, x);
}
}
// This test is meant to verify that pivots are chosen such that
// even for a graded matrix, the diagonal of R falls off roughly
// monotonically until it reaches the threshold for singularity.
// We use the so-called Kahan matrix, which is a famous counter-example
// for rank-revealing QR. See
// http://www.netlib.org/lapack/lawnspdf/lawn176.pdf
// page 3 for more detail.
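// (Added note) Concretely, the loop below builds the upper-triangular Kahan factor
//   K(i,i) = s^i,   K(i,j) = -c * s^i  for j > i,   K(i,j) = 0 for j < i,
// e.g. for a 3x3 matrix:
//   [ 1    -c     -c   ]
//   [ 0     s    -s*c  ]
//   [ 0     0     s^2  ]
// with s = eps^(1/rows) and c = sqrt(1 - s^2).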
template<typename MatrixType> void qr_kahan_matrix()
{
using std::sqrt;
using std::abs;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
Index rows = 300, cols = rows;
MatrixType m1;
m1.setZero(rows,cols);
RealScalar s = std::pow(NumTraits<RealScalar>::epsilon(), 1.0 / rows);
RealScalar c = std::sqrt(1 - s*s);
RealScalar pow_s_i(1.0); // pow(s,i)
for (Index i = 0; i < rows; ++i) {
m1(i, i) = pow_s_i;
m1.row(i).tail(rows - i - 1) = -pow_s_i * c * MatrixType::Ones(1, rows - i - 1);
pow_s_i *= s;
}
m1 = (m1 + m1.transpose()).eval();
ColPivHouseholderQR<MatrixType> qr(m1);
MatrixType r = qr.matrixQR().template triangularView<Upper>();
RealScalar threshold =
std::sqrt(RealScalar(rows)) * numext::abs(r(0, 0)) * NumTraits<Scalar>::epsilon();
for (Index i = 0; i < (std::min)(rows, cols) - 1; ++i) {
RealScalar x = numext::abs(r(i, i));
RealScalar y = numext::abs(r(i + 1, i + 1));
if (x < threshold && y < threshold) continue;
if (!test_isApproxOrLessThan(y, x)) {
for (Index j = 0; j < (std::min)(rows, cols); ++j) {
std::cout << "i = " << j << ", |r_ii| = " << numext::abs(r(j, j)) << std::endl;
}
std::cout << "Failure at i=" << i << ", rank=" << qr.rank()
<< ", threshold=" << threshold << std::endl;
}
VERIFY_IS_APPROX_OR_LESS_THAN(y, x);
}
}
template<typename MatrixType> void qr_invertible()
{
using std::log;
using std::abs;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename MatrixType::Scalar Scalar;
int size = internal::random<int>(10,50);
MatrixType m1(size, size), m2(size, size), m3(size, size);
m1 = MatrixType::Random(size,size);
if (internal::is_same<RealScalar,float>::value)
{
// let's build a matrix that is more stable to invert
MatrixType a = MatrixType::Random(size,size*2);
m1 += a * a.adjoint();
}
ColPivHouseholderQR<MatrixType> qr(m1);
m3 = MatrixType::Random(size,size);
m2 = qr.solve(m3);
//VERIFY_IS_APPROX(m3, m1*m2);
// now construct a matrix with prescribed determinant
m1.setZero();
for(int i = 0; i < size; i++) m1(i,i) = internal::random<Scalar>();
RealScalar absdet = abs(m1.diagonal().prod());
m3 = qr.householderQ(); // get a unitary
m1 = m3 * m1 * m3;
qr.compute(m1);
VERIFY_IS_APPROX(absdet, qr.absDeterminant());
VERIFY_IS_APPROX(log(absdet), qr.logAbsDeterminant());
}
template<typename MatrixType> void qr_verify_assert()
{
MatrixType tmp;
ColPivHouseholderQR<MatrixType> qr;
VERIFY_RAISES_ASSERT(qr.matrixQR())
VERIFY_RAISES_ASSERT(qr.solve(tmp))
VERIFY_RAISES_ASSERT(qr.householderQ())
VERIFY_RAISES_ASSERT(qr.dimensionOfKernel())
VERIFY_RAISES_ASSERT(qr.isInjective())
VERIFY_RAISES_ASSERT(qr.isSurjective())
VERIFY_RAISES_ASSERT(qr.isInvertible())
VERIFY_RAISES_ASSERT(qr.inverse())
VERIFY_RAISES_ASSERT(qr.absDeterminant())
VERIFY_RAISES_ASSERT(qr.logAbsDeterminant())
}
void test_qr_colpivoting()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( qr<MatrixXf>() );
CALL_SUBTEST_2( qr<MatrixXd>() );
CALL_SUBTEST_3( qr<MatrixXcd>() );
CALL_SUBTEST_4(( qr_fixedsize<Matrix<float,3,5>, 4 >() ));
CALL_SUBTEST_5(( qr_fixedsize<Matrix<double,6,2>, 3 >() ));
CALL_SUBTEST_5(( qr_fixedsize<Matrix<double,1,1>, 1 >() ));
}
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( cod<MatrixXf>() );
CALL_SUBTEST_2( cod<MatrixXd>() );
CALL_SUBTEST_3( cod<MatrixXcd>() );
CALL_SUBTEST_4(( cod_fixedsize<Matrix<float,3,5>, 4 >() ));
CALL_SUBTEST_5(( cod_fixedsize<Matrix<double,6,2>, 3 >() ));
CALL_SUBTEST_5(( cod_fixedsize<Matrix<double,1,1>, 1 >() ));
}
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( qr_invertible<MatrixXf>() );
CALL_SUBTEST_2( qr_invertible<MatrixXd>() );
CALL_SUBTEST_6( qr_invertible<MatrixXcf>() );
CALL_SUBTEST_3( qr_invertible<MatrixXcd>() );
}
CALL_SUBTEST_7(qr_verify_assert<Matrix3f>());
CALL_SUBTEST_8(qr_verify_assert<Matrix3d>());
CALL_SUBTEST_1(qr_verify_assert<MatrixXf>());
CALL_SUBTEST_2(qr_verify_assert<MatrixXd>());
CALL_SUBTEST_6(qr_verify_assert<MatrixXcf>());
CALL_SUBTEST_3(qr_verify_assert<MatrixXcd>());
// Test problem size constructors
CALL_SUBTEST_9(ColPivHouseholderQR<MatrixXf>(10, 20));
CALL_SUBTEST_1( qr_kahan_matrix<MatrixXf>() );
CALL_SUBTEST_2( qr_kahan_matrix<MatrixXd>() );
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/main.h
|
.h
| 31,942
| 809
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include <cstdlib>
#include <cerrno>
#include <ctime>
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#include <vector>
#include <typeinfo>
// The following includes of STL headers have to be done _before_ the
// definition of macros min() and max(). The reason is that many STL
// implementations will not work properly as the min and max symbols collide
// with the STL functions std::min() and std::max(). The STL headers may check
// for the macro definition of min/max and issue a warning or undefine the
// macros.
//
// Still, Windows defines min() and max() in windef.h as part of the regular
// Windows system interfaces and many other Windows APIs depend on these
// macros being available. To prevent the macro expansion of min/max and to
// make Eigen compatible with the Windows environment, all function calls of
// std::min() and std::max() have to be written with parentheses around the
// function name.
//
// All STL headers used by Eigen should be included here. Because main.h is
// included before any Eigen header and because the STL headers are guarded
// against multiple inclusions, no STL header will see our own min/max macro
// definitions.
#include <limits>
#include <algorithm>
#include <complex>
#include <deque>
#include <queue>
#include <cassert>
#include <list>
#if __cplusplus >= 201103L
#include <random>
#ifdef EIGEN_USE_THREADS
#include <future>
#endif
#endif
// Same for cuda_fp16.h
#if defined(__CUDACC_VER_MAJOR__) && (__CUDACC_VER_MAJOR__ >= 9)
#define EIGEN_TEST_CUDACC_VER ((__CUDACC_VER_MAJOR__ * 10000) + (__CUDACC_VER_MINOR__ * 100))
#elif defined(__CUDACC_VER__)
#define EIGEN_TEST_CUDACC_VER __CUDACC_VER__
#else
#define EIGEN_TEST_CUDACC_VER 0
#endif
#if EIGEN_TEST_CUDACC_VER >= 70500
#include <cuda_fp16.h>
#endif
// To test that all calls from Eigen code to std::min() and std::max() are
// protected by parentheses against macro expansion, the min()/max() macros
// are defined here and any not-parenthesized min/max call will cause a
// compiler error.
#define min(A,B) please_protect_your_min_with_parentheses
#define max(A,B) please_protect_your_max_with_parentheses
#define isnan(X) please_protect_your_isnan_with_parentheses
#define isinf(X) please_protect_your_isinf_with_parentheses
#define isfinite(X) please_protect_your_isfinite_with_parentheses
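// Illustrative note (added): with the macros above in effect, an unparenthesized call
// such as std::min(a, b) would be macro-expanded into a compile error, whereas the
// parenthesized form suppresses macro expansion and resolves to the STL function:
//   double lo = (std::min)(1.0, 2.0); // OK: calls std::min
//   double hi = (std::max)(1.0, 2.0); // OK: calls std::max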
// test possible conflicts
struct real {};
struct imag {};
#ifdef M_PI
#undef M_PI
#endif
#define M_PI please_use_EIGEN_PI_instead_of_M_PI
#define FORBIDDEN_IDENTIFIER (this_identifier_is_forbidden_to_avoid_clashes) this_identifier_is_forbidden_to_avoid_clashes
// B0 is defined in POSIX header termios.h
#define B0 FORBIDDEN_IDENTIFIER
// Unit tests calling Eigen's blas library must preserve the default blocking size
// to avoid troubles.
#ifndef EIGEN_NO_DEBUG_SMALL_PRODUCT_BLOCKS
#define EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
#endif
// shuts down ICC's remark #593: variable "XXX" was set but never used
#define TEST_SET_BUT_UNUSED_VARIABLE(X) EIGEN_UNUSED_VARIABLE(X)
#ifdef TEST_ENABLE_TEMPORARY_TRACKING
static long int nb_temporaries;
static long int nb_temporaries_on_assert = -1;
inline void on_temporary_creation(long int size) {
// here's a great place to set a breakpoint when debugging failures in this test!
if(size!=0) nb_temporaries++;
if(nb_temporaries_on_assert>0) assert(nb_temporaries<nb_temporaries_on_assert);
}
#define EIGEN_DENSE_STORAGE_CTOR_PLUGIN { on_temporary_creation(size); }
#define VERIFY_EVALUATION_COUNT(XPR,N) {\
nb_temporaries = 0; \
XPR; \
if(nb_temporaries!=N) { std::cerr << "nb_temporaries == " << nb_temporaries << "\n"; }\
VERIFY( (#XPR) && nb_temporaries==N ); \
}
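// Usage sketch (added note): a typical check would be
//   VERIFY_EVALUATION_COUNT( m3 = m1*m2, 1 );
// which asserts that evaluating the matrix product into m3 allocates exactly one
// temporary (the product result), as counted by the storage plugin above.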
#endif
// the following file is automatically generated by cmake
#include "split_test_helper.h"
#ifdef NDEBUG
#undef NDEBUG
#endif
// On Windows CE, NDEBUG is automatically defined by <assert.h> if DEBUG is not defined.
#ifndef DEBUG
#define DEBUG
#endif
// bounds integer values for AltiVec
#if defined(__ALTIVEC__) || defined(__VSX__)
#define EIGEN_MAKING_DOCS
#endif
#ifndef EIGEN_TEST_FUNC
#error EIGEN_TEST_FUNC must be defined
#endif
#define DEFAULT_REPEAT 10
namespace Eigen
{
static std::vector<std::string> g_test_stack;
// level == 0 <=> abort if test fails
// level >= 1 <=> warning message to std::cerr if test fails
static int g_test_level = 0;
static int g_repeat;
static unsigned int g_seed;
static bool g_has_set_repeat, g_has_set_seed;
}
#define TRACK std::cerr << __FILE__ << " " << __LINE__ << std::endl
// #define TRACK while()
#define EI_PP_MAKE_STRING2(S) #S
#define EI_PP_MAKE_STRING(S) EI_PP_MAKE_STRING2(S)
#define EIGEN_DEFAULT_IO_FORMAT IOFormat(4, 0, " ", "\n", "", "", "", "")
#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__)
#define EIGEN_EXCEPTIONS
#endif
#ifndef EIGEN_NO_ASSERTION_CHECKING
namespace Eigen
{
static const bool should_raise_an_assert = false;
// Used to avoid raising two exceptions at a time, in which
// case the exception is not properly caught.
// This may happen when a second exception is triggered in a destructor.
static bool no_more_assert = false;
static bool report_on_cerr_on_assert_failure = true;
struct eigen_assert_exception
{
eigen_assert_exception(void) {}
~eigen_assert_exception() { Eigen::no_more_assert = false; }
};
struct eigen_static_assert_exception
{
eigen_static_assert_exception(void) {}
~eigen_static_assert_exception() { Eigen::no_more_assert = false; }
};
}
// If EIGEN_DEBUG_ASSERTS is defined and if no assertion is triggered while
// one should have been, then the list of executed assertions is printed out.
//
// EIGEN_DEBUG_ASSERTS is not enabled by default as it
// significantly increases the compilation time
// and might even introduce side effects that would hide
// some memory errors.
#ifdef EIGEN_DEBUG_ASSERTS
namespace Eigen
{
namespace internal
{
static bool push_assert = false;
}
static std::vector<std::string> eigen_assert_list;
}
#define eigen_assert(a) \
if( (!(a)) && (!no_more_assert) ) \
{ \
if(report_on_cerr_on_assert_failure) \
std::cerr << #a << " " __FILE__ << "(" << __LINE__ << ")\n"; \
Eigen::no_more_assert = true; \
EIGEN_THROW_X(Eigen::eigen_assert_exception()); \
} \
else if (Eigen::internal::push_assert) \
{ \
eigen_assert_list.push_back(std::string(EI_PP_MAKE_STRING(__FILE__) " (" EI_PP_MAKE_STRING(__LINE__) ") : " #a) ); \
}
#ifdef EIGEN_EXCEPTIONS
#define VERIFY_RAISES_ASSERT(a) \
{ \
Eigen::no_more_assert = false; \
Eigen::eigen_assert_list.clear(); \
Eigen::internal::push_assert = true; \
Eigen::report_on_cerr_on_assert_failure = false; \
try { \
a; \
std::cerr << "One of the following asserts should have been triggered:\n"; \
for (uint ai=0 ; ai<eigen_assert_list.size() ; ++ai) \
std::cerr << " " << eigen_assert_list[ai] << "\n"; \
VERIFY(Eigen::should_raise_an_assert && # a); \
} catch (Eigen::eigen_assert_exception) { \
Eigen::internal::push_assert = false; VERIFY(true); \
} \
Eigen::report_on_cerr_on_assert_failure = true; \
Eigen::internal::push_assert = false; \
}
#endif //EIGEN_EXCEPTIONS
#elif !defined(__CUDACC__) // EIGEN_DEBUG_ASSERTS
// see bug 89. The copy_bool here is working around a bug in gcc <= 4.3
#define eigen_assert(a) \
if( (!Eigen::internal::copy_bool(a)) && (!no_more_assert) )\
{ \
Eigen::no_more_assert = true; \
if(report_on_cerr_on_assert_failure) \
eigen_plain_assert(a); \
else \
EIGEN_THROW_X(Eigen::eigen_assert_exception()); \
}
#ifdef EIGEN_EXCEPTIONS
#define VERIFY_RAISES_ASSERT(a) { \
Eigen::no_more_assert = false; \
Eigen::report_on_cerr_on_assert_failure = false; \
try { \
a; \
VERIFY(Eigen::should_raise_an_assert && # a); \
} \
catch (Eigen::eigen_assert_exception&) { VERIFY(true); } \
Eigen::report_on_cerr_on_assert_failure = true; \
}
#endif // EIGEN_EXCEPTIONS
#endif // EIGEN_DEBUG_ASSERTS
#if defined(TEST_CHECK_STATIC_ASSERTIONS) && defined(EIGEN_EXCEPTIONS)
#define EIGEN_STATIC_ASSERT(a,MSG) \
if( (!Eigen::internal::copy_bool(a)) && (!no_more_assert) )\
{ \
Eigen::no_more_assert = true; \
if(report_on_cerr_on_assert_failure) \
eigen_plain_assert((a) && #MSG); \
else \
EIGEN_THROW_X(Eigen::eigen_static_assert_exception()); \
}
#define VERIFY_RAISES_STATIC_ASSERT(a) { \
Eigen::no_more_assert = false; \
Eigen::report_on_cerr_on_assert_failure = false; \
try { \
a; \
VERIFY(Eigen::should_raise_an_assert && # a); \
} \
catch (Eigen::eigen_static_assert_exception&) { VERIFY(true); } \
Eigen::report_on_cerr_on_assert_failure = true; \
}
#endif // TEST_CHECK_STATIC_ASSERTIONS
#ifndef VERIFY_RAISES_ASSERT
#define VERIFY_RAISES_ASSERT(a) \
std::cout << "Can't VERIFY_RAISES_ASSERT( " #a " ) with exceptions disabled\n";
#endif
#ifndef VERIFY_RAISES_STATIC_ASSERT
#define VERIFY_RAISES_STATIC_ASSERT(a) \
std::cout << "Can't VERIFY_RAISES_STATIC_ASSERT( " #a " ) with exceptions disabled\n";
#endif
#if !defined(__CUDACC__)
#define EIGEN_USE_CUSTOM_ASSERT
#endif
#else // EIGEN_NO_ASSERTION_CHECKING
#define VERIFY_RAISES_ASSERT(a) {}
#define VERIFY_RAISES_STATIC_ASSERT(a) {}
#endif // EIGEN_NO_ASSERTION_CHECKING
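// Usage sketch (added note): with assertion checking enabled, a test can verify that a
// runtime assertion fires, e.g.
//   VERIFY_RAISES_ASSERT(eigen_assert(false));
// the macro runs the statement, expects the eigen_assert_exception defined above to be
// thrown, and fails the test otherwise.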
#define EIGEN_INTERNAL_DEBUGGING
#include <Eigen/QR> // required for createRandomPIMatrixOfRank
inline void verify_impl(bool condition, const char *testname, const char *file, int line, const char *condition_as_string)
{
if (!condition)
{
if(Eigen::g_test_level>0)
std::cerr << "WARNING: ";
std::cerr << "Test " << testname << " failed in " << file << " (" << line << ")"
<< std::endl << " " << condition_as_string << std::endl;
std::cerr << "Stack:\n";
const int test_stack_size = static_cast<int>(Eigen::g_test_stack.size());
for(int i=test_stack_size-1; i>=0; --i)
std::cerr << " - " << Eigen::g_test_stack[i] << "\n";
std::cerr << "\n";
if(Eigen::g_test_level==0)
abort();
}
}
#define VERIFY(a) ::verify_impl(a, g_test_stack.back().c_str(), __FILE__, __LINE__, EI_PP_MAKE_STRING(a))
#define VERIFY_GE(a, b) ::verify_impl(a >= b, g_test_stack.back().c_str(), __FILE__, __LINE__, EI_PP_MAKE_STRING(a >= b))
#define VERIFY_LE(a, b) ::verify_impl(a <= b, g_test_stack.back().c_str(), __FILE__, __LINE__, EI_PP_MAKE_STRING(a <= b))
#define VERIFY_IS_EQUAL(a, b) VERIFY(test_is_equal(a, b, true))
#define VERIFY_IS_NOT_EQUAL(a, b) VERIFY(test_is_equal(a, b, false))
#define VERIFY_IS_APPROX(a, b) VERIFY(verifyIsApprox(a, b))
#define VERIFY_IS_NOT_APPROX(a, b) VERIFY(!test_isApprox(a, b))
#define VERIFY_IS_MUCH_SMALLER_THAN(a, b) VERIFY(test_isMuchSmallerThan(a, b))
#define VERIFY_IS_NOT_MUCH_SMALLER_THAN(a, b) VERIFY(!test_isMuchSmallerThan(a, b))
#define VERIFY_IS_APPROX_OR_LESS_THAN(a, b) VERIFY(test_isApproxOrLessThan(a, b))
#define VERIFY_IS_NOT_APPROX_OR_LESS_THAN(a, b) VERIFY(!test_isApproxOrLessThan(a, b))
#define VERIFY_IS_UNITARY(a) VERIFY(test_isUnitary(a))
#define CALL_SUBTEST(FUNC) do { \
g_test_stack.push_back(EI_PP_MAKE_STRING(FUNC)); \
FUNC; \
g_test_stack.pop_back(); \
} while (0)
namespace Eigen {
template<typename T> inline typename NumTraits<T>::Real test_precision() { return NumTraits<T>::dummy_precision(); }
template<> inline float test_precision<float>() { return 1e-3f; }
template<> inline double test_precision<double>() { return 1e-6; }
template<> inline long double test_precision<long double>() { return 1e-6l; }
template<> inline float test_precision<std::complex<float> >() { return test_precision<float>(); }
template<> inline double test_precision<std::complex<double> >() { return test_precision<double>(); }
template<> inline long double test_precision<std::complex<long double> >() { return test_precision<long double>(); }
inline bool test_isApprox(const short& a, const short& b)
{ return internal::isApprox(a, b, test_precision<short>()); }
inline bool test_isApprox(const unsigned short& a, const unsigned short& b)
{ return internal::isApprox(a, b, test_precision<unsigned short>()); }
inline bool test_isApprox(const unsigned int& a, const unsigned int& b)
{ return internal::isApprox(a, b, test_precision<unsigned int>()); }
inline bool test_isApprox(const long& a, const long& b)
{ return internal::isApprox(a, b, test_precision<long>()); }
inline bool test_isApprox(const unsigned long& a, const unsigned long& b)
{ return internal::isApprox(a, b, test_precision<unsigned long>()); }
inline bool test_isApprox(const int& a, const int& b)
{ return internal::isApprox(a, b, test_precision<int>()); }
inline bool test_isMuchSmallerThan(const int& a, const int& b)
{ return internal::isMuchSmallerThan(a, b, test_precision<int>()); }
inline bool test_isApproxOrLessThan(const int& a, const int& b)
{ return internal::isApproxOrLessThan(a, b, test_precision<int>()); }
inline bool test_isApprox(const float& a, const float& b)
{ return internal::isApprox(a, b, test_precision<float>()); }
inline bool test_isMuchSmallerThan(const float& a, const float& b)
{ return internal::isMuchSmallerThan(a, b, test_precision<float>()); }
inline bool test_isApproxOrLessThan(const float& a, const float& b)
{ return internal::isApproxOrLessThan(a, b, test_precision<float>()); }
inline bool test_isApprox(const double& a, const double& b)
{ return internal::isApprox(a, b, test_precision<double>()); }
inline bool test_isMuchSmallerThan(const double& a, const double& b)
{ return internal::isMuchSmallerThan(a, b, test_precision<double>()); }
inline bool test_isApproxOrLessThan(const double& a, const double& b)
{ return internal::isApproxOrLessThan(a, b, test_precision<double>()); }
#ifndef EIGEN_TEST_NO_COMPLEX
inline bool test_isApprox(const std::complex<float>& a, const std::complex<float>& b)
{ return internal::isApprox(a, b, test_precision<std::complex<float> >()); }
inline bool test_isMuchSmallerThan(const std::complex<float>& a, const std::complex<float>& b)
{ return internal::isMuchSmallerThan(a, b, test_precision<std::complex<float> >()); }
inline bool test_isApprox(const std::complex<double>& a, const std::complex<double>& b)
{ return internal::isApprox(a, b, test_precision<std::complex<double> >()); }
inline bool test_isMuchSmallerThan(const std::complex<double>& a, const std::complex<double>& b)
{ return internal::isMuchSmallerThan(a, b, test_precision<std::complex<double> >()); }
#ifndef EIGEN_TEST_NO_LONGDOUBLE
inline bool test_isApprox(const std::complex<long double>& a, const std::complex<long double>& b)
{ return internal::isApprox(a, b, test_precision<std::complex<long double> >()); }
inline bool test_isMuchSmallerThan(const std::complex<long double>& a, const std::complex<long double>& b)
{ return internal::isMuchSmallerThan(a, b, test_precision<std::complex<long double> >()); }
#endif
#endif
#ifndef EIGEN_TEST_NO_LONGDOUBLE
inline bool test_isApprox(const long double& a, const long double& b)
{
bool ret = internal::isApprox(a, b, test_precision<long double>());
if (!ret) std::cerr
<< std::endl << " actual = " << a
<< std::endl << " expected = " << b << std::endl << std::endl;
return ret;
}
inline bool test_isMuchSmallerThan(const long double& a, const long double& b)
{ return internal::isMuchSmallerThan(a, b, test_precision<long double>()); }
inline bool test_isApproxOrLessThan(const long double& a, const long double& b)
{ return internal::isApproxOrLessThan(a, b, test_precision<long double>()); }
#endif // EIGEN_TEST_NO_LONGDOUBLE
inline bool test_isApprox(const half& a, const half& b)
{ return internal::isApprox(a, b, test_precision<half>()); }
inline bool test_isMuchSmallerThan(const half& a, const half& b)
{ return internal::isMuchSmallerThan(a, b, test_precision<half>()); }
inline bool test_isApproxOrLessThan(const half& a, const half& b)
{ return internal::isApproxOrLessThan(a, b, test_precision<half>()); }
// test_relative_error returns the relative difference between a and b as a real scalar as used in isApprox.
template<typename T1,typename T2>
typename NumTraits<typename T1::RealScalar>::NonInteger test_relative_error(const EigenBase<T1> &a, const EigenBase<T2> &b)
{
using std::sqrt;
typedef typename NumTraits<typename T1::RealScalar>::NonInteger RealScalar;
typename internal::nested_eval<T1,2>::type ea(a.derived());
typename internal::nested_eval<T2,2>::type eb(b.derived());
return sqrt(RealScalar((ea-eb).cwiseAbs2().sum()) / RealScalar((std::min)(eb.cwiseAbs2().sum(),ea.cwiseAbs2().sum())));
}
template<typename T1,typename T2>
typename T1::RealScalar test_relative_error(const T1 &a, const T2 &b, const typename T1::Coefficients* = 0)
{
return test_relative_error(a.coeffs(), b.coeffs());
}
template<typename T1,typename T2>
typename T1::Scalar test_relative_error(const T1 &a, const T2 &b, const typename T1::MatrixType* = 0)
{
return test_relative_error(a.matrix(), b.matrix());
}
template<typename S, int D>
S test_relative_error(const Translation<S,D> &a, const Translation<S,D> &b)
{
return test_relative_error(a.vector(), b.vector());
}
template <typename S, int D, int O>
S test_relative_error(const ParametrizedLine<S,D,O> &a, const ParametrizedLine<S,D,O> &b)
{
return (std::max)(test_relative_error(a.origin(), b.origin()), test_relative_error(a.direction(), b.direction()));
}
template <typename S, int D>
S test_relative_error(const AlignedBox<S,D> &a, const AlignedBox<S,D> &b)
{
return (std::max)(test_relative_error((a.min)(), (b.min)()), test_relative_error((a.max)(), (b.max)()));
}
template<typename Derived> class SparseMatrixBase;
template<typename T1,typename T2>
typename T1::RealScalar test_relative_error(const MatrixBase<T1> &a, const SparseMatrixBase<T2> &b)
{
return test_relative_error(a,b.toDense());
}
template<typename Derived> class SparseMatrixBase;
template<typename T1,typename T2>
typename T1::RealScalar test_relative_error(const SparseMatrixBase<T1> &a, const MatrixBase<T2> &b)
{
return test_relative_error(a.toDense(),b);
}
template<typename Derived> class SparseMatrixBase;
template<typename T1,typename T2>
typename T1::RealScalar test_relative_error(const SparseMatrixBase<T1> &a, const SparseMatrixBase<T2> &b)
{
return test_relative_error(a.toDense(),b.toDense());
}
template<typename T1,typename T2>
typename NumTraits<typename NumTraits<T1>::Real>::NonInteger test_relative_error(const T1 &a, const T2 &b, typename internal::enable_if<internal::is_arithmetic<typename NumTraits<T1>::Real>::value, T1>::type* = 0)
{
typedef typename NumTraits<typename NumTraits<T1>::Real>::NonInteger RealScalar;
return numext::sqrt(RealScalar(numext::abs2(a-b))/RealScalar((numext::mini)(numext::abs2(a),numext::abs2(b))));
}
template<typename T>
T test_relative_error(const Rotation2D<T> &a, const Rotation2D<T> &b)
{
return test_relative_error(a.angle(), b.angle());
}
template<typename T>
T test_relative_error(const AngleAxis<T> &a, const AngleAxis<T> &b)
{
return (std::max)(test_relative_error(a.angle(), b.angle()), test_relative_error(a.axis(), b.axis()));
}
template<typename Type1, typename Type2>
inline bool test_isApprox(const Type1& a, const Type2& b, typename Type1::Scalar* = 0) // Enabled for Eigen's type only
{
return a.isApprox(b, test_precision<typename Type1::Scalar>());
}
// get_test_precision is a small wrapper around test_precision that returns the scalar precision for either scalars or expressions
template<typename T>
typename NumTraits<typename T::Scalar>::Real get_test_precision(const T&, const typename T::Scalar* = 0)
{
return test_precision<typename NumTraits<typename T::Scalar>::Real>();
}
template<typename T>
typename NumTraits<T>::Real get_test_precision(const T&,typename internal::enable_if<internal::is_arithmetic<typename NumTraits<T>::Real>::value, T>::type* = 0)
{
return test_precision<typename NumTraits<T>::Real>();
}
// verifyIsApprox is a wrapper to test_isApprox that outputs the relative difference magnitude if the test fails.
template<typename Type1, typename Type2>
inline bool verifyIsApprox(const Type1& a, const Type2& b)
{
bool ret = test_isApprox(a,b);
if(!ret)
{
std::cerr << "Difference too large wrt tolerance " << get_test_precision(a) << ", relative error is: " << test_relative_error(a,b) << std::endl;
}
return ret;
}
// The idea behind this function is to compare the two scalars a and b where
// the scalar ref is a hint about the expected order of magnitude of a and b.
// Therefore, if for some reason a and b are very small compared to ref,
// we won't issue a false negative.
// WARNING: the scalars a and b must be positive.
// This test could be: abs(a-b) <= eps * ref
// However, it seems that simply comparing a+ref and b+ref is more sensitive to true error.
template<typename Scalar,typename ScalarRef>
inline bool test_isApproxWithRef(const Scalar& a, const Scalar& b, const ScalarRef& ref)
{
return test_isApprox(a+ref, b+ref);
}
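// Worked example (added note): with ref = Scalar(1), a = Scalar(1e-12) and b = Scalar(2e-12),
// test_isApproxWithRef(a, b, ref) succeeds because a+ref and b+ref agree to within the
// test precision, even though a and b differ by a factor of two; the plain relative test
// test_isApprox(a, b) would reject them.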
template<typename Derived1, typename Derived2>
inline bool test_isMuchSmallerThan(const MatrixBase<Derived1>& m1,
const MatrixBase<Derived2>& m2)
{
return m1.isMuchSmallerThan(m2, test_precision<typename internal::traits<Derived1>::Scalar>());
}
template<typename Derived>
inline bool test_isMuchSmallerThan(const MatrixBase<Derived>& m,
const typename NumTraits<typename internal::traits<Derived>::Scalar>::Real& s)
{
return m.isMuchSmallerThan(s, test_precision<typename internal::traits<Derived>::Scalar>());
}
template<typename Derived>
inline bool test_isUnitary(const MatrixBase<Derived>& m)
{
return m.isUnitary(test_precision<typename internal::traits<Derived>::Scalar>());
}
// Forward declaration to avoid ICC warning
template<typename T, typename U>
bool test_is_equal(const T& actual, const U& expected, bool expect_equal=true);
template<typename T, typename U>
bool test_is_equal(const T& actual, const U& expected, bool expect_equal)
{
if ((actual==expected) == expect_equal)
return true;
// false:
std::cerr
<< "\n actual = " << actual
<< "\n expected " << (expect_equal ? "= " : "!=") << expected << "\n\n";
return false;
}
/** Creates a random Partial Isometry matrix of given rank.
*
* A partial isometry is a matrix all of whose singular values are either 0 or 1.
* This is very useful to test rank-revealing algorithms.
*/
// Forward declaration to avoid ICC warning
template<typename MatrixType>
void createRandomPIMatrixOfRank(Index desired_rank, Index rows, Index cols, MatrixType& m);
template<typename MatrixType>
void createRandomPIMatrixOfRank(Index desired_rank, Index rows, Index cols, MatrixType& m)
{
typedef typename internal::traits<MatrixType>::Scalar Scalar;
enum { Rows = MatrixType::RowsAtCompileTime, Cols = MatrixType::ColsAtCompileTime };
typedef Matrix<Scalar, Dynamic, 1> VectorType;
typedef Matrix<Scalar, Rows, Rows> MatrixAType;
typedef Matrix<Scalar, Cols, Cols> MatrixBType;
if(desired_rank == 0)
{
m.setZero(rows,cols);
return;
}
if(desired_rank == 1)
{
// here we normalize the vectors to get a partial isometry
m = VectorType::Random(rows).normalized() * VectorType::Random(cols).normalized().transpose();
return;
}
MatrixAType a = MatrixAType::Random(rows,rows);
MatrixType d = MatrixType::Identity(rows,cols);
MatrixBType b = MatrixBType::Random(cols,cols);
// set the diagonal such that only desired_rank non-zero entries remain
const Index diag_size = (std::min)(d.rows(),d.cols());
if(diag_size != desired_rank)
d.diagonal().segment(desired_rank, diag_size-desired_rank) = VectorType::Zero(diag_size-desired_rank);
HouseholderQR<MatrixAType> qra(a);
HouseholderQR<MatrixBType> qrb(b);
m = qra.householderQ() * d * qrb.householderQ();
}
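// Usage sketch (added note, not part of the original helper): assuming <Eigen/SVD> is
// included, a caller could cross-check the construction via the singular values, e.g.
//   MatrixXd m;
//   createRandomPIMatrixOfRank(3, 6, 5, m);
//   JacobiSVD<MatrixXd> svd(m);
//   // svd.singularValues() is expected to be approximately (1, 1, 1, 0, 0)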
// Forward declaration to avoid ICC warning
template<typename PermutationVectorType>
void randomPermutationVector(PermutationVectorType& v, Index size);
template<typename PermutationVectorType>
void randomPermutationVector(PermutationVectorType& v, Index size)
{
typedef typename PermutationVectorType::Scalar Scalar;
v.resize(size);
for(Index i = 0; i < size; ++i) v(i) = Scalar(i);
if(size == 1) return;
for(Index n = 0; n < 3 * size; ++n)
{
Index i = internal::random<Index>(0, size-1);
Index j;
do j = internal::random<Index>(0, size-1); while(j==i);
std::swap(v(i), v(j));
}
}
template<typename T> bool isNotNaN(const T& x)
{
return x==x;
}
template<typename T> bool isPlusInf(const T& x)
{
return x > NumTraits<T>::highest();
}
template<typename T> bool isMinusInf(const T& x)
{
return x < NumTraits<T>::lowest();
}
} // end namespace Eigen
template<typename T> struct GetDifferentType;
template<> struct GetDifferentType<float> { typedef double type; };
template<> struct GetDifferentType<double> { typedef float type; };
template<typename T> struct GetDifferentType<std::complex<T> >
{ typedef std::complex<typename GetDifferentType<T>::type> type; };
// Forward declaration to avoid ICC warning
template<typename T> std::string type_name();
template<typename T> std::string type_name() { return "other"; }
template<> std::string type_name<float>() { return "float"; }
template<> std::string type_name<double>() { return "double"; }
template<> std::string type_name<long double>() { return "long double"; }
template<> std::string type_name<int>() { return "int"; }
template<> std::string type_name<std::complex<float> >() { return "complex<float>"; }
template<> std::string type_name<std::complex<double> >() { return "complex<double>"; }
template<> std::string type_name<std::complex<long double> >() { return "complex<long double>"; }
template<> std::string type_name<std::complex<int> >() { return "complex<int>"; }
// forward declaration of the main test function
void EIGEN_CAT(test_,EIGEN_TEST_FUNC)();
using namespace Eigen;
inline void set_repeat_from_string(const char *str)
{
errno = 0;
g_repeat = int(strtoul(str, 0, 10));
if(errno || g_repeat <= 0)
{
std::cout << "Invalid repeat value " << str << std::endl;
exit(EXIT_FAILURE);
}
g_has_set_repeat = true;
}
inline void set_seed_from_string(const char *str)
{
errno = 0;
g_seed = int(strtoul(str, 0, 10));
if(errno || g_seed == 0)
{
std::cout << "Invalid seed value " << str << std::endl;
exit(EXIT_FAILURE);
}
g_has_set_seed = true;
}
int main(int argc, char *argv[])
{
g_has_set_repeat = false;
g_has_set_seed = false;
bool need_help = false;
for(int i = 1; i < argc; i++)
{
if(argv[i][0] == 'r')
{
if(g_has_set_repeat)
{
std::cout << "Argument " << argv[i] << " conflicting with a former argument" << std::endl;
return 1;
}
set_repeat_from_string(argv[i]+1);
}
else if(argv[i][0] == 's')
{
if(g_has_set_seed)
{
std::cout << "Argument " << argv[i] << " conflicting with a former argument" << std::endl;
return 1;
}
set_seed_from_string(argv[i]+1);
}
else
{
need_help = true;
}
}
if(need_help)
{
std::cout << "This test application takes the following optional arguments:" << std::endl;
std::cout << " rN Repeat each test N times (default: " << DEFAULT_REPEAT << ")" << std::endl;
std::cout << " sN Use N as seed for random numbers (default: based on current time)" << std::endl;
std::cout << std::endl;
std::cout << "If defined, the environment variables EIGEN_REPEAT and EIGEN_SEED" << std::endl;
std::cout << "will be used as default values for these parameters." << std::endl;
return 1;
}
char *env_EIGEN_REPEAT = getenv("EIGEN_REPEAT");
if(!g_has_set_repeat && env_EIGEN_REPEAT)
set_repeat_from_string(env_EIGEN_REPEAT);
char *env_EIGEN_SEED = getenv("EIGEN_SEED");
if(!g_has_set_seed && env_EIGEN_SEED)
set_seed_from_string(env_EIGEN_SEED);
if(!g_has_set_seed) g_seed = (unsigned int) time(NULL);
if(!g_has_set_repeat) g_repeat = DEFAULT_REPEAT;
std::cout << "Initializing random number generator with seed " << g_seed << std::endl;
std::stringstream ss;
ss << "Seed: " << g_seed;
g_test_stack.push_back(ss.str());
srand(g_seed);
std::cout << "Repeating each test " << g_repeat << " times" << std::endl;
Eigen::g_test_stack.push_back(std::string(EI_PP_MAKE_STRING(EIGEN_TEST_FUNC)));
EIGEN_CAT(test_,EIGEN_TEST_FUNC)();
return 0;
}
// These warnings are disabled here so that they are still ON when parsing Eigen's header files.
#if defined __INTEL_COMPILER
// remark #383: value copied to temporary, reference to temporary used
// -> this warning is raised even for legal usage as: g_test_stack.push_back("foo"); where g_test_stack is a std::vector<std::string>
// remark #1418: external function definition with no prior declaration
// -> this warning is raised for all our test functions. Declaring them static would fix the issue.
// warning #279: controlling expression is constant
// remark #1572: floating-point equality and inequality comparisons are unreliable
#pragma warning disable 279 383 1418 1572
#endif
#ifdef _MSC_VER
// 4503 - decorated name length exceeded, name was truncated
#pragma warning( disable : 4503)
#endif
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/jacobi.cpp
|
.cpp
| 2,725
| 81
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <Eigen/SVD>
template<typename MatrixType, typename JacobiScalar>
void jacobi(const MatrixType& m = MatrixType())
{
Index rows = m.rows();
Index cols = m.cols();
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime
};
typedef Matrix<JacobiScalar, 2, 1> JacobiVector;
const MatrixType a(MatrixType::Random(rows, cols));
JacobiVector v = JacobiVector::Random().normalized();
JacobiScalar c = v.x(), s = v.y();
JacobiRotation<JacobiScalar> rot(c, s);
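// (Added note) JacobiRotation<Scalar>(c, s) represents the 2x2 rotation
//   J = [  c   conj(s) ]
//       [ -s   conj(c) ]
// applyOnTheLeft(p, q, J) multiplies rows p and q by J on the left, and
// applyOnTheRight(p, q, J) multiplies columns p and q by J on the right, which is
// exactly what the two blocks below verify coefficient-wise.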
{
Index p = internal::random<Index>(0, rows-1);
Index q;
do {
q = internal::random<Index>(0, rows-1);
} while (q == p);
MatrixType b = a;
b.applyOnTheLeft(p, q, rot);
VERIFY_IS_APPROX(b.row(p), c * a.row(p) + numext::conj(s) * a.row(q));
VERIFY_IS_APPROX(b.row(q), -s * a.row(p) + numext::conj(c) * a.row(q));
}
{
Index p = internal::random<Index>(0, cols-1);
Index q;
do {
q = internal::random<Index>(0, cols-1);
} while (q == p);
MatrixType b = a;
b.applyOnTheRight(p, q, rot);
VERIFY_IS_APPROX(b.col(p), c * a.col(p) - s * a.col(q));
VERIFY_IS_APPROX(b.col(q), numext::conj(s) * a.col(p) + numext::conj(c) * a.col(q));
}
}
void test_jacobi()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1(( jacobi<Matrix3f, float>() ));
CALL_SUBTEST_2(( jacobi<Matrix4d, double>() ));
CALL_SUBTEST_3(( jacobi<Matrix4cf, float>() ));
CALL_SUBTEST_3(( jacobi<Matrix4cf, std::complex<float> >() ));
int r = internal::random<int>(2, internal::random<int>(1,EIGEN_TEST_MAX_SIZE)/2),
c = internal::random<int>(2, internal::random<int>(1,EIGEN_TEST_MAX_SIZE)/2);
CALL_SUBTEST_4(( jacobi<MatrixXf, float>(MatrixXf(r,c)) ));
CALL_SUBTEST_5(( jacobi<MatrixXcd, double>(MatrixXcd(r,c)) ));
CALL_SUBTEST_5(( jacobi<MatrixXcd, std::complex<double> >(MatrixXcd(r,c)) ));
// complex<float> is really important to test as it is the only way to cover conjugation issues in certain unaligned paths
CALL_SUBTEST_6(( jacobi<MatrixXcf, float>(MatrixXcf(r,c)) ));
CALL_SUBTEST_6(( jacobi<MatrixXcf, std::complex<float> >(MatrixXcf(r,c)) ));
TEST_SET_BUT_UNUSED_VARIABLE(r);
TEST_SET_BUT_UNUSED_VARIABLE(c);
}
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/inverse.cpp
|
.cpp
| 4,205
| 136
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <Eigen/LU>
template<typename MatrixType> void inverse(const MatrixType& m)
{
using std::abs;
/* this test covers the following files:
Inverse.h
*/
Index rows = m.rows();
Index cols = m.cols();
typedef typename MatrixType::Scalar Scalar;
MatrixType m1(rows, cols),
m2(rows, cols),
identity = MatrixType::Identity(rows, rows);
createRandomPIMatrixOfRank(rows,rows,rows,m1);
m2 = m1.inverse();
VERIFY_IS_APPROX(m1, m2.inverse() );
VERIFY_IS_APPROX((Scalar(2)*m2).inverse(), m2.inverse()*Scalar(0.5));
VERIFY_IS_APPROX(identity, m1.inverse() * m1 );
VERIFY_IS_APPROX(identity, m1 * m1.inverse() );
VERIFY_IS_APPROX(m1, m1.inverse().inverse() );
// since for the general case we implement row-major and col-major separately, test that
VERIFY_IS_APPROX(MatrixType(m1.transpose().inverse()), MatrixType(m1.inverse().transpose()));
#if !defined(EIGEN_TEST_PART_5) && !defined(EIGEN_TEST_PART_6)
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, 1> VectorType;
//computeInverseAndDetWithCheck tests
//First: an invertible matrix
bool invertible;
Scalar det;
m2.setZero();
m1.computeInverseAndDetWithCheck(m2, det, invertible);
VERIFY(invertible);
VERIFY_IS_APPROX(identity, m1*m2);
VERIFY_IS_APPROX(det, m1.determinant());
m2.setZero();
m1.computeInverseWithCheck(m2, invertible);
VERIFY(invertible);
VERIFY_IS_APPROX(identity, m1*m2);
//Second: a rank one matrix (not invertible, except for 1x1 matrices)
VectorType v3 = VectorType::Random(rows);
MatrixType m3 = v3*v3.transpose(), m4(rows,cols);
m3.computeInverseAndDetWithCheck(m4, det, invertible);
VERIFY( rows==1 ? invertible : !invertible );
VERIFY_IS_MUCH_SMALLER_THAN(abs(det-m3.determinant()), RealScalar(1));
m3.computeInverseWithCheck(m4, invertible);
VERIFY( rows==1 ? invertible : !invertible );
// check with submatrices
{
Matrix<Scalar, MatrixType::RowsAtCompileTime+1, MatrixType::RowsAtCompileTime+1, MatrixType::Options> m5;
m5.setRandom();
m5.topLeftCorner(rows,rows) = m1;
m2 = m5.template topLeftCorner<MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime>().inverse();
VERIFY_IS_APPROX( (m5.template topLeftCorner<MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime>()), m2.inverse() );
}
#endif
// check in-place inversion
if(MatrixType::RowsAtCompileTime>=2 && MatrixType::RowsAtCompileTime<=4)
{
// in-place is forbidden
VERIFY_RAISES_ASSERT(m1 = m1.inverse());
}
else
{
m2 = m1.inverse();
m1 = m1.inverse();
VERIFY_IS_APPROX(m1,m2);
}
}
template<typename Scalar>
void inverse_zerosized()
{
Matrix<Scalar,Dynamic,Dynamic> A(0,0);
{
Matrix<Scalar,0,1> b, x;
x = A.inverse() * b;
}
{
Matrix<Scalar,Dynamic,Dynamic> b(0,1), x;
x = A.inverse() * b;
VERIFY_IS_EQUAL(x.rows(), 0);
VERIFY_IS_EQUAL(x.cols(), 1);
}
}
void test_inverse()
{
int s = 0;
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( inverse(Matrix<double,1,1>()) );
CALL_SUBTEST_2( inverse(Matrix2d()) );
CALL_SUBTEST_3( inverse(Matrix3f()) );
CALL_SUBTEST_4( inverse(Matrix4f()) );
CALL_SUBTEST_4( inverse(Matrix<float,4,4,DontAlign>()) );
s = internal::random<int>(50,320);
CALL_SUBTEST_5( inverse(MatrixXf(s,s)) );
TEST_SET_BUT_UNUSED_VARIABLE(s)
CALL_SUBTEST_5( inverse_zerosized<float>() );
s = internal::random<int>(25,100);
CALL_SUBTEST_6( inverse(MatrixXcd(s,s)) );
TEST_SET_BUT_UNUSED_VARIABLE(s)
CALL_SUBTEST_7( inverse(Matrix4d()) );
CALL_SUBTEST_7( inverse(Matrix<double,4,4,DontAlign>()) );
CALL_SUBTEST_8( inverse(Matrix4cd()) );
}
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/svd_fill.h
|
.h
| 4,050
| 119
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
template<typename T>
Array<T,4,1> four_denorms();
template<>
Array4f four_denorms() { return Array4f(5.60844e-39f, -5.60844e-39f, 4.94e-44f, -4.94e-44f); }
template<>
Array4d four_denorms() { return Array4d(5.60844e-313, -5.60844e-313, 4.94e-324, -4.94e-324); }
template<typename T>
Array<T,4,1> four_denorms() { return four_denorms<double>().cast<T>(); }
template<typename MatrixType>
void svd_fill_random(MatrixType &m, int Option = 0)
{
using std::pow;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
Index diagSize = (std::min)(m.rows(), m.cols());
RealScalar s = std::numeric_limits<RealScalar>::max_exponent10/4;
s = internal::random<RealScalar>(1,s);
Matrix<RealScalar,Dynamic,1> d = Matrix<RealScalar,Dynamic,1>::Random(diagSize);
for(Index k=0; k<diagSize; ++k)
d(k) = d(k)*pow(RealScalar(10),internal::random<RealScalar>(-s,s));
bool dup = internal::random<int>(0,10) < 3;
bool unit_uv = internal::random<int>(0,10) < (dup?7:3); // if we duplicate some singular values, increase the chance of preserving them by using unitary U and V factors
// duplicate some singular values
if(dup)
{
Index n = internal::random<Index>(0,d.size()-1);
for(Index i=0; i<n; ++i)
d(internal::random<Index>(0,d.size()-1)) = d(internal::random<Index>(0,d.size()-1));
}
Matrix<Scalar,Dynamic,Dynamic> U(m.rows(),diagSize);
Matrix<Scalar,Dynamic,Dynamic> VT(diagSize,m.cols());
if(unit_uv)
{
// in very rare cases let's try with a pure diagonal matrix
if(internal::random<int>(0,10) < 1)
{
U.setIdentity();
VT.setIdentity();
}
else
{
createRandomPIMatrixOfRank(diagSize,U.rows(), U.cols(), U);
createRandomPIMatrixOfRank(diagSize,VT.rows(), VT.cols(), VT);
}
}
else
{
U.setRandom();
VT.setRandom();
}
Matrix<Scalar,Dynamic,1> samples(9);
samples << 0, four_denorms<RealScalar>(),
-RealScalar(1)/NumTraits<RealScalar>::highest(), RealScalar(1)/NumTraits<RealScalar>::highest(), (std::numeric_limits<RealScalar>::min)(), pow((std::numeric_limits<RealScalar>::min)(),0.8);
if(Option==Symmetric)
{
m = U * d.asDiagonal() * U.transpose();
// randomly nullify some rows/columns
{
Index count = internal::random<Index>(-diagSize,diagSize);
for(Index k=0; k<count; ++k)
{
Index i = internal::random<Index>(0,diagSize-1);
m.row(i).setZero();
m.col(i).setZero();
}
if(count<0)
// (partly) cancel some coeffs
if(!(dup && unit_uv))
{
Index n = internal::random<Index>(0,m.size()-1);
for(Index k=0; k<n; ++k)
{
Index i = internal::random<Index>(0,m.rows()-1);
Index j = internal::random<Index>(0,m.cols()-1);
m(j,i) = m(i,j) = samples(internal::random<Index>(0,samples.size()-1));
if(NumTraits<Scalar>::IsComplex)
*(&numext::real_ref(m(j,i))+1) = *(&numext::real_ref(m(i,j))+1) = samples.real()(internal::random<Index>(0,samples.size()-1));
}
}
}
}
else
{
m = U * d.asDiagonal() * VT;
// (partly) cancel some coeffs
if(!(dup && unit_uv))
{
Index n = internal::random<Index>(0,m.size()-1);
for(Index k=0; k<n; ++k)
{
Index i = internal::random<Index>(0,m.rows()-1);
Index j = internal::random<Index>(0,m.cols()-1);
m(i,j) = samples(internal::random<Index>(0,samples.size()-1));
if(NumTraits<Scalar>::IsComplex)
*(&numext::real_ref(m(i,j))+1) = samples.real()(internal::random<Index>(0,samples.size()-1));
}
}
}
}
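// Usage sketch (added note): an SVD unit test would typically fill its input with this
// helper and then decompose it, e.g.
//   MatrixXd A(20, 30);
//   svd_fill_random(A);
//   JacobiSVD<MatrixXd> svd(A, ComputeThinU | ComputeThinV);
// so that the decomposition is exercised on singular values spanning many orders of
// magnitude, including denormals and duplicated values.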
|
Unknown
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/nesting_ops.cpp
|
.cpp
| 4,369
| 108
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define TEST_ENABLE_TEMPORARY_TRACKING
#include "main.h"
template <int N, typename XprType>
void use_n_times(const XprType &xpr)
{
typename internal::nested_eval<XprType,N>::type mat(xpr);
typename XprType::PlainObject res(mat.rows(), mat.cols());
nb_temporaries--; // remove res
res.setZero();
for(int i=0; i<N; ++i)
res += mat;
}
template <int N, typename ReferenceType, typename XprType>
bool verify_eval_type(const XprType &, const ReferenceType&)
{
typedef typename internal::nested_eval<XprType,N>::type EvalType;
return internal::is_same<typename internal::remove_all<EvalType>::type, typename internal::remove_all<ReferenceType>::type>::value;
}
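// (Added note) internal::nested_eval<XprType,N>::type is the type Eigen uses to nest the
// expression when it is going to be accessed N times: cheap expressions are kept as-is,
// while expensive ones (e.g. products) are evaluated into a plain temporary.
// verify_eval_type simply compares that type against the expected ReferenceType.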
template <typename MatrixType> void run_nesting_ops_1(const MatrixType& _m)
{
typename internal::nested_eval<MatrixType,2>::type m(_m);
// Make really sure that we are in debug mode!
VERIFY_RAISES_ASSERT(eigen_assert(false));
// The only intention of these tests is to ensure that this code does
// not trigger any asserts or segmentation faults... more to come.
VERIFY_IS_APPROX( (m.transpose() * m).diagonal().sum(), (m.transpose() * m).diagonal().sum() );
VERIFY_IS_APPROX( (m.transpose() * m).diagonal().array().abs().sum(), (m.transpose() * m).diagonal().array().abs().sum() );
VERIFY_IS_APPROX( (m.transpose() * m).array().abs().sum(), (m.transpose() * m).array().abs().sum() );
}
template <typename MatrixType> void run_nesting_ops_2(const MatrixType& _m)
{
typedef typename MatrixType::Scalar Scalar;
Index rows = _m.rows();
Index cols = _m.cols();
MatrixType m1 = MatrixType::Random(rows,cols);
Matrix<Scalar,MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime,ColMajor> m2;
if((MatrixType::SizeAtCompileTime==Dynamic))
{
VERIFY_EVALUATION_COUNT( use_n_times<1>(m1 + m1*m1), 1 );
VERIFY_EVALUATION_COUNT( use_n_times<10>(m1 + m1*m1), 1 );
VERIFY_EVALUATION_COUNT( use_n_times<1>(m1.template triangularView<Lower>().solve(m1.col(0))), 1 );
VERIFY_EVALUATION_COUNT( use_n_times<10>(m1.template triangularView<Lower>().solve(m1.col(0))), 1 );
VERIFY_EVALUATION_COUNT( use_n_times<1>(Scalar(2)*m1.template triangularView<Lower>().solve(m1.col(0))), 2 ); // FIXME could be one by applying the scaling in-place on the solve result
VERIFY_EVALUATION_COUNT( use_n_times<1>(m1.col(0)+m1.template triangularView<Lower>().solve(m1.col(0))), 2 ); // FIXME could be one by adding m1.col() inplace
VERIFY_EVALUATION_COUNT( use_n_times<10>(m1.col(0)+m1.template triangularView<Lower>().solve(m1.col(0))), 2 );
}
{
VERIFY( verify_eval_type<10>(m1, m1) );
if(!NumTraits<Scalar>::IsComplex)
{
VERIFY( verify_eval_type<3>(2*m1, 2*m1) );
VERIFY( verify_eval_type<4>(2*m1, m1) );
}
else
{
VERIFY( verify_eval_type<2>(2*m1, 2*m1) );
VERIFY( verify_eval_type<3>(2*m1, m1) );
}
VERIFY( verify_eval_type<2>(m1+m1, m1+m1) );
VERIFY( verify_eval_type<3>(m1+m1, m1) );
VERIFY( verify_eval_type<1>(m1*m1.transpose(), m2) );
VERIFY( verify_eval_type<1>(m1*(m1+m1).transpose(), m2) );
VERIFY( verify_eval_type<2>(m1*m1.transpose(), m2) );
VERIFY( verify_eval_type<1>(m1+m1*m1, m1) );
VERIFY( verify_eval_type<1>(m1.template triangularView<Lower>().solve(m1), m1) );
VERIFY( verify_eval_type<1>(m1+m1.template triangularView<Lower>().solve(m1), m1) );
}
}
void test_nesting_ops()
{
CALL_SUBTEST_1(run_nesting_ops_1(MatrixXf::Random(25,25)));
CALL_SUBTEST_2(run_nesting_ops_1(MatrixXcd::Random(25,25)));
CALL_SUBTEST_3(run_nesting_ops_1(Matrix4f::Random()));
CALL_SUBTEST_4(run_nesting_ops_1(Matrix2d::Random()));
Index s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE);
CALL_SUBTEST_1( run_nesting_ops_2(MatrixXf(s,s)) );
CALL_SUBTEST_2( run_nesting_ops_2(MatrixXcd(s,s)) );
CALL_SUBTEST_3( run_nesting_ops_2(Matrix4f()) );
CALL_SUBTEST_4( run_nesting_ops_2(Matrix2d()) );
TEST_SET_BUT_UNUSED_VARIABLE(s)
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/product_selfadjoint.cpp
|
.cpp
| 3,502
| 87
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
template<typename MatrixType> void product_selfadjoint(const MatrixType& m)
{
typedef typename MatrixType::Scalar Scalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
typedef Matrix<Scalar, 1, MatrixType::RowsAtCompileTime> RowVectorType;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, Dynamic, RowMajor> RhsMatrixType;
Index rows = m.rows();
Index cols = m.cols();
MatrixType m1 = MatrixType::Random(rows, cols),
m2 = MatrixType::Random(rows, cols),
m3;
VectorType v1 = VectorType::Random(rows),
v2 = VectorType::Random(rows),
v3(rows);
RowVectorType r1 = RowVectorType::Random(rows),
r2 = RowVectorType::Random(rows);
RhsMatrixType m4 = RhsMatrixType::Random(rows,10);
Scalar s1 = internal::random<Scalar>(),
s2 = internal::random<Scalar>(),
s3 = internal::random<Scalar>();
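// make m1 self-adjoint so that either triangular half fully determines it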
m1 = (m1.adjoint() + m1).eval();
// rank2 update
m2 = m1.template triangularView<Lower>();
m2.template selfadjointView<Lower>().rankUpdate(v1,v2);
VERIFY_IS_APPROX(m2, (m1 + v1 * v2.adjoint()+ v2 * v1.adjoint()).template triangularView<Lower>().toDenseMatrix());
m2 = m1.template triangularView<Upper>();
m2.template selfadjointView<Upper>().rankUpdate(-v1,s2*v2,s3);
VERIFY_IS_APPROX(m2, (m1 + (s3*(-v1)*(s2*v2).adjoint()+numext::conj(s3)*(s2*v2)*(-v1).adjoint())).template triangularView<Upper>().toDenseMatrix());
m2 = m1.template triangularView<Upper>();
m2.template selfadjointView<Upper>().rankUpdate(-s2*r1.adjoint(),r2.adjoint()*s3,s1);
VERIFY_IS_APPROX(m2, (m1 + s1*(-s2*r1.adjoint())*(r2.adjoint()*s3).adjoint() + numext::conj(s1)*(r2.adjoint()*s3) * (-s2*r1.adjoint()).adjoint()).template triangularView<Upper>().toDenseMatrix());
if (rows>1)
{
m2 = m1.template triangularView<Lower>();
m2.block(1,1,rows-1,cols-1).template selfadjointView<Lower>().rankUpdate(v1.tail(rows-1),v2.head(cols-1));
m3 = m1;
m3.block(1,1,rows-1,cols-1) += v1.tail(rows-1) * v2.head(cols-1).adjoint()+ v2.head(cols-1) * v1.tail(rows-1).adjoint();
VERIFY_IS_APPROX(m2, m3.template triangularView<Lower>().toDenseMatrix());
}
}
void test_product_selfadjoint()
{
int s = 0;
for(int i = 0; i < g_repeat ; i++) {
CALL_SUBTEST_1( product_selfadjoint(Matrix<float, 1, 1>()) );
CALL_SUBTEST_2( product_selfadjoint(Matrix<float, 2, 2>()) );
CALL_SUBTEST_3( product_selfadjoint(Matrix3d()) );
s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2);
CALL_SUBTEST_4( product_selfadjoint(MatrixXcf(s, s)) );
TEST_SET_BUT_UNUSED_VARIABLE(s)
s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2);
CALL_SUBTEST_5( product_selfadjoint(MatrixXcd(s,s)) );
TEST_SET_BUT_UNUSED_VARIABLE(s)
s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE);
CALL_SUBTEST_6( product_selfadjoint(MatrixXd(s,s)) );
TEST_SET_BUT_UNUSED_VARIABLE(s)
s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE);
CALL_SUBTEST_7( product_selfadjoint(Matrix<float,Dynamic,Dynamic,RowMajor>(s,s)) );
TEST_SET_BUT_UNUSED_VARIABLE(s)
}
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/stdvector_overload.cpp
|
.cpp
| 5,048
| 162
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <Eigen/StdVector>
#include <Eigen/Geometry>
EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Vector4f)
EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Matrix2f)
EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Matrix4f)
EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Matrix4d)
EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Affine3f)
EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Affine3d)
EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Quaternionf)
EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Quaterniond)
template<typename MatrixType>
void check_stdvector_matrix(const MatrixType& m)
{
typename MatrixType::Index rows = m.rows();
typename MatrixType::Index cols = m.cols();
MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols);
std::vector<MatrixType> v(10, MatrixType::Zero(rows,cols)), w(20, y);
v[5] = x;
w[6] = v[5];
VERIFY_IS_APPROX(w[6], v[5]);
v = w;
for(int i = 0; i < 20; i++)
{
VERIFY_IS_APPROX(w[i], v[i]);
}
v.resize(21);
v[20] = x;
VERIFY_IS_APPROX(v[20], x);
v.resize(22,y);
VERIFY_IS_APPROX(v[21], y);
v.push_back(x);
VERIFY_IS_APPROX(v[22], x);
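// consecutive elements must be exactly sizeof(MatrixType) apart, i.e. the aligned
// allocator must not insert any padding between them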
VERIFY((internal::UIntPtr)&(v[22]) == (internal::UIntPtr)&(v[21]) + sizeof(MatrixType));
// do a lot of push_back such that the vector gets internally resized
// (with memory reallocation)
MatrixType* ref = &w[0];
for(int i=0; i<30 || ((ref==&w[0]) && i<300); ++i)
v.push_back(w[i%w.size()]);
for(unsigned int i=23; i<v.size(); ++i)
{
VERIFY(v[i]==w[(i-23)%w.size()]);
}
}
template<typename TransformType>
void check_stdvector_transform(const TransformType&)
{
typedef typename TransformType::MatrixType MatrixType;
TransformType x(MatrixType::Random()), y(MatrixType::Random());
std::vector<TransformType> v(10), w(20, y);
v[5] = x;
w[6] = v[5];
VERIFY_IS_APPROX(w[6], v[5]);
v = w;
for(int i = 0; i < 20; i++)
{
VERIFY_IS_APPROX(w[i], v[i]);
}
v.resize(21);
v[20] = x;
VERIFY_IS_APPROX(v[20], x);
v.resize(22,y);
VERIFY_IS_APPROX(v[21], y);
v.push_back(x);
VERIFY_IS_APPROX(v[22], x);
VERIFY((internal::UIntPtr)&(v[22]) == (internal::UIntPtr)&(v[21]) + sizeof(TransformType));
// do a lot of push_back such that the vector gets internally resized
// (with memory reallocation)
TransformType* ref = &w[0];
for(int i=0; i<30 || ((ref==&w[0]) && i<300); ++i)
v.push_back(w[i%w.size()]);
for(unsigned int i=23; i<v.size(); ++i)
{
VERIFY(v[i].matrix()==w[(i-23)%w.size()].matrix());
}
}
template<typename QuaternionType>
void check_stdvector_quaternion(const QuaternionType&)
{
typedef typename QuaternionType::Coefficients Coefficients;
QuaternionType x(Coefficients::Random()), y(Coefficients::Random()), qi=QuaternionType::Identity();
std::vector<QuaternionType> v(10,qi), w(20, y);
v[5] = x;
w[6] = v[5];
VERIFY_IS_APPROX(w[6], v[5]);
v = w;
for(int i = 0; i < 20; i++)
{
VERIFY_IS_APPROX(w[i], v[i]);
}
v.resize(21);
v[20] = x;
VERIFY_IS_APPROX(v[20], x);
v.resize(22,y);
VERIFY_IS_APPROX(v[21], y);
v.push_back(x);
VERIFY_IS_APPROX(v[22], x);
VERIFY((internal::UIntPtr)&(v[22]) == (internal::UIntPtr)&(v[21]) + sizeof(QuaternionType));
// do a lot of push_back such that the vector gets internally resized
// (with memory reallocation)
QuaternionType* ref = &w[0];
for(int i=0; i<30 || ((ref==&w[0]) && i<300); ++i)
v.push_back(w[i%w.size()]);
for(unsigned int i=23; i<v.size(); ++i)
{
VERIFY(v[i].coeffs()==w[(i-23)%w.size()].coeffs());
}
}
void test_stdvector_overload()
{
// some non vectorizable fixed sizes
CALL_SUBTEST_1(check_stdvector_matrix(Vector2f()));
CALL_SUBTEST_1(check_stdvector_matrix(Matrix3f()));
CALL_SUBTEST_2(check_stdvector_matrix(Matrix3d()));
// some vectorizable fixed sizes
CALL_SUBTEST_1(check_stdvector_matrix(Matrix2f()));
CALL_SUBTEST_1(check_stdvector_matrix(Vector4f()));
CALL_SUBTEST_1(check_stdvector_matrix(Matrix4f()));
CALL_SUBTEST_2(check_stdvector_matrix(Matrix4d()));
// some dynamic sizes
CALL_SUBTEST_3(check_stdvector_matrix(MatrixXd(1,1)));
CALL_SUBTEST_3(check_stdvector_matrix(VectorXd(20)));
CALL_SUBTEST_3(check_stdvector_matrix(RowVectorXf(20)));
CALL_SUBTEST_3(check_stdvector_matrix(MatrixXcf(10,10)));
// some Transform
CALL_SUBTEST_4(check_stdvector_transform(Affine2f())); // does not need the specialization (2+1)^2 = 9
CALL_SUBTEST_4(check_stdvector_transform(Affine3f()));
CALL_SUBTEST_4(check_stdvector_transform(Affine3d()));
// some Quaternion
CALL_SUBTEST_5(check_stdvector_quaternion(Quaternionf()));
CALL_SUBTEST_5(check_stdvector_quaternion(Quaterniond()));
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/geo_eulerangles.cpp
|
.cpp
| 3,554
| 113
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2012 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <Eigen/Geometry>
#include <Eigen/LU>
#include <Eigen/SVD>
template<typename Scalar>
void verify_euler(const Matrix<Scalar,3,1>& ea, int i, int j, int k)
{
typedef Matrix<Scalar,3,3> Matrix3;
typedef Matrix<Scalar,3,1> Vector3;
typedef AngleAxis<Scalar> AngleAxisx;
using std::abs;
Matrix3 m(AngleAxisx(ea[0], Vector3::Unit(i)) * AngleAxisx(ea[1], Vector3::Unit(j)) * AngleAxisx(ea[2], Vector3::Unit(k)));
Vector3 eabis = m.eulerAngles(i, j, k);
Matrix3 mbis(AngleAxisx(eabis[0], Vector3::Unit(i)) * AngleAxisx(eabis[1], Vector3::Unit(j)) * AngleAxisx(eabis[2], Vector3::Unit(k)));
VERIFY_IS_APPROX(m, mbis);
/* If I==K and ea[1]==0, then there is no unique solution. */
/* The same remark applies when I!=K and |ea[1]| is close to pi/2. */
if( (i!=k || ea[1]!=0) && (i==k || !internal::isApprox(abs(ea[1]),Scalar(EIGEN_PI/2),test_precision<Scalar>())) )
VERIFY((ea-eabis).norm() <= test_precision<Scalar>());
// approx_or_less_than does not work for 0
VERIFY(0 < eabis[0] || test_isMuchSmallerThan(eabis[0], Scalar(1)));
VERIFY_IS_APPROX_OR_LESS_THAN(eabis[0], Scalar(EIGEN_PI));
VERIFY_IS_APPROX_OR_LESS_THAN(-Scalar(EIGEN_PI), eabis[1]);
VERIFY_IS_APPROX_OR_LESS_THAN(eabis[1], Scalar(EIGEN_PI));
VERIFY_IS_APPROX_OR_LESS_THAN(-Scalar(EIGEN_PI), eabis[2]);
VERIFY_IS_APPROX_OR_LESS_THAN(eabis[2], Scalar(EIGEN_PI));
}
template<typename Scalar> void check_all_var(const Matrix<Scalar,3,1>& ea)
{
verify_euler(ea, 0,1,2);
verify_euler(ea, 0,1,0);
verify_euler(ea, 0,2,1);
verify_euler(ea, 0,2,0);
verify_euler(ea, 1,2,0);
verify_euler(ea, 1,2,1);
verify_euler(ea, 1,0,2);
verify_euler(ea, 1,0,1);
verify_euler(ea, 2,0,1);
verify_euler(ea, 2,0,2);
verify_euler(ea, 2,1,0);
verify_euler(ea, 2,1,2);
}
template<typename Scalar> void eulerangles()
{
typedef Matrix<Scalar,3,3> Matrix3;
typedef Matrix<Scalar,3,1> Vector3;
typedef Array<Scalar,3,1> Array3;
typedef Quaternion<Scalar> Quaternionx;
typedef AngleAxis<Scalar> AngleAxisx;
Scalar a = internal::random<Scalar>(-Scalar(EIGEN_PI), Scalar(EIGEN_PI));
Quaternionx q1;
q1 = AngleAxisx(a, Vector3::Random().normalized());
Matrix3 m;
m = q1;
Vector3 ea = m.eulerAngles(0,1,2);
check_all_var(ea);
ea = m.eulerAngles(0,1,0);
check_all_var(ea);
// Check with purely random Quaternion:
q1.coeffs() = Quaternionx::Coefficients::Random().normalized();
m = q1;
ea = m.eulerAngles(0,1,2);
check_all_var(ea);
ea = m.eulerAngles(0,1,0);
check_all_var(ea);
// Check with random angles in range [0:pi]x[-pi:pi]x[-pi:pi].
ea = (Array3::Random() + Array3(1,0,0))*Scalar(EIGEN_PI)*Array3(0.5,1,1);
check_all_var(ea);
ea[2] = ea[0] = internal::random<Scalar>(0,Scalar(EIGEN_PI));
check_all_var(ea);
ea[0] = ea[1] = internal::random<Scalar>(0,Scalar(EIGEN_PI));
check_all_var(ea);
ea[1] = 0;
check_all_var(ea);
ea.head(2).setZero();
check_all_var(ea);
ea.setZero();
check_all_var(ea);
}
void test_geo_eulerangles()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( eulerangles<float>() );
CALL_SUBTEST_2( eulerangles<double>() );
}
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/eigensolver_complex.cpp
|
.cpp
| 6,212
| 177
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <limits>
#include <Eigen/Eigenvalues>
#include <Eigen/LU>
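// Recursively search for a row permutation of `diffs` that brings its whole diagonal
// below `tol`, i.e. a one-to-one matching between the entries of the two vectors
// compared in verify_is_approx_upto_permutation() below.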
template<typename MatrixType> bool find_pivot(typename MatrixType::Scalar tol, MatrixType &diffs, Index col=0)
{
bool match = diffs.diagonal().sum() <= tol;
if(match || col==diffs.cols())
{
return match;
}
else
{
Index n = diffs.cols();
std::vector<std::pair<Index,Index> > transpositions;
for(Index i=col; i<n; ++i)
{
Index best_index(0);
if(diffs.col(col).segment(col,n-i).minCoeff(&best_index) > tol)
break;
best_index += col;
diffs.row(col).swap(diffs.row(best_index));
if(find_pivot(tol,diffs,col+1)) return true;
diffs.row(col).swap(diffs.row(best_index));
// move current pivot to the end
diffs.row(n-(i-col)-1).swap(diffs.row(best_index));
transpositions.push_back(std::pair<Index,Index>(n-(i-col)-1,best_index));
}
// restore
for(Index k=transpositions.size()-1; k>=0; --k)
diffs.row(transpositions[k].first).swap(diffs.row(transpositions[k].second));
}
return false;
}
/* Check that two column vectors are approximately equal up to a permutation.
 * Initially, this method checked that the k-th power sums were equal for all k = 1, ..., vec1.rows(),
 * but that strategy is numerically inaccurate because of cancellation issues.
 */
template<typename VectorType>
void verify_is_approx_upto_permutation(const VectorType& vec1, const VectorType& vec2)
{
typedef typename VectorType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
VERIFY(vec1.cols() == 1);
VERIFY(vec2.cols() == 1);
VERIFY(vec1.rows() == vec2.rows());
Index n = vec1.rows();
RealScalar tol = test_precision<RealScalar>()*test_precision<RealScalar>()*numext::maxi(vec1.squaredNorm(),vec2.squaredNorm());
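// diffs(i,j) = |vec1(i) - vec2(j)|^2: the two vectors match up to a permutation
// iff some row permutation of diffs makes its diagonal (approximately) zero.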
Matrix<RealScalar,Dynamic,Dynamic> diffs = (vec1.rowwise().replicate(n) - vec2.rowwise().replicate(n).transpose()).cwiseAbs2();
VERIFY( find_pivot(tol, diffs) );
}
template<typename MatrixType> void eigensolver(const MatrixType& m)
{
/* this test covers the following files:
ComplexEigenSolver.h, and indirectly ComplexSchur.h
*/
Index rows = m.rows();
Index cols = m.cols();
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
MatrixType a = MatrixType::Random(rows,cols);
MatrixType symmA = a.adjoint() * a;
ComplexEigenSolver<MatrixType> ei0(symmA);
VERIFY_IS_EQUAL(ei0.info(), Success);
VERIFY_IS_APPROX(symmA * ei0.eigenvectors(), ei0.eigenvectors() * ei0.eigenvalues().asDiagonal());
ComplexEigenSolver<MatrixType> ei1(a);
VERIFY_IS_EQUAL(ei1.info(), Success);
VERIFY_IS_APPROX(a * ei1.eigenvectors(), ei1.eigenvectors() * ei1.eigenvalues().asDiagonal());
// Note: if MatrixType is real, then a.eigenvalues() uses EigenSolver and thus
// another algorithm, so the results may differ slightly
verify_is_approx_upto_permutation(a.eigenvalues(), ei1.eigenvalues());
ComplexEigenSolver<MatrixType> ei2;
ei2.setMaxIterations(ComplexSchur<MatrixType>::m_maxIterationsPerRow * rows).compute(a);
VERIFY_IS_EQUAL(ei2.info(), Success);
VERIFY_IS_EQUAL(ei2.eigenvectors(), ei1.eigenvectors());
VERIFY_IS_EQUAL(ei2.eigenvalues(), ei1.eigenvalues());
if (rows > 2) {
ei2.setMaxIterations(1).compute(a);
VERIFY_IS_EQUAL(ei2.info(), NoConvergence);
VERIFY_IS_EQUAL(ei2.getMaxIterations(), 1);
}
ComplexEigenSolver<MatrixType> eiNoEivecs(a, false);
VERIFY_IS_EQUAL(eiNoEivecs.info(), Success);
VERIFY_IS_APPROX(ei1.eigenvalues(), eiNoEivecs.eigenvalues());
// Regression test for issue #66
MatrixType z = MatrixType::Zero(rows,cols);
ComplexEigenSolver<MatrixType> eiz(z);
VERIFY((eiz.eigenvalues().cwiseEqual(0)).all());
MatrixType id = MatrixType::Identity(rows, cols);
VERIFY_IS_APPROX(id.operatorNorm(), RealScalar(1));
if (rows > 1 && rows < 20)
{
// Test matrix with NaN
a(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
ComplexEigenSolver<MatrixType> eiNaN(a);
VERIFY_IS_EQUAL(eiNaN.info(), NoConvergence);
}
// regression test for bug 1098
{
ComplexEigenSolver<MatrixType> eig(a.adjoint() * a);
eig.compute(a.adjoint() * a);
}
// regression test for bug 478
{
a.setZero();
ComplexEigenSolver<MatrixType> ei3(a);
VERIFY_IS_EQUAL(ei3.info(), Success);
VERIFY_IS_MUCH_SMALLER_THAN(ei3.eigenvalues().norm(),RealScalar(1));
VERIFY((ei3.eigenvectors().transpose()*ei3.eigenvectors().transpose()).eval().isIdentity());
}
}
template<typename MatrixType> void eigensolver_verify_assert(const MatrixType& m)
{
ComplexEigenSolver<MatrixType> eig;
VERIFY_RAISES_ASSERT(eig.eigenvectors());
VERIFY_RAISES_ASSERT(eig.eigenvalues());
MatrixType a = MatrixType::Random(m.rows(),m.cols());
eig.compute(a, false);
VERIFY_RAISES_ASSERT(eig.eigenvectors());
}
void test_eigensolver_complex()
{
int s = 0;
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( eigensolver(Matrix4cf()) );
s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
CALL_SUBTEST_2( eigensolver(MatrixXcd(s,s)) );
CALL_SUBTEST_3( eigensolver(Matrix<std::complex<float>, 1, 1>()) );
CALL_SUBTEST_4( eigensolver(Matrix3f()) );
TEST_SET_BUT_UNUSED_VARIABLE(s)
}
CALL_SUBTEST_1( eigensolver_verify_assert(Matrix4cf()) );
s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
CALL_SUBTEST_2( eigensolver_verify_assert(MatrixXcd(s,s)) );
CALL_SUBTEST_3( eigensolver_verify_assert(Matrix<std::complex<float>, 1, 1>()) );
CALL_SUBTEST_4( eigensolver_verify_assert(Matrix3f()) );
// Test problem size constructors
CALL_SUBTEST_5(ComplexEigenSolver<MatrixXf> tmp(s));
TEST_SET_BUT_UNUSED_VARIABLE(s)
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/geo_hyperplane.cpp
|
.cpp
| 7,556
| 198
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <Eigen/Geometry>
#include <Eigen/LU>
#include <Eigen/QR>
template<typename HyperplaneType> void hyperplane(const HyperplaneType& _plane)
{
/* this test covers the following files:
Hyperplane.h
*/
using std::abs;
const Index dim = _plane.dim();
enum { Options = HyperplaneType::Options };
typedef typename HyperplaneType::Scalar Scalar;
typedef typename HyperplaneType::RealScalar RealScalar;
typedef Matrix<Scalar, HyperplaneType::AmbientDimAtCompileTime, 1> VectorType;
typedef Matrix<Scalar, HyperplaneType::AmbientDimAtCompileTime,
HyperplaneType::AmbientDimAtCompileTime> MatrixType;
VectorType p0 = VectorType::Random(dim);
VectorType p1 = VectorType::Random(dim);
VectorType n0 = VectorType::Random(dim).normalized();
VectorType n1 = VectorType::Random(dim).normalized();
HyperplaneType pl0(n0, p0);
HyperplaneType pl1(n1, p1);
HyperplaneType pl2 = pl1;
Scalar s0 = internal::random<Scalar>();
Scalar s1 = internal::random<Scalar>();
VERIFY_IS_APPROX( n1.dot(n1), Scalar(1) );
VERIFY_IS_MUCH_SMALLER_THAN( pl0.absDistance(p0), Scalar(1) );
if(numext::abs2(s0)>RealScalar(1e-6))
VERIFY_IS_APPROX( pl1.signedDistance(p1 + n1 * s0), s0);
else
VERIFY_IS_MUCH_SMALLER_THAN( abs(pl1.signedDistance(p1 + n1 * s0) - s0), Scalar(1) );
VERIFY_IS_MUCH_SMALLER_THAN( pl1.signedDistance(pl1.projection(p0)), Scalar(1) );
VERIFY_IS_MUCH_SMALLER_THAN( pl1.absDistance(p1 + pl1.normal().unitOrthogonal() * s1), Scalar(1) );
// transform
if (!NumTraits<Scalar>::IsComplex)
{
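// apply random rotations, scalings and translations and check that the transformed
// plane still contains the transformed point and keeps a unit-length normal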
MatrixType rot = MatrixType::Random(dim,dim).householderQr().householderQ();
DiagonalMatrix<Scalar,HyperplaneType::AmbientDimAtCompileTime> scaling(VectorType::Random());
Translation<Scalar,HyperplaneType::AmbientDimAtCompileTime> translation(VectorType::Random());
while(scaling.diagonal().cwiseAbs().minCoeff()<RealScalar(1e-4)) scaling.diagonal() = VectorType::Random();
pl2 = pl1;
VERIFY_IS_MUCH_SMALLER_THAN( pl2.transform(rot).absDistance(rot * p1), Scalar(1) );
pl2 = pl1;
VERIFY_IS_MUCH_SMALLER_THAN( pl2.transform(rot,Isometry).absDistance(rot * p1), Scalar(1) );
pl2 = pl1;
VERIFY_IS_MUCH_SMALLER_THAN( pl2.transform(rot*scaling).absDistance((rot*scaling) * p1), Scalar(1) );
VERIFY_IS_APPROX( pl2.normal().norm(), RealScalar(1) );
pl2 = pl1;
VERIFY_IS_MUCH_SMALLER_THAN( pl2.transform(rot*scaling*translation)
.absDistance((rot*scaling*translation) * p1), Scalar(1) );
VERIFY_IS_APPROX( pl2.normal().norm(), RealScalar(1) );
pl2 = pl1;
VERIFY_IS_MUCH_SMALLER_THAN( pl2.transform(rot*translation,Isometry)
.absDistance((rot*translation) * p1), Scalar(1) );
VERIFY_IS_APPROX( pl2.normal().norm(), RealScalar(1) );
}
// casting
const int Dim = HyperplaneType::AmbientDimAtCompileTime;
typedef typename GetDifferentType<Scalar>::type OtherScalar;
Hyperplane<OtherScalar,Dim,Options> hp1f = pl1.template cast<OtherScalar>();
VERIFY_IS_APPROX(hp1f.template cast<Scalar>(),pl1);
Hyperplane<Scalar,Dim,Options> hp1d = pl1.template cast<Scalar>();
VERIFY_IS_APPROX(hp1d.template cast<Scalar>(),pl1);
}
template<typename Scalar> void lines()
{
using std::abs;
typedef Hyperplane<Scalar, 2> HLine;
typedef ParametrizedLine<Scalar, 2> PLine;
typedef Matrix<Scalar,2,1> Vector;
typedef Matrix<Scalar,3,1> CoeffsType;
for(int i = 0; i < 10; i++)
{
Vector center = Vector::Random();
Vector u = Vector::Random();
Vector v = Vector::Random();
Scalar a = internal::random<Scalar>();
while (abs(a-1) < Scalar(1e-4)) a = internal::random<Scalar>();
while (u.norm() < Scalar(1e-4)) u = Vector::Random();
while (v.norm() < Scalar(1e-4)) v = Vector::Random();
HLine line_u = HLine::Through(center + u, center + a*u);
HLine line_v = HLine::Through(center + v, center + a*v);
// the line equations should be normalized so that a^2+b^2=1
VERIFY_IS_APPROX(line_u.normal().norm(), Scalar(1));
VERIFY_IS_APPROX(line_v.normal().norm(), Scalar(1));
Vector result = line_u.intersection(line_v);
// the lines should intersect at the point we called "center"
if(abs(a-1) > Scalar(1e-2) && abs(v.normalized().dot(u.normalized()))<Scalar(0.9))
VERIFY_IS_APPROX(result, center);
// check conversions between two types of lines
PLine pl(line_u); // gcc 3.3 will commit suicide if we don't name this variable
HLine line_u2(pl);
CoeffsType converted_coeffs = line_u2.coeffs();
if(line_u2.normal().dot(line_u.normal())<Scalar(0))
converted_coeffs = -line_u2.coeffs();
VERIFY(line_u.coeffs().isApprox(converted_coeffs));
}
}
template<typename Scalar> void planes()
{
using std::abs;
typedef Hyperplane<Scalar, 3> Plane;
typedef Matrix<Scalar,3,1> Vector;
for(int i = 0; i < 10; i++)
{
Vector v0 = Vector::Random();
Vector v1(v0), v2(v0);
if(internal::random<double>(0,1)>0.25)
v1 += Vector::Random();
if(internal::random<double>(0,1)>0.25)
v2 += v1 * std::pow(internal::random<Scalar>(0,1),internal::random<int>(1,16));
if(internal::random<double>(0,1)>0.25)
v2 += Vector::Random() * std::pow(internal::random<Scalar>(0,1),internal::random<int>(1,16));
Plane p0 = Plane::Through(v0, v1, v2);
VERIFY_IS_APPROX(p0.normal().norm(), Scalar(1));
VERIFY_IS_MUCH_SMALLER_THAN(p0.absDistance(v0), Scalar(1));
VERIFY_IS_MUCH_SMALLER_THAN(p0.absDistance(v1), Scalar(1));
VERIFY_IS_MUCH_SMALLER_THAN(p0.absDistance(v2), Scalar(1));
}
}
template<typename Scalar> void hyperplane_alignment()
{
typedef Hyperplane<Scalar,3,AutoAlign> Plane3a;
typedef Hyperplane<Scalar,3,DontAlign> Plane3u;
EIGEN_ALIGN_MAX Scalar array1[4];
EIGEN_ALIGN_MAX Scalar array2[4];
EIGEN_ALIGN_MAX Scalar array3[4+1];
Scalar* array3u = array3+1;
Plane3a *p1 = ::new(reinterpret_cast<void*>(array1)) Plane3a;
Plane3u *p2 = ::new(reinterpret_cast<void*>(array2)) Plane3u;
Plane3u *p3 = ::new(reinterpret_cast<void*>(array3u)) Plane3u;
p1->coeffs().setRandom();
*p2 = *p1;
*p3 = *p1;
VERIFY_IS_APPROX(p1->coeffs(), p2->coeffs());
VERIFY_IS_APPROX(p1->coeffs(), p3->coeffs());
#if defined(EIGEN_VECTORIZE) && EIGEN_MAX_STATIC_ALIGN_BYTES > 0
if(internal::packet_traits<Scalar>::Vectorizable && internal::packet_traits<Scalar>::size<=4)
VERIFY_RAISES_ASSERT((::new(reinterpret_cast<void*>(array3u)) Plane3a));
#endif
}
void test_geo_hyperplane()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( hyperplane(Hyperplane<float,2>()) );
CALL_SUBTEST_2( hyperplane(Hyperplane<float,3>()) );
CALL_SUBTEST_2( hyperplane(Hyperplane<float,3,DontAlign>()) );
CALL_SUBTEST_2( hyperplane_alignment<float>() );
CALL_SUBTEST_3( hyperplane(Hyperplane<double,4>()) );
CALL_SUBTEST_4( hyperplane(Hyperplane<std::complex<double>,5>()) );
CALL_SUBTEST_1( lines<float>() );
CALL_SUBTEST_3( lines<double>() );
CALL_SUBTEST_2( planes<float>() );
CALL_SUBTEST_5( planes<double>() );
}
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/product_symm.cpp
|
.cpp
| 6,125
| 126
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
template<typename Scalar, int Size, int OtherSize> void symm(int size = Size, int othersize = OtherSize)
{
typedef Matrix<Scalar, Size, Size> MatrixType;
typedef Matrix<Scalar, Size, OtherSize> Rhs1;
typedef Matrix<Scalar, OtherSize, Size> Rhs2;
enum { order = OtherSize==1 ? 0 : RowMajor };
typedef Matrix<Scalar, Size, OtherSize,order> Rhs3;
Index rows = size;
Index cols = size;
MatrixType m1 = MatrixType::Random(rows, cols),
m2 = MatrixType::Random(rows, cols), m3;
m1 = (m1+m1.adjoint()).eval();
Rhs1 rhs1 = Rhs1::Random(cols, othersize), rhs12(cols, othersize), rhs13(cols, othersize);
Rhs2 rhs2 = Rhs2::Random(othersize, rows), rhs22(othersize, rows), rhs23(othersize, rows);
Rhs3 rhs3 = Rhs3::Random(cols, othersize), rhs32(cols, othersize), rhs33(cols, othersize);
Scalar s1 = internal::random<Scalar>(),
s2 = internal::random<Scalar>();
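// products with a selfadjointView of one triangular half must match products
// with the full self-adjoint matrix m1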
m2 = m1.template triangularView<Lower>();
m3 = m2.template selfadjointView<Lower>();
VERIFY_IS_EQUAL(m1, m3);
VERIFY_IS_APPROX(rhs12 = (s1*m2).template selfadjointView<Lower>() * (s2*rhs1),
rhs13 = (s1*m1) * (s2*rhs1));
VERIFY_IS_APPROX(rhs12 = (s1*m2).transpose().template selfadjointView<Upper>() * (s2*rhs1),
rhs13 = (s1*m1.transpose()) * (s2*rhs1));
VERIFY_IS_APPROX(rhs12 = (s1*m2).template selfadjointView<Lower>().transpose() * (s2*rhs1),
rhs13 = (s1*m1.transpose()) * (s2*rhs1));
VERIFY_IS_APPROX(rhs12 = (s1*m2).conjugate().template selfadjointView<Lower>() * (s2*rhs1),
rhs13 = (s1*m1).conjugate() * (s2*rhs1));
VERIFY_IS_APPROX(rhs12 = (s1*m2).template selfadjointView<Lower>().conjugate() * (s2*rhs1),
rhs13 = (s1*m1).conjugate() * (s2*rhs1));
VERIFY_IS_APPROX(rhs12 = (s1*m2).adjoint().template selfadjointView<Upper>() * (s2*rhs1),
rhs13 = (s1*m1).adjoint() * (s2*rhs1));
VERIFY_IS_APPROX(rhs12 = (s1*m2).template selfadjointView<Lower>().adjoint() * (s2*rhs1),
rhs13 = (s1*m1).adjoint() * (s2*rhs1));
m2 = m1.template triangularView<Upper>(); rhs12.setRandom(); rhs13 = rhs12;
m3 = m2.template selfadjointView<Upper>();
VERIFY_IS_EQUAL(m1, m3);
VERIFY_IS_APPROX(rhs12 += (s1*m2).template selfadjointView<Upper>() * (s2*rhs1),
rhs13 += (s1*m1) * (s2*rhs1));
m2 = m1.template triangularView<Lower>();
VERIFY_IS_APPROX(rhs12 = (s1*m2).template selfadjointView<Lower>() * (s2*rhs2.adjoint()),
rhs13 = (s1*m1) * (s2*rhs2.adjoint()));
m2 = m1.template triangularView<Upper>();
VERIFY_IS_APPROX(rhs12 = (s1*m2).template selfadjointView<Upper>() * (s2*rhs2.adjoint()),
rhs13 = (s1*m1) * (s2*rhs2.adjoint()));
m2 = m1.template triangularView<Upper>();
VERIFY_IS_APPROX(rhs12 = (s1*m2.adjoint()).template selfadjointView<Lower>() * (s2*rhs2.adjoint()),
rhs13 = (s1*m1.adjoint()) * (s2*rhs2.adjoint()));
// test row major = <...>
m2 = m1.template triangularView<Lower>(); rhs32.setRandom(); rhs13 = rhs32;
VERIFY_IS_APPROX(rhs32.noalias() -= (s1*m2).template selfadjointView<Lower>() * (s2*rhs3),
rhs13 -= (s1*m1) * (s2 * rhs3));
m2 = m1.template triangularView<Upper>();
VERIFY_IS_APPROX(rhs32.noalias() = (s1*m2.adjoint()).template selfadjointView<Lower>() * (s2*rhs3).conjugate(),
rhs13 = (s1*m1.adjoint()) * (s2*rhs3).conjugate());
m2 = m1.template triangularView<Upper>(); rhs13 = rhs12;
VERIFY_IS_APPROX(rhs12.noalias() += s1 * ((m2.adjoint()).template selfadjointView<Lower>() * (s2*rhs3).conjugate()),
rhs13 += (s1*m1.adjoint()) * (s2*rhs3).conjugate());
m2 = m1.template triangularView<Lower>();
VERIFY_IS_APPROX(rhs22 = (rhs2) * (m2).template selfadjointView<Lower>(), rhs23 = (rhs2) * (m1));
VERIFY_IS_APPROX(rhs22 = (s2*rhs2) * (s1*m2).template selfadjointView<Lower>(), rhs23 = (s2*rhs2) * (s1*m1));
// destination with a non-default inner-stride
// see bug 1741
{
typedef Matrix<Scalar,Dynamic,Dynamic> MatrixX;
MatrixX buffer(2*cols,2*othersize);
Map<Rhs1,0,Stride<Dynamic,2> > map1(buffer.data(),cols,othersize,Stride<Dynamic,2>(2*rows,2));
buffer.setZero();
VERIFY_IS_APPROX( map1.noalias() = (s1*m2).template selfadjointView<Lower>() * (s2*rhs1),
rhs13 = (s1*m1) * (s2*rhs1));
Map<Rhs2,0,Stride<Dynamic,2> > map2(buffer.data(),rhs22.rows(),rhs22.cols(),Stride<Dynamic,2>(2*rhs22.outerStride(),2));
buffer.setZero();
VERIFY_IS_APPROX(map2 = (rhs2) * (m2).template selfadjointView<Lower>(), rhs23 = (rhs2) * (m1));
}
}
void test_product_symm()
{
for(int i = 0; i < g_repeat ; i++)
{
CALL_SUBTEST_1(( symm<float,Dynamic,Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE),internal::random<int>(1,EIGEN_TEST_MAX_SIZE)) ));
CALL_SUBTEST_2(( symm<double,Dynamic,Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE),internal::random<int>(1,EIGEN_TEST_MAX_SIZE)) ));
CALL_SUBTEST_3(( symm<std::complex<float>,Dynamic,Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2),internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2)) ));
CALL_SUBTEST_4(( symm<std::complex<double>,Dynamic,Dynamic>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2),internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2)) ));
CALL_SUBTEST_5(( symm<float,Dynamic,1>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE)) ));
CALL_SUBTEST_6(( symm<double,Dynamic,1>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE)) ));
CALL_SUBTEST_7(( symm<std::complex<float>,Dynamic,1>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE)) ));
CALL_SUBTEST_8(( symm<std::complex<double>,Dynamic,1>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE)) ));
}
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/array_of_string.cpp
|
.cpp
| 953
| 33
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
void test_array_of_string()
{
typedef Array<std::string,1,Dynamic> ArrayXs;
ArrayXs a1(3), a2(3), a3(3), a3ref(3);
a1 << "one", "two", "three";
a2 << "1", "2", "3";
a3ref << "one (1)", "two (2)", "three (3)";
std::stringstream s1;
s1 << a1;
VERIFY_IS_EQUAL(s1.str(), std::string(" one two three"));
a3 = a1 + std::string(" (") + a2 + std::string(")");
VERIFY((a3==a3ref).all());
a3 = a1;
a3 += std::string(" (") + a2 + std::string(")");
VERIFY((a3==a3ref).all());
a1.swap(a3);
VERIFY((a1==a3ref).all());
VERIFY((a3!=a3ref).all());
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/commainitializer.cpp
|
.cpp
| 2,923
| 107
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
template<int M1, int M2, int N1, int N2>
void test_blocks()
{
Matrix<int, M1+M2, N1+N2> m_fixed;
MatrixXi m_dynamic(M1+M2, N1+N2);
Matrix<int, M1, N1> mat11; mat11.setRandom();
Matrix<int, M1, N2> mat12; mat12.setRandom();
Matrix<int, M2, N1> mat21; mat21.setRandom();
Matrix<int, M2, N2> mat22; mat22.setRandom();
MatrixXi matx11 = mat11, matx12 = mat12, matx21 = mat21, matx22 = mat22;
{
VERIFY_IS_EQUAL((m_fixed << mat11, mat12, mat21, matx22).finished(), (m_dynamic << mat11, matx12, mat21, matx22).finished());
VERIFY_IS_EQUAL((m_fixed.template topLeftCorner<M1,N1>()), mat11);
VERIFY_IS_EQUAL((m_fixed.template topRightCorner<M1,N2>()), mat12);
VERIFY_IS_EQUAL((m_fixed.template bottomLeftCorner<M2,N1>()), mat21);
VERIFY_IS_EQUAL((m_fixed.template bottomRightCorner<M2,N2>()), mat22);
VERIFY_IS_EQUAL((m_fixed << mat12, mat11, matx21, mat22).finished(), (m_dynamic << mat12, matx11, matx21, mat22).finished());
}
if(N1 > 0)
{
VERIFY_RAISES_ASSERT((m_fixed << mat11, mat12, mat11, mat21, mat22));
VERIFY_RAISES_ASSERT((m_fixed << mat11, mat12, mat21, mat21, mat22));
}
else
{
// allow insertion of zero-column blocks:
VERIFY_IS_EQUAL((m_fixed << mat11, mat12, mat11, mat11, mat21, mat21, mat22).finished(), (m_dynamic << mat12, mat22).finished());
}
if(M1 != M2)
{
VERIFY_RAISES_ASSERT((m_fixed << mat11, mat21, mat12, mat22));
}
}
template<int N>
struct test_block_recursion
{
static void run()
{
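// decode the four block dimensions (each in 0..3) from two bits of N each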
test_blocks<(N>>6)&3, (N>>4)&3, (N>>2)&3, N & 3>();
test_block_recursion<N-1>::run();
}
};
template<>
struct test_block_recursion<-1>
{
static void run() { }
};
void test_commainitializer()
{
Matrix3d m3;
Matrix4d m4;
VERIFY_RAISES_ASSERT( (m3 << 1, 2, 3, 4, 5, 6, 7, 8) );
#ifndef _MSC_VER
VERIFY_RAISES_ASSERT( (m3 << 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) );
#endif
double data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
Matrix3d ref = Map<Matrix<double,3,3,RowMajor> >(data);
m3 = Matrix3d::Random();
m3 << 1, 2, 3, 4, 5, 6, 7, 8, 9;
VERIFY_IS_APPROX(m3, ref );
Vector3d vec[3];
vec[0] << 1, 4, 7;
vec[1] << 2, 5, 8;
vec[2] << 3, 6, 9;
m3 = Matrix3d::Random();
m3 << vec[0], vec[1], vec[2];
VERIFY_IS_APPROX(m3, ref);
vec[0] << 1, 2, 3;
vec[1] << 4, 5, 6;
vec[2] << 7, 8, 9;
m3 = Matrix3d::Random();
m3 << vec[0].transpose(),
4, 5, 6,
vec[2].transpose();
VERIFY_IS_APPROX(m3, ref);
// recursively test all block-sizes from 0 to 3:
test_block_recursion<(1<<8) - 1>();
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/product_small.cpp
|
.cpp
| 11,857
| 294
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_NO_STATIC_ASSERT
#include "product.h"
#include <Eigen/LU>
// regression test for bug 447
template<int>
void product1x1()
{
Matrix<float,1,3> matAstatic;
Matrix<float,3,1> matBstatic;
matAstatic.setRandom();
matBstatic.setRandom();
VERIFY_IS_APPROX( (matAstatic * matBstatic).coeff(0,0),
matAstatic.cwiseProduct(matBstatic.transpose()).sum() );
MatrixXf matAdynamic(1,3);
MatrixXf matBdynamic(3,1);
matAdynamic.setRandom();
matBdynamic.setRandom();
VERIFY_IS_APPROX( (matAdynamic * matBdynamic).coeff(0,0),
matAdynamic.cwiseProduct(matBdynamic.transpose()).sum() );
}
template<typename TC, typename TA, typename TB>
const TC& ref_prod(TC &C, const TA &A, const TB &B)
{
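// plain triple-loop reference implementation accumulating C += A*B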
for(Index i=0;i<C.rows();++i)
for(Index j=0;j<C.cols();++j)
for(Index k=0;k<A.cols();++k)
C.coeffRef(i,j) += A.coeff(i,k) * B.coeff(k,j);
return C;
}
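// The two overloads below run the check only for valid layout combinations: Eigen
// requires a compile-time row-vector to be RowMajor and a column-vector to be ColMajor,
// so the invalid shapes get the empty overload instead.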
template<typename T, int Rows, int Cols, int Depth, int OC, int OA, int OB>
typename internal::enable_if<! ( (Rows ==1&&Depth!=1&&OA==ColMajor)
|| (Depth==1&&Rows !=1&&OA==RowMajor)
|| (Cols ==1&&Depth!=1&&OB==RowMajor)
|| (Depth==1&&Cols !=1&&OB==ColMajor)
|| (Rows ==1&&Cols !=1&&OC==ColMajor)
|| (Cols ==1&&Rows !=1&&OC==RowMajor)),void>::type
test_lazy_single(int rows, int cols, int depth)
{
Matrix<T,Rows,Depth,OA> A(rows,depth); A.setRandom();
Matrix<T,Depth,Cols,OB> B(depth,cols); B.setRandom();
Matrix<T,Rows,Cols,OC> C(rows,cols); C.setRandom();
Matrix<T,Rows,Cols,OC> D(C);
VERIFY_IS_APPROX(C+=A.lazyProduct(B), ref_prod(D,A,B));
}
template<typename T, int Rows, int Cols, int Depth, int OC, int OA, int OB>
typename internal::enable_if< ( (Rows ==1&&Depth!=1&&OA==ColMajor)
|| (Depth==1&&Rows !=1&&OA==RowMajor)
|| (Cols ==1&&Depth!=1&&OB==RowMajor)
|| (Depth==1&&Cols !=1&&OB==ColMajor)
|| (Rows ==1&&Cols !=1&&OC==ColMajor)
|| (Cols ==1&&Rows !=1&&OC==RowMajor)),void>::type
test_lazy_single(int, int, int)
{
}
template<typename T, int Rows, int Cols, int Depth>
void test_lazy_all_layout(int rows=Rows, int cols=Cols, int depth=Depth)
{
CALL_SUBTEST(( test_lazy_single<T,Rows,Cols,Depth,ColMajor,ColMajor,ColMajor>(rows,cols,depth) ));
CALL_SUBTEST(( test_lazy_single<T,Rows,Cols,Depth,RowMajor,ColMajor,ColMajor>(rows,cols,depth) ));
CALL_SUBTEST(( test_lazy_single<T,Rows,Cols,Depth,ColMajor,RowMajor,ColMajor>(rows,cols,depth) ));
CALL_SUBTEST(( test_lazy_single<T,Rows,Cols,Depth,RowMajor,RowMajor,ColMajor>(rows,cols,depth) ));
CALL_SUBTEST(( test_lazy_single<T,Rows,Cols,Depth,ColMajor,ColMajor,RowMajor>(rows,cols,depth) ));
CALL_SUBTEST(( test_lazy_single<T,Rows,Cols,Depth,RowMajor,ColMajor,RowMajor>(rows,cols,depth) ));
CALL_SUBTEST(( test_lazy_single<T,Rows,Cols,Depth,ColMajor,RowMajor,RowMajor>(rows,cols,depth) ));
CALL_SUBTEST(( test_lazy_single<T,Rows,Cols,Depth,RowMajor,RowMajor,RowMajor>(rows,cols,depth) ));
}
template<typename T>
void test_lazy_l1()
{
int rows = internal::random<int>(1,12);
int cols = internal::random<int>(1,12);
int depth = internal::random<int>(1,12);
// Inner
CALL_SUBTEST(( test_lazy_all_layout<T,1,1,1>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,1,1,2>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,1,1,3>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,1,1,8>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,1,1,9>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,1,1,-1>(1,1,depth) ));
// Outer
CALL_SUBTEST(( test_lazy_all_layout<T,2,1,1>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,1,2,1>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,2,2,1>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,3,3,1>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,4,4,1>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,4,8,1>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,4,-1,1>(4,cols) ));
CALL_SUBTEST(( test_lazy_all_layout<T,7,-1,1>(7,cols) ));
CALL_SUBTEST(( test_lazy_all_layout<T,-1,8,1>(rows) ));
CALL_SUBTEST(( test_lazy_all_layout<T,-1,3,1>(rows) ));
CALL_SUBTEST(( test_lazy_all_layout<T,-1,-1,1>(rows,cols) ));
}
template<typename T>
void test_lazy_l2()
{
int rows = internal::random<int>(1,12);
int cols = internal::random<int>(1,12);
int depth = internal::random<int>(1,12);
// mat-vec
CALL_SUBTEST(( test_lazy_all_layout<T,2,1,2>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,2,1,4>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,4,1,2>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,4,1,4>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,5,1,4>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,4,1,5>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,4,1,6>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,6,1,4>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,8,1,8>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,-1,1,4>(rows) ));
CALL_SUBTEST(( test_lazy_all_layout<T,4,1,-1>(4,1,depth) ));
CALL_SUBTEST(( test_lazy_all_layout<T,-1,1,-1>(rows,1,depth) ));
// vec-mat
CALL_SUBTEST(( test_lazy_all_layout<T,1,2,2>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,1,2,4>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,1,4,2>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,1,4,4>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,1,5,4>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,1,4,5>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,1,4,6>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,1,6,4>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,1,8,8>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,1,-1, 4>(1,cols) ));
CALL_SUBTEST(( test_lazy_all_layout<T,1, 4,-1>(1,4,depth) ));
CALL_SUBTEST(( test_lazy_all_layout<T,1,-1,-1>(1,cols,depth) ));
}
template<typename T>
void test_lazy_l3()
{
int rows = internal::random<int>(1,12);
int cols = internal::random<int>(1,12);
int depth = internal::random<int>(1,12);
// mat-mat
CALL_SUBTEST(( test_lazy_all_layout<T,2,4,2>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,2,6,4>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,4,3,2>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,4,8,4>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,5,6,4>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,4,2,5>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,4,7,6>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,6,8,4>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,8,3,8>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,-1,6,4>(rows) ));
CALL_SUBTEST(( test_lazy_all_layout<T,4,3,-1>(4,3,depth) ));
CALL_SUBTEST(( test_lazy_all_layout<T,-1,6,-1>(rows,6,depth) ));
CALL_SUBTEST(( test_lazy_all_layout<T,8,2,2>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,5,2,4>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,4,4,2>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,8,4,4>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,6,5,4>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,4,4,5>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,3,4,6>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,2,6,4>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,7,8,8>() ));
CALL_SUBTEST(( test_lazy_all_layout<T,8,-1, 4>(8,cols) ));
CALL_SUBTEST(( test_lazy_all_layout<T,3, 4,-1>(3,4,depth) ));
CALL_SUBTEST(( test_lazy_all_layout<T,4,-1,-1>(4,cols,depth) ));
}
template<typename T,int N,int M,int K>
void test_linear_but_not_vectorizable()
{
// Check tricky cases for which the result of the product is a vector and thus must exhibit the LinearBit flag,
// but is not vectorizable along the linear dimension.
Index n = N==Dynamic ? internal::random<Index>(1,32) : N;
Index m = M==Dynamic ? internal::random<Index>(1,32) : M;
Index k = K==Dynamic ? internal::random<Index>(1,32) : K;
{
Matrix<T,N,M+1> A; A.setRandom(n,m+1);
Matrix<T,M*2,K> B; B.setRandom(m*2,k);
Matrix<T,1,K> C;
Matrix<T,1,K> R;
C.noalias() = A.template topLeftCorner<1,M>() * (B.template topRows<M>()+B.template bottomRows<M>());
R.noalias() = A.template topLeftCorner<1,M>() * (B.template topRows<M>()+B.template bottomRows<M>()).eval();
VERIFY_IS_APPROX(C,R);
}
{
Matrix<T,M+1,N,RowMajor> A; A.setRandom(m+1,n);
Matrix<T,K,M*2,RowMajor> B; B.setRandom(k,m*2);
Matrix<T,K,1> C;
Matrix<T,K,1> R;
C.noalias() = (B.template leftCols<M>()+B.template rightCols<M>()) * A.template topLeftCorner<M,1>();
R.noalias() = (B.template leftCols<M>()+B.template rightCols<M>()).eval() * A.template topLeftCorner<M,1>();
VERIFY_IS_APPROX(C,R);
}
}
template<int Rows>
void bug_1311()
{
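// regression test: products where an operand is wrapped in a scalar multiple (1.*A)
// must still produce the plain product A*b, whether evaluated via noalias() or lazyProduct()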
Matrix< double, Rows, 2 > A; A.setRandom();
Vector2d b = Vector2d::Random() ;
Matrix<double,Rows,1> res;
res.noalias() = 1. * (A * b);
VERIFY_IS_APPROX(res, A*b);
res.noalias() = 1.*A * b;
VERIFY_IS_APPROX(res, A*b);
res.noalias() = (1.*A).lazyProduct(b);
VERIFY_IS_APPROX(res, A*b);
res.noalias() = (1.*A).lazyProduct(1.*b);
VERIFY_IS_APPROX(res, A*b);
res.noalias() = (A).lazyProduct(1.*b);
VERIFY_IS_APPROX(res, A*b);
}
void test_product_small()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( product(Matrix<float, 3, 2>()) );
CALL_SUBTEST_2( product(Matrix<int, 3, 17>()) );
CALL_SUBTEST_8( product(Matrix<double, 3, 17>()) );
CALL_SUBTEST_3( product(Matrix3d()) );
CALL_SUBTEST_4( product(Matrix4d()) );
CALL_SUBTEST_5( product(Matrix4f()) );
CALL_SUBTEST_6( product1x1<0>() );
CALL_SUBTEST_11( test_lazy_l1<float>() );
CALL_SUBTEST_12( test_lazy_l2<float>() );
CALL_SUBTEST_13( test_lazy_l3<float>() );
CALL_SUBTEST_21( test_lazy_l1<double>() );
CALL_SUBTEST_22( test_lazy_l2<double>() );
CALL_SUBTEST_23( test_lazy_l3<double>() );
CALL_SUBTEST_31( test_lazy_l1<std::complex<float> >() );
CALL_SUBTEST_32( test_lazy_l2<std::complex<float> >() );
CALL_SUBTEST_33( test_lazy_l3<std::complex<float> >() );
CALL_SUBTEST_41( test_lazy_l1<std::complex<double> >() );
CALL_SUBTEST_42( test_lazy_l2<std::complex<double> >() );
CALL_SUBTEST_43( test_lazy_l3<std::complex<double> >() );
CALL_SUBTEST_7(( test_linear_but_not_vectorizable<float,2,1,Dynamic>() ));
CALL_SUBTEST_7(( test_linear_but_not_vectorizable<float,3,1,Dynamic>() ));
CALL_SUBTEST_7(( test_linear_but_not_vectorizable<float,2,1,16>() ));
CALL_SUBTEST_6( bug_1311<3>() );
CALL_SUBTEST_6( bug_1311<5>() );
}
#ifdef EIGEN_TEST_PART_6
{
// test compilation of (outer_product) * vector
Vector3f v = Vector3f::Random();
VERIFY_IS_APPROX( (v * v.transpose()) * v, (v * v.transpose()).eval() * v);
}
{
// regression test for pull-request #93
Eigen::Matrix<double, 1, 1> A; A.setRandom();
Eigen::Matrix<double, 18, 1> B; B.setRandom();
Eigen::Matrix<double, 1, 18> C; C.setRandom();
VERIFY_IS_APPROX(B * A.inverse(), B * A.inverse()[0]);
VERIFY_IS_APPROX(A.inverse() * C, A.inverse()[0] * C);
}
{
Eigen::Matrix<double, 10, 10> A, B, C;
A.setRandom();
C = A;
for(int k=0; k<79; ++k)
C = C * A;
B.noalias() = (((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)) * ((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)))
* (((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)) * ((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A))*((A*A)*(A*A)));
VERIFY_IS_APPROX(B,C);
}
#endif
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/ref.cpp
|
.cpp
| 11,718
| 295
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2013 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
// This unit test cannot be easily written to work with EIGEN_DEFAULT_TO_ROW_MAJOR
#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
#undef EIGEN_DEFAULT_TO_ROW_MAJOR
#endif
#define TEST_ENABLE_TEMPORARY_TRACKING
#define TEST_CHECK_STATIC_ASSERTIONS
#include "main.h"
// test Ref.h
// Deal with i387 extended precision
#if EIGEN_ARCH_i386 && !(EIGEN_ARCH_x86_64)
#if EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_LEAST(4,4)
#pragma GCC optimize ("-ffloat-store")
#else
#undef VERIFY_IS_EQUAL
#define VERIFY_IS_EQUAL(X,Y) VERIFY_IS_APPROX(X,Y)
#endif
#endif
template<typename MatrixType> void ref_matrix(const MatrixType& m)
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Matrix<Scalar,Dynamic,Dynamic,MatrixType::Options> DynMatrixType;
typedef Matrix<RealScalar,Dynamic,Dynamic,MatrixType::Options> RealDynMatrixType;
typedef Ref<MatrixType> RefMat;
typedef Ref<DynMatrixType> RefDynMat;
typedef Ref<const DynMatrixType> ConstRefDynMat;
typedef Ref<RealDynMatrixType , 0, Stride<Dynamic,Dynamic> > RefRealMatWithStride;
Index rows = m.rows(), cols = m.cols();
MatrixType m1 = MatrixType::Random(rows, cols),
m2 = m1;
Index i = internal::random<Index>(0,rows-1);
Index j = internal::random<Index>(0,cols-1);
Index brows = internal::random<Index>(1,rows-i);
Index bcols = internal::random<Index>(1,cols-j);
RefMat rm0 = m1;
VERIFY_IS_EQUAL(rm0, m1);
RefDynMat rm1 = m1;
VERIFY_IS_EQUAL(rm1, m1);
RefDynMat rm2 = m1.block(i,j,brows,bcols);
VERIFY_IS_EQUAL(rm2, m1.block(i,j,brows,bcols));
rm2.setOnes();
m2.block(i,j,brows,bcols).setOnes();
VERIFY_IS_EQUAL(m1, m2);
m2.block(i,j,brows,bcols).setRandom();
rm2 = m2.block(i,j,brows,bcols);
VERIFY_IS_EQUAL(m1, m2);
ConstRefDynMat rm3 = m1.block(i,j,brows,bcols);
m1.block(i,j,brows,bcols) *= 2;
m2.block(i,j,brows,bcols) *= 2;
VERIFY_IS_EQUAL(rm3, m2.block(i,j,brows,bcols));
RefRealMatWithStride rm4 = m1.real();
VERIFY_IS_EQUAL(rm4, m2.real());
rm4.array() += 1;
m2.real().array() += 1;
VERIFY_IS_EQUAL(m1, m2);
}
template<typename VectorType> void ref_vector(const VectorType& m)
{
typedef typename VectorType::Scalar Scalar;
typedef typename VectorType::RealScalar RealScalar;
typedef Matrix<Scalar,Dynamic,1,VectorType::Options> DynMatrixType;
typedef Matrix<Scalar,Dynamic,Dynamic,ColMajor> MatrixType;
typedef Matrix<RealScalar,Dynamic,1,VectorType::Options> RealDynMatrixType;
typedef Ref<VectorType> RefMat;
typedef Ref<DynMatrixType> RefDynMat;
typedef Ref<const DynMatrixType> ConstRefDynMat;
typedef Ref<RealDynMatrixType , 0, InnerStride<> > RefRealMatWithStride;
typedef Ref<DynMatrixType , 0, InnerStride<> > RefMatWithStride;
Index size = m.size();
VectorType v1 = VectorType::Random(size),
v2 = v1;
MatrixType mat1 = MatrixType::Random(size,size),
mat2 = mat1,
mat3 = MatrixType::Random(size,size);
Index i = internal::random<Index>(0,size-1);
Index bsize = internal::random<Index>(1,size-i);
{ RefMat rm0 = v1; VERIFY_IS_EQUAL(rm0, v1); }
{ RefMat rm0 = v1.block(0,0,size,1); VERIFY_IS_EQUAL(rm0, v1); }
{ RefDynMat rv1 = v1; VERIFY_IS_EQUAL(rv1, v1); }
{ RefDynMat rv1 = v1.block(0,0,size,1); VERIFY_IS_EQUAL(rv1, v1); }
{ VERIFY_RAISES_ASSERT( RefMat rm0 = v1.block(0, 0, size, 0); EIGEN_UNUSED_VARIABLE(rm0); ); }
if(VectorType::SizeAtCompileTime!=1)
{ VERIFY_RAISES_ASSERT( RefDynMat rv1 = v1.block(0, 0, size, 0); EIGEN_UNUSED_VARIABLE(rv1); ); }
RefDynMat rv2 = v1.segment(i,bsize);
VERIFY_IS_EQUAL(rv2, v1.segment(i,bsize));
rv2.setOnes();
v2.segment(i,bsize).setOnes();
VERIFY_IS_EQUAL(v1, v2);
v2.segment(i,bsize).setRandom();
rv2 = v2.segment(i,bsize);
VERIFY_IS_EQUAL(v1, v2);
ConstRefDynMat rm3 = v1.segment(i,bsize);
v1.segment(i,bsize) *= 2;
v2.segment(i,bsize) *= 2;
VERIFY_IS_EQUAL(rm3, v2.segment(i,bsize));
RefRealMatWithStride rm4 = v1.real();
VERIFY_IS_EQUAL(rm4, v2.real());
rm4.array() += 1;
v2.real().array() += 1;
VERIFY_IS_EQUAL(v1, v2);
RefMatWithStride rm5 = mat1.row(i).transpose();
VERIFY_IS_EQUAL(rm5, mat1.row(i).transpose());
rm5.array() += 1;
mat2.row(i).array() += 1;
VERIFY_IS_EQUAL(mat1, mat2);
rm5.noalias() = rm4.transpose() * mat3;
mat2.row(i) = v2.real().transpose() * mat3;
VERIFY_IS_APPROX(mat1, mat2);
}
template<typename PlainObjectType> void check_const_correctness(const PlainObjectType&)
{
// verify that Ref-to-const does not have the LvalueBit
typedef typename internal::add_const<PlainObjectType>::type ConstPlainObjectType;
VERIFY( !(internal::traits<Ref<ConstPlainObjectType> >::Flags & LvalueBit) );
VERIFY( !(internal::traits<Ref<ConstPlainObjectType, Aligned> >::Flags & LvalueBit) );
VERIFY( !(Ref<ConstPlainObjectType>::Flags & LvalueBit) );
VERIFY( !(Ref<ConstPlainObjectType, Aligned>::Flags & LvalueBit) );
}
template<typename B>
EIGEN_DONT_INLINE void call_ref_1(Ref<VectorXf> a, const B &b) { VERIFY_IS_EQUAL(a,b); }
template<typename B>
EIGEN_DONT_INLINE void call_ref_2(const Ref<const VectorXf>& a, const B &b) { VERIFY_IS_EQUAL(a,b); }
template<typename B>
EIGEN_DONT_INLINE void call_ref_3(Ref<VectorXf,0,InnerStride<> > a, const B &b) { VERIFY_IS_EQUAL(a,b); }
template<typename B>
EIGEN_DONT_INLINE void call_ref_4(const Ref<const VectorXf,0,InnerStride<> >& a, const B &b) { VERIFY_IS_EQUAL(a,b); }
template<typename B>
EIGEN_DONT_INLINE void call_ref_5(Ref<MatrixXf,0,OuterStride<> > a, const B &b) { VERIFY_IS_EQUAL(a,b); }
template<typename B>
EIGEN_DONT_INLINE void call_ref_6(const Ref<const MatrixXf,0,OuterStride<> >& a, const B &b) { VERIFY_IS_EQUAL(a,b); }
template<typename B>
EIGEN_DONT_INLINE void call_ref_7(Ref<Matrix<float,Dynamic,3> > a, const B &b) { VERIFY_IS_EQUAL(a,b); }
void call_ref()
{
VectorXcf ca = VectorXcf::Random(10);
VectorXf a = VectorXf::Random(10);
RowVectorXf b = RowVectorXf::Random(10);
MatrixXf A = MatrixXf::Random(10,10);
RowVector3f c = RowVector3f::Random();
const VectorXf& ac(a);
VectorBlock<VectorXf> ab(a,0,3);
const VectorBlock<VectorXf> abc(a,0,3);
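// expressions that are directly mappable (contiguous data, compatible stride) must bind
// to Ref without any temporary; the expected number of temporaries is checked for each call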
VERIFY_EVALUATION_COUNT( call_ref_1(a,a), 0);
VERIFY_EVALUATION_COUNT( call_ref_1(b,b.transpose()), 0);
// call_ref_1(ac,ac); // does not compile because ac is const
VERIFY_EVALUATION_COUNT( call_ref_1(ab,ab), 0);
VERIFY_EVALUATION_COUNT( call_ref_1(a.head(4),a.head(4)), 0);
VERIFY_EVALUATION_COUNT( call_ref_1(abc,abc), 0);
VERIFY_EVALUATION_COUNT( call_ref_1(A.col(3),A.col(3)), 0);
// call_ref_1(A.row(3),A.row(3)); // does not compile because innerstride!=1
VERIFY_EVALUATION_COUNT( call_ref_3(A.row(3),A.row(3).transpose()), 0);
VERIFY_EVALUATION_COUNT( call_ref_4(A.row(3),A.row(3).transpose()), 0);
// call_ref_1(a+a, a+a); // does not compile for obvious reason
MatrixXf tmp = A*A.col(1);
VERIFY_EVALUATION_COUNT( call_ref_2(A*A.col(1), tmp), 1); // evaluated into a temp
VERIFY_EVALUATION_COUNT( call_ref_2(ac.head(5),ac.head(5)), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(ac,ac), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(a,a), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(ab,ab), 0);
VERIFY_EVALUATION_COUNT( call_ref_2(a.head(4),a.head(4)), 0);
tmp = a+a;
VERIFY_EVALUATION_COUNT( call_ref_2(a+a,tmp), 1); // evaluated into a temp
VERIFY_EVALUATION_COUNT( call_ref_2(ca.imag(),ca.imag()), 1); // evaluated into a temp
VERIFY_EVALUATION_COUNT( call_ref_4(ac.head(5),ac.head(5)), 0);
tmp = a+a;
VERIFY_EVALUATION_COUNT( call_ref_4(a+a,tmp), 1); // evaluated into a temp
VERIFY_EVALUATION_COUNT( call_ref_4(ca.imag(),ca.imag()), 0);
VERIFY_EVALUATION_COUNT( call_ref_5(a,a), 0);
VERIFY_EVALUATION_COUNT( call_ref_5(a.head(3),a.head(3)), 0);
VERIFY_EVALUATION_COUNT( call_ref_5(A,A), 0);
// call_ref_5(A.transpose(),A.transpose()); // does not compile because storage order does not match
VERIFY_EVALUATION_COUNT( call_ref_5(A.block(1,1,2,2),A.block(1,1,2,2)), 0);
VERIFY_EVALUATION_COUNT( call_ref_5(b,b), 0); // storage orders do not match, but this is a degenerate case that should work
VERIFY_EVALUATION_COUNT( call_ref_5(a.row(3),a.row(3)), 0);
VERIFY_EVALUATION_COUNT( call_ref_6(a,a), 0);
VERIFY_EVALUATION_COUNT( call_ref_6(a.head(3),a.head(3)), 0);
VERIFY_EVALUATION_COUNT( call_ref_6(A.row(3),A.row(3)), 1); // evaluated into a temp though it could be avoided by viewing it as a 1xn matrix
tmp = A+A;
VERIFY_EVALUATION_COUNT( call_ref_6(A+A,tmp), 1); // evaluated into a temp
VERIFY_EVALUATION_COUNT( call_ref_6(A,A), 0);
VERIFY_EVALUATION_COUNT( call_ref_6(A.transpose(),A.transpose()), 1); // evaluated into a temp because the storage orders do not match
VERIFY_EVALUATION_COUNT( call_ref_6(A.block(1,1,2,2),A.block(1,1,2,2)), 0);
VERIFY_EVALUATION_COUNT( call_ref_7(c,c), 0);
}
typedef Matrix<double,Dynamic,Dynamic,RowMajor> RowMatrixXd;
int test_ref_overload_fun1(Ref<MatrixXd> ) { return 1; }
int test_ref_overload_fun1(Ref<RowMatrixXd> ) { return 2; }
int test_ref_overload_fun1(Ref<MatrixXf> ) { return 3; }
int test_ref_overload_fun2(Ref<const MatrixXd> ) { return 4; }
int test_ref_overload_fun2(Ref<const MatrixXf> ) { return 5; }
void test_ref_ambiguous(const Ref<const ArrayXd> &A, Ref<ArrayXd> B)
{
B = A;
B = A - A;
}
// See also bug 969
void test_ref_overloads()
{
MatrixXd Ad, Bd;
RowMatrixXd rAd, rBd;
VERIFY( test_ref_overload_fun1(Ad)==1 );
VERIFY( test_ref_overload_fun1(rAd)==2 );
MatrixXf Af, Bf;
VERIFY( test_ref_overload_fun2(Ad)==4 );
VERIFY( test_ref_overload_fun2(Ad+Bd)==4 );
VERIFY( test_ref_overload_fun2(Af+Bf)==5 );
ArrayXd A, B;
test_ref_ambiguous(A, B);
}
void test_ref_fixed_size_assert()
{
Vector4f v4;
VectorXf vx(10);
VERIFY_RAISES_STATIC_ASSERT( Ref<Vector3f> y = v4; (void)y; );
VERIFY_RAISES_STATIC_ASSERT( Ref<Vector3f> y = vx.head<4>(); (void)y; );
VERIFY_RAISES_STATIC_ASSERT( Ref<const Vector3f> y = v4; (void)y; );
VERIFY_RAISES_STATIC_ASSERT( Ref<const Vector3f> y = vx.head<4>(); (void)y; );
VERIFY_RAISES_STATIC_ASSERT( Ref<const Vector3f> y = 2*v4; (void)y; );
}
void test_ref()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( ref_vector(Matrix<float, 1, 1>()) );
CALL_SUBTEST_1( check_const_correctness(Matrix<float, 1, 1>()) );
CALL_SUBTEST_2( ref_vector(Vector4d()) );
CALL_SUBTEST_2( check_const_correctness(Matrix4d()) );
CALL_SUBTEST_3( ref_vector(Vector4cf()) );
CALL_SUBTEST_4( ref_vector(VectorXcf(8)) );
CALL_SUBTEST_5( ref_vector(VectorXi(12)) );
CALL_SUBTEST_5( check_const_correctness(VectorXi(12)) );
CALL_SUBTEST_1( ref_matrix(Matrix<float, 1, 1>()) );
CALL_SUBTEST_2( ref_matrix(Matrix4d()) );
CALL_SUBTEST_1( ref_matrix(Matrix<float,3,5>()) );
CALL_SUBTEST_4( ref_matrix(MatrixXcf(internal::random<int>(1,10),internal::random<int>(1,10))) );
CALL_SUBTEST_4( ref_matrix(Matrix<std::complex<double>,10,15>()) );
CALL_SUBTEST_5( ref_matrix(MatrixXi(internal::random<int>(1,10),internal::random<int>(1,10))) );
CALL_SUBTEST_6( call_ref() );
}
CALL_SUBTEST_7( test_ref_overloads() );
CALL_SUBTEST_7( test_ref_fixed_size_assert() );
}
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/eigensolver_generic.cpp
|
.cpp
| 6,172
| 166
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <limits>
#include <Eigen/Eigenvalues>
template<typename MatrixType> void eigensolver(const MatrixType& m)
{
/* this test covers the following files:
EigenSolver.h
*/
Index rows = m.rows();
Index cols = m.cols();
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<RealScalar, MatrixType::RowsAtCompileTime, 1> RealVectorType;
typedef typename std::complex<typename NumTraits<typename MatrixType::Scalar>::Real> Complex;
MatrixType a = MatrixType::Random(rows,cols);
MatrixType a1 = MatrixType::Random(rows,cols);
MatrixType symmA = a.adjoint() * a + a1.adjoint() * a1;
EigenSolver<MatrixType> ei0(symmA);
VERIFY_IS_EQUAL(ei0.info(), Success);
VERIFY_IS_APPROX(symmA * ei0.pseudoEigenvectors(), ei0.pseudoEigenvectors() * ei0.pseudoEigenvalueMatrix());
VERIFY_IS_APPROX((symmA.template cast<Complex>()) * (ei0.pseudoEigenvectors().template cast<Complex>()),
(ei0.pseudoEigenvectors().template cast<Complex>()) * (ei0.eigenvalues().asDiagonal()));
EigenSolver<MatrixType> ei1(a);
VERIFY_IS_EQUAL(ei1.info(), Success);
VERIFY_IS_APPROX(a * ei1.pseudoEigenvectors(), ei1.pseudoEigenvectors() * ei1.pseudoEigenvalueMatrix());
VERIFY_IS_APPROX(a.template cast<Complex>() * ei1.eigenvectors(),
ei1.eigenvectors() * ei1.eigenvalues().asDiagonal());
VERIFY_IS_APPROX(ei1.eigenvectors().colwise().norm(), RealVectorType::Ones(rows).transpose());
VERIFY_IS_APPROX(a.eigenvalues(), ei1.eigenvalues());
EigenSolver<MatrixType> ei2;
ei2.setMaxIterations(RealSchur<MatrixType>::m_maxIterationsPerRow * rows).compute(a);
VERIFY_IS_EQUAL(ei2.info(), Success);
VERIFY_IS_EQUAL(ei2.eigenvectors(), ei1.eigenvectors());
VERIFY_IS_EQUAL(ei2.eigenvalues(), ei1.eigenvalues());
if (rows > 2) {
ei2.setMaxIterations(1).compute(a);
VERIFY_IS_EQUAL(ei2.info(), NoConvergence);
VERIFY_IS_EQUAL(ei2.getMaxIterations(), 1);
}
EigenSolver<MatrixType> eiNoEivecs(a, false);
VERIFY_IS_EQUAL(eiNoEivecs.info(), Success);
VERIFY_IS_APPROX(ei1.eigenvalues(), eiNoEivecs.eigenvalues());
VERIFY_IS_APPROX(ei1.pseudoEigenvalueMatrix(), eiNoEivecs.pseudoEigenvalueMatrix());
MatrixType id = MatrixType::Identity(rows, cols);
VERIFY_IS_APPROX(id.operatorNorm(), RealScalar(1));
if (rows > 2 && rows < 20)
{
// Test matrix with NaN
a(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
EigenSolver<MatrixType> eiNaN(a);
VERIFY_IS_NOT_EQUAL(eiNaN.info(), Success);
}
// regression test for bug 1098
{
EigenSolver<MatrixType> eig(a.adjoint() * a);
eig.compute(a.adjoint() * a);
}
// regression test for bug 478
{
a.setZero();
EigenSolver<MatrixType> ei3(a);
VERIFY_IS_EQUAL(ei3.info(), Success);
VERIFY_IS_MUCH_SMALLER_THAN(ei3.eigenvalues().norm(),RealScalar(1));
VERIFY((ei3.eigenvectors().transpose()*ei3.eigenvectors().transpose()).eval().isIdentity());
}
}
template<typename MatrixType> void eigensolver_verify_assert(const MatrixType& m)
{
EigenSolver<MatrixType> eig;
VERIFY_RAISES_ASSERT(eig.eigenvectors());
VERIFY_RAISES_ASSERT(eig.pseudoEigenvectors());
VERIFY_RAISES_ASSERT(eig.pseudoEigenvalueMatrix());
VERIFY_RAISES_ASSERT(eig.eigenvalues());
MatrixType a = MatrixType::Random(m.rows(),m.cols());
eig.compute(a, false);
VERIFY_RAISES_ASSERT(eig.eigenvectors());
VERIFY_RAISES_ASSERT(eig.pseudoEigenvectors());
}
void test_eigensolver_generic()
{
int s = 0;
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( eigensolver(Matrix4f()) );
s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
CALL_SUBTEST_2( eigensolver(MatrixXd(s,s)) );
TEST_SET_BUT_UNUSED_VARIABLE(s)
// some trivial but implementation-wise tricky cases
CALL_SUBTEST_2( eigensolver(MatrixXd(1,1)) );
CALL_SUBTEST_2( eigensolver(MatrixXd(2,2)) );
CALL_SUBTEST_3( eigensolver(Matrix<double,1,1>()) );
CALL_SUBTEST_4( eigensolver(Matrix2d()) );
}
CALL_SUBTEST_1( eigensolver_verify_assert(Matrix4f()) );
s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
CALL_SUBTEST_2( eigensolver_verify_assert(MatrixXd(s,s)) );
CALL_SUBTEST_3( eigensolver_verify_assert(Matrix<double,1,1>()) );
CALL_SUBTEST_4( eigensolver_verify_assert(Matrix2d()) );
// Test problem size constructors
CALL_SUBTEST_5(EigenSolver<MatrixXf> tmp(s));
// regression test for bug 410
CALL_SUBTEST_2(
{
MatrixXd A(1,1);
A(0,0) = std::sqrt(-1.); // is Not-a-Number
Eigen::EigenSolver<MatrixXd> solver(A);
VERIFY_IS_EQUAL(solver.info(), NumericalIssue);
}
);
#ifdef EIGEN_TEST_PART_2
{
// regression test for bug 793
MatrixXd a(3,3);
a << 0, 0, 1,
1, 1, 1,
1, 1e+200, 1;
Eigen::EigenSolver<MatrixXd> eig(a);
double scale = 1e-200; // scale to avoid overflow during the comparisons
VERIFY_IS_APPROX(a * eig.pseudoEigenvectors()*scale, eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix()*scale);
VERIFY_IS_APPROX(a * eig.eigenvectors()*scale, eig.eigenvectors() * eig.eigenvalues().asDiagonal()*scale);
}
{
// check a case where all eigenvalues are null.
MatrixXd a(2,2);
a << 1, 1,
-1, -1;
Eigen::EigenSolver<MatrixXd> eig(a);
VERIFY_IS_APPROX(eig.pseudoEigenvectors().squaredNorm(), 2.);
VERIFY_IS_APPROX((a * eig.pseudoEigenvectors()).norm()+1., 1.);
VERIFY_IS_APPROX((eig.pseudoEigenvectors() * eig.pseudoEigenvalueMatrix()).norm()+1., 1.);
VERIFY_IS_APPROX((a * eig.eigenvectors()).norm()+1., 1.);
VERIFY_IS_APPROX((eig.eigenvectors() * eig.eigenvalues().asDiagonal()).norm()+1., 1.);
}
#endif
TEST_SET_BUT_UNUSED_VARIABLE(s)
}
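// --- Illustrative sketch (hedged addition, not part of the upstream test) ---
// Typical EigenSolver usage for a real, possibly non-symmetric matrix, as
// verified above: eigenvalues and eigenvectors are complex in general and
// satisfy A*V = V*D up to numerical precision. example_general_eigs is a
// hypothetical name; the Eigen headers are already included above.
inline void example_general_eigs()
{
  MatrixXd A = MatrixXd::Random(4,4);
  EigenSolver<MatrixXd> es(A);
  if(es.info() == Success)
  {
    MatrixXcd V = es.eigenvectors();
    VectorXcd lambda = es.eigenvalues();
    // residual of the eigen-decomposition, expected to be tiny
    double err = (A.cast<std::complex<double> >() * V - V * lambda.asDiagonal()).norm();
    (void)err;
  }
}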
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/mapped_matrix.cpp
|
.cpp
| 7,940
| 209
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_NO_STATIC_ASSERT
#define EIGEN_NO_STATIC_ASSERT // turn static asserts into runtime asserts in order to check them
#endif
#include "main.h"
#define EIGEN_TESTMAP_MAX_SIZE 256
template<typename VectorType> void map_class_vector(const VectorType& m)
{
typedef typename VectorType::Scalar Scalar;
Index size = m.size();
Scalar* array1 = internal::aligned_new<Scalar>(size);
Scalar* array2 = internal::aligned_new<Scalar>(size);
Scalar* array3 = new Scalar[size+1];
Scalar* array3unaligned = (internal::UIntPtr(array3)%EIGEN_MAX_ALIGN_BYTES) == 0 ? array3+1 : array3;
Scalar array4[EIGEN_TESTMAP_MAX_SIZE];
Map<VectorType, AlignedMax>(array1, size) = VectorType::Random(size);
Map<VectorType, AlignedMax>(array2, size) = Map<VectorType,AlignedMax>(array1, size);
Map<VectorType>(array3unaligned, size) = Map<VectorType>(array1, size);
Map<VectorType>(array4, size) = Map<VectorType,AlignedMax>(array1, size);
VectorType ma1 = Map<VectorType, AlignedMax>(array1, size);
VectorType ma2 = Map<VectorType, AlignedMax>(array2, size);
VectorType ma3 = Map<VectorType>(array3unaligned, size);
VectorType ma4 = Map<VectorType>(array4, size);
VERIFY_IS_EQUAL(ma1, ma2);
VERIFY_IS_EQUAL(ma1, ma3);
VERIFY_IS_EQUAL(ma1, ma4);
#ifdef EIGEN_VECTORIZE
if(internal::packet_traits<Scalar>::Vectorizable && size>=AlignedMax)
VERIFY_RAISES_ASSERT((Map<VectorType,AlignedMax>(array3unaligned, size)))
#endif
internal::aligned_delete(array1, size);
internal::aligned_delete(array2, size);
delete[] array3;
}
template<typename MatrixType> void map_class_matrix(const MatrixType& m)
{
typedef typename MatrixType::Scalar Scalar;
Index rows = m.rows(), cols = m.cols(), size = rows*cols;
Scalar s1 = internal::random<Scalar>();
// array1 and array2 -> aligned heap allocation
Scalar* array1 = internal::aligned_new<Scalar>(size);
for(int i = 0; i < size; i++) array1[i] = Scalar(1);
Scalar* array2 = internal::aligned_new<Scalar>(size);
for(int i = 0; i < size; i++) array2[i] = Scalar(1);
// array3unaligned -> unaligned pointer to heap
Scalar* array3 = new Scalar[size+1];
Index sizep1 = size + 1; // <- without this temporary MSVC 2013 generates bad code
for(Index i = 0; i < sizep1; i++) array3[i] = Scalar(1);
Scalar* array3unaligned = (internal::UIntPtr(array3)%EIGEN_MAX_ALIGN_BYTES) == 0 ? array3+1 : array3;
Scalar array4[256];
if(size<=256)
for(int i = 0; i < size; i++) array4[i] = Scalar(1);
Map<MatrixType> map1(array1, rows, cols);
Map<MatrixType, AlignedMax> map2(array2, rows, cols);
Map<MatrixType> map3(array3unaligned, rows, cols);
Map<MatrixType> map4(array4, rows, cols);
VERIFY_IS_EQUAL(map1, MatrixType::Ones(rows,cols));
VERIFY_IS_EQUAL(map2, MatrixType::Ones(rows,cols));
VERIFY_IS_EQUAL(map3, MatrixType::Ones(rows,cols));
map1 = MatrixType::Random(rows,cols);
map2 = map1;
map3 = map1;
MatrixType ma1 = map1;
MatrixType ma2 = map2;
MatrixType ma3 = map3;
VERIFY_IS_EQUAL(map1, map2);
VERIFY_IS_EQUAL(map1, map3);
VERIFY_IS_EQUAL(ma1, ma2);
VERIFY_IS_EQUAL(ma1, ma3);
VERIFY_IS_EQUAL(ma1, map3);
VERIFY_IS_APPROX(s1*map1, s1*map2);
VERIFY_IS_APPROX(s1*ma1, s1*ma2);
VERIFY_IS_EQUAL(s1*ma1, s1*ma3);
VERIFY_IS_APPROX(s1*map1, s1*map3);
map2 *= s1;
map3 *= s1;
VERIFY_IS_APPROX(s1*map1, map2);
VERIFY_IS_APPROX(s1*map1, map3);
if(size<=256)
{
VERIFY_IS_EQUAL(map4, MatrixType::Ones(rows,cols));
map4 = map1;
MatrixType ma4 = map4;
VERIFY_IS_EQUAL(map1, map4);
VERIFY_IS_EQUAL(ma1, map4);
VERIFY_IS_EQUAL(ma1, ma4);
VERIFY_IS_APPROX(s1*map1, s1*map4);
map4 *= s1;
VERIFY_IS_APPROX(s1*map1, map4);
}
internal::aligned_delete(array1, size);
internal::aligned_delete(array2, size);
delete[] array3;
}
template<typename VectorType> void map_static_methods(const VectorType& m)
{
typedef typename VectorType::Scalar Scalar;
Index size = m.size();
Scalar* array1 = internal::aligned_new<Scalar>(size);
Scalar* array2 = internal::aligned_new<Scalar>(size);
Scalar* array3 = new Scalar[size+1];
Scalar* array3unaligned = internal::UIntPtr(array3)%EIGEN_MAX_ALIGN_BYTES == 0 ? array3+1 : array3;
VectorType::MapAligned(array1, size) = VectorType::Random(size);
VectorType::Map(array2, size) = VectorType::Map(array1, size);
VectorType::Map(array3unaligned, size) = VectorType::Map(array1, size);
VectorType ma1 = VectorType::Map(array1, size);
VectorType ma2 = VectorType::MapAligned(array2, size);
VectorType ma3 = VectorType::Map(array3unaligned, size);
VERIFY_IS_EQUAL(ma1, ma2);
VERIFY_IS_EQUAL(ma1, ma3);
internal::aligned_delete(array1, size);
internal::aligned_delete(array2, size);
delete[] array3;
}
template<typename PlainObjectType> void check_const_correctness(const PlainObjectType&)
{
// there's a lot that we can't test here while still having this test compile!
// the only possible approach would be to run a script trying to compile stuff and checking that it fails.
// CMake can help with that.
// verify that map-to-const doesn't have LvalueBit
typedef typename internal::add_const<PlainObjectType>::type ConstPlainObjectType;
VERIFY( !(internal::traits<Map<ConstPlainObjectType> >::Flags & LvalueBit) );
VERIFY( !(internal::traits<Map<ConstPlainObjectType, AlignedMax> >::Flags & LvalueBit) );
VERIFY( !(Map<ConstPlainObjectType>::Flags & LvalueBit) );
VERIFY( !(Map<ConstPlainObjectType, AlignedMax>::Flags & LvalueBit) );
}
template<typename Scalar>
void map_not_aligned_on_scalar()
{
typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
Index size = 11;
Scalar* array1 = internal::aligned_new<Scalar>((size+1)*(size+1)+1);
Scalar* array2 = reinterpret_cast<Scalar*>(sizeof(Scalar)/2+std::size_t(array1));
Map<MatrixType,0,OuterStride<> > map2(array2, size, size, OuterStride<>(size+1));
MatrixType m2 = MatrixType::Random(size,size);
map2 = m2;
VERIFY_IS_EQUAL(m2, map2);
typedef Matrix<Scalar,Dynamic,1> VectorType;
Map<VectorType> map3(array2, size);
MatrixType v3 = VectorType::Random(size);
map3 = v3;
VERIFY_IS_EQUAL(v3, map3);
internal::aligned_delete(array1, (size+1)*(size+1)+1);
}
void test_mapped_matrix()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( map_class_vector(Matrix<float, 1, 1>()) );
CALL_SUBTEST_1( check_const_correctness(Matrix<float, 1, 1>()) );
CALL_SUBTEST_2( map_class_vector(Vector4d()) );
CALL_SUBTEST_2( map_class_vector(VectorXd(13)) );
CALL_SUBTEST_2( check_const_correctness(Matrix4d()) );
CALL_SUBTEST_3( map_class_vector(RowVector4f()) );
CALL_SUBTEST_4( map_class_vector(VectorXcf(8)) );
CALL_SUBTEST_5( map_class_vector(VectorXi(12)) );
CALL_SUBTEST_5( check_const_correctness(VectorXi(12)) );
CALL_SUBTEST_1( map_class_matrix(Matrix<float, 1, 1>()) );
CALL_SUBTEST_2( map_class_matrix(Matrix4d()) );
CALL_SUBTEST_11( map_class_matrix(Matrix<float,3,5>()) );
CALL_SUBTEST_4( map_class_matrix(MatrixXcf(internal::random<int>(1,10),internal::random<int>(1,10))) );
CALL_SUBTEST_5( map_class_matrix(MatrixXi(internal::random<int>(1,10),internal::random<int>(1,10))) );
CALL_SUBTEST_6( map_static_methods(Matrix<double, 1, 1>()) );
CALL_SUBTEST_7( map_static_methods(Vector3f()) );
CALL_SUBTEST_8( map_static_methods(RowVector3d()) );
CALL_SUBTEST_9( map_static_methods(VectorXcd(8)) );
CALL_SUBTEST_10( map_static_methods(VectorXf(12)) );
CALL_SUBTEST_11( map_not_aligned_on_scalar<double>() );
}
}
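// --- Illustrative sketch (hedged addition, not part of the upstream test) ---
// The core Map<> idea checked above in its simplest form: view existing raw
// memory as an Eigen object without copying. example_map_view is a
// hypothetical name; Eigen is already included via main.h.
inline double example_map_view()
{
  double raw[6] = {1, 2, 3, 4, 5, 6};
  Map<Matrix<double,2,3> > view(raw);   // 2x3 column-major view of raw[]
  view(0,0) = 10.0;                     // writes through to raw[0]
  return Map<VectorXd>(raw, 6).sum();   // the same memory seen as a vector
}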
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/incomplete_cholesky.cpp
|
.cpp
| 2,572
| 66
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015-2016 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
// #define EIGEN_DONT_VECTORIZE
// #define EIGEN_MAX_ALIGN_BYTES 0
#include "sparse_solver.h"
#include <Eigen/IterativeLinearSolvers>
#include <unsupported/Eigen/IterativeSolvers>
template<typename T, typename I> void test_incomplete_cholesky_T()
{
typedef SparseMatrix<T,0,I> SparseMatrixType;
ConjugateGradient<SparseMatrixType, Lower, IncompleteCholesky<T, Lower, AMDOrdering<I> > > cg_illt_lower_amd;
ConjugateGradient<SparseMatrixType, Lower, IncompleteCholesky<T, Lower, NaturalOrdering<I> > > cg_illt_lower_nat;
ConjugateGradient<SparseMatrixType, Upper, IncompleteCholesky<T, Upper, AMDOrdering<I> > > cg_illt_upper_amd;
ConjugateGradient<SparseMatrixType, Upper, IncompleteCholesky<T, Upper, NaturalOrdering<I> > > cg_illt_upper_nat;
ConjugateGradient<SparseMatrixType, Upper|Lower, IncompleteCholesky<T, Lower, AMDOrdering<I> > > cg_illt_uplo_amd;
CALL_SUBTEST( check_sparse_spd_solving(cg_illt_lower_amd) );
CALL_SUBTEST( check_sparse_spd_solving(cg_illt_lower_nat) );
CALL_SUBTEST( check_sparse_spd_solving(cg_illt_upper_amd) );
CALL_SUBTEST( check_sparse_spd_solving(cg_illt_upper_nat) );
CALL_SUBTEST( check_sparse_spd_solving(cg_illt_uplo_amd) );
}
void test_incomplete_cholesky()
{
CALL_SUBTEST_1(( test_incomplete_cholesky_T<double,int>() ));
CALL_SUBTEST_2(( test_incomplete_cholesky_T<std::complex<double>, int>() ));
CALL_SUBTEST_3(( test_incomplete_cholesky_T<double,long int>() ));
#ifdef EIGEN_TEST_PART_1
// regression for bug 1150
for(int N = 1; N<20; ++N)
{
Eigen::MatrixXd b( N, N );
b.setOnes();
Eigen::SparseMatrix<double> m( N, N );
m.reserve(Eigen::VectorXi::Constant(N,4));
for( int i = 0; i < N; ++i )
{
m.insert( i, i ) = 1;
m.coeffRef( i, i / 2 ) = 2;
m.coeffRef( i, i / 3 ) = 2;
m.coeffRef( i, i / 4 ) = 2;
}
Eigen::SparseMatrix<double> A;
A = m * m.transpose();
Eigen::ConjugateGradient<Eigen::SparseMatrix<double>,
Eigen::Lower | Eigen::Upper,
Eigen::IncompleteCholesky<double> > solver( A );
VERIFY(solver.preconditioner().info() == Eigen::Success);
VERIFY(solver.info() == Eigen::Success);
}
#endif
}
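// --- Illustrative sketch (hedged addition, not part of the upstream test) ---
// How the preconditioner tested above is typically used: ConjugateGradient on
// a sparse SPD matrix with IncompleteCholesky. example_ic_cg is a hypothetical
// name; it relies on the sparse and iterative-solver headers included above
// and on <vector> coming in through main.h.
inline void example_ic_cg()
{
  const int n = 50;
  std::vector<Triplet<double> > trip;
  for(int i = 0; i < n; ++i)
  {
    trip.push_back(Triplet<double>(i, i, 4.0));               // diagonal
    if(i+1 < n)
    {
      trip.push_back(Triplet<double>(i, i+1, -1.0));          // symmetric
      trip.push_back(Triplet<double>(i+1, i, -1.0));          // off-diagonals
    }
  }
  SparseMatrix<double> A(n, n);
  A.setFromTriplets(trip.begin(), trip.end());
  VectorXd b = VectorXd::Ones(n);
  ConjugateGradient<SparseMatrix<double>, Lower|Upper,
                    IncompleteCholesky<double> > cg(A);
  VectorXd x = cg.solve(b);
  (void)x;
}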
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/eigensolver_selfadjoint.cpp
|
.cpp
| 11,023
| 274
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include "svd_fill.h"
#include <limits>
#include <Eigen/Eigenvalues>
#include <Eigen/SparseCore>
template<typename MatrixType> void selfadjointeigensolver_essential_check(const MatrixType& m)
{
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
RealScalar eival_eps = numext::mini<RealScalar>(test_precision<RealScalar>(), NumTraits<Scalar>::dummy_precision()*20000);
SelfAdjointEigenSolver<MatrixType> eiSymm(m);
VERIFY_IS_EQUAL(eiSymm.info(), Success);
RealScalar scaling = m.cwiseAbs().maxCoeff();
if(scaling<(std::numeric_limits<RealScalar>::min)())
{
VERIFY(eiSymm.eigenvalues().cwiseAbs().maxCoeff() <= (std::numeric_limits<RealScalar>::min)());
}
else
{
VERIFY_IS_APPROX((m.template selfadjointView<Lower>() * eiSymm.eigenvectors())/scaling,
(eiSymm.eigenvectors() * eiSymm.eigenvalues().asDiagonal())/scaling);
}
VERIFY_IS_APPROX(m.template selfadjointView<Lower>().eigenvalues(), eiSymm.eigenvalues());
VERIFY_IS_UNITARY(eiSymm.eigenvectors());
if(m.cols()<=4)
{
SelfAdjointEigenSolver<MatrixType> eiDirect;
eiDirect.computeDirect(m);
VERIFY_IS_EQUAL(eiDirect.info(), Success);
if(! eiSymm.eigenvalues().isApprox(eiDirect.eigenvalues(), eival_eps) )
{
std::cerr << "reference eigenvalues: " << eiSymm.eigenvalues().transpose() << "\n"
<< "obtained eigenvalues: " << eiDirect.eigenvalues().transpose() << "\n"
<< "diff: " << (eiSymm.eigenvalues()-eiDirect.eigenvalues()).transpose() << "\n"
<< "error (eps): " << (eiSymm.eigenvalues()-eiDirect.eigenvalues()).norm() / eiSymm.eigenvalues().norm() << " (" << eival_eps << ")\n";
}
if(scaling<(std::numeric_limits<RealScalar>::min)())
{
VERIFY(eiDirect.eigenvalues().cwiseAbs().maxCoeff() <= (std::numeric_limits<RealScalar>::min)());
}
else
{
VERIFY_IS_APPROX(eiSymm.eigenvalues()/scaling, eiDirect.eigenvalues()/scaling);
VERIFY_IS_APPROX((m.template selfadjointView<Lower>() * eiDirect.eigenvectors())/scaling,
(eiDirect.eigenvectors() * eiDirect.eigenvalues().asDiagonal())/scaling);
VERIFY_IS_APPROX(m.template selfadjointView<Lower>().eigenvalues()/scaling, eiDirect.eigenvalues()/scaling);
}
VERIFY_IS_UNITARY(eiDirect.eigenvectors());
}
}
template<typename MatrixType> void selfadjointeigensolver(const MatrixType& m)
{
/* this test covers the following files:
EigenSolver.h, SelfAdjointEigenSolver.h (and indirectly: Tridiagonalization.h)
*/
Index rows = m.rows();
Index cols = m.cols();
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
RealScalar largerEps = 10*test_precision<RealScalar>();
MatrixType a = MatrixType::Random(rows,cols);
MatrixType a1 = MatrixType::Random(rows,cols);
MatrixType symmA = a.adjoint() * a + a1.adjoint() * a1;
MatrixType symmC = symmA;
svd_fill_random(symmA,Symmetric);
symmA.template triangularView<StrictlyUpper>().setZero();
symmC.template triangularView<StrictlyUpper>().setZero();
MatrixType b = MatrixType::Random(rows,cols);
MatrixType b1 = MatrixType::Random(rows,cols);
MatrixType symmB = b.adjoint() * b + b1.adjoint() * b1;
symmB.template triangularView<StrictlyUpper>().setZero();
CALL_SUBTEST( selfadjointeigensolver_essential_check(symmA) );
SelfAdjointEigenSolver<MatrixType> eiSymm(symmA);
// generalized eigen problem
GeneralizedSelfAdjointEigenSolver<MatrixType> eiSymmGen(symmC, symmB);
SelfAdjointEigenSolver<MatrixType> eiSymmNoEivecs(symmA, false);
VERIFY_IS_EQUAL(eiSymmNoEivecs.info(), Success);
VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmNoEivecs.eigenvalues());
// generalized eigen problem Ax = lBx
eiSymmGen.compute(symmC, symmB,Ax_lBx);
VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
VERIFY((symmC.template selfadjointView<Lower>() * eiSymmGen.eigenvectors()).isApprox(
symmB.template selfadjointView<Lower>() * (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
// generalized eigen problem BAx = lx
eiSymmGen.compute(symmC, symmB,BAx_lx);
VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
VERIFY((symmB.template selfadjointView<Lower>() * (symmC.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
(eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
// generalized eigen problem ABx = lx
eiSymmGen.compute(symmC, symmB,ABx_lx);
VERIFY_IS_EQUAL(eiSymmGen.info(), Success);
VERIFY((symmC.template selfadjointView<Lower>() * (symmB.template selfadjointView<Lower>() * eiSymmGen.eigenvectors())).isApprox(
(eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps));
eiSymm.compute(symmC);
MatrixType sqrtSymmA = eiSymm.operatorSqrt();
VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), sqrtSymmA*sqrtSymmA);
VERIFY_IS_APPROX(sqrtSymmA, symmC.template selfadjointView<Lower>()*eiSymm.operatorInverseSqrt());
MatrixType id = MatrixType::Identity(rows, cols);
VERIFY_IS_APPROX(id.template selfadjointView<Lower>().operatorNorm(), RealScalar(1));
SelfAdjointEigenSolver<MatrixType> eiSymmUninitialized;
VERIFY_RAISES_ASSERT(eiSymmUninitialized.info());
VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvalues());
VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvectors());
VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorSqrt());
VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt());
eiSymmUninitialized.compute(symmA, false);
VERIFY_RAISES_ASSERT(eiSymmUninitialized.eigenvectors());
VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorSqrt());
VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt());
// test Tridiagonalization's methods
Tridiagonalization<MatrixType> tridiag(symmC);
VERIFY_IS_APPROX(tridiag.diagonal(), tridiag.matrixT().diagonal());
VERIFY_IS_APPROX(tridiag.subDiagonal(), tridiag.matrixT().template diagonal<-1>());
Matrix<RealScalar,Dynamic,Dynamic> T = tridiag.matrixT();
if(rows>1 && cols>1) {
// FIXME check that the upper and lower parts are 0:
//VERIFY(T.topRightCorner(rows-2, cols-2).template triangularView<Upper>().isZero());
}
VERIFY_IS_APPROX(tridiag.diagonal(), T.diagonal());
VERIFY_IS_APPROX(tridiag.subDiagonal(), T.template diagonal<1>());
VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint());
VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView<Lower>()), tridiag.matrixQ() * tridiag.matrixT() * tridiag.matrixQ().adjoint());
// Test computation of eigenvalues from tridiagonal matrix
if(rows > 1)
{
SelfAdjointEigenSolver<MatrixType> eiSymmTridiag;
eiSymmTridiag.computeFromTridiagonal(tridiag.matrixT().diagonal(), tridiag.matrixT().diagonal(-1), ComputeEigenvectors);
VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmTridiag.eigenvalues());
VERIFY_IS_APPROX(tridiag.matrixT(), eiSymmTridiag.eigenvectors().real() * eiSymmTridiag.eigenvalues().asDiagonal() * eiSymmTridiag.eigenvectors().real().transpose());
}
if (rows > 1 && rows < 20)
{
// Test matrix with NaN
symmC(0,0) = std::numeric_limits<typename MatrixType::RealScalar>::quiet_NaN();
SelfAdjointEigenSolver<MatrixType> eiSymmNaN(symmC);
VERIFY_IS_EQUAL(eiSymmNaN.info(), NoConvergence);
}
// regression test for bug 1098
{
SelfAdjointEigenSolver<MatrixType> eig(a.adjoint() * a);
eig.compute(a.adjoint() * a);
}
// regression test for bug 478
{
a.setZero();
SelfAdjointEigenSolver<MatrixType> ei3(a);
VERIFY_IS_EQUAL(ei3.info(), Success);
VERIFY_IS_MUCH_SMALLER_THAN(ei3.eigenvalues().norm(),RealScalar(1));
VERIFY((ei3.eigenvectors().transpose()*ei3.eigenvectors().transpose()).eval().isIdentity());
}
}
template<int>
void bug_854()
{
Matrix3d m;
m << 850.961, 51.966, 0,
51.966, 254.841, 0,
0, 0, 0;
selfadjointeigensolver_essential_check(m);
}
template<int>
void bug_1014()
{
Matrix3d m;
m << 0.11111111111111114658, 0, 0,
0, 0.11111111111111109107, 0,
0, 0, 0.11111111111111107719;
selfadjointeigensolver_essential_check(m);
}
template<int>
void bug_1225()
{
Matrix3d m1, m2;
m1.setRandom();
m1 = m1*m1.transpose();
m2 = m1.triangularView<Upper>();
SelfAdjointEigenSolver<Matrix3d> eig1(m1);
SelfAdjointEigenSolver<Matrix3d> eig2(m2.selfadjointView<Upper>());
VERIFY_IS_APPROX(eig1.eigenvalues(), eig2.eigenvalues());
}
template<int>
void bug_1204()
{
SparseMatrix<double> A(2,2);
A.setIdentity();
SelfAdjointEigenSolver<Eigen::SparseMatrix<double> > eig(A);
}
void test_eigensolver_selfadjoint()
{
int s = 0;
for(int i = 0; i < g_repeat; i++) {
// trivial test for 1x1 matrices:
CALL_SUBTEST_1( selfadjointeigensolver(Matrix<float, 1, 1>()));
CALL_SUBTEST_1( selfadjointeigensolver(Matrix<double, 1, 1>()));
// very important to test 3x3 and 2x2 matrices since we provide special paths for them
CALL_SUBTEST_12( selfadjointeigensolver(Matrix2f()) );
CALL_SUBTEST_12( selfadjointeigensolver(Matrix2d()) );
CALL_SUBTEST_13( selfadjointeigensolver(Matrix3f()) );
CALL_SUBTEST_13( selfadjointeigensolver(Matrix3d()) );
CALL_SUBTEST_2( selfadjointeigensolver(Matrix4d()) );
s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
CALL_SUBTEST_3( selfadjointeigensolver(MatrixXf(s,s)) );
CALL_SUBTEST_4( selfadjointeigensolver(MatrixXd(s,s)) );
CALL_SUBTEST_5( selfadjointeigensolver(MatrixXcd(s,s)) );
CALL_SUBTEST_9( selfadjointeigensolver(Matrix<std::complex<double>,Dynamic,Dynamic,RowMajor>(s,s)) );
TEST_SET_BUT_UNUSED_VARIABLE(s)
// some trivial but implementation-wise tricky cases
CALL_SUBTEST_4( selfadjointeigensolver(MatrixXd(1,1)) );
CALL_SUBTEST_4( selfadjointeigensolver(MatrixXd(2,2)) );
CALL_SUBTEST_6( selfadjointeigensolver(Matrix<double,1,1>()) );
CALL_SUBTEST_7( selfadjointeigensolver(Matrix<double,2,2>()) );
}
CALL_SUBTEST_13( bug_854<0>() );
CALL_SUBTEST_13( bug_1014<0>() );
CALL_SUBTEST_13( bug_1204<0>() );
CALL_SUBTEST_13( bug_1225<0>() );
// Test problem size constructors
s = internal::random<int>(1,EIGEN_TEST_MAX_SIZE/4);
CALL_SUBTEST_8(SelfAdjointEigenSolver<MatrixXf> tmp1(s));
CALL_SUBTEST_8(Tridiagonalization<MatrixXf> tmp2(s));
TEST_SET_BUT_UNUSED_VARIABLE(s)
}
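// --- Illustrative sketch (hedged addition, not part of the upstream test) ---
// Basic SelfAdjointEigenSolver usage as exercised above: for a symmetric
// matrix the eigenvalues are real and sorted in increasing order, and the
// eigenvectors form an orthonormal basis. example_sym_eigs is a hypothetical
// name; the Eigen headers are already included above.
inline void example_sym_eigs()
{
  Matrix3d A;
  A << 2, 1, 0,
       1, 2, 1,
       0, 1, 2;
  SelfAdjointEigenSolver<Matrix3d> es(A);
  if(es.info() == Success)
  {
    Vector3d evals  = es.eigenvalues();   // real, increasing order
    Matrix3d evecs  = es.eigenvectors();  // columns are unit eigenvectors
    double residual = (A * evecs - evecs * evals.asDiagonal()).norm();
    (void)residual;
  }
}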
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/vectorwiseop.cpp
|
.cpp
| 8,627
| 251
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define TEST_ENABLE_TEMPORARY_TRACKING
#define EIGEN_NO_STATIC_ASSERT
#include "main.h"
template<typename ArrayType> void vectorwiseop_array(const ArrayType& m)
{
typedef typename ArrayType::Scalar Scalar;
typedef Array<Scalar, ArrayType::RowsAtCompileTime, 1> ColVectorType;
typedef Array<Scalar, 1, ArrayType::ColsAtCompileTime> RowVectorType;
Index rows = m.rows();
Index cols = m.cols();
Index r = internal::random<Index>(0, rows-1),
c = internal::random<Index>(0, cols-1);
ArrayType m1 = ArrayType::Random(rows, cols),
m2(rows, cols),
m3(rows, cols);
ColVectorType colvec = ColVectorType::Random(rows);
RowVectorType rowvec = RowVectorType::Random(cols);
// test addition
m2 = m1;
m2.colwise() += colvec;
VERIFY_IS_APPROX(m2, m1.colwise() + colvec);
VERIFY_IS_APPROX(m2.col(c), m1.col(c) + colvec);
VERIFY_RAISES_ASSERT(m2.colwise() += colvec.transpose());
VERIFY_RAISES_ASSERT(m1.colwise() + colvec.transpose());
m2 = m1;
m2.rowwise() += rowvec;
VERIFY_IS_APPROX(m2, m1.rowwise() + rowvec);
VERIFY_IS_APPROX(m2.row(r), m1.row(r) + rowvec);
VERIFY_RAISES_ASSERT(m2.rowwise() += rowvec.transpose());
VERIFY_RAISES_ASSERT(m1.rowwise() + rowvec.transpose());
// test subtraction
m2 = m1;
m2.colwise() -= colvec;
VERIFY_IS_APPROX(m2, m1.colwise() - colvec);
VERIFY_IS_APPROX(m2.col(c), m1.col(c) - colvec);
VERIFY_RAISES_ASSERT(m2.colwise() -= colvec.transpose());
VERIFY_RAISES_ASSERT(m1.colwise() - colvec.transpose());
m2 = m1;
m2.rowwise() -= rowvec;
VERIFY_IS_APPROX(m2, m1.rowwise() - rowvec);
VERIFY_IS_APPROX(m2.row(r), m1.row(r) - rowvec);
VERIFY_RAISES_ASSERT(m2.rowwise() -= rowvec.transpose());
VERIFY_RAISES_ASSERT(m1.rowwise() - rowvec.transpose());
// test multiplication
m2 = m1;
m2.colwise() *= colvec;
VERIFY_IS_APPROX(m2, m1.colwise() * colvec);
VERIFY_IS_APPROX(m2.col(c), m1.col(c) * colvec);
VERIFY_RAISES_ASSERT(m2.colwise() *= colvec.transpose());
VERIFY_RAISES_ASSERT(m1.colwise() * colvec.transpose());
m2 = m1;
m2.rowwise() *= rowvec;
VERIFY_IS_APPROX(m2, m1.rowwise() * rowvec);
VERIFY_IS_APPROX(m2.row(r), m1.row(r) * rowvec);
VERIFY_RAISES_ASSERT(m2.rowwise() *= rowvec.transpose());
VERIFY_RAISES_ASSERT(m1.rowwise() * rowvec.transpose());
// test quotient
m2 = m1;
m2.colwise() /= colvec;
VERIFY_IS_APPROX(m2, m1.colwise() / colvec);
VERIFY_IS_APPROX(m2.col(c), m1.col(c) / colvec);
VERIFY_RAISES_ASSERT(m2.colwise() /= colvec.transpose());
VERIFY_RAISES_ASSERT(m1.colwise() / colvec.transpose());
m2 = m1;
m2.rowwise() /= rowvec;
VERIFY_IS_APPROX(m2, m1.rowwise() / rowvec);
VERIFY_IS_APPROX(m2.row(r), m1.row(r) / rowvec);
VERIFY_RAISES_ASSERT(m2.rowwise() /= rowvec.transpose());
VERIFY_RAISES_ASSERT(m1.rowwise() / rowvec.transpose());
m2 = m1;
// yes, there might appear to be an aliasing issue here, but ".rowwise() /="
// is supposed to evaluate "m2.colwise().sum()" into a temporary to avoid
// evaluating the reduction multiple times
if(ArrayType::RowsAtCompileTime>2 || ArrayType::RowsAtCompileTime==Dynamic)
{
m2.rowwise() /= m2.colwise().sum();
VERIFY_IS_APPROX(m2, m1.rowwise() / m1.colwise().sum());
}
// all/any
Array<bool,Dynamic,Dynamic> mb(rows,cols);
mb = (m1.real()<=0.7).colwise().all();
VERIFY( (mb.col(c) == (m1.real().col(c)<=0.7).all()).all() );
mb = (m1.real()<=0.7).rowwise().all();
VERIFY( (mb.row(r) == (m1.real().row(r)<=0.7).all()).all() );
mb = (m1.real()>=0.7).colwise().any();
VERIFY( (mb.col(c) == (m1.real().col(c)>=0.7).any()).all() );
mb = (m1.real()>=0.7).rowwise().any();
VERIFY( (mb.row(r) == (m1.real().row(r)>=0.7).any()).all() );
}
template<typename MatrixType> void vectorwiseop_matrix(const MatrixType& m)
{
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> ColVectorType;
typedef Matrix<Scalar, 1, MatrixType::ColsAtCompileTime> RowVectorType;
typedef Matrix<RealScalar, MatrixType::RowsAtCompileTime, 1> RealColVectorType;
typedef Matrix<RealScalar, 1, MatrixType::ColsAtCompileTime> RealRowVectorType;
Index rows = m.rows();
Index cols = m.cols();
Index r = internal::random<Index>(0, rows-1),
c = internal::random<Index>(0, cols-1);
MatrixType m1 = MatrixType::Random(rows, cols),
m2(rows, cols),
m3(rows, cols);
ColVectorType colvec = ColVectorType::Random(rows);
RowVectorType rowvec = RowVectorType::Random(cols);
RealColVectorType rcres;
RealRowVectorType rrres;
// test addition
m2 = m1;
m2.colwise() += colvec;
VERIFY_IS_APPROX(m2, m1.colwise() + colvec);
VERIFY_IS_APPROX(m2.col(c), m1.col(c) + colvec);
if(rows>1)
{
VERIFY_RAISES_ASSERT(m2.colwise() += colvec.transpose());
VERIFY_RAISES_ASSERT(m1.colwise() + colvec.transpose());
}
m2 = m1;
m2.rowwise() += rowvec;
VERIFY_IS_APPROX(m2, m1.rowwise() + rowvec);
VERIFY_IS_APPROX(m2.row(r), m1.row(r) + rowvec);
if(cols>1)
{
VERIFY_RAISES_ASSERT(m2.rowwise() += rowvec.transpose());
VERIFY_RAISES_ASSERT(m1.rowwise() + rowvec.transpose());
}
// test subtraction
m2 = m1;
m2.colwise() -= colvec;
VERIFY_IS_APPROX(m2, m1.colwise() - colvec);
VERIFY_IS_APPROX(m2.col(c), m1.col(c) - colvec);
if(rows>1)
{
VERIFY_RAISES_ASSERT(m2.colwise() -= colvec.transpose());
VERIFY_RAISES_ASSERT(m1.colwise() - colvec.transpose());
}
m2 = m1;
m2.rowwise() -= rowvec;
VERIFY_IS_APPROX(m2, m1.rowwise() - rowvec);
VERIFY_IS_APPROX(m2.row(r), m1.row(r) - rowvec);
if(cols>1)
{
VERIFY_RAISES_ASSERT(m2.rowwise() -= rowvec.transpose());
VERIFY_RAISES_ASSERT(m1.rowwise() - rowvec.transpose());
}
// test norm
rrres = m1.colwise().norm();
VERIFY_IS_APPROX(rrres(c), m1.col(c).norm());
rcres = m1.rowwise().norm();
VERIFY_IS_APPROX(rcres(r), m1.row(r).norm());
VERIFY_IS_APPROX(m1.cwiseAbs().colwise().sum(), m1.colwise().template lpNorm<1>());
VERIFY_IS_APPROX(m1.cwiseAbs().rowwise().sum(), m1.rowwise().template lpNorm<1>());
VERIFY_IS_APPROX(m1.cwiseAbs().colwise().maxCoeff(), m1.colwise().template lpNorm<Infinity>());
VERIFY_IS_APPROX(m1.cwiseAbs().rowwise().maxCoeff(), m1.rowwise().template lpNorm<Infinity>());
// regression for bug 1158
VERIFY_IS_APPROX(m1.cwiseAbs().colwise().sum().x(), m1.col(0).cwiseAbs().sum());
// test normalized
m2 = m1.colwise().normalized();
VERIFY_IS_APPROX(m2.col(c), m1.col(c).normalized());
m2 = m1.rowwise().normalized();
VERIFY_IS_APPROX(m2.row(r), m1.row(r).normalized());
// test normalize
m2 = m1;
m2.colwise().normalize();
VERIFY_IS_APPROX(m2.col(c), m1.col(c).normalized());
m2 = m1;
m2.rowwise().normalize();
VERIFY_IS_APPROX(m2.row(r), m1.row(r).normalized());
// test with partial reduction of products
Matrix<Scalar,MatrixType::RowsAtCompileTime,MatrixType::RowsAtCompileTime> m1m1 = m1 * m1.transpose();
VERIFY_IS_APPROX( (m1 * m1.transpose()).colwise().sum(), m1m1.colwise().sum());
Matrix<Scalar,1,MatrixType::RowsAtCompileTime> tmp(rows);
VERIFY_EVALUATION_COUNT( tmp = (m1 * m1.transpose()).colwise().sum(), 1);
m2 = m1.rowwise() - (m1.colwise().sum()/RealScalar(m1.rows())).eval();
m1 = m1.rowwise() - (m1.colwise().sum()/RealScalar(m1.rows()));
VERIFY_IS_APPROX( m1, m2 );
VERIFY_EVALUATION_COUNT( m2 = (m1.rowwise() - m1.colwise().sum()/RealScalar(m1.rows())), (MatrixType::RowsAtCompileTime!=1 ? 1 : 0) );
}
void test_vectorwiseop()
{
CALL_SUBTEST_1( vectorwiseop_array(Array22cd()) );
CALL_SUBTEST_2( vectorwiseop_array(Array<double, 3, 2>()) );
CALL_SUBTEST_3( vectorwiseop_array(ArrayXXf(3, 4)) );
CALL_SUBTEST_4( vectorwiseop_matrix(Matrix4cf()) );
CALL_SUBTEST_5( vectorwiseop_matrix(Matrix<float,4,5>()) );
CALL_SUBTEST_6( vectorwiseop_matrix(MatrixXd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_7( vectorwiseop_matrix(VectorXd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_7( vectorwiseop_matrix(RowVectorXd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
}
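// --- Illustrative sketch (hedged addition, not part of the upstream test) ---
// The colwise()/rowwise() broadcasting and partial reductions tested above in
// a minimal form: subtract the per-column mean from every row, then take
// per-column norms. example_vectorwise is a hypothetical name.
inline void example_vectorwise()
{
  MatrixXd M = MatrixXd::Random(5, 3);
  RowVectorXd mean = M.colwise().mean();    // 1x3 partial reduction
  MatrixXd centered = M.rowwise() - mean;   // broadcast the row vector
  RowVectorXd norms = centered.colwise().norm();
  (void)norms;
}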
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/qr_fullpivoting.cpp
|
.cpp
| 5,242
| 158
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <Eigen/QR>
template<typename MatrixType> void qr()
{
Index max_size = EIGEN_TEST_MAX_SIZE;
Index min_size = numext::maxi(1,EIGEN_TEST_MAX_SIZE/10);
Index rows = internal::random<Index>(min_size,max_size),
cols = internal::random<Index>(min_size,max_size),
cols2 = internal::random<Index>(min_size,max_size),
rank = internal::random<Index>(1, (std::min)(rows, cols)-1);
typedef typename MatrixType::Scalar Scalar;
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> MatrixQType;
MatrixType m1;
createRandomPIMatrixOfRank(rank,rows,cols,m1);
FullPivHouseholderQR<MatrixType> qr(m1);
VERIFY_IS_EQUAL(rank, qr.rank());
VERIFY_IS_EQUAL(cols - qr.rank(), qr.dimensionOfKernel());
VERIFY(!qr.isInjective());
VERIFY(!qr.isInvertible());
VERIFY(!qr.isSurjective());
MatrixType r = qr.matrixQR();
MatrixQType q = qr.matrixQ();
VERIFY_IS_UNITARY(q);
// FIXME need better way to construct trapezoid
for(int i = 0; i < rows; i++) for(int j = 0; j < cols; j++) if(i>j) r(i,j) = Scalar(0);
MatrixType c = qr.matrixQ() * r * qr.colsPermutation().inverse();
VERIFY_IS_APPROX(m1, c);
// stress the ReturnByValue mechanism
MatrixType tmp;
VERIFY_IS_APPROX(tmp.noalias() = qr.matrixQ() * r, (qr.matrixQ() * r).eval());
MatrixType m2 = MatrixType::Random(cols,cols2);
MatrixType m3 = m1*m2;
m2 = MatrixType::Random(cols,cols2);
m2 = qr.solve(m3);
VERIFY_IS_APPROX(m3, m1*m2);
{
Index size = rows;
do {
m1 = MatrixType::Random(size,size);
qr.compute(m1);
} while(!qr.isInvertible());
MatrixType m1_inv = qr.inverse();
m3 = m1 * MatrixType::Random(size,cols2);
m2 = qr.solve(m3);
VERIFY_IS_APPROX(m2, m1_inv*m3);
}
}
template<typename MatrixType> void qr_invertible()
{
using std::log;
using std::abs;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename MatrixType::Scalar Scalar;
Index max_size = numext::mini(50,EIGEN_TEST_MAX_SIZE);
Index min_size = numext::maxi(1,EIGEN_TEST_MAX_SIZE/10);
Index size = internal::random<Index>(min_size,max_size);
MatrixType m1(size, size), m2(size, size), m3(size, size);
m1 = MatrixType::Random(size,size);
if (internal::is_same<RealScalar,float>::value)
{
// let's build a matrix that is more stable to invert
MatrixType a = MatrixType::Random(size,size*2);
m1 += a * a.adjoint();
}
FullPivHouseholderQR<MatrixType> qr(m1);
VERIFY(qr.isInjective());
VERIFY(qr.isInvertible());
VERIFY(qr.isSurjective());
m3 = MatrixType::Random(size,size);
m2 = qr.solve(m3);
VERIFY_IS_APPROX(m3, m1*m2);
// now construct a matrix with prescribed determinant
m1.setZero();
for(int i = 0; i < size; i++) m1(i,i) = internal::random<Scalar>();
RealScalar absdet = abs(m1.diagonal().prod());
m3 = qr.matrixQ(); // get a unitary
m1 = m3 * m1 * m3;
qr.compute(m1);
VERIFY_IS_APPROX(absdet, qr.absDeterminant());
VERIFY_IS_APPROX(log(absdet), qr.logAbsDeterminant());
}
template<typename MatrixType> void qr_verify_assert()
{
MatrixType tmp;
FullPivHouseholderQR<MatrixType> qr;
VERIFY_RAISES_ASSERT(qr.matrixQR())
VERIFY_RAISES_ASSERT(qr.solve(tmp))
VERIFY_RAISES_ASSERT(qr.matrixQ())
VERIFY_RAISES_ASSERT(qr.dimensionOfKernel())
VERIFY_RAISES_ASSERT(qr.isInjective())
VERIFY_RAISES_ASSERT(qr.isSurjective())
VERIFY_RAISES_ASSERT(qr.isInvertible())
VERIFY_RAISES_ASSERT(qr.inverse())
VERIFY_RAISES_ASSERT(qr.absDeterminant())
VERIFY_RAISES_ASSERT(qr.logAbsDeterminant())
}
void test_qr_fullpivoting()
{
for(int i = 0; i < 1; i++) {
// FIXME : very weird bug here
// CALL_SUBTEST(qr(Matrix2f()) );
CALL_SUBTEST_1( qr<MatrixXf>() );
CALL_SUBTEST_2( qr<MatrixXd>() );
CALL_SUBTEST_3( qr<MatrixXcd>() );
}
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( qr_invertible<MatrixXf>() );
CALL_SUBTEST_2( qr_invertible<MatrixXd>() );
CALL_SUBTEST_4( qr_invertible<MatrixXcf>() );
CALL_SUBTEST_3( qr_invertible<MatrixXcd>() );
}
CALL_SUBTEST_5(qr_verify_assert<Matrix3f>());
CALL_SUBTEST_6(qr_verify_assert<Matrix3d>());
CALL_SUBTEST_1(qr_verify_assert<MatrixXf>());
CALL_SUBTEST_2(qr_verify_assert<MatrixXd>());
CALL_SUBTEST_4(qr_verify_assert<MatrixXcf>());
CALL_SUBTEST_3(qr_verify_assert<MatrixXcd>());
// Test problem size constructors
CALL_SUBTEST_7(FullPivHouseholderQR<MatrixXf>(10, 20));
CALL_SUBTEST_7((FullPivHouseholderQR<Matrix<float,10,20> >(10,20)));
CALL_SUBTEST_7((FullPivHouseholderQR<Matrix<float,10,20> >(Matrix<float,10,20>::Random())));
CALL_SUBTEST_7((FullPivHouseholderQR<Matrix<float,20,10> >(20,10)));
CALL_SUBTEST_7((FullPivHouseholderQR<Matrix<float,20,10> >(Matrix<float,20,10>::Random())));
}
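// --- Illustrative sketch (hedged addition, not part of the upstream test) ---
// Typical use of the rank-revealing FullPivHouseholderQR tested above:
// solve a square system and query the numerical rank. example_fullpiv_qr is a
// hypothetical name; <Eigen/QR> is already included above.
inline void example_fullpiv_qr()
{
  MatrixXd A = MatrixXd::Random(4, 4);
  VectorXd b = VectorXd::Random(4);
  FullPivHouseholderQR<MatrixXd> qr(A);
  VectorXd x = qr.solve(b);   // solution of A*x = b (in the least-squares sense)
  Index r = qr.rank();        // numerical rank of A
  (void)x; (void)r;
}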
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/integer_types.cpp
|
.cpp
| 5,688
| 168
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_NO_STATIC_ASSERT
#include "main.h"
#undef VERIFY_IS_APPROX
#define VERIFY_IS_APPROX(a, b) VERIFY((a)==(b));
#undef VERIFY_IS_NOT_APPROX
#define VERIFY_IS_NOT_APPROX(a, b) VERIFY((a)!=(b));
template<typename MatrixType> void signed_integer_type_tests(const MatrixType& m)
{
typedef typename MatrixType::Scalar Scalar;
enum { is_signed = (Scalar(-1) > Scalar(0)) ? 0 : 1 };
VERIFY(is_signed == 1);
Index rows = m.rows();
Index cols = m.cols();
MatrixType m1(rows, cols),
m2 = MatrixType::Random(rows, cols),
mzero = MatrixType::Zero(rows, cols);
do {
m1 = MatrixType::Random(rows, cols);
} while(m1 == mzero || m1 == m2);
// check linear structure
Scalar s1;
do {
s1 = internal::random<Scalar>();
} while(s1 == 0);
VERIFY_IS_EQUAL(-(-m1), m1);
VERIFY_IS_EQUAL(-m2+m1+m2, m1);
VERIFY_IS_EQUAL((-m1+m2)*s1, -s1*m1+s1*m2);
}
template<typename MatrixType> void integer_type_tests(const MatrixType& m)
{
typedef typename MatrixType::Scalar Scalar;
VERIFY(NumTraits<Scalar>::IsInteger);
enum { is_signed = (Scalar(-1) > Scalar(0)) ? 0 : 1 };
VERIFY(int(NumTraits<Scalar>::IsSigned) == is_signed);
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
Index rows = m.rows();
Index cols = m.cols();
// this test relies heavily on Random.h, and there is not much more that we can do
// to test it, so we consider Random.h to be tested as well
MatrixType m1(rows, cols),
m2 = MatrixType::Random(rows, cols),
m3(rows, cols),
mzero = MatrixType::Zero(rows, cols);
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime> SquareMatrixType;
SquareMatrixType identity = SquareMatrixType::Identity(rows, rows),
square = SquareMatrixType::Random(rows, rows);
VectorType v1(rows),
v2 = VectorType::Random(rows),
vzero = VectorType::Zero(rows);
do {
m1 = MatrixType::Random(rows, cols);
} while(m1 == mzero || m1 == m2);
do {
v1 = VectorType::Random(rows);
} while(v1 == vzero || v1 == v2);
VERIFY_IS_APPROX( v1, v1);
VERIFY_IS_NOT_APPROX( v1, 2*v1);
VERIFY_IS_APPROX( vzero, v1-v1);
VERIFY_IS_APPROX( m1, m1);
VERIFY_IS_NOT_APPROX( m1, 2*m1);
VERIFY_IS_APPROX( mzero, m1-m1);
VERIFY_IS_APPROX(m3 = m1,m1);
MatrixType m4;
VERIFY_IS_APPROX(m4 = m1,m1);
m3.real() = m1.real();
VERIFY_IS_APPROX(static_cast<const MatrixType&>(m3).real(), static_cast<const MatrixType&>(m1).real());
VERIFY_IS_APPROX(static_cast<const MatrixType&>(m3).real(), m1.real());
// check == / != operators
VERIFY(m1==m1);
VERIFY(m1!=m2);
VERIFY(!(m1==m2));
VERIFY(!(m1!=m1));
m1 = m2;
VERIFY(m1==m2);
VERIFY(!(m1!=m2));
// check linear structure
Scalar s1;
do {
s1 = internal::random<Scalar>();
} while(s1 == 0);
VERIFY_IS_EQUAL(m1+m1, 2*m1);
VERIFY_IS_EQUAL(m1+m2-m1, m2);
VERIFY_IS_EQUAL(m1*s1, s1*m1);
VERIFY_IS_EQUAL((m1+m2)*s1, s1*m1+s1*m2);
m3 = m2; m3 += m1;
VERIFY_IS_EQUAL(m3, m1+m2);
m3 = m2; m3 -= m1;
VERIFY_IS_EQUAL(m3, m2-m1);
m3 = m2; m3 *= s1;
VERIFY_IS_EQUAL(m3, s1*m2);
// check matrix product.
VERIFY_IS_APPROX(identity * m1, m1);
VERIFY_IS_APPROX(square * (m1 + m2), square * m1 + square * m2);
VERIFY_IS_APPROX((m1 + m2).transpose() * square, m1.transpose() * square + m2.transpose() * square);
VERIFY_IS_APPROX((m1 * m2.transpose()) * m1, m1 * (m2.transpose() * m1));
}
void test_integer_types()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( integer_type_tests(Matrix<unsigned int, 1, 1>()) );
CALL_SUBTEST_1( integer_type_tests(Matrix<unsigned long, 3, 4>()) );
CALL_SUBTEST_2( integer_type_tests(Matrix<long, 2, 2>()) );
CALL_SUBTEST_2( signed_integer_type_tests(Matrix<long, 2, 2>()) );
CALL_SUBTEST_3( integer_type_tests(Matrix<char, 2, Dynamic>(2, 10)) );
CALL_SUBTEST_3( signed_integer_type_tests(Matrix<signed char, 2, Dynamic>(2, 10)) );
CALL_SUBTEST_4( integer_type_tests(Matrix<unsigned char, 3, 3>()) );
CALL_SUBTEST_4( integer_type_tests(Matrix<unsigned char, Dynamic, Dynamic>(20, 20)) );
CALL_SUBTEST_5( integer_type_tests(Matrix<short, Dynamic, 4>(7, 4)) );
CALL_SUBTEST_5( signed_integer_type_tests(Matrix<short, Dynamic, 4>(7, 4)) );
CALL_SUBTEST_6( integer_type_tests(Matrix<unsigned short, 4, 4>()) );
CALL_SUBTEST_7( integer_type_tests(Matrix<long long, 11, 13>()) );
CALL_SUBTEST_7( signed_integer_type_tests(Matrix<long long, 11, 13>()) );
CALL_SUBTEST_8( integer_type_tests(Matrix<unsigned long long, Dynamic, 5>(1, 5)) );
}
#ifdef EIGEN_TEST_PART_9
VERIFY_IS_EQUAL(internal::scalar_div_cost<int>::value, 8);
VERIFY_IS_EQUAL(internal::scalar_div_cost<unsigned int>::value, 8);
if(sizeof(long)>sizeof(int)) {
VERIFY(int(internal::scalar_div_cost<long>::value) > int(internal::scalar_div_cost<int>::value));
VERIFY(int(internal::scalar_div_cost<unsigned long>::value) > int(internal::scalar_div_cost<int>::value));
}
#endif
}
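// --- Illustrative sketch (hedged addition, not part of the upstream test) ---
// Integer matrices support exact arithmetic, which is why the macros above are
// redefined to plain equality instead of approximation. example_int_matrices
// is a hypothetical name.
inline void example_int_matrices()
{
  Matrix2i A, B;
  A << 1, 2,
       3, 4;
  B << 5, 6,
       7, 8;
  Matrix2i C = A * B;           // exact integer product: [[19, 22], [43, 50]]
  bool exact = (C == A * B);    // equality, not approximation, is meaningful here
  (void)exact;
}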
|
C++
|
2D
|
JaeHyunLee94/mpm2d
|
external/eigen-3.3.9/test/bicgstab.cpp
|
.cpp
| 1,460
| 35
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Gael Guennebaud <g.gael@free.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "sparse_solver.h"
#include <Eigen/IterativeLinearSolvers>
template<typename T, typename I> void test_bicgstab_T()
{
BiCGSTAB<SparseMatrix<T,0,I>, DiagonalPreconditioner<T> > bicgstab_colmajor_diag;
BiCGSTAB<SparseMatrix<T,0,I>, IdentityPreconditioner > bicgstab_colmajor_I;
BiCGSTAB<SparseMatrix<T,0,I>, IncompleteLUT<T,I> > bicgstab_colmajor_ilut;
//BiCGSTAB<SparseMatrix<T>, SSORPreconditioner<T> > bicgstab_colmajor_ssor;
bicgstab_colmajor_diag.setTolerance(NumTraits<T>::epsilon()*4);
bicgstab_colmajor_ilut.setTolerance(NumTraits<T>::epsilon()*4);
CALL_SUBTEST( check_sparse_square_solving(bicgstab_colmajor_diag) );
// CALL_SUBTEST( check_sparse_square_solving(bicgstab_colmajor_I) );
CALL_SUBTEST( check_sparse_square_solving(bicgstab_colmajor_ilut) );
//CALL_SUBTEST( check_sparse_square_solving(bicgstab_colmajor_ssor) );
}
void test_bicgstab()
{
CALL_SUBTEST_1((test_bicgstab_T<double,int>()) );
CALL_SUBTEST_2((test_bicgstab_T<std::complex<double>, int>()));
CALL_SUBTEST_3((test_bicgstab_T<double,long int>()));
}
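// --- Illustrative sketch (hedged addition, not part of the upstream test) ---
// Typical BiCGSTAB usage as exercised above: solve a square, possibly
// non-symmetric sparse system with the default diagonal preconditioner.
// example_bicgstab is a hypothetical name; it relies on the sparse solver
// headers included above and on <vector> coming in through main.h.
inline void example_bicgstab()
{
  const int n = 40;
  std::vector<Triplet<double> > trip;
  for(int i = 0; i < n; ++i)
  {
    trip.push_back(Triplet<double>(i, i, 3.0));
    if(i+1 < n) trip.push_back(Triplet<double>(i, i+1, -1.0));
    if(i > 0)   trip.push_back(Triplet<double>(i, i-1, -2.0));  // non-symmetric
  }
  SparseMatrix<double> A(n, n);
  A.setFromTriplets(trip.begin(), trip.end());
  VectorXd b = VectorXd::Ones(n);
  BiCGSTAB<SparseMatrix<double> > solver(A);
  VectorXd x = solver.solve(b);
  (void)x;
}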
|
C++
|