Dataset columns:
keyword: stringclasses (7 values)
repo_name: stringlengths (8 to 98)
file_path: stringlengths (4 to 244)
file_extension: stringclasses (29 values)
file_size: int64 (0 to 84.1M)
line_count: int64 (0 to 1.6M)
content: stringlengths (1 to 84.1M)
language: stringclasses (14 values)
keyword: 2D
repo_name: JaeHyunLee94/mpm2d
file_path: external/eigen-3.3.9/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
file_extension: .h
file_size: 6,912
line_count: 200
content:
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2012 Gael Guennebaud <gael.guennebaud@inria.fr> /* NOTE: thes functions vave been adapted from the LDL library: LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved. LDL License: Your use or distribution of LDL or any modified version of LDL implies that you agree to this License. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Permission is hereby granted to use or copy this program under the terms of the GNU LGPL, provided that the Copyright, this License, and the Availability of the original version is retained on all copies. User documentation of any code that uses this code or any modified version of this code must cite the Copyright, this License, the Availability note, and "Used by permission." Permission to modify the code and to distribute modified code is granted, provided the Copyright, this License, and the Availability note are retained, and a notice that the code was modified is included. */ #include "../Core/util/NonMPL2.h" #ifndef EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H #define EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H namespace Eigen { template<typename Derived> void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT) { const StorageIndex size = StorageIndex(ap.rows()); m_matrix.resize(size, size); m_parent.resize(size); m_nonZerosPerCol.resize(size); ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0); for(StorageIndex k = 0; k < size; ++k) { /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */ m_parent[k] = -1; /* parent of k is not yet known */ tags[k] = k; /* mark node k as visited */ m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */ for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it) { StorageIndex i = it.index(); if(i < k) { /* follow path from i to root of etree, stop at flagged node */ for(; tags[i] != k; i = m_parent[i]) { /* find parent of i if not yet determined */ if (m_parent[i] == -1) m_parent[i] = k; m_nonZerosPerCol[i]++; /* L (k,i) is nonzero */ tags[i] = k; /* mark i as visited */ } } } } /* construct Lp index array from m_nonZerosPerCol column counts */ StorageIndex* Lp = m_matrix.outerIndexPtr(); Lp[0] = 0; for(StorageIndex k = 0; k < size; ++k) Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 
0 : 1); m_matrix.resizeNonZeros(Lp[size]); m_isInitialized = true; m_info = Success; m_analysisIsOk = true; m_factorizationIsOk = false; } template<typename Derived> template<bool DoLDLT> void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType& ap) { using std::sqrt; eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); eigen_assert(ap.rows()==ap.cols()); eigen_assert(m_parent.size()==ap.rows()); eigen_assert(m_nonZerosPerCol.size()==ap.rows()); const StorageIndex size = StorageIndex(ap.rows()); const StorageIndex* Lp = m_matrix.outerIndexPtr(); StorageIndex* Li = m_matrix.innerIndexPtr(); Scalar* Lx = m_matrix.valuePtr(); ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0); ei_declare_aligned_stack_constructed_variable(StorageIndex, pattern, size, 0); ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0); bool ok = true; m_diag.resize(DoLDLT ? size : 0); for(StorageIndex k = 0; k < size; ++k) { // compute nonzero pattern of kth row of L, in topological order y[k] = 0.0; // Y(0:k) is now all zero StorageIndex top = size; // stack for pattern is empty tags[k] = k; // mark node k as visited m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it) { StorageIndex i = it.index(); if(i <= k) { y[i] += numext::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */ Index len; for(len = 0; tags[i] != k; i = m_parent[i]) { pattern[len++] = i; /* L(k,i) is nonzero */ tags[i] = k; /* mark i as visited */ } while(len > 0) pattern[--top] = pattern[--len]; } } /* compute numerical values kth row of L (a sparse triangular solve) */ RealScalar d = numext::real(y[k]) * m_shiftScale + m_shiftOffset; // get D(k,k), apply the shift function, and clear Y(k) y[k] = 0.0; for(; top < size; ++top) { Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ Scalar yi = y[i]; /* get and clear Y(i) */ y[i] = 0.0; /* the nonzero entry L(k,i) */ Scalar l_ki; if(DoLDLT) l_ki = yi / numext::real(m_diag[i]); else yi = l_ki = yi / Lx[Lp[i]]; Index p2 = Lp[i] + m_nonZerosPerCol[i]; Index p; for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p) y[Li[p]] -= numext::conj(Lx[p]) * yi; d -= numext::real(l_ki * numext::conj(yi)); Li[p] = k; /* store L(k,i) in column form of L */ Lx[p] = l_ki; ++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */ } if(DoLDLT) { m_diag[k] = d; if(d == RealScalar(0)) { ok = false; /* failure, D(k,k) is zero */ break; } } else { Index p = Lp[k] + m_nonZerosPerCol[k]++; Li[p] = k ; /* store L(k,k) = sqrt (d) in column k */ if(d <= RealScalar(0)) { ok = false; /* failure, matrix is not positive definite */ break; } Lx[p] = sqrt(d) ; } } m_info = ok ? Success : NumericalIssue; m_factorizationIsOk = true; } } // end namespace Eigen #endif // EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
language: Unknown
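The row above stores Eigen's internal SimplicialCholesky implementation (the symbolic and numeric factorization routines behind SimplicialLLT/SimplicialLDLT). As a hedged illustration only, not part of the dataset, the sketch below shows how the public SimplicialLDLT solver built on these routines is typically used; the matrix A and right-hand side b are invented for the example.

```cpp
// Minimal sketch: solve a sparse SPD system with Eigen::SimplicialLDLT.
#include <Eigen/Sparse>
#include <iostream>
#include <vector>

int main() {
  typedef Eigen::Triplet<double> T;
  std::vector<T> triplets;
  const int n = 4;
  // Build a small symmetric positive definite tridiagonal matrix.
  for (int i = 0; i < n; ++i) {
    triplets.push_back(T(i, i, 2.0));
    if (i + 1 < n) {
      triplets.push_back(T(i, i + 1, -1.0));
      triplets.push_back(T(i + 1, i, -1.0));
    }
  }
  Eigen::SparseMatrix<double> A(n, n);
  A.setFromTriplets(triplets.begin(), triplets.end());
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  // compute() runs analyzePattern() and factorize(), which forward to the
  // *_preordered routines shown in the file above.
  Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > solver;
  solver.compute(A);
  if (solver.info() != Eigen::Success) return 1;  // factorization failed
  Eigen::VectorXd x = solver.solve(b);
  std::cout << x.transpose() << std::endl;
  return 0;
}
```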
keyword: 2D
repo_name: JaeHyunLee94/mpm2d
file_path: external/eigen-3.3.9/Eigen/src/SparseQR/SparseQR.h
file_extension: .h
file_size: 28,373
line_count: 746
content:
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012-2013 Desire Nuentsa <desire.nuentsa_wakam@inria.fr> // Copyright (C) 2012-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_QR_H #define EIGEN_SPARSE_QR_H namespace Eigen { template<typename MatrixType, typename OrderingType> class SparseQR; template<typename SparseQRType> struct SparseQRMatrixQReturnType; template<typename SparseQRType> struct SparseQRMatrixQTransposeReturnType; template<typename SparseQRType, typename Derived> struct SparseQR_QProduct; namespace internal { template <typename SparseQRType> struct traits<SparseQRMatrixQReturnType<SparseQRType> > { typedef typename SparseQRType::MatrixType ReturnType; typedef typename ReturnType::StorageIndex StorageIndex; typedef typename ReturnType::StorageKind StorageKind; enum { RowsAtCompileTime = Dynamic, ColsAtCompileTime = Dynamic }; }; template <typename SparseQRType> struct traits<SparseQRMatrixQTransposeReturnType<SparseQRType> > { typedef typename SparseQRType::MatrixType ReturnType; }; template <typename SparseQRType, typename Derived> struct traits<SparseQR_QProduct<SparseQRType, Derived> > { typedef typename Derived::PlainObject ReturnType; }; } // End namespace internal /** * \ingroup SparseQR_Module * \class SparseQR * \brief Sparse left-looking rank-revealing QR factorization * * This class implements a left-looking rank-revealing QR decomposition * of sparse matrices. When a column has a norm less than a given tolerance * it is implicitly permuted to the end. The QR factorization thus obtained is * given by A*P = Q*R where R is upper triangular or trapezoidal. * * P is the column permutation which is the product of the fill-reducing and the * rank-revealing permutations. Use colsPermutation() to get it. * * Q is the orthogonal matrix represented as products of Householder reflectors. * Use matrixQ() to get an expression and matrixQ().adjoint() to get the adjoint. * You can then apply it to a vector. * * R is the sparse triangular or trapezoidal matrix. The later occurs when A is rank-deficient. * matrixR().topLeftCorner(rank(), rank()) always returns a triangular factor of full rank. * * \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<> * \tparam _OrderingType The fill-reducing ordering method. See the \link OrderingMethods_Module * OrderingMethods \endlink module for the list of built-in and external ordering methods. * * \implsparsesolverconcept * * \warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()). * \warning For complex matrices matrixQ().transpose() will actually return the adjoint matrix. 
* */ template<typename _MatrixType, typename _OrderingType> class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> > { protected: typedef SparseSolverBase<SparseQR<_MatrixType,_OrderingType> > Base; using Base::m_isInitialized; public: using Base::_solve_impl; typedef _MatrixType MatrixType; typedef _OrderingType OrderingType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::StorageIndex StorageIndex; typedef SparseMatrix<Scalar,ColMajor,StorageIndex> QRMatrixType; typedef Matrix<StorageIndex, Dynamic, 1> IndexVector; typedef Matrix<Scalar, Dynamic, 1> ScalarVector; typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType; enum { ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; public: SparseQR () : m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false) { } /** Construct a QR factorization of the matrix \a mat. * * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). * * \sa compute() */ explicit SparseQR(const MatrixType& mat) : m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false) { compute(mat); } /** Computes the QR factorization of the sparse matrix \a mat. * * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). * * \sa analyzePattern(), factorize() */ void compute(const MatrixType& mat) { analyzePattern(mat); factorize(mat); } void analyzePattern(const MatrixType& mat); void factorize(const MatrixType& mat); /** \returns the number of rows of the represented matrix. */ inline Index rows() const { return m_pmat.rows(); } /** \returns the number of columns of the represented matrix. */ inline Index cols() const { return m_pmat.cols();} /** \returns a const reference to the \b sparse upper triangular matrix R of the QR factorization. * \warning The entries of the returned matrix are not sorted. This means that using it in algorithms * expecting sorted entries will fail. This include random coefficient accesses (SpaseMatrix::coeff()), * and coefficient-wise operations. Matrix products and triangular solves are fine though. * * To sort the entries, you can assign it to a row-major matrix, and if a column-major matrix * is required, you can copy it again: * \code * SparseMatrix<double> R = qr.matrixR(); // column-major, not sorted! * SparseMatrix<double,RowMajor> Rr = qr.matrixR(); // row-major, sorted * SparseMatrix<double> Rc = Rr; // column-major, sorted * \endcode */ const QRMatrixType& matrixR() const { return m_R; } /** \returns the number of non linearly dependent columns as determined by the pivoting threshold. * * \sa setPivotThreshold() */ Index rank() const { eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); return m_nonzeropivots; } /** \returns an expression of the matrix Q as products of sparse Householder reflectors. * The common usage of this function is to apply it to a dense matrix or vector * \code * VectorXd B1, B2; * // Initialize B1 * B2 = matrixQ() * B1; * \endcode * * To get a plain SparseMatrix representation of Q: * \code * SparseMatrix<double> Q; * Q = SparseQR<SparseMatrix<double> >(A).matrixQ(); * \endcode * Internally, this call simply performs a sparse product between the matrix Q * and a sparse identity matrix. 
However, due to the fact that the sparse * reflectors are stored unsorted, two transpositions are needed to sort * them before performing the product. */ SparseQRMatrixQReturnType<SparseQR> matrixQ() const { return SparseQRMatrixQReturnType<SparseQR>(*this); } /** \returns a const reference to the column permutation P that was applied to A such that A*P = Q*R * It is the combination of the fill-in reducing permutation and numerical column pivoting. */ const PermutationType& colsPermutation() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_outputPerm_c; } /** \returns A string describing the type of error. * This method is provided to ease debugging, not to handle errors. */ std::string lastErrorMessage() const { return m_lastError; } /** \internal */ template<typename Rhs, typename Dest> bool _solve_impl(const MatrixBase<Rhs> &B, MatrixBase<Dest> &dest) const { eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix"); Index rank = this->rank(); // Compute Q^* * b; typename Dest::PlainObject y, b; y = this->matrixQ().adjoint() * B; b = y; // Solve with the triangular matrix R y.resize((std::max<Index>)(cols(),y.rows()),y.cols()); y.topRows(rank) = this->matrixR().topLeftCorner(rank, rank).template triangularView<Upper>().solve(b.topRows(rank)); y.bottomRows(y.rows()-rank).setZero(); // Apply the column permutation if (m_perm_c.size()) dest = colsPermutation() * y.topRows(cols()); else dest = y.topRows(cols()); m_info = Success; return true; } /** Sets the threshold that is used to determine linearly dependent columns during the factorization. * * In practice, if during the factorization the norm of the column that has to be eliminated is below * this threshold, then the entire column is treated as zero, and it is moved at the end. */ void setPivotThreshold(const RealScalar& threshold) { m_useDefaultThreshold = false; m_threshold = threshold; } /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A. * * \sa compute() */ template<typename Rhs> inline const Solve<SparseQR, Rhs> solve(const MatrixBase<Rhs>& B) const { eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix"); return Solve<SparseQR, Rhs>(*this, B.derived()); } template<typename Rhs> inline const Solve<SparseQR, Rhs> solve(const SparseMatrixBase<Rhs>& B) const { eigen_assert(m_isInitialized && "The factorization should be called first, use compute()"); eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix"); return Solve<SparseQR, Rhs>(*this, B.derived()); } /** \brief Reports whether previous computation was successful. 
* * \returns \c Success if computation was successful, * \c NumericalIssue if the QR factorization reports a numerical problem * \c InvalidInput if the input matrix is invalid * * \sa iparm() */ ComputationInfo info() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_info; } /** \internal */ inline void _sort_matrix_Q() { if(this->m_isQSorted) return; // The matrix Q is sorted during the transposition SparseMatrix<Scalar, RowMajor, Index> mQrm(this->m_Q); this->m_Q = mQrm; this->m_isQSorted = true; } protected: bool m_analysisIsok; bool m_factorizationIsok; mutable ComputationInfo m_info; std::string m_lastError; QRMatrixType m_pmat; // Temporary matrix QRMatrixType m_R; // The triangular factor matrix QRMatrixType m_Q; // The orthogonal reflectors ScalarVector m_hcoeffs; // The Householder coefficients PermutationType m_perm_c; // Fill-reducing Column permutation PermutationType m_pivotperm; // The permutation for rank revealing PermutationType m_outputPerm_c; // The final column permutation RealScalar m_threshold; // Threshold to determine null Householder reflections bool m_useDefaultThreshold; // Use default threshold Index m_nonzeropivots; // Number of non zero pivots found IndexVector m_etree; // Column elimination tree IndexVector m_firstRowElt; // First element in each row bool m_isQSorted; // whether Q is sorted or not bool m_isEtreeOk; // whether the elimination tree match the initial input matrix template <typename, typename > friend struct SparseQR_QProduct; }; /** \brief Preprocessing step of a QR factorization * * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). * * In this step, the fill-reducing permutation is computed and applied to the columns of A * and the column elimination tree is computed as well. Only the sparsity pattern of \a mat is exploited. * * \note In this step it is assumed that there is no empty row in the matrix \a mat. */ template <typename MatrixType, typename OrderingType> void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat) { eigen_assert(mat.isCompressed() && "SparseQR requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to SparseQR"); // Copy to a column major matrix if the input is rowmajor typename internal::conditional<MatrixType::IsRowMajor,QRMatrixType,const MatrixType&>::type matCpy(mat); // Compute the column fill reducing ordering OrderingType ord; ord(matCpy, m_perm_c); Index n = mat.cols(); Index m = mat.rows(); Index diagSize = (std::min)(m,n); if (!m_perm_c.size()) { m_perm_c.resize(n); m_perm_c.indices().setLinSpaced(n, 0,StorageIndex(n-1)); } // Compute the column elimination tree of the permuted matrix m_outputPerm_c = m_perm_c.inverse(); internal::coletree(matCpy, m_etree, m_firstRowElt, m_outputPerm_c.indices().data()); m_isEtreeOk = true; m_R.resize(m, n); m_Q.resize(m, diagSize); // Allocate space for nonzero elements : rough estimation m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree m_Q.reserve(2*mat.nonZeros()); m_hcoeffs.resize(diagSize); m_analysisIsok = true; } /** \brief Performs the numerical QR factorization of the input matrix * * The function SparseQR::analyzePattern(const MatrixType&) must have been called beforehand with * a matrix having the same sparsity pattern than \a mat. 
* * \param mat The sparse column-major matrix */ template <typename MatrixType, typename OrderingType> void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat) { using std::abs; eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step"); StorageIndex m = StorageIndex(mat.rows()); StorageIndex n = StorageIndex(mat.cols()); StorageIndex diagSize = (std::min)(m,n); IndexVector mark((std::max)(m,n)); mark.setConstant(-1); // Record the visited nodes IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q ScalarVector tval(m); // The dense vector used to compute the current column RealScalar pivotThreshold = m_threshold; m_R.setZero(); m_Q.setZero(); m_pmat = mat; if(!m_isEtreeOk) { m_outputPerm_c = m_perm_c.inverse(); internal::coletree(m_pmat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data()); m_isEtreeOk = true; } m_pmat.uncompress(); // To have the innerNonZeroPtr allocated // Apply the fill-in reducing permutation lazily: { // If the input is row major, copy the original column indices, // otherwise directly use the input matrix // IndexVector originalOuterIndicesCpy; const StorageIndex *originalOuterIndices = mat.outerIndexPtr(); if(MatrixType::IsRowMajor) { originalOuterIndicesCpy = IndexVector::Map(m_pmat.outerIndexPtr(),n+1); originalOuterIndices = originalOuterIndicesCpy.data(); } for (int i = 0; i < n; i++) { Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i; m_pmat.outerIndexPtr()[p] = originalOuterIndices[i]; m_pmat.innerNonZeroPtr()[p] = originalOuterIndices[i+1] - originalOuterIndices[i]; } } /* Compute the default threshold as in MatLab, see: * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing * Sparse QR Factorization, ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3 */ if(m_useDefaultThreshold) { RealScalar max2Norm = 0.0; for (int j = 0; j < n; j++) max2Norm = numext::maxi(max2Norm, m_pmat.col(j).norm()); if(max2Norm==RealScalar(0)) max2Norm = RealScalar(1); pivotThreshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon(); } // Initialize the numerical permutation m_pivotperm.setIdentity(n); StorageIndex nonzeroCol = 0; // Record the number of valid pivots m_Q.startVec(0); // Left looking rank-revealing QR factorization: compute a column of R and Q at a time for (StorageIndex col = 0; col < n; ++col) { mark.setConstant(-1); m_R.startVec(col); mark(nonzeroCol) = col; Qidx(0) = nonzeroCol; nzcolR = 0; nzcolQ = 1; bool found_diag = nonzeroCol>=m; tval.setZero(); // Symbolic factorization: find the nonzero locations of the column k of the factors R and Q, i.e., // all the nodes (with indexes lower than rank) reachable through the column elimination tree (etree) rooted at node k. // Note: if the diagonal entry does not exist, then its contribution must be explicitly added, // thus the trick with found_diag that permits to do one more iteration on the diagonal element if this one has not been found. 
for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp) { StorageIndex curIdx = nonzeroCol; if(itp) curIdx = StorageIndex(itp.row()); if(curIdx == nonzeroCol) found_diag = true; // Get the nonzeros indexes of the current column of R StorageIndex st = m_firstRowElt(curIdx); // The traversal of the etree starts here if (st < 0 ) { m_lastError = "Empty row found during numerical factorization"; m_info = InvalidInput; return; } // Traverse the etree Index bi = nzcolR; for (; mark(st) != col; st = m_etree(st)) { Ridx(nzcolR) = st; // Add this row to the list, mark(st) = col; // and mark this row as visited nzcolR++; } // Reverse the list to get the topological ordering Index nt = nzcolR-bi; for(Index i = 0; i < nt/2; i++) std::swap(Ridx(bi+i), Ridx(nzcolR-i-1)); // Copy the current (curIdx,pcol) value of the input matrix if(itp) tval(curIdx) = itp.value(); else tval(curIdx) = Scalar(0); // Compute the pattern of Q(:,k) if(curIdx > nonzeroCol && mark(curIdx) != col ) { Qidx(nzcolQ) = curIdx; // Add this row to the pattern of Q, mark(curIdx) = col; // and mark it as visited nzcolQ++; } } // Browse all the indexes of R(:,col) in reverse order for (Index i = nzcolR-1; i >= 0; i--) { Index curIdx = Ridx(i); // Apply the curIdx-th householder vector to the current column (temporarily stored into tval) Scalar tdot(0); // First compute q' * tval tdot = m_Q.col(curIdx).dot(tval); tdot *= m_hcoeffs(curIdx); // Then update tval = tval - q * tau // FIXME: tval -= tdot * m_Q.col(curIdx) should amount to the same (need to check/add support for efficient "dense ?= sparse") for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq) tval(itq.row()) -= itq.value() * tdot; // Detect fill-in for the current column of Q if(m_etree(Ridx(i)) == nonzeroCol) { for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq) { StorageIndex iQ = StorageIndex(itq.row()); if (mark(iQ) != col) { Qidx(nzcolQ++) = iQ; // Add this row to the pattern of Q, mark(iQ) = col; // and mark it as visited } } } } // End update current column Scalar tau = RealScalar(0); RealScalar beta = 0; if(nonzeroCol < diagSize) { // Compute the Householder reflection that eliminate the current column // FIXME this step should call the Householder module. Scalar c0 = nzcolQ ? 
tval(Qidx(0)) : Scalar(0); // First, the squared norm of Q((col+1):m, col) RealScalar sqrNorm = 0.; for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq))); if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0)) { beta = numext::real(c0); tval(Qidx(0)) = 1; } else { using std::sqrt; beta = sqrt(numext::abs2(c0) + sqrNorm); if(numext::real(c0) >= RealScalar(0)) beta = -beta; tval(Qidx(0)) = 1; for (Index itq = 1; itq < nzcolQ; ++itq) tval(Qidx(itq)) /= (c0 - beta); tau = numext::conj((beta-c0) / beta); } } // Insert values in R for (Index i = nzcolR-1; i >= 0; i--) { Index curIdx = Ridx(i); if(curIdx < nonzeroCol) { m_R.insertBackByOuterInnerUnordered(col, curIdx) = tval(curIdx); tval(curIdx) = Scalar(0.); } } if(nonzeroCol < diagSize && abs(beta) >= pivotThreshold) { m_R.insertBackByOuterInner(col, nonzeroCol) = beta; // The householder coefficient m_hcoeffs(nonzeroCol) = tau; // Record the householder reflections for (Index itq = 0; itq < nzcolQ; ++itq) { Index iQ = Qidx(itq); m_Q.insertBackByOuterInnerUnordered(nonzeroCol,iQ) = tval(iQ); tval(iQ) = Scalar(0.); } nonzeroCol++; if(nonzeroCol<diagSize) m_Q.startVec(nonzeroCol); } else { // Zero pivot found: move implicitly this column to the end for (Index j = nonzeroCol; j < n-1; j++) std::swap(m_pivotperm.indices()(j), m_pivotperm.indices()[j+1]); // Recompute the column elimination tree internal::coletree(m_pmat, m_etree, m_firstRowElt, m_pivotperm.indices().data()); m_isEtreeOk = false; } } m_hcoeffs.tail(diagSize-nonzeroCol).setZero(); // Finalize the column pointers of the sparse matrices R and Q m_Q.finalize(); m_Q.makeCompressed(); m_R.finalize(); m_R.makeCompressed(); m_isQSorted = false; m_nonzeropivots = nonzeroCol; if(nonzeroCol<n) { // Permute the triangular factor to put the 'dead' columns to the end QRMatrixType tempR(m_R); m_R = tempR * m_pivotperm; // Update the column permutation m_outputPerm_c = m_outputPerm_c * m_pivotperm; } m_isInitialized = true; m_factorizationIsok = true; m_info = Success; } template <typename SparseQRType, typename Derived> struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived> > { typedef typename SparseQRType::QRMatrixType MatrixType; typedef typename SparseQRType::Scalar Scalar; // Get the references SparseQR_QProduct(const SparseQRType& qr, const Derived& other, bool transpose) : m_qr(qr),m_other(other),m_transpose(transpose) {} inline Index rows() const { return m_qr.matrixQ().rows(); } inline Index cols() const { return m_other.cols(); } // Assign to a vector template<typename DesType> void evalTo(DesType& res) const { Index m = m_qr.rows(); Index n = m_qr.cols(); Index diagSize = (std::min)(m,n); res = m_other; if (m_transpose) { eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes"); //Compute res = Q' * other column by column for(Index j = 0; j < res.cols(); j++){ for (Index k = 0; k < diagSize; k++) { Scalar tau = Scalar(0); tau = m_qr.m_Q.col(k).dot(res.col(j)); if(tau==Scalar(0)) continue; tau = tau * m_qr.m_hcoeffs(k); res.col(j) -= tau * m_qr.m_Q.col(k); } } } else { eigen_assert(m_qr.matrixQ().cols() == m_other.rows() && "Non conforming object sizes"); res.conservativeResize(rows(), cols()); // Compute res = Q * other column by column for(Index j = 0; j < res.cols(); j++) { for (Index k = diagSize-1; k >=0; k--) { Scalar tau = Scalar(0); tau = m_qr.m_Q.col(k).dot(res.col(j)); if(tau==Scalar(0)) continue; tau = tau * numext::conj(m_qr.m_hcoeffs(k)); res.col(j) -= tau * m_qr.m_Q.col(k); } } } } 
const SparseQRType& m_qr; const Derived& m_other; bool m_transpose; // TODO this actually means adjoint }; template<typename SparseQRType> struct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<SparseQRType> > { typedef typename SparseQRType::Scalar Scalar; typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix; enum { RowsAtCompileTime = Dynamic, ColsAtCompileTime = Dynamic }; explicit SparseQRMatrixQReturnType(const SparseQRType& qr) : m_qr(qr) {} template<typename Derived> SparseQR_QProduct<SparseQRType, Derived> operator*(const MatrixBase<Derived>& other) { return SparseQR_QProduct<SparseQRType,Derived>(m_qr,other.derived(),false); } // To use for operations with the adjoint of Q SparseQRMatrixQTransposeReturnType<SparseQRType> adjoint() const { return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr); } inline Index rows() const { return m_qr.rows(); } inline Index cols() const { return m_qr.rows(); } // To use for operations with the transpose of Q FIXME this is the same as adjoint at the moment SparseQRMatrixQTransposeReturnType<SparseQRType> transpose() const { return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr); } const SparseQRType& m_qr; }; // TODO this actually represents the adjoint of Q template<typename SparseQRType> struct SparseQRMatrixQTransposeReturnType { explicit SparseQRMatrixQTransposeReturnType(const SparseQRType& qr) : m_qr(qr) {} template<typename Derived> SparseQR_QProduct<SparseQRType,Derived> operator*(const MatrixBase<Derived>& other) { return SparseQR_QProduct<SparseQRType,Derived>(m_qr,other.derived(), true); } const SparseQRType& m_qr; }; namespace internal { template<typename SparseQRType> struct evaluator_traits<SparseQRMatrixQReturnType<SparseQRType> > { typedef typename SparseQRType::MatrixType MatrixType; typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind; typedef SparseShape Shape; }; template< typename DstXprType, typename SparseQRType> struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal::assign_op<typename DstXprType::Scalar,typename DstXprType::Scalar>, Sparse2Sparse> { typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType; typedef typename DstXprType::Scalar Scalar; typedef typename DstXprType::StorageIndex StorageIndex; static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/) { typename DstXprType::PlainObject idMat(src.rows(), src.cols()); idMat.setIdentity(); // Sort the sparse householder reflectors if needed const_cast<SparseQRType *>(&src.m_qr)->_sort_matrix_Q(); dst = SparseQR_QProduct<SparseQRType, DstXprType>(src.m_qr, idMat, false); } }; template< typename DstXprType, typename SparseQRType> struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal::assign_op<typename DstXprType::Scalar,typename DstXprType::Scalar>, Sparse2Dense> { typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType; typedef typename DstXprType::Scalar Scalar; typedef typename DstXprType::StorageIndex StorageIndex; static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/) { dst = src.m_qr.matrixQ() * DstXprType::Identity(src.m_qr.rows(), src.m_qr.rows()); } }; } // end namespace internal } // end namespace Eigen #endif
language: Unknown
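The row above stores Eigen's SparseQR header, a left-looking rank-revealing sparse QR factorization. As a hedged illustration only, not part of the dataset, the sketch below shows a typical solve with the class documented in that header; A and b are made up, and the matrix must be compressed before factorization as the header's warning states.

```cpp
// Minimal sketch: rank-revealing sparse QR solve with Eigen::SparseQR.
#include <Eigen/Sparse>
#include <Eigen/SparseQR>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double> A(3, 3);
  A.insert(0, 0) = 4.0;
  A.insert(1, 1) = 2.0;
  A.insert(2, 2) = 1.0;
  A.insert(0, 2) = 1.0;
  A.makeCompressed();  // SparseQR requires compressed storage

  Eigen::VectorXd b(3);
  b << 1.0, 2.0, 3.0;

  // COLAMD provides the fill-reducing column ordering.
  Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr;
  qr.compute(A);  // analyzePattern() + factorize()
  if (qr.info() != Eigen::Success) return 1;
  Eigen::VectorXd x = qr.solve(b);
  std::cout << "rank = " << qr.rank()
            << ", x = " << x.transpose() << std::endl;
  return 0;
}
```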
keyword: 2D
repo_name: JaeHyunLee94/mpm2d
file_path: external/eigen-3.3.9/Eigen/src/PardisoSupport/PardisoSupport.h
file_extension: .h
file_size: 20,060
line_count: 545
content:
/* Copyright (c) 2011, Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************** * Content : Eigen bindings to Intel(R) MKL PARDISO ******************************************************************************** */ #ifndef EIGEN_PARDISOSUPPORT_H #define EIGEN_PARDISOSUPPORT_H namespace Eigen { template<typename _MatrixType> class PardisoLU; template<typename _MatrixType, int Options=Upper> class PardisoLLT; template<typename _MatrixType, int Options=Upper> class PardisoLDLT; namespace internal { template<typename IndexType> struct pardiso_run_selector { static IndexType run( _MKL_DSS_HANDLE_t pt, IndexType maxfct, IndexType mnum, IndexType type, IndexType phase, IndexType n, void *a, IndexType *ia, IndexType *ja, IndexType *perm, IndexType nrhs, IndexType *iparm, IndexType msglvl, void *b, void *x) { IndexType error = 0; ::pardiso(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error); return error; } }; template<> struct pardiso_run_selector<long long int> { typedef long long int IndexType; static IndexType run( _MKL_DSS_HANDLE_t pt, IndexType maxfct, IndexType mnum, IndexType type, IndexType phase, IndexType n, void *a, IndexType *ia, IndexType *ja, IndexType *perm, IndexType nrhs, IndexType *iparm, IndexType msglvl, void *b, void *x) { IndexType error = 0; ::pardiso_64(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error); return error; } }; template<class Pardiso> struct pardiso_traits; template<typename _MatrixType> struct pardiso_traits< PardisoLU<_MatrixType> > { typedef _MatrixType MatrixType; typedef typename _MatrixType::Scalar Scalar; typedef typename _MatrixType::RealScalar RealScalar; typedef typename _MatrixType::StorageIndex StorageIndex; }; template<typename _MatrixType, int Options> struct pardiso_traits< PardisoLLT<_MatrixType, Options> > { typedef _MatrixType MatrixType; typedef typename _MatrixType::Scalar Scalar; typedef typename _MatrixType::RealScalar RealScalar; typedef typename _MatrixType::StorageIndex StorageIndex; }; template<typename 
_MatrixType, int Options> struct pardiso_traits< PardisoLDLT<_MatrixType, Options> > { typedef _MatrixType MatrixType; typedef typename _MatrixType::Scalar Scalar; typedef typename _MatrixType::RealScalar RealScalar; typedef typename _MatrixType::StorageIndex StorageIndex; }; } // end namespace internal template<class Derived> class PardisoImpl : public SparseSolverBase<Derived> { protected: typedef SparseSolverBase<Derived> Base; using Base::derived; using Base::m_isInitialized; typedef internal::pardiso_traits<Derived> Traits; public: using Base::_solve_impl; typedef typename Traits::MatrixType MatrixType; typedef typename Traits::Scalar Scalar; typedef typename Traits::RealScalar RealScalar; typedef typename Traits::StorageIndex StorageIndex; typedef SparseMatrix<Scalar,RowMajor,StorageIndex> SparseMatrixType; typedef Matrix<Scalar,Dynamic,1> VectorType; typedef Matrix<StorageIndex, 1, MatrixType::ColsAtCompileTime> IntRowVectorType; typedef Matrix<StorageIndex, MatrixType::RowsAtCompileTime, 1> IntColVectorType; typedef Array<StorageIndex,64,1,DontAlign> ParameterType; enum { ScalarIsComplex = NumTraits<Scalar>::IsComplex, ColsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic }; PardisoImpl() { eigen_assert((sizeof(StorageIndex) >= sizeof(_INTEGER_t) && sizeof(StorageIndex) <= 8) && "Non-supported index type"); m_iparm.setZero(); m_msglvl = 0; // No output m_isInitialized = false; } ~PardisoImpl() { pardisoRelease(); } inline Index cols() const { return m_size; } inline Index rows() const { return m_size; } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the matrix appears to be negative. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_info; } /** \warning for advanced usage only. * \returns a reference to the parameter array controlling PARDISO. * See the PARDISO manual to know how to use it. */ ParameterType& pardisoParameterArray() { return m_iparm; } /** Performs a symbolic decomposition on the sparcity of \a matrix. * * This function is particularly useful when solving for several problems having the same structure. * * \sa factorize() */ Derived& analyzePattern(const MatrixType& matrix); /** Performs a numeric decomposition of \a matrix * * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. * * \sa analyzePattern() */ Derived& factorize(const MatrixType& matrix); Derived& compute(const MatrixType& matrix); template<typename Rhs,typename Dest> void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const; protected: void pardisoRelease() { if(m_isInitialized) // Factorization ran at least once { internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, -1, internal::convert_index<StorageIndex>(m_size),0, 0, 0, m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL); m_isInitialized = false; } } void pardisoInit(int type) { m_type = type; EIGEN_USING_STD_MATH(abs); bool symmetric = abs(m_type) < 10; m_iparm[0] = 1; // No solver default m_iparm[1] = 2; // use Metis for the ordering m_iparm[2] = 0; // Reserved. Set to zero. (??Numbers of processors, value of OMP_NUM_THREADS??) 
m_iparm[3] = 0; // No iterative-direct algorithm m_iparm[4] = 0; // No user fill-in reducing permutation m_iparm[5] = 0; // Write solution into x, b is left unchanged m_iparm[6] = 0; // Not in use m_iparm[7] = 2; // Max numbers of iterative refinement steps m_iparm[8] = 0; // Not in use m_iparm[9] = 13; // Perturb the pivot elements with 1E-13 m_iparm[10] = symmetric ? 0 : 1; // Use nonsymmetric permutation and scaling MPS m_iparm[11] = 0; // Not in use m_iparm[12] = symmetric ? 0 : 1; // Maximum weighted matching algorithm is switched-off (default for symmetric). // Try m_iparm[12] = 1 in case of inappropriate accuracy m_iparm[13] = 0; // Output: Number of perturbed pivots m_iparm[14] = 0; // Not in use m_iparm[15] = 0; // Not in use m_iparm[16] = 0; // Not in use m_iparm[17] = -1; // Output: Number of nonzeros in the factor LU m_iparm[18] = -1; // Output: Mflops for LU factorization m_iparm[19] = 0; // Output: Numbers of CG Iterations m_iparm[20] = 0; // 1x1 pivoting m_iparm[26] = 0; // No matrix checker m_iparm[27] = (sizeof(RealScalar) == 4) ? 1 : 0; m_iparm[34] = 1; // C indexing m_iparm[36] = 0; // CSR m_iparm[59] = 0; // 0 - In-Core ; 1 - Automatic switch between In-Core and Out-of-Core modes ; 2 - Out-of-Core memset(m_pt, 0, sizeof(m_pt)); } protected: // cached data to reduce reallocation, etc. void manageErrorCode(Index error) const { switch(error) { case 0: m_info = Success; break; case -4: case -7: m_info = NumericalIssue; break; default: m_info = InvalidInput; } } mutable SparseMatrixType m_matrix; mutable ComputationInfo m_info; bool m_analysisIsOk, m_factorizationIsOk; StorageIndex m_type, m_msglvl; mutable void *m_pt[64]; mutable ParameterType m_iparm; mutable IntColVectorType m_perm; Index m_size; }; template<class Derived> Derived& PardisoImpl<Derived>::compute(const MatrixType& a) { m_size = a.rows(); eigen_assert(a.rows() == a.cols()); pardisoRelease(); m_perm.setZero(m_size); derived().getMatrix(a); Index error; error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 12, internal::convert_index<StorageIndex>(m_size), m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL); manageErrorCode(error); m_analysisIsOk = true; m_factorizationIsOk = true; m_isInitialized = true; return derived(); } template<class Derived> Derived& PardisoImpl<Derived>::analyzePattern(const MatrixType& a) { m_size = a.rows(); eigen_assert(m_size == a.cols()); pardisoRelease(); m_perm.setZero(m_size); derived().getMatrix(a); Index error; error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 11, internal::convert_index<StorageIndex>(m_size), m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL); manageErrorCode(error); m_analysisIsOk = true; m_factorizationIsOk = false; m_isInitialized = true; return derived(); } template<class Derived> Derived& PardisoImpl<Derived>::factorize(const MatrixType& a) { eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); eigen_assert(m_size == a.rows() && m_size == a.cols()); derived().getMatrix(a); Index error; error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 22, internal::convert_index<StorageIndex>(m_size), m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL); manageErrorCode(error); m_factorizationIsOk = true; return derived(); } template<class 
Derived> template<typename BDerived,typename XDerived> void PardisoImpl<Derived>::_solve_impl(const MatrixBase<BDerived> &b, MatrixBase<XDerived>& x) const { if(m_iparm[0] == 0) // Factorization was not computed { m_info = InvalidInput; return; } //Index n = m_matrix.rows(); Index nrhs = Index(b.cols()); eigen_assert(m_size==b.rows()); eigen_assert(((MatrixBase<BDerived>::Flags & RowMajorBit) == 0 || nrhs == 1) && "Row-major right hand sides are not supported"); eigen_assert(((MatrixBase<XDerived>::Flags & RowMajorBit) == 0 || nrhs == 1) && "Row-major matrices of unknowns are not supported"); eigen_assert(((nrhs == 1) || b.outerStride() == b.rows())); // switch (transposed) { // case SvNoTrans : m_iparm[11] = 0 ; break; // case SvTranspose : m_iparm[11] = 2 ; break; // case SvAdjoint : m_iparm[11] = 1 ; break; // default: // //std::cerr << "Eigen: transposition option \"" << transposed << "\" not supported by the PARDISO backend\n"; // m_iparm[11] = 0; // } Scalar* rhs_ptr = const_cast<Scalar*>(b.derived().data()); Matrix<Scalar,Dynamic,Dynamic,ColMajor> tmp; // Pardiso cannot solve in-place if(rhs_ptr == x.derived().data()) { tmp = b; rhs_ptr = tmp.data(); } Index error; error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 33, internal::convert_index<StorageIndex>(m_size), m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(), m_perm.data(), internal::convert_index<StorageIndex>(nrhs), m_iparm.data(), m_msglvl, rhs_ptr, x.derived().data()); manageErrorCode(error); } /** \ingroup PardisoSupport_Module * \class PardisoLU * \brief A sparse direct LU factorization and solver based on the PARDISO library * * This class allows to solve for A.X = B sparse linear problems via a direct LU factorization * using the Intel MKL PARDISO library. The sparse matrix A must be squared and invertible. * The vectors or matrices X and B can be either dense or sparse. * * By default, it runs in in-core mode. To enable PARDISO's out-of-core feature, set: * \code solver.pardisoParameterArray()[59] = 1; \endcode * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * * \implsparsesolverconcept * * \sa \ref TutorialSparseSolverConcept, class SparseLU */ template<typename MatrixType> class PardisoLU : public PardisoImpl< PardisoLU<MatrixType> > { protected: typedef PardisoImpl<PardisoLU> Base; typedef typename Base::Scalar Scalar; typedef typename Base::RealScalar RealScalar; using Base::pardisoInit; using Base::m_matrix; friend class PardisoImpl< PardisoLU<MatrixType> >; public: using Base::compute; using Base::solve; PardisoLU() : Base() { pardisoInit(Base::ScalarIsComplex ? 13 : 11); } explicit PardisoLU(const MatrixType& matrix) : Base() { pardisoInit(Base::ScalarIsComplex ? 13 : 11); compute(matrix); } protected: void getMatrix(const MatrixType& matrix) { m_matrix = matrix; m_matrix.makeCompressed(); } }; /** \ingroup PardisoSupport_Module * \class PardisoLLT * \brief A sparse direct Cholesky (LLT) factorization and solver based on the PARDISO library * * This class allows to solve for A.X = B sparse linear problems via a LL^T Cholesky factorization * using the Intel MKL PARDISO library. The sparse matrix A must be selfajoint and positive definite. * The vectors or matrices X and B can be either dense or sparse. * * By default, it runs in in-core mode. 
To enable PARDISO's out-of-core feature, set: * \code solver.pardisoParameterArray()[59] = 1; \endcode * * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam UpLo can be any bitwise combination of Upper, Lower. The default is Upper, meaning only the upper triangular part has to be used. * Upper|Lower can be used to tell both triangular parts can be used as input. * * \implsparsesolverconcept * * \sa \ref TutorialSparseSolverConcept, class SimplicialLLT */ template<typename MatrixType, int _UpLo> class PardisoLLT : public PardisoImpl< PardisoLLT<MatrixType,_UpLo> > { protected: typedef PardisoImpl< PardisoLLT<MatrixType,_UpLo> > Base; typedef typename Base::Scalar Scalar; typedef typename Base::RealScalar RealScalar; using Base::pardisoInit; using Base::m_matrix; friend class PardisoImpl< PardisoLLT<MatrixType,_UpLo> >; public: typedef typename Base::StorageIndex StorageIndex; enum { UpLo = _UpLo }; using Base::compute; PardisoLLT() : Base() { pardisoInit(Base::ScalarIsComplex ? 4 : 2); } explicit PardisoLLT(const MatrixType& matrix) : Base() { pardisoInit(Base::ScalarIsComplex ? 4 : 2); compute(matrix); } protected: void getMatrix(const MatrixType& matrix) { // PARDISO supports only upper, row-major matrices PermutationMatrix<Dynamic,Dynamic,StorageIndex> p_null; m_matrix.resize(matrix.rows(), matrix.cols()); m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null); m_matrix.makeCompressed(); } }; /** \ingroup PardisoSupport_Module * \class PardisoLDLT * \brief A sparse direct Cholesky (LDLT) factorization and solver based on the PARDISO library * * This class allows to solve for A.X = B sparse linear problems via a LDL^T Cholesky factorization * using the Intel MKL PARDISO library. The sparse matrix A is assumed to be selfajoint and positive definite. * For complex matrices, A can also be symmetric only, see the \a Options template parameter. * The vectors or matrices X and B can be either dense or sparse. * * By default, it runs in in-core mode. To enable PARDISO's out-of-core feature, set: * \code solver.pardisoParameterArray()[59] = 1; \endcode * * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam Options can be any bitwise combination of Upper, Lower, and Symmetric. The default is Upper, meaning only the upper triangular part has to be used. * Symmetric can be used for symmetric, non-selfadjoint complex matrices, the default being to assume a selfadjoint matrix. * Upper|Lower can be used to tell both triangular parts can be used as input. * * \implsparsesolverconcept * * \sa \ref TutorialSparseSolverConcept, class SimplicialLDLT */ template<typename MatrixType, int Options> class PardisoLDLT : public PardisoImpl< PardisoLDLT<MatrixType,Options> > { protected: typedef PardisoImpl< PardisoLDLT<MatrixType,Options> > Base; typedef typename Base::Scalar Scalar; typedef typename Base::RealScalar RealScalar; using Base::pardisoInit; using Base::m_matrix; friend class PardisoImpl< PardisoLDLT<MatrixType,Options> >; public: typedef typename Base::StorageIndex StorageIndex; using Base::compute; enum { UpLo = Options&(Upper|Lower) }; PardisoLDLT() : Base() { pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 6 : -4 ) : -2); } explicit PardisoLDLT(const MatrixType& matrix) : Base() { pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 
6 : -4 ) : -2); compute(matrix); } void getMatrix(const MatrixType& matrix) { // PARDISO supports only upper, row-major matrices PermutationMatrix<Dynamic,Dynamic,StorageIndex> p_null; m_matrix.resize(matrix.rows(), matrix.cols()); m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null); m_matrix.makeCompressed(); } }; } // end namespace Eigen #endif // EIGEN_PARDISOSUPPORT_H
language: Unknown
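The row above stores Eigen's bindings to Intel MKL PARDISO. As a hedged illustration only, not part of the dataset, the sketch below shows how the PardisoLU wrapper declared in that header is typically driven; it assumes the program is compiled and linked against Intel MKL, and the matrix A and right-hand side b are invented for the example.

```cpp
// Minimal sketch: direct sparse LU solve via the MKL PARDISO wrapper.
#include <Eigen/Sparse>
#include <Eigen/PardisoSupport>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double> A(3, 3);
  A.insert(0, 0) = 3.0;
  A.insert(1, 1) = 2.0;
  A.insert(2, 2) = 1.0;
  A.insert(0, 1) = 0.5;
  A.makeCompressed();

  Eigen::VectorXd b(3);
  b << 1.0, 1.0, 1.0;

  Eigen::PardisoLU<Eigen::SparseMatrix<double> > solver;
  // Optional, per the header's documentation: enable PARDISO's out-of-core mode.
  // solver.pardisoParameterArray()[59] = 1;
  solver.compute(A);
  if (solver.info() != Eigen::Success) return 1;
  Eigen::VectorXd x = solver.solve(b);
  std::cout << x.transpose() << std::endl;
  return 0;
}
```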
keyword: 2D
repo_name: JaeHyunLee94/mpm2d
file_path: external/eigen-3.3.9/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
file_extension: .h
file_size: 11,405
line_count: 314
content:
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Desire Nuentsa <desire.nuentsa_wakam@inria.fr> // Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SUITESPARSEQRSUPPORT_H #define EIGEN_SUITESPARSEQRSUPPORT_H namespace Eigen { template<typename MatrixType> class SPQR; template<typename SPQRType> struct SPQRMatrixQReturnType; template<typename SPQRType> struct SPQRMatrixQTransposeReturnType; template <typename SPQRType, typename Derived> struct SPQR_QProduct; namespace internal { template <typename SPQRType> struct traits<SPQRMatrixQReturnType<SPQRType> > { typedef typename SPQRType::MatrixType ReturnType; }; template <typename SPQRType> struct traits<SPQRMatrixQTransposeReturnType<SPQRType> > { typedef typename SPQRType::MatrixType ReturnType; }; template <typename SPQRType, typename Derived> struct traits<SPQR_QProduct<SPQRType, Derived> > { typedef typename Derived::PlainObject ReturnType; }; } // End namespace internal /** * \ingroup SPQRSupport_Module * \class SPQR * \brief Sparse QR factorization based on SuiteSparseQR library * * This class is used to perform a multithreaded and multifrontal rank-revealing QR decomposition * of sparse matrices. The result is then used to solve linear leasts_square systems. * Clearly, a QR factorization is returned such that A*P = Q*R where : * * P is the column permutation. Use colsPermutation() to get it. * * Q is the orthogonal matrix represented as Householder reflectors. * Use matrixQ() to get an expression and matrixQ().transpose() to get the transpose. * You can then apply it to a vector. * * R is the sparse triangular factor. Use matrixQR() to get it as SparseMatrix. * NOTE : The Index type of R is always SuiteSparse_long. 
You can get it with SPQR::Index * * \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<> * * \implsparsesolverconcept * * */ template<typename _MatrixType> class SPQR : public SparseSolverBase<SPQR<_MatrixType> > { protected: typedef SparseSolverBase<SPQR<_MatrixType> > Base; using Base::m_isInitialized; public: typedef typename _MatrixType::Scalar Scalar; typedef typename _MatrixType::RealScalar RealScalar; typedef SuiteSparse_long StorageIndex ; typedef SparseMatrix<Scalar, ColMajor, StorageIndex> MatrixType; typedef Map<PermutationMatrix<Dynamic, Dynamic, StorageIndex> > PermutationType; enum { ColsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic }; public: SPQR() : m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true) { cholmod_l_start(&m_cc); } explicit SPQR(const _MatrixType& matrix) : m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true) { cholmod_l_start(&m_cc); compute(matrix); } ~SPQR() { SPQR_free(); cholmod_l_finish(&m_cc); } void SPQR_free() { cholmod_l_free_sparse(&m_H, &m_cc); cholmod_l_free_sparse(&m_cR, &m_cc); cholmod_l_free_dense(&m_HTau, &m_cc); std::free(m_E); std::free(m_HPinv); } void compute(const _MatrixType& matrix) { if(m_isInitialized) SPQR_free(); MatrixType mat(matrix); /* Compute the default threshold as in MatLab, see: * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing * Sparse QR Factorization, ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3 */ RealScalar pivotThreshold = m_tolerance; if(m_useDefaultThreshold) { RealScalar max2Norm = 0.0; for (int j = 0; j < mat.cols(); j++) max2Norm = numext::maxi(max2Norm, mat.col(j).norm()); if(max2Norm==RealScalar(0)) max2Norm = RealScalar(1); pivotThreshold = 20 * (mat.rows() + mat.cols()) * max2Norm * NumTraits<RealScalar>::epsilon(); } cholmod_sparse A; A = viewAsCholmod(mat); m_rows = matrix.rows(); Index col = matrix.cols(); m_rank = SuiteSparseQR<Scalar>(m_ordering, pivotThreshold, col, &A, &m_cR, &m_E, &m_H, &m_HPinv, &m_HTau, &m_cc); if (!m_cR) { m_info = NumericalIssue; m_isInitialized = false; return; } m_info = Success; m_isInitialized = true; m_isRUpToDate = false; } /** * Get the number of rows of the input matrix and the Q matrix */ inline Index rows() const {return m_rows; } /** * Get the number of columns of the input matrix. */ inline Index cols() const { return m_cR->ncol; } template<typename Rhs, typename Dest> void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const { eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()"); eigen_assert(b.cols()==1 && "This method is for vectors only"); //Compute Q^T * b typename Dest::PlainObject y, y2; y = matrixQ().transpose() * b; // Solves with the triangular matrix R Index rk = this->rank(); y2 = y; y.resize((std::max)(cols(),Index(y.rows())),y.cols()); y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView<Upper>().solve(y2.topRows(rk)); // Apply the column permutation // colsPermutation() performs a copy of the permutation, // so let's apply it manually: for(Index i = 0; i < rk; ++i) dest.row(m_E[i]) = y.row(i); for(Index i = rk; i < cols(); ++i) dest.row(m_E[i]).setZero(); // y.bottomRows(y.rows()-rk).setZero(); // dest = colsPermutation() * y.topRows(cols()); m_info = Success; } /** \returns the sparse triangular factor R. 
It is a sparse matrix */ const MatrixType matrixR() const { eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()"); if(!m_isRUpToDate) { m_R = viewAsEigen<Scalar,ColMajor, typename MatrixType::StorageIndex>(*m_cR); m_isRUpToDate = true; } return m_R; } /// Get an expression of the matrix Q SPQRMatrixQReturnType<SPQR> matrixQ() const { return SPQRMatrixQReturnType<SPQR>(*this); } /// Get the permutation that was applied to columns of A PermutationType colsPermutation() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return PermutationType(m_E, m_cR->ncol); } /** * Gets the rank of the matrix. * It should be equal to matrixQR().cols if the matrix is full-rank */ Index rank() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_cc.SPQR_istat[4]; } /// Set the fill-reducing ordering method to be used void setSPQROrdering(int ord) { m_ordering = ord;} /// Set the tolerance tol to treat columns with 2-norm < =tol as zero void setPivotThreshold(const RealScalar& tol) { m_useDefaultThreshold = false; m_tolerance = tol; } /** \returns a pointer to the SPQR workspace */ cholmod_common *cholmodCommon() const { return &m_cc; } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the sparse QR can not be computed */ ComputationInfo info() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_info; } protected: bool m_analysisIsOk; bool m_factorizationIsOk; mutable bool m_isRUpToDate; mutable ComputationInfo m_info; int m_ordering; // Ordering method to use, see SPQR's manual int m_allow_tol; // Allow to use some tolerance during numerical factorization. RealScalar m_tolerance; // treat columns with 2-norm below this tolerance as zero mutable cholmod_sparse *m_cR; // The sparse R factor in cholmod format mutable MatrixType m_R; // The sparse matrix R in Eigen format mutable StorageIndex *m_E; // The permutation applied to columns mutable cholmod_sparse *m_H; //The householder vectors mutable StorageIndex *m_HPinv; // The row permutation of H mutable cholmod_dense *m_HTau; // The Householder coefficients mutable Index m_rank; // The rank of the matrix mutable cholmod_common m_cc; // Workspace and parameters bool m_useDefaultThreshold; // Use default threshold Index m_rows; template<typename ,typename > friend struct SPQR_QProduct; }; template <typename SPQRType, typename Derived> struct SPQR_QProduct : ReturnByValue<SPQR_QProduct<SPQRType,Derived> > { typedef typename SPQRType::Scalar Scalar; typedef typename SPQRType::StorageIndex StorageIndex; //Define the constructor to get reference to argument types SPQR_QProduct(const SPQRType& spqr, const Derived& other, bool transpose) : m_spqr(spqr),m_other(other),m_transpose(transpose) {} inline Index rows() const { return m_transpose ? m_spqr.rows() : m_spqr.cols(); } inline Index cols() const { return m_other.cols(); } // Assign to a vector template<typename ResType> void evalTo(ResType& res) const { cholmod_dense y_cd; cholmod_dense *x_cd; int method = m_transpose ? 
SPQR_QTX : SPQR_QX; cholmod_common *cc = m_spqr.cholmodCommon(); y_cd = viewAsCholmod(m_other.const_cast_derived()); x_cd = SuiteSparseQR_qmult<Scalar>(method, m_spqr.m_H, m_spqr.m_HTau, m_spqr.m_HPinv, &y_cd, cc); res = Matrix<Scalar,ResType::RowsAtCompileTime,ResType::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x), x_cd->nrow, x_cd->ncol); cholmod_l_free_dense(&x_cd, cc); } const SPQRType& m_spqr; const Derived& m_other; bool m_transpose; }; template<typename SPQRType> struct SPQRMatrixQReturnType{ SPQRMatrixQReturnType(const SPQRType& spqr) : m_spqr(spqr) {} template<typename Derived> SPQR_QProduct<SPQRType, Derived> operator*(const MatrixBase<Derived>& other) { return SPQR_QProduct<SPQRType,Derived>(m_spqr,other.derived(),false); } SPQRMatrixQTransposeReturnType<SPQRType> adjoint() const { return SPQRMatrixQTransposeReturnType<SPQRType>(m_spqr); } // To use for operations with the transpose of Q SPQRMatrixQTransposeReturnType<SPQRType> transpose() const { return SPQRMatrixQTransposeReturnType<SPQRType>(m_spqr); } const SPQRType& m_spqr; }; template<typename SPQRType> struct SPQRMatrixQTransposeReturnType{ SPQRMatrixQTransposeReturnType(const SPQRType& spqr) : m_spqr(spqr) {} template<typename Derived> SPQR_QProduct<SPQRType,Derived> operator*(const MatrixBase<Derived>& other) { return SPQR_QProduct<SPQRType,Derived>(m_spqr,other.derived(), true); } const SPQRType& m_spqr; }; }// End namespace Eigen #endif
Unknown
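The record above is Eigen's SPQRSupport header, a wrapper around SuiteSparseQR. The following minimal sketch is not part of the dumped file; it assumes Eigen is built and linked against SuiteSparse/SPQR (libspqr, libcholmod), and shows the typical way the class is used to solve a sparse least-squares problem.

#include <Eigen/Sparse>
#include <Eigen/SPQRSupport>
#include <iostream>

int main()
{
  // Small over-determined system A*x ~= b (4 equations, 2 unknowns).
  Eigen::SparseMatrix<double> A(4, 2);
  A.insert(0, 0) = 1.0;  A.insert(0, 1) = 1.0;
  A.insert(1, 0) = 2.0;  A.insert(1, 1) = 1.0;
  A.insert(2, 0) = 1.0;  A.insert(2, 1) = 3.0;
  A.insert(3, 1) = 2.0;
  A.makeCompressed();

  Eigen::VectorXd b(4);
  b << 1.0, 2.0, 3.0, 4.0;

  // Rank-revealing sparse QR factorization via SuiteSparseQR.
  Eigen::SPQR<Eigen::SparseMatrix<double> > solver(A);
  if (solver.info() != Eigen::Success) return 1;

  Eigen::VectorXd x = solver.solve(b);   // least-squares solution of A*x = b
  std::cout << "rank = " << solver.rank() << "\nx =\n" << x << std::endl;
  return 0;
}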
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/plugins/MatrixCwiseUnaryOps.h
.h
2,937
86
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // This file is included into the body of the base classes supporting matrix specific coefficient-wise functions. // This include MatrixBase and SparseMatrixBase. typedef CwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived> CwiseAbsReturnType; typedef CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const Derived> CwiseAbs2ReturnType; typedef CwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived> CwiseSqrtReturnType; typedef CwiseUnaryOp<internal::scalar_sign_op<Scalar>, const Derived> CwiseSignReturnType; typedef CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived> CwiseInverseReturnType; /// \returns an expression of the coefficient-wise absolute value of \c *this /// /// Example: \include MatrixBase_cwiseAbs.cpp /// Output: \verbinclude MatrixBase_cwiseAbs.out /// EIGEN_DOC_UNARY_ADDONS(cwiseAbs,absolute value) /// /// \sa cwiseAbs2() /// EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseAbsReturnType cwiseAbs() const { return CwiseAbsReturnType(derived()); } /// \returns an expression of the coefficient-wise squared absolute value of \c *this /// /// Example: \include MatrixBase_cwiseAbs2.cpp /// Output: \verbinclude MatrixBase_cwiseAbs2.out /// EIGEN_DOC_UNARY_ADDONS(cwiseAbs2,squared absolute value) /// /// \sa cwiseAbs() /// EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseAbs2ReturnType cwiseAbs2() const { return CwiseAbs2ReturnType(derived()); } /// \returns an expression of the coefficient-wise square root of *this. /// /// Example: \include MatrixBase_cwiseSqrt.cpp /// Output: \verbinclude MatrixBase_cwiseSqrt.out /// EIGEN_DOC_UNARY_ADDONS(cwiseSqrt,square-root) /// /// \sa cwisePow(), cwiseSquare() /// EIGEN_DEVICE_FUNC inline const CwiseSqrtReturnType cwiseSqrt() const { return CwiseSqrtReturnType(derived()); } /// \returns an expression of the coefficient-wise signum of *this. /// /// Example: \include MatrixBase_cwiseSign.cpp /// Output: \verbinclude MatrixBase_cwiseSign.out /// EIGEN_DOC_UNARY_ADDONS(cwiseSign,sign function) /// EIGEN_DEVICE_FUNC inline const CwiseSignReturnType cwiseSign() const { return CwiseSignReturnType(derived()); } /// \returns an expression of the coefficient-wise inverse of *this. /// /// Example: \include MatrixBase_cwiseInverse.cpp /// Output: \verbinclude MatrixBase_cwiseInverse.out /// EIGEN_DOC_UNARY_ADDONS(cwiseInverse,inverse) /// /// \sa cwiseProduct() /// EIGEN_DEVICE_FUNC inline const CwiseInverseReturnType cwiseInverse() const { return CwiseInverseReturnType(derived()); }
Unknown
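The plugin above injects the matrix-specific coefficient-wise unary operations (cwiseAbs, cwiseAbs2, cwiseSqrt, cwiseSign, cwiseInverse) into MatrixBase and SparseMatrixBase. A short illustrative sketch, not part of the dumped file:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix2d m;
  m << -4.0,   9.0,
        1.0, -16.0;

  std::cout << m.cwiseAbs()             << "\n\n"      // |m(i,j)|
            << m.cwiseAbs2()            << "\n\n"      // m(i,j)^2
            << m.cwiseAbs().cwiseSqrt() << "\n\n"      // sqrt applied coefficient-wise
            << m.cwiseSign()            << "\n\n"      // -1, 0 or +1 per coefficient
            << m.cwiseInverse()         << std::endl;  // 1 / m(i,j)
  return 0;
}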
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/plugins/CommonCwiseBinaryOps.h
.h
4,828
116
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2016 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // This file is a base class plugin containing common coefficient wise functions. /** \returns an expression of the difference of \c *this and \a other * * \note If you want to substract a given scalar from all coefficients, see Cwise::operator-(). * * \sa class CwiseBinaryOp, operator-=() */ EIGEN_MAKE_CWISE_BINARY_OP(operator-,difference) /** \returns an expression of the sum of \c *this and \a other * * \note If you want to add a given scalar to all coefficients, see Cwise::operator+(). * * \sa class CwiseBinaryOp, operator+=() */ EIGEN_MAKE_CWISE_BINARY_OP(operator+,sum) /** \returns an expression of a custom coefficient-wise operator \a func of *this and \a other * * The template parameter \a CustomBinaryOp is the type of the functor * of the custom operator (see class CwiseBinaryOp for an example) * * Here is an example illustrating the use of custom functors: * \include class_CwiseBinaryOp.cpp * Output: \verbinclude class_CwiseBinaryOp.out * * \sa class CwiseBinaryOp, operator+(), operator-(), cwiseProduct() */ template<typename CustomBinaryOp, typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived> binaryExpr(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other, const CustomBinaryOp& func = CustomBinaryOp()) const { return CwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>(derived(), other.derived(), func); } #ifndef EIGEN_PARSED_BY_DOXYGEN EIGEN_MAKE_SCALAR_BINARY_OP(operator*,product) #else /** \returns an expression of \c *this scaled by the scalar factor \a scalar * * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression. */ template<typename T> const CwiseBinaryOp<internal::scalar_product_op<Scalar,T>,Derived,Constant<T> > operator*(const T& scalar) const; /** \returns an expression of \a expr scaled by the scalar factor \a scalar * * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression. */ template<typename T> friend const CwiseBinaryOp<internal::scalar_product_op<T,Scalar>,Constant<T>,Derived> operator*(const T& scalar, const StorageBaseType& expr); #endif #ifndef EIGEN_PARSED_BY_DOXYGEN EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(operator/,quotient) #else /** \returns an expression of \c *this divided by the scalar value \a scalar * * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression. */ template<typename T> const CwiseBinaryOp<internal::scalar_quotient_op<Scalar,T>,Derived,Constant<T> > operator/(const T& scalar) const; #endif /** \returns an expression of the coefficient-wise boolean \b and operator of \c *this and \a other * * \warning this operator is for expression of bool only. 
* * Example: \include Cwise_boolean_and.cpp * Output: \verbinclude Cwise_boolean_and.out * * \sa operator||(), select() */ template<typename OtherDerived> EIGEN_DEVICE_FUNC inline const CwiseBinaryOp<internal::scalar_boolean_and_op, const Derived, const OtherDerived> operator&&(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const { EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value && internal::is_same<bool,typename OtherDerived::Scalar>::value), THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL); return CwiseBinaryOp<internal::scalar_boolean_and_op, const Derived, const OtherDerived>(derived(),other.derived()); } /** \returns an expression of the coefficient-wise boolean \b or operator of \c *this and \a other * * \warning this operator is for expression of bool only. * * Example: \include Cwise_boolean_or.cpp * Output: \verbinclude Cwise_boolean_or.out * * \sa operator&&(), select() */ template<typename OtherDerived> EIGEN_DEVICE_FUNC inline const CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived> operator||(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const { EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value && internal::is_same<bool,typename OtherDerived::Scalar>::value), THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL); return CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>(derived(),other.derived()); }
Unknown
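The plugin above provides the coefficient-wise binary operations shared by matrices and arrays: sums and differences, scalar products and quotients, a user-supplied functor via binaryExpr(), and && / || restricted to boolean expressions. A brief illustrative sketch follows; the MaxAbsOp functor is hypothetical, introduced only for this example.

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

// Hypothetical functor for binaryExpr(): keep the coefficient of larger magnitude.
struct MaxAbsOp {
  double operator()(double x, double y) const { return std::abs(x) >= std::abs(y) ? x : y; }
};

int main()
{
  Eigen::Matrix2d a, b;
  a <<  1.0, -2.0,
        3.0, -4.0;
  b << -5.0,  1.0,
       -1.0,  6.0;

  Eigen::Matrix2d sum    = a + b;                        // coefficient-wise sum
  Eigen::Matrix2d diff   = a - b;                        // coefficient-wise difference
  Eigen::Matrix2d scaled = 2.0 * a / 4.0;                // scalar product and quotient
  Eigen::Matrix2d mixed  = a.binaryExpr(b, MaxAbsOp());  // custom coefficient-wise functor

  // operator&& and operator|| only accept expressions of bool.
  Eigen::Array<bool, 2, 2> mask = (a.array() > 0.0) && (b.array() > 0.0);

  std::cout << sum   << "\n\n" << diff << "\n\n" << scaled << "\n\n"
            << mixed << "\n\n" << mask << std::endl;
  return 0;
}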
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/plugins/ArrayCwiseBinaryOps.h
.h
13,166
333
/** \returns an expression of the coefficient wise product of \c *this and \a other * * \sa MatrixBase::cwiseProduct */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product) operator*(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const { return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)(derived(), other.derived()); } /** \returns an expression of the coefficient wise quotient of \c *this and \a other * * \sa MatrixBase::cwiseQuotient */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_quotient_op<Scalar,typename OtherDerived::Scalar>, const Derived, const OtherDerived> operator/(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const { return CwiseBinaryOp<internal::scalar_quotient_op<Scalar,typename OtherDerived::Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); } /** \returns an expression of the coefficient-wise min of \c *this and \a other * * Example: \include Cwise_min.cpp * Output: \verbinclude Cwise_min.out * * \sa max() */ EIGEN_MAKE_CWISE_BINARY_OP(min,min) /** \returns an expression of the coefficient-wise min of \c *this and scalar \a other * * \sa max() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> > #ifdef EIGEN_PARSED_BY_DOXYGEN min #else (min) #endif (const Scalar &other) const { return (min)(Derived::PlainObject::Constant(rows(), cols(), other)); } /** \returns an expression of the coefficient-wise max of \c *this and \a other * * Example: \include Cwise_max.cpp * Output: \verbinclude Cwise_max.out * * \sa min() */ EIGEN_MAKE_CWISE_BINARY_OP(max,max) /** \returns an expression of the coefficient-wise max of \c *this and scalar \a other * * \sa min() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> > #ifdef EIGEN_PARSED_BY_DOXYGEN max #else (max) #endif (const Scalar &other) const { return (max)(Derived::PlainObject::Constant(rows(), cols(), other)); } /** \returns an expression of the coefficient-wise power of \c *this to the given array of \a exponents. * * This function computes the coefficient-wise power. * * Example: \include Cwise_array_power_array.cpp * Output: \verbinclude Cwise_array_power_array.out */ EIGEN_MAKE_CWISE_BINARY_OP(pow,pow) #ifndef EIGEN_PARSED_BY_DOXYGEN EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(pow,pow) #else /** \returns an expression of the coefficients of \c *this rasied to the constant power \a exponent * * \tparam T is the scalar type of \a exponent. It must be compatible with the scalar type of the given expression. * * This function computes the coefficient-wise power. The function MatrixBase::pow() in the * unsupported module MatrixFunctions computes the matrix power. 
* * Example: \include Cwise_pow.cpp * Output: \verbinclude Cwise_pow.out * * \sa ArrayBase::pow(ArrayBase), square(), cube(), exp(), log() */ template<typename T> const CwiseBinaryOp<internal::scalar_pow_op<Scalar,T>,Derived,Constant<T> > pow(const T& exponent) const; #endif // TODO code generating macros could be moved to Macros.h and could include generation of documentation #define EIGEN_MAKE_CWISE_COMP_OP(OP, COMPARATOR) \ template<typename OtherDerived> \ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<Scalar, typename OtherDerived::Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived> \ OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \ { \ return CwiseBinaryOp<internal::scalar_cmp_op<Scalar, typename OtherDerived::Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived>(derived(), other.derived()); \ }\ typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> > Cmp ## COMPARATOR ## ReturnType; \ typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,Scalar, internal::cmp_ ## COMPARATOR>, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject>, const Derived > RCmp ## COMPARATOR ## ReturnType; \ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Cmp ## COMPARATOR ## ReturnType \ OP(const Scalar& s) const { \ return this->OP(Derived::PlainObject::Constant(rows(), cols(), s)); \ } \ EIGEN_DEVICE_FUNC friend EIGEN_STRONG_INLINE const RCmp ## COMPARATOR ## ReturnType \ OP(const Scalar& s, const EIGEN_CURRENT_STORAGE_BASE_CLASS<Derived>& d) { \ return Derived::PlainObject::Constant(d.rows(), d.cols(), s).OP(d); \ } #define EIGEN_MAKE_CWISE_COMP_R_OP(OP, R_OP, RCOMPARATOR) \ template<typename OtherDerived> \ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<typename OtherDerived::Scalar, Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived> \ OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \ { \ return CwiseBinaryOp<internal::scalar_cmp_op<typename OtherDerived::Scalar, Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived>(other.derived(), derived()); \ } \ EIGEN_DEVICE_FUNC \ inline const RCmp ## RCOMPARATOR ## ReturnType \ OP(const Scalar& s) const { \ return Derived::PlainObject::Constant(rows(), cols(), s).R_OP(*this); \ } \ friend inline const Cmp ## RCOMPARATOR ## ReturnType \ OP(const Scalar& s, const Derived& d) { \ return d.R_OP(Derived::PlainObject::Constant(d.rows(), d.cols(), s)); \ } /** \returns an expression of the coefficient-wise \< operator of *this and \a other * * Example: \include Cwise_less.cpp * Output: \verbinclude Cwise_less.out * * \sa all(), any(), operator>(), operator<=() */ EIGEN_MAKE_CWISE_COMP_OP(operator<, LT) /** \returns an expression of the coefficient-wise \<= operator of *this and \a other * * Example: \include Cwise_less_equal.cpp * Output: \verbinclude Cwise_less_equal.out * * \sa all(), any(), operator>=(), operator<() */ EIGEN_MAKE_CWISE_COMP_OP(operator<=, LE) /** \returns an expression of the coefficient-wise \> operator of *this and \a other * * Example: \include Cwise_greater.cpp * Output: \verbinclude Cwise_greater.out * * \sa all(), any(), operator>=(), operator<() */ EIGEN_MAKE_CWISE_COMP_R_OP(operator>, operator<, LT) /** \returns an expression of the coefficient-wise \>= operator of *this and \a other * * Example: \include 
Cwise_greater_equal.cpp * Output: \verbinclude Cwise_greater_equal.out * * \sa all(), any(), operator>(), operator<=() */ EIGEN_MAKE_CWISE_COMP_R_OP(operator>=, operator<=, LE) /** \returns an expression of the coefficient-wise == operator of *this and \a other * * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. * In order to check for equality between two vectors or matrices with floating-point coefficients, it is * generally a far better idea to use a fuzzy comparison as provided by isApprox() and * isMuchSmallerThan(). * * Example: \include Cwise_equal_equal.cpp * Output: \verbinclude Cwise_equal_equal.out * * \sa all(), any(), isApprox(), isMuchSmallerThan() */ EIGEN_MAKE_CWISE_COMP_OP(operator==, EQ) /** \returns an expression of the coefficient-wise != operator of *this and \a other * * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. * In order to check for equality between two vectors or matrices with floating-point coefficients, it is * generally a far better idea to use a fuzzy comparison as provided by isApprox() and * isMuchSmallerThan(). * * Example: \include Cwise_not_equal.cpp * Output: \verbinclude Cwise_not_equal.out * * \sa all(), any(), isApprox(), isMuchSmallerThan() */ EIGEN_MAKE_CWISE_COMP_OP(operator!=, NEQ) #undef EIGEN_MAKE_CWISE_COMP_OP #undef EIGEN_MAKE_CWISE_COMP_R_OP // scalar addition #ifndef EIGEN_PARSED_BY_DOXYGEN EIGEN_MAKE_SCALAR_BINARY_OP(operator+,sum) #else /** \returns an expression of \c *this with each coeff incremented by the constant \a scalar * * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression. * * Example: \include Cwise_plus.cpp * Output: \verbinclude Cwise_plus.out * * \sa operator+=(), operator-() */ template<typename T> const CwiseBinaryOp<internal::scalar_sum_op<Scalar,T>,Derived,Constant<T> > operator+(const T& scalar) const; /** \returns an expression of \a expr with each coeff incremented by the constant \a scalar * * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression. */ template<typename T> friend const CwiseBinaryOp<internal::scalar_sum_op<T,Scalar>,Constant<T>,Derived> operator+(const T& scalar, const StorageBaseType& expr); #endif #ifndef EIGEN_PARSED_BY_DOXYGEN EIGEN_MAKE_SCALAR_BINARY_OP(operator-,difference) #else /** \returns an expression of \c *this with each coeff decremented by the constant \a scalar * * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression. * * Example: \include Cwise_minus.cpp * Output: \verbinclude Cwise_minus.out * * \sa operator+=(), operator-() */ template<typename T> const CwiseBinaryOp<internal::scalar_difference_op<Scalar,T>,Derived,Constant<T> > operator-(const T& scalar) const; /** \returns an expression of the constant matrix of value \a scalar decremented by the coefficients of \a expr * * \tparam T is the scalar type of \a scalar. It must be compatible with the scalar type of the given expression. */ template<typename T> friend const CwiseBinaryOp<internal::scalar_difference_op<T,Scalar>,Constant<T>,Derived> operator-(const T& scalar, const StorageBaseType& expr); #endif #ifndef EIGEN_PARSED_BY_DOXYGEN EIGEN_MAKE_SCALAR_BINARY_OP_ONTHELEFT(operator/,quotient) #else /** * \brief Component-wise division of the scalar \a s by array elements of \a a. * * \tparam Scalar is the scalar type of \a x. 
It must be compatible with the scalar type of the given array expression (\c Derived::Scalar). */ template<typename T> friend inline const CwiseBinaryOp<internal::scalar_quotient_op<T,Scalar>,Constant<T>,Derived> operator/(const T& s,const StorageBaseType& a); #endif /** \returns an expression of the coefficient-wise ^ operator of *this and \a other * * \warning this operator is for expression of bool only. * * Example: \include Cwise_boolean_xor.cpp * Output: \verbinclude Cwise_boolean_xor.out * * \sa operator&&(), select() */ template<typename OtherDerived> EIGEN_DEVICE_FUNC inline const CwiseBinaryOp<internal::scalar_boolean_xor_op, const Derived, const OtherDerived> operator^(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const { EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value && internal::is_same<bool,typename OtherDerived::Scalar>::value), THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL); return CwiseBinaryOp<internal::scalar_boolean_xor_op, const Derived, const OtherDerived>(derived(),other.derived()); } // NOTE disabled until we agree on argument order #if 0 /** \cpp11 \returns an expression of the coefficient-wise polygamma function. * * \specialfunctions_module * * It returns the \a n -th derivative of the digamma(psi) evaluated at \c *this. * * \warning Be careful with the order of the parameters: x.polygamma(n) is equivalent to polygamma(n,x) * * \sa Eigen::polygamma() */ template<typename DerivedN> inline const CwiseBinaryOp<internal::scalar_polygamma_op<Scalar>, const DerivedN, const Derived> polygamma(const EIGEN_CURRENT_STORAGE_BASE_CLASS<DerivedN> &n) const { return CwiseBinaryOp<internal::scalar_polygamma_op<Scalar>, const DerivedN, const Derived>(n.derived(), this->derived()); } #endif /** \returns an expression of the coefficient-wise zeta function. * * \specialfunctions_module * * It returns the Riemann zeta function of two arguments \c *this and \a q: * * \param *this is the exposent, it must be > 1 * \param q is the shift, it must be > 0 * * \note This function supports only float and double scalar types. To support other scalar types, the user has * to provide implementations of zeta(T,T) for any scalar type T to be supported. * * This method is an alias for zeta(*this,q); * * \sa Eigen::zeta() */ template<typename DerivedQ> inline const CwiseBinaryOp<internal::scalar_zeta_op<Scalar>, const Derived, const DerivedQ> zeta(const EIGEN_CURRENT_STORAGE_BASE_CLASS<DerivedQ> &q) const { return CwiseBinaryOp<internal::scalar_zeta_op<Scalar>, const Derived, const DerivedQ>(this->derived(), q.derived()); }
Unknown
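The plugin above defines the array-specific coefficient-wise binary operations: products and quotients, min/max against another array or a scalar, pow, the comparison operators, and scalar addition/subtraction. A short illustrative sketch, not part of the dumped file:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::ArrayXd a(4), b(4);
  a << 1.0, 2.0, 3.0, 4.0;
  b << 4.0, 3.0, 2.0, 1.0;

  Eigen::ArrayXd prod    = a * b;        // coefficient-wise product
  Eigen::ArrayXd quot    = a / b;        // coefficient-wise quotient
  Eigen::ArrayXd mn      = a.min(b);     // coefficient-wise minimum
  Eigen::ArrayXd mx      = a.max(2.5);   // maximum against a scalar
  Eigen::ArrayXd pw      = a.pow(2.0);   // each coefficient raised to the power 2
  Eigen::ArrayXd shifted = a + 10.0;     // scalar addition

  Eigen::Array<bool, Eigen::Dynamic, 1> cmp = (a >= b);  // coefficient-wise comparison

  std::cout << prod.transpose()    << "\n" << quot.transpose() << "\n"
            << mn.transpose()      << "\n" << mx.transpose()   << "\n"
            << pw.transpose()      << "\n" << shifted.transpose() << "\n"
            << cmp.transpose()     << std::endl;
  return 0;
}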
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/plugins/BlockMethods.h
.h
37,403
1,059
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PARSED_BY_DOXYGEN /// \internal expression type of a column */ typedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, 1, !IsRowMajor> ColXpr; typedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, 1, !IsRowMajor> ConstColXpr; /// \internal expression type of a row */ typedef Block<Derived, 1, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> RowXpr; typedef const Block<const Derived, 1, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> ConstRowXpr; /// \internal expression type of a block of whole columns */ typedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, Dynamic, !IsRowMajor> ColsBlockXpr; typedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, Dynamic, !IsRowMajor> ConstColsBlockXpr; /// \internal expression type of a block of whole rows */ typedef Block<Derived, Dynamic, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> RowsBlockXpr; typedef const Block<const Derived, Dynamic, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> ConstRowsBlockXpr; /// \internal expression type of a block of whole columns */ template<int N> struct NColsBlockXpr { typedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, N, !IsRowMajor> Type; }; template<int N> struct ConstNColsBlockXpr { typedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, N, !IsRowMajor> Type; }; /// \internal expression type of a block of whole rows */ template<int N> struct NRowsBlockXpr { typedef Block<Derived, N, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> Type; }; template<int N> struct ConstNRowsBlockXpr { typedef const Block<const Derived, N, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> Type; }; /// \internal expression of a block */ typedef Block<Derived> BlockXpr; typedef const Block<const Derived> ConstBlockXpr; /// \internal expression of a block of fixed sizes */ template<int Rows, int Cols> struct FixedBlockXpr { typedef Block<Derived,Rows,Cols> Type; }; template<int Rows, int Cols> struct ConstFixedBlockXpr { typedef Block<const Derived,Rows,Cols> Type; }; typedef VectorBlock<Derived> SegmentReturnType; typedef const VectorBlock<const Derived> ConstSegmentReturnType; template<int Size> struct FixedSegmentReturnType { typedef VectorBlock<Derived, Size> Type; }; template<int Size> struct ConstFixedSegmentReturnType { typedef const VectorBlock<const Derived, Size> Type; }; #endif // not EIGEN_PARSED_BY_DOXYGEN /// \returns a dynamic-size expression of a block in *this. 
/// /// \param startRow the first row in the block /// \param startCol the first column in the block /// \param blockRows the number of rows in the block /// \param blockCols the number of columns in the block /// /// Example: \include MatrixBase_block_int_int_int_int.cpp /// Output: \verbinclude MatrixBase_block_int_int_int_int.out /// /// \note Even though the returned expression has dynamic size, in the case /// when it is applied to a fixed-size matrix, it inherits a fixed maximal size, /// which means that evaluating it does not cause a dynamic memory allocation. /// EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /// /// \sa class Block, block(Index,Index) /// EIGEN_DEVICE_FUNC inline BlockXpr block(Index startRow, Index startCol, Index blockRows, Index blockCols) { return BlockXpr(derived(), startRow, startCol, blockRows, blockCols); } /// This is the const version of block(Index,Index,Index,Index). */ EIGEN_DEVICE_FUNC inline const ConstBlockXpr block(Index startRow, Index startCol, Index blockRows, Index blockCols) const { return ConstBlockXpr(derived(), startRow, startCol, blockRows, blockCols); } /// \returns a dynamic-size expression of a top-right corner of *this. /// /// \param cRows the number of rows in the corner /// \param cCols the number of columns in the corner /// /// Example: \include MatrixBase_topRightCorner_int_int.cpp /// Output: \verbinclude MatrixBase_topRightCorner_int_int.out /// EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /// /// \sa class Block, block(Index,Index,Index,Index) /// EIGEN_DEVICE_FUNC inline BlockXpr topRightCorner(Index cRows, Index cCols) { return BlockXpr(derived(), 0, cols() - cCols, cRows, cCols); } /// This is the const version of topRightCorner(Index, Index). EIGEN_DEVICE_FUNC inline const ConstBlockXpr topRightCorner(Index cRows, Index cCols) const { return ConstBlockXpr(derived(), 0, cols() - cCols, cRows, cCols); } /// \returns an expression of a fixed-size top-right corner of *this. /// /// \tparam CRows the number of rows in the corner /// \tparam CCols the number of columns in the corner /// /// Example: \include MatrixBase_template_int_int_topRightCorner.cpp /// Output: \verbinclude MatrixBase_template_int_int_topRightCorner.out /// EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /// /// \sa class Block, block<int,int>(Index,Index) /// template<int CRows, int CCols> EIGEN_DEVICE_FUNC inline typename FixedBlockXpr<CRows,CCols>::Type topRightCorner() { return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - CCols); } /// This is the const version of topRightCorner<int, int>(). template<int CRows, int CCols> EIGEN_DEVICE_FUNC inline const typename ConstFixedBlockXpr<CRows,CCols>::Type topRightCorner() const { return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - CCols); } /// \returns an expression of a top-right corner of *this. /// /// \tparam CRows number of rows in corner as specified at compile-time /// \tparam CCols number of columns in corner as specified at compile-time /// \param cRows number of rows in corner as specified at run-time /// \param cCols number of columns in corner as specified at run-time /// /// This function is mainly useful for corners where the number of rows is specified at compile-time /// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time /// information should not contradict. In other words, \a cRows should equal \a CRows unless /// \a CRows is \a Dynamic, and the same for the number of columns. 
/// /// Example: \include MatrixBase_template_int_int_topRightCorner_int_int.cpp /// Output: \verbinclude MatrixBase_template_int_int_topRightCorner_int_int.out /// EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /// /// \sa class Block /// template<int CRows, int CCols> inline typename FixedBlockXpr<CRows,CCols>::Type topRightCorner(Index cRows, Index cCols) { return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - cCols, cRows, cCols); } /// This is the const version of topRightCorner<int, int>(Index, Index). template<int CRows, int CCols> inline const typename ConstFixedBlockXpr<CRows,CCols>::Type topRightCorner(Index cRows, Index cCols) const { return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - cCols, cRows, cCols); } /// \returns a dynamic-size expression of a top-left corner of *this. /// /// \param cRows the number of rows in the corner /// \param cCols the number of columns in the corner /// /// Example: \include MatrixBase_topLeftCorner_int_int.cpp /// Output: \verbinclude MatrixBase_topLeftCorner_int_int.out /// EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /// /// \sa class Block, block(Index,Index,Index,Index) /// EIGEN_DEVICE_FUNC inline BlockXpr topLeftCorner(Index cRows, Index cCols) { return BlockXpr(derived(), 0, 0, cRows, cCols); } /// This is the const version of topLeftCorner(Index, Index). EIGEN_DEVICE_FUNC inline const ConstBlockXpr topLeftCorner(Index cRows, Index cCols) const { return ConstBlockXpr(derived(), 0, 0, cRows, cCols); } /// \returns an expression of a fixed-size top-left corner of *this. /// /// The template parameters CRows and CCols are the number of rows and columns in the corner. /// /// Example: \include MatrixBase_template_int_int_topLeftCorner.cpp /// Output: \verbinclude MatrixBase_template_int_int_topLeftCorner.out /// EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /// /// \sa class Block, block(Index,Index,Index,Index) /// template<int CRows, int CCols> EIGEN_DEVICE_FUNC inline typename FixedBlockXpr<CRows,CCols>::Type topLeftCorner() { return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0); } /// This is the const version of topLeftCorner<int, int>(). template<int CRows, int CCols> EIGEN_DEVICE_FUNC inline const typename ConstFixedBlockXpr<CRows,CCols>::Type topLeftCorner() const { return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0); } /// \returns an expression of a top-left corner of *this. /// /// \tparam CRows number of rows in corner as specified at compile-time /// \tparam CCols number of columns in corner as specified at compile-time /// \param cRows number of rows in corner as specified at run-time /// \param cCols number of columns in corner as specified at run-time /// /// This function is mainly useful for corners where the number of rows is specified at compile-time /// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time /// information should not contradict. In other words, \a cRows should equal \a CRows unless /// \a CRows is \a Dynamic, and the same for the number of columns. 
/// /// Example: \include MatrixBase_template_int_int_topLeftCorner_int_int.cpp /// Output: \verbinclude MatrixBase_template_int_int_topLeftCorner_int_int.out /// EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /// /// \sa class Block /// template<int CRows, int CCols> inline typename FixedBlockXpr<CRows,CCols>::Type topLeftCorner(Index cRows, Index cCols) { return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0, cRows, cCols); } /// This is the const version of topLeftCorner<int, int>(Index, Index). template<int CRows, int CCols> inline const typename ConstFixedBlockXpr<CRows,CCols>::Type topLeftCorner(Index cRows, Index cCols) const { return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0, cRows, cCols); } /// \returns a dynamic-size expression of a bottom-right corner of *this. /// /// \param cRows the number of rows in the corner /// \param cCols the number of columns in the corner /// /// Example: \include MatrixBase_bottomRightCorner_int_int.cpp /// Output: \verbinclude MatrixBase_bottomRightCorner_int_int.out /// EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /// /// \sa class Block, block(Index,Index,Index,Index) /// EIGEN_DEVICE_FUNC inline BlockXpr bottomRightCorner(Index cRows, Index cCols) { return BlockXpr(derived(), rows() - cRows, cols() - cCols, cRows, cCols); } /// This is the const version of bottomRightCorner(Index, Index). EIGEN_DEVICE_FUNC inline const ConstBlockXpr bottomRightCorner(Index cRows, Index cCols) const { return ConstBlockXpr(derived(), rows() - cRows, cols() - cCols, cRows, cCols); } /// \returns an expression of a fixed-size bottom-right corner of *this. /// /// The template parameters CRows and CCols are the number of rows and columns in the corner. /// /// Example: \include MatrixBase_template_int_int_bottomRightCorner.cpp /// Output: \verbinclude MatrixBase_template_int_int_bottomRightCorner.out /// EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /// /// \sa class Block, block(Index,Index,Index,Index) /// template<int CRows, int CCols> EIGEN_DEVICE_FUNC inline typename FixedBlockXpr<CRows,CCols>::Type bottomRightCorner() { return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, cols() - CCols); } /// This is the const version of bottomRightCorner<int, int>(). template<int CRows, int CCols> EIGEN_DEVICE_FUNC inline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomRightCorner() const { return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, cols() - CCols); } /// \returns an expression of a bottom-right corner of *this. /// /// \tparam CRows number of rows in corner as specified at compile-time /// \tparam CCols number of columns in corner as specified at compile-time /// \param cRows number of rows in corner as specified at run-time /// \param cCols number of columns in corner as specified at run-time /// /// This function is mainly useful for corners where the number of rows is specified at compile-time /// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time /// information should not contradict. In other words, \a cRows should equal \a CRows unless /// \a CRows is \a Dynamic, and the same for the number of columns. 
/// /// Example: \include MatrixBase_template_int_int_bottomRightCorner_int_int.cpp /// Output: \verbinclude MatrixBase_template_int_int_bottomRightCorner_int_int.out /// EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /// /// \sa class Block /// template<int CRows, int CCols> inline typename FixedBlockXpr<CRows,CCols>::Type bottomRightCorner(Index cRows, Index cCols) { return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols); } /// This is the const version of bottomRightCorner<int, int>(Index, Index). template<int CRows, int CCols> inline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomRightCorner(Index cRows, Index cCols) const { return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols); } /// \returns a dynamic-size expression of a bottom-left corner of *this. /// /// \param cRows the number of rows in the corner /// \param cCols the number of columns in the corner /// /// Example: \include MatrixBase_bottomLeftCorner_int_int.cpp /// Output: \verbinclude MatrixBase_bottomLeftCorner_int_int.out /// EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /// /// \sa class Block, block(Index,Index,Index,Index) /// EIGEN_DEVICE_FUNC inline BlockXpr bottomLeftCorner(Index cRows, Index cCols) { return BlockXpr(derived(), rows() - cRows, 0, cRows, cCols); } /// This is the const version of bottomLeftCorner(Index, Index). EIGEN_DEVICE_FUNC inline const ConstBlockXpr bottomLeftCorner(Index cRows, Index cCols) const { return ConstBlockXpr(derived(), rows() - cRows, 0, cRows, cCols); } /// \returns an expression of a fixed-size bottom-left corner of *this. /// /// The template parameters CRows and CCols are the number of rows and columns in the corner. /// /// Example: \include MatrixBase_template_int_int_bottomLeftCorner.cpp /// Output: \verbinclude MatrixBase_template_int_int_bottomLeftCorner.out /// EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /// /// \sa class Block, block(Index,Index,Index,Index) /// template<int CRows, int CCols> EIGEN_DEVICE_FUNC inline typename FixedBlockXpr<CRows,CCols>::Type bottomLeftCorner() { return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, 0); } /// This is the const version of bottomLeftCorner<int, int>(). template<int CRows, int CCols> EIGEN_DEVICE_FUNC inline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomLeftCorner() const { return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, 0); } /// \returns an expression of a bottom-left corner of *this. /// /// \tparam CRows number of rows in corner as specified at compile-time /// \tparam CCols number of columns in corner as specified at compile-time /// \param cRows number of rows in corner as specified at run-time /// \param cCols number of columns in corner as specified at run-time /// /// This function is mainly useful for corners where the number of rows is specified at compile-time /// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time /// information should not contradict. In other words, \a cRows should equal \a CRows unless /// \a CRows is \a Dynamic, and the same for the number of columns. 
/// /// Example: \include MatrixBase_template_int_int_bottomLeftCorner_int_int.cpp /// Output: \verbinclude MatrixBase_template_int_int_bottomLeftCorner_int_int.out /// EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /// /// \sa class Block /// template<int CRows, int CCols> inline typename FixedBlockXpr<CRows,CCols>::Type bottomLeftCorner(Index cRows, Index cCols) { return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, 0, cRows, cCols); } /// This is the const version of bottomLeftCorner<int, int>(Index, Index). template<int CRows, int CCols> inline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomLeftCorner(Index cRows, Index cCols) const { return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, 0, cRows, cCols); } /// \returns a block consisting of the top rows of *this. /// /// \param n the number of rows in the block /// /// Example: \include MatrixBase_topRows_int.cpp /// Output: \verbinclude MatrixBase_topRows_int.out /// EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major) /// /// \sa class Block, block(Index,Index,Index,Index) /// EIGEN_DEVICE_FUNC inline RowsBlockXpr topRows(Index n) { return RowsBlockXpr(derived(), 0, 0, n, cols()); } /// This is the const version of topRows(Index). EIGEN_DEVICE_FUNC inline ConstRowsBlockXpr topRows(Index n) const { return ConstRowsBlockXpr(derived(), 0, 0, n, cols()); } /// \returns a block consisting of the top rows of *this. /// /// \tparam N the number of rows in the block as specified at compile-time /// \param n the number of rows in the block as specified at run-time /// /// The compile-time and run-time information should not contradict. In other words, /// \a n should equal \a N unless \a N is \a Dynamic. /// /// Example: \include MatrixBase_template_int_topRows.cpp /// Output: \verbinclude MatrixBase_template_int_topRows.out /// EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major) /// /// \sa class Block, block(Index,Index,Index,Index) /// template<int N> EIGEN_DEVICE_FUNC inline typename NRowsBlockXpr<N>::Type topRows(Index n = N) { return typename NRowsBlockXpr<N>::Type(derived(), 0, 0, n, cols()); } /// This is the const version of topRows<int>(). template<int N> EIGEN_DEVICE_FUNC inline typename ConstNRowsBlockXpr<N>::Type topRows(Index n = N) const { return typename ConstNRowsBlockXpr<N>::Type(derived(), 0, 0, n, cols()); } /// \returns a block consisting of the bottom rows of *this. /// /// \param n the number of rows in the block /// /// Example: \include MatrixBase_bottomRows_int.cpp /// Output: \verbinclude MatrixBase_bottomRows_int.out /// EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major) /// /// \sa class Block, block(Index,Index,Index,Index) /// EIGEN_DEVICE_FUNC inline RowsBlockXpr bottomRows(Index n) { return RowsBlockXpr(derived(), rows() - n, 0, n, cols()); } /// This is the const version of bottomRows(Index). EIGEN_DEVICE_FUNC inline ConstRowsBlockXpr bottomRows(Index n) const { return ConstRowsBlockXpr(derived(), rows() - n, 0, n, cols()); } /// \returns a block consisting of the bottom rows of *this. /// /// \tparam N the number of rows in the block as specified at compile-time /// \param n the number of rows in the block as specified at run-time /// /// The compile-time and run-time information should not contradict. In other words, /// \a n should equal \a N unless \a N is \a Dynamic. 
/// /// Example: \include MatrixBase_template_int_bottomRows.cpp /// Output: \verbinclude MatrixBase_template_int_bottomRows.out /// EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major) /// /// \sa class Block, block(Index,Index,Index,Index) /// template<int N> EIGEN_DEVICE_FUNC inline typename NRowsBlockXpr<N>::Type bottomRows(Index n = N) { return typename NRowsBlockXpr<N>::Type(derived(), rows() - n, 0, n, cols()); } /// This is the const version of bottomRows<int>(). template<int N> EIGEN_DEVICE_FUNC inline typename ConstNRowsBlockXpr<N>::Type bottomRows(Index n = N) const { return typename ConstNRowsBlockXpr<N>::Type(derived(), rows() - n, 0, n, cols()); } /// \returns a block consisting of a range of rows of *this. /// /// \param startRow the index of the first row in the block /// \param n the number of rows in the block /// /// Example: \include DenseBase_middleRows_int.cpp /// Output: \verbinclude DenseBase_middleRows_int.out /// EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major) /// /// \sa class Block, block(Index,Index,Index,Index) /// EIGEN_DEVICE_FUNC inline RowsBlockXpr middleRows(Index startRow, Index n) { return RowsBlockXpr(derived(), startRow, 0, n, cols()); } /// This is the const version of middleRows(Index,Index). EIGEN_DEVICE_FUNC inline ConstRowsBlockXpr middleRows(Index startRow, Index n) const { return ConstRowsBlockXpr(derived(), startRow, 0, n, cols()); } /// \returns a block consisting of a range of rows of *this. /// /// \tparam N the number of rows in the block as specified at compile-time /// \param startRow the index of the first row in the block /// \param n the number of rows in the block as specified at run-time /// /// The compile-time and run-time information should not contradict. In other words, /// \a n should equal \a N unless \a N is \a Dynamic. /// /// Example: \include DenseBase_template_int_middleRows.cpp /// Output: \verbinclude DenseBase_template_int_middleRows.out /// EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major) /// /// \sa class Block, block(Index,Index,Index,Index) /// template<int N> EIGEN_DEVICE_FUNC inline typename NRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N) { return typename NRowsBlockXpr<N>::Type(derived(), startRow, 0, n, cols()); } /// This is the const version of middleRows<int>(). template<int N> EIGEN_DEVICE_FUNC inline typename ConstNRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N) const { return typename ConstNRowsBlockXpr<N>::Type(derived(), startRow, 0, n, cols()); } /// \returns a block consisting of the left columns of *this. /// /// \param n the number of columns in the block /// /// Example: \include MatrixBase_leftCols_int.cpp /// Output: \verbinclude MatrixBase_leftCols_int.out /// EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major) /// /// \sa class Block, block(Index,Index,Index,Index) /// EIGEN_DEVICE_FUNC inline ColsBlockXpr leftCols(Index n) { return ColsBlockXpr(derived(), 0, 0, rows(), n); } /// This is the const version of leftCols(Index). EIGEN_DEVICE_FUNC inline ConstColsBlockXpr leftCols(Index n) const { return ConstColsBlockXpr(derived(), 0, 0, rows(), n); } /// \returns a block consisting of the left columns of *this. /// /// \tparam N the number of columns in the block as specified at compile-time /// \param n the number of columns in the block as specified at run-time /// /// The compile-time and run-time information should not contradict. In other words, /// \a n should equal \a N unless \a N is \a Dynamic. 
/// /// Example: \include MatrixBase_template_int_leftCols.cpp /// Output: \verbinclude MatrixBase_template_int_leftCols.out /// EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major) /// /// \sa class Block, block(Index,Index,Index,Index) /// template<int N> EIGEN_DEVICE_FUNC inline typename NColsBlockXpr<N>::Type leftCols(Index n = N) { return typename NColsBlockXpr<N>::Type(derived(), 0, 0, rows(), n); } /// This is the const version of leftCols<int>(). template<int N> EIGEN_DEVICE_FUNC inline typename ConstNColsBlockXpr<N>::Type leftCols(Index n = N) const { return typename ConstNColsBlockXpr<N>::Type(derived(), 0, 0, rows(), n); } /// \returns a block consisting of the right columns of *this. /// /// \param n the number of columns in the block /// /// Example: \include MatrixBase_rightCols_int.cpp /// Output: \verbinclude MatrixBase_rightCols_int.out /// EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major) /// /// \sa class Block, block(Index,Index,Index,Index) /// EIGEN_DEVICE_FUNC inline ColsBlockXpr rightCols(Index n) { return ColsBlockXpr(derived(), 0, cols() - n, rows(), n); } /// This is the const version of rightCols(Index). EIGEN_DEVICE_FUNC inline ConstColsBlockXpr rightCols(Index n) const { return ConstColsBlockXpr(derived(), 0, cols() - n, rows(), n); } /// \returns a block consisting of the right columns of *this. /// /// \tparam N the number of columns in the block as specified at compile-time /// \param n the number of columns in the block as specified at run-time /// /// The compile-time and run-time information should not contradict. In other words, /// \a n should equal \a N unless \a N is \a Dynamic. /// /// Example: \include MatrixBase_template_int_rightCols.cpp /// Output: \verbinclude MatrixBase_template_int_rightCols.out /// EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major) /// /// \sa class Block, block(Index,Index,Index,Index) /// template<int N> EIGEN_DEVICE_FUNC inline typename NColsBlockXpr<N>::Type rightCols(Index n = N) { return typename NColsBlockXpr<N>::Type(derived(), 0, cols() - n, rows(), n); } /// This is the const version of rightCols<int>(). template<int N> EIGEN_DEVICE_FUNC inline typename ConstNColsBlockXpr<N>::Type rightCols(Index n = N) const { return typename ConstNColsBlockXpr<N>::Type(derived(), 0, cols() - n, rows(), n); } /// \returns a block consisting of a range of columns of *this. /// /// \param startCol the index of the first column in the block /// \param numCols the number of columns in the block /// /// Example: \include DenseBase_middleCols_int.cpp /// Output: \verbinclude DenseBase_middleCols_int.out /// EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major) /// /// \sa class Block, block(Index,Index,Index,Index) /// EIGEN_DEVICE_FUNC inline ColsBlockXpr middleCols(Index startCol, Index numCols) { return ColsBlockXpr(derived(), 0, startCol, rows(), numCols); } /// This is the const version of middleCols(Index,Index). EIGEN_DEVICE_FUNC inline ConstColsBlockXpr middleCols(Index startCol, Index numCols) const { return ConstColsBlockXpr(derived(), 0, startCol, rows(), numCols); } /// \returns a block consisting of a range of columns of *this. /// /// \tparam N the number of columns in the block as specified at compile-time /// \param startCol the index of the first column in the block /// \param n the number of columns in the block as specified at run-time /// /// The compile-time and run-time information should not contradict. In other words, /// \a n should equal \a N unless \a N is \a Dynamic. 
/// /// Example: \include DenseBase_template_int_middleCols.cpp /// Output: \verbinclude DenseBase_template_int_middleCols.out /// EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major) /// /// \sa class Block, block(Index,Index,Index,Index) /// template<int N> EIGEN_DEVICE_FUNC inline typename NColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N) { return typename NColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), n); } /// This is the const version of middleCols<int>(). template<int N> EIGEN_DEVICE_FUNC inline typename ConstNColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N) const { return typename ConstNColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), n); } /// \returns a fixed-size expression of a block in *this. /// /// The template parameters \a NRows and \a NCols are the number of /// rows and columns in the block. /// /// \param startRow the first row in the block /// \param startCol the first column in the block /// /// Example: \include MatrixBase_block_int_int.cpp /// Output: \verbinclude MatrixBase_block_int_int.out /// /// \note since block is a templated member, the keyword template has to be used /// if the matrix type is also a template parameter: \code m.template block<3,3>(1,1); \endcode /// EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /// /// \sa class Block, block(Index,Index,Index,Index) /// template<int NRows, int NCols> EIGEN_DEVICE_FUNC inline typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol) { return typename FixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol); } /// This is the const version of block<>(Index, Index). */ template<int NRows, int NCols> EIGEN_DEVICE_FUNC inline const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol) const { return typename ConstFixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol); } /// \returns an expression of a block in *this. /// /// \tparam NRows number of rows in block as specified at compile-time /// \tparam NCols number of columns in block as specified at compile-time /// \param startRow the first row in the block /// \param startCol the first column in the block /// \param blockRows number of rows in block as specified at run-time /// \param blockCols number of columns in block as specified at run-time /// /// This function is mainly useful for blocks where the number of rows is specified at compile-time /// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time /// information should not contradict. In other words, \a blockRows should equal \a NRows unless /// \a NRows is \a Dynamic, and the same for the number of columns. /// /// Example: \include MatrixBase_template_int_int_block_int_int_int_int.cpp /// Output: \verbinclude MatrixBase_template_int_int_block_int_int_int_int.cpp /// EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /// /// \sa class Block, block(Index,Index,Index,Index) /// template<int NRows, int NCols> inline typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol, Index blockRows, Index blockCols) { return typename FixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol, blockRows, blockCols); } /// This is the const version of block<>(Index, Index, Index, Index). 
template<int NRows, int NCols> inline const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol, Index blockRows, Index blockCols) const { return typename ConstFixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol, blockRows, blockCols); } /// \returns an expression of the \a i-th column of *this. Note that the numbering starts at 0. /// /// Example: \include MatrixBase_col.cpp /// Output: \verbinclude MatrixBase_col.out /// EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major) /** * \sa row(), class Block */ EIGEN_DEVICE_FUNC inline ColXpr col(Index i) { return ColXpr(derived(), i); } /// This is the const version of col(). EIGEN_DEVICE_FUNC inline ConstColXpr col(Index i) const { return ConstColXpr(derived(), i); } /// \returns an expression of the \a i-th row of *this. Note that the numbering starts at 0. /// /// Example: \include MatrixBase_row.cpp /// Output: \verbinclude MatrixBase_row.out /// EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major) /** * \sa col(), class Block */ EIGEN_DEVICE_FUNC inline RowXpr row(Index i) { return RowXpr(derived(), i); } /// This is the const version of row(). */ EIGEN_DEVICE_FUNC inline ConstRowXpr row(Index i) const { return ConstRowXpr(derived(), i); } /// \returns a dynamic-size expression of a segment (i.e. a vector block) in *this. /// /// \only_for_vectors /// /// \param start the first coefficient in the segment /// \param n the number of coefficients in the segment /// /// Example: \include MatrixBase_segment_int_int.cpp /// Output: \verbinclude MatrixBase_segment_int_int.out /// /// \note Even though the returned expression has dynamic size, in the case /// when it is applied to a fixed-size vector, it inherits a fixed maximal size, /// which means that evaluating it does not cause a dynamic memory allocation. /// /// \sa class Block, segment(Index) /// EIGEN_DEVICE_FUNC inline SegmentReturnType segment(Index start, Index n) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return SegmentReturnType(derived(), start, n); } /// This is the const version of segment(Index,Index). EIGEN_DEVICE_FUNC inline ConstSegmentReturnType segment(Index start, Index n) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return ConstSegmentReturnType(derived(), start, n); } /// \returns a dynamic-size expression of the first coefficients of *this. /// /// \only_for_vectors /// /// \param n the number of coefficients in the segment /// /// Example: \include MatrixBase_start_int.cpp /// Output: \verbinclude MatrixBase_start_int.out /// /// \note Even though the returned expression has dynamic size, in the case /// when it is applied to a fixed-size vector, it inherits a fixed maximal size, /// which means that evaluating it does not cause a dynamic memory allocation. /// /// \sa class Block, block(Index,Index) /// EIGEN_DEVICE_FUNC inline SegmentReturnType head(Index n) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return SegmentReturnType(derived(), 0, n); } /// This is the const version of head(Index). EIGEN_DEVICE_FUNC inline ConstSegmentReturnType head(Index n) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return ConstSegmentReturnType(derived(), 0, n); } /// \returns a dynamic-size expression of the last coefficients of *this. 
/// /// \only_for_vectors /// /// \param n the number of coefficients in the segment /// /// Example: \include MatrixBase_end_int.cpp /// Output: \verbinclude MatrixBase_end_int.out /// /// \note Even though the returned expression has dynamic size, in the case /// when it is applied to a fixed-size vector, it inherits a fixed maximal size, /// which means that evaluating it does not cause a dynamic memory allocation. /// /// \sa class Block, block(Index,Index) /// EIGEN_DEVICE_FUNC inline SegmentReturnType tail(Index n) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return SegmentReturnType(derived(), this->size() - n, n); } /// This is the const version of tail(Index). EIGEN_DEVICE_FUNC inline ConstSegmentReturnType tail(Index n) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return ConstSegmentReturnType(derived(), this->size() - n, n); } /// \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this /// /// \only_for_vectors /// /// \tparam N the number of coefficients in the segment as specified at compile-time /// \param start the index of the first element in the segment /// \param n the number of coefficients in the segment as specified at compile-time /// /// The compile-time and run-time information should not contradict. In other words, /// \a n should equal \a N unless \a N is \a Dynamic. /// /// Example: \include MatrixBase_template_int_segment.cpp /// Output: \verbinclude MatrixBase_template_int_segment.out /// /// \sa class Block /// template<int N> EIGEN_DEVICE_FUNC inline typename FixedSegmentReturnType<N>::Type segment(Index start, Index n = N) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return typename FixedSegmentReturnType<N>::Type(derived(), start, n); } /// This is the const version of segment<int>(Index). template<int N> EIGEN_DEVICE_FUNC inline typename ConstFixedSegmentReturnType<N>::Type segment(Index start, Index n = N) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return typename ConstFixedSegmentReturnType<N>::Type(derived(), start, n); } /// \returns a fixed-size expression of the first coefficients of *this. /// /// \only_for_vectors /// /// \tparam N the number of coefficients in the segment as specified at compile-time /// \param n the number of coefficients in the segment as specified at run-time /// /// The compile-time and run-time information should not contradict. In other words, /// \a n should equal \a N unless \a N is \a Dynamic. /// /// Example: \include MatrixBase_template_int_start.cpp /// Output: \verbinclude MatrixBase_template_int_start.out /// /// \sa class Block /// template<int N> EIGEN_DEVICE_FUNC inline typename FixedSegmentReturnType<N>::Type head(Index n = N) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return typename FixedSegmentReturnType<N>::Type(derived(), 0, n); } /// This is the const version of head<int>(). template<int N> EIGEN_DEVICE_FUNC inline typename ConstFixedSegmentReturnType<N>::Type head(Index n = N) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return typename ConstFixedSegmentReturnType<N>::Type(derived(), 0, n); } /// \returns a fixed-size expression of the last coefficients of *this. /// /// \only_for_vectors /// /// \tparam N the number of coefficients in the segment as specified at compile-time /// \param n the number of coefficients in the segment as specified at run-time /// /// The compile-time and run-time information should not contradict. In other words, /// \a n should equal \a N unless \a N is \a Dynamic. 
///
/// Example: \include MatrixBase_template_int_end.cpp
/// Output: \verbinclude MatrixBase_template_int_end.out
///
/// \sa class Block
///
template<int N>
EIGEN_DEVICE_FUNC
inline typename FixedSegmentReturnType<N>::Type tail(Index n = N)
{
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  return typename FixedSegmentReturnType<N>::Type(derived(), size() - n);
}

/// This is the const version of tail<int>.
template<int N>
EIGEN_DEVICE_FUNC
inline typename ConstFixedSegmentReturnType<N>::Type tail(Index n = N) const
{
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  return typename ConstFixedSegmentReturnType<N>::Type(derived(), size() - n);
}
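The plugin above only declares the block, row/column, and segment accessors; a minimal stand-alone sketch of how they are typically called may help. This example is illustrative and not part of the repository sources; it assumes Eigen is on the include path and a C++11 compiler.

// Illustrative usage of the block/segment accessors declared in the plugin above.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd m(4, 4);
  m <<  1,  2,  3,  4,
        5,  6,  7,  8,
        9, 10, 11, 12,
       13, 14, 15, 16;

  // Dynamic-size block: 2x2 block starting at row 1, column 1.
  std::cout << m.block(1, 1, 2, 2) << "\n\n";
  // Fixed-size equivalent; the block size is known at compile time.
  std::cout << m.block<2, 2>(1, 1) << "\n\n";

  // Single row / column views (numbering starts at 0).
  std::cout << m.row(0) << "\n" << m.col(3).transpose() << "\n\n";

  Eigen::VectorXd v(6);
  v << 0, 1, 2, 3, 4, 5;
  // head/tail/segment are the vector-only shortcuts documented above.
  std::cout << v.head(2).transpose()       << " | "
            << v.segment(2, 3).transpose() << " | "
            << v.tail<2>().transpose()     << "\n";
  return 0;
}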
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/plugins/MatrixCwiseBinaryOps.h
.h
6,375
153
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // This file is a base class plugin containing matrix specifics coefficient wise functions. /** \returns an expression of the Schur product (coefficient wise product) of *this and \a other * * Example: \include MatrixBase_cwiseProduct.cpp * Output: \verbinclude MatrixBase_cwiseProduct.out * * \sa class CwiseBinaryOp, cwiseAbs2 */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product) cwiseProduct(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const { return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)(derived(), other.derived()); } /** \returns an expression of the coefficient-wise == operator of *this and \a other * * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. * In order to check for equality between two vectors or matrices with floating-point coefficients, it is * generally a far better idea to use a fuzzy comparison as provided by isApprox() and * isMuchSmallerThan(). * * Example: \include MatrixBase_cwiseEqual.cpp * Output: \verbinclude MatrixBase_cwiseEqual.out * * \sa cwiseNotEqual(), isApprox(), isMuchSmallerThan() */ template<typename OtherDerived> EIGEN_DEVICE_FUNC inline const CwiseBinaryOp<std::equal_to<Scalar>, const Derived, const OtherDerived> cwiseEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const { return CwiseBinaryOp<std::equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); } /** \returns an expression of the coefficient-wise != operator of *this and \a other * * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. * In order to check for equality between two vectors or matrices with floating-point coefficients, it is * generally a far better idea to use a fuzzy comparison as provided by isApprox() and * isMuchSmallerThan(). 
* * Example: \include MatrixBase_cwiseNotEqual.cpp * Output: \verbinclude MatrixBase_cwiseNotEqual.out * * \sa cwiseEqual(), isApprox(), isMuchSmallerThan() */ template<typename OtherDerived> EIGEN_DEVICE_FUNC inline const CwiseBinaryOp<std::not_equal_to<Scalar>, const Derived, const OtherDerived> cwiseNotEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const { return CwiseBinaryOp<std::not_equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); } /** \returns an expression of the coefficient-wise min of *this and \a other * * Example: \include MatrixBase_cwiseMin.cpp * Output: \verbinclude MatrixBase_cwiseMin.out * * \sa class CwiseBinaryOp, max() */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived, const OtherDerived> cwiseMin(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const { return CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); } /** \returns an expression of the coefficient-wise min of *this and scalar \a other * * \sa class CwiseBinaryOp, min() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived, const ConstantReturnType> cwiseMin(const Scalar &other) const { return cwiseMin(Derived::Constant(rows(), cols(), other)); } /** \returns an expression of the coefficient-wise max of *this and \a other * * Example: \include MatrixBase_cwiseMax.cpp * Output: \verbinclude MatrixBase_cwiseMax.out * * \sa class CwiseBinaryOp, min() */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived, const OtherDerived> cwiseMax(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const { return CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); } /** \returns an expression of the coefficient-wise max of *this and scalar \a other * * \sa class CwiseBinaryOp, min() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived, const ConstantReturnType> cwiseMax(const Scalar &other) const { return cwiseMax(Derived::Constant(rows(), cols(), other)); } /** \returns an expression of the coefficient-wise quotient of *this and \a other * * Example: \include MatrixBase_cwiseQuotient.cpp * Output: \verbinclude MatrixBase_cwiseQuotient.out * * \sa class CwiseBinaryOp, cwiseProduct(), cwiseInverse() */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived> cwiseQuotient(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const { return CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); } typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,Scalar,internal::cmp_EQ>, const Derived, const ConstantReturnType> CwiseScalarEqualReturnType; /** \returns an expression of the coefficient-wise == operator of \c *this and a scalar \a s * * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. * In order to check for equality between two vectors or matrices with floating-point coefficients, it is * generally a far better idea to use a fuzzy comparison as provided by isApprox() and * isMuchSmallerThan(). 
 *
 * \sa cwiseEqual(const MatrixBase<OtherDerived> &) const
 */
EIGEN_DEVICE_FUNC
inline const CwiseScalarEqualReturnType
cwiseEqual(const Scalar& s) const
{
  return CwiseScalarEqualReturnType(derived(), Derived::Constant(rows(), cols(), s), internal::scalar_cmp_op<Scalar,Scalar,internal::cmp_EQ>());
}
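A brief usage sketch of the coefficient-wise binary operations declared above (cwiseProduct, cwiseQuotient, cwiseMin/cwiseMax, cwiseEqual). It is illustrative only, not part of MatrixCwiseBinaryOps.h, and assumes Eigen/Dense is available.

// Illustrative usage of the matrix coefficient-wise binary operations above.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix2d a, b;
  a << 1, 2, 3, 4;
  b << 4, 3, 2, 1;

  std::cout << a.cwiseProduct(b)  << "\n\n"   // Schur (element-wise) product
            << a.cwiseQuotient(b) << "\n\n"   // element-wise division
            << a.cwiseMin(b)      << "\n\n"   // element-wise minimum
            << a.cwiseMax(2.5)    << "\n\n";  // element-wise max against a scalar

  // cwiseEqual performs an exact comparison and yields a boolean expression;
  // count() tallies the coefficients that compare equal (use isApprox for floats).
  std::cout << a.cwiseEqual(b).count() << " coefficient(s) equal\n";
  return 0;
}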
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/plugins/ArrayCwiseUnaryOps.h
.h
16,929
553
typedef CwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived> AbsReturnType; typedef CwiseUnaryOp<internal::scalar_arg_op<Scalar>, const Derived> ArgReturnType; typedef CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const Derived> Abs2ReturnType; typedef CwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived> SqrtReturnType; typedef CwiseUnaryOp<internal::scalar_rsqrt_op<Scalar>, const Derived> RsqrtReturnType; typedef CwiseUnaryOp<internal::scalar_sign_op<Scalar>, const Derived> SignReturnType; typedef CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived> InverseReturnType; typedef CwiseUnaryOp<internal::scalar_boolean_not_op<Scalar>, const Derived> BooleanNotReturnType; typedef CwiseUnaryOp<internal::scalar_exp_op<Scalar>, const Derived> ExpReturnType; typedef CwiseUnaryOp<internal::scalar_log_op<Scalar>, const Derived> LogReturnType; typedef CwiseUnaryOp<internal::scalar_log1p_op<Scalar>, const Derived> Log1pReturnType; typedef CwiseUnaryOp<internal::scalar_log10_op<Scalar>, const Derived> Log10ReturnType; typedef CwiseUnaryOp<internal::scalar_cos_op<Scalar>, const Derived> CosReturnType; typedef CwiseUnaryOp<internal::scalar_sin_op<Scalar>, const Derived> SinReturnType; typedef CwiseUnaryOp<internal::scalar_tan_op<Scalar>, const Derived> TanReturnType; typedef CwiseUnaryOp<internal::scalar_acos_op<Scalar>, const Derived> AcosReturnType; typedef CwiseUnaryOp<internal::scalar_asin_op<Scalar>, const Derived> AsinReturnType; typedef CwiseUnaryOp<internal::scalar_atan_op<Scalar>, const Derived> AtanReturnType; typedef CwiseUnaryOp<internal::scalar_tanh_op<Scalar>, const Derived> TanhReturnType; typedef CwiseUnaryOp<internal::scalar_sinh_op<Scalar>, const Derived> SinhReturnType; typedef CwiseUnaryOp<internal::scalar_cosh_op<Scalar>, const Derived> CoshReturnType; typedef CwiseUnaryOp<internal::scalar_square_op<Scalar>, const Derived> SquareReturnType; typedef CwiseUnaryOp<internal::scalar_cube_op<Scalar>, const Derived> CubeReturnType; typedef CwiseUnaryOp<internal::scalar_round_op<Scalar>, const Derived> RoundReturnType; typedef CwiseUnaryOp<internal::scalar_floor_op<Scalar>, const Derived> FloorReturnType; typedef CwiseUnaryOp<internal::scalar_ceil_op<Scalar>, const Derived> CeilReturnType; typedef CwiseUnaryOp<internal::scalar_isnan_op<Scalar>, const Derived> IsNaNReturnType; typedef CwiseUnaryOp<internal::scalar_isinf_op<Scalar>, const Derived> IsInfReturnType; typedef CwiseUnaryOp<internal::scalar_isfinite_op<Scalar>, const Derived> IsFiniteReturnType; /** \returns an expression of the coefficient-wise absolute value of \c *this * * Example: \include Cwise_abs.cpp * Output: \verbinclude Cwise_abs.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_abs">Math functions</a>, abs2() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const AbsReturnType abs() const { return AbsReturnType(derived()); } /** \returns an expression of the coefficient-wise phase angle of \c *this * * Example: \include Cwise_arg.cpp * Output: \verbinclude Cwise_arg.out * * \sa abs() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const ArgReturnType arg() const { return ArgReturnType(derived()); } /** \returns an expression of the coefficient-wise squared absolute value of \c *this * * Example: \include Cwise_abs2.cpp * Output: \verbinclude Cwise_abs2.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_abs2">Math functions</a>, abs(), square() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Abs2ReturnType abs2() const { return Abs2ReturnType(derived()); } /** \returns an 
expression of the coefficient-wise exponential of *this. * * This function computes the coefficient-wise exponential. The function MatrixBase::exp() in the * unsupported module MatrixFunctions computes the matrix exponential. * * Example: \include Cwise_exp.cpp * Output: \verbinclude Cwise_exp.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_exp">Math functions</a>, pow(), log(), sin(), cos() */ EIGEN_DEVICE_FUNC inline const ExpReturnType exp() const { return ExpReturnType(derived()); } /** \returns an expression of the coefficient-wise logarithm of *this. * * This function computes the coefficient-wise logarithm. The function MatrixBase::log() in the * unsupported module MatrixFunctions computes the matrix logarithm. * * Example: \include Cwise_log.cpp * Output: \verbinclude Cwise_log.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_log">Math functions</a>, exp() */ EIGEN_DEVICE_FUNC inline const LogReturnType log() const { return LogReturnType(derived()); } /** \returns an expression of the coefficient-wise logarithm of 1 plus \c *this. * * In exact arithmetic, \c x.log() is equivalent to \c (x+1).log(), * however, with finite precision, this function is much more accurate when \c x is close to zero. * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_log1p">Math functions</a>, log() */ EIGEN_DEVICE_FUNC inline const Log1pReturnType log1p() const { return Log1pReturnType(derived()); } /** \returns an expression of the coefficient-wise base-10 logarithm of *this. * * This function computes the coefficient-wise base-10 logarithm. * * Example: \include Cwise_log10.cpp * Output: \verbinclude Cwise_log10.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_log10">Math functions</a>, log() */ EIGEN_DEVICE_FUNC inline const Log10ReturnType log10() const { return Log10ReturnType(derived()); } /** \returns an expression of the coefficient-wise square root of *this. * * This function computes the coefficient-wise square root. The function MatrixBase::sqrt() in the * unsupported module MatrixFunctions computes the matrix square root. * * Example: \include Cwise_sqrt.cpp * Output: \verbinclude Cwise_sqrt.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_sqrt">Math functions</a>, pow(), square() */ EIGEN_DEVICE_FUNC inline const SqrtReturnType sqrt() const { return SqrtReturnType(derived()); } /** \returns an expression of the coefficient-wise inverse square root of *this. * * This function computes the coefficient-wise inverse square root. * * Example: \include Cwise_sqrt.cpp * Output: \verbinclude Cwise_sqrt.out * * \sa pow(), square() */ EIGEN_DEVICE_FUNC inline const RsqrtReturnType rsqrt() const { return RsqrtReturnType(derived()); } /** \returns an expression of the coefficient-wise signum of *this. * * This function computes the coefficient-wise signum. * * Example: \include Cwise_sign.cpp * Output: \verbinclude Cwise_sign.out * * \sa pow(), square() */ EIGEN_DEVICE_FUNC inline const SignReturnType sign() const { return SignReturnType(derived()); } /** \returns an expression of the coefficient-wise cosine of *this. * * This function computes the coefficient-wise cosine. The function MatrixBase::cos() in the * unsupported module MatrixFunctions computes the matrix cosine. 
* * Example: \include Cwise_cos.cpp * Output: \verbinclude Cwise_cos.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_cos">Math functions</a>, sin(), acos() */ EIGEN_DEVICE_FUNC inline const CosReturnType cos() const { return CosReturnType(derived()); } /** \returns an expression of the coefficient-wise sine of *this. * * This function computes the coefficient-wise sine. The function MatrixBase::sin() in the * unsupported module MatrixFunctions computes the matrix sine. * * Example: \include Cwise_sin.cpp * Output: \verbinclude Cwise_sin.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_sin">Math functions</a>, cos(), asin() */ EIGEN_DEVICE_FUNC inline const SinReturnType sin() const { return SinReturnType(derived()); } /** \returns an expression of the coefficient-wise tan of *this. * * Example: \include Cwise_tan.cpp * Output: \verbinclude Cwise_tan.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_tan">Math functions</a>, cos(), sin() */ EIGEN_DEVICE_FUNC inline const TanReturnType tan() const { return TanReturnType(derived()); } /** \returns an expression of the coefficient-wise arc tan of *this. * * Example: \include Cwise_atan.cpp * Output: \verbinclude Cwise_atan.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_atan">Math functions</a>, tan(), asin(), acos() */ EIGEN_DEVICE_FUNC inline const AtanReturnType atan() const { return AtanReturnType(derived()); } /** \returns an expression of the coefficient-wise arc cosine of *this. * * Example: \include Cwise_acos.cpp * Output: \verbinclude Cwise_acos.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_acos">Math functions</a>, cos(), asin() */ EIGEN_DEVICE_FUNC inline const AcosReturnType acos() const { return AcosReturnType(derived()); } /** \returns an expression of the coefficient-wise arc sine of *this. * * Example: \include Cwise_asin.cpp * Output: \verbinclude Cwise_asin.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_asin">Math functions</a>, sin(), acos() */ EIGEN_DEVICE_FUNC inline const AsinReturnType asin() const { return AsinReturnType(derived()); } /** \returns an expression of the coefficient-wise hyperbolic tan of *this. * * Example: \include Cwise_tanh.cpp * Output: \verbinclude Cwise_tanh.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_tanh">Math functions</a>, tan(), sinh(), cosh() */ EIGEN_DEVICE_FUNC inline const TanhReturnType tanh() const { return TanhReturnType(derived()); } /** \returns an expression of the coefficient-wise hyperbolic sin of *this. * * Example: \include Cwise_sinh.cpp * Output: \verbinclude Cwise_sinh.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_sinh">Math functions</a>, sin(), tanh(), cosh() */ EIGEN_DEVICE_FUNC inline const SinhReturnType sinh() const { return SinhReturnType(derived()); } /** \returns an expression of the coefficient-wise hyperbolic cos of *this. * * Example: \include Cwise_cosh.cpp * Output: \verbinclude Cwise_cosh.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_cosh">Math functions</a>, tan(), sinh(), cosh() */ EIGEN_DEVICE_FUNC inline const CoshReturnType cosh() const { return CoshReturnType(derived()); } /** \returns an expression of the coefficient-wise inverse of *this. 
* * Example: \include Cwise_inverse.cpp * Output: \verbinclude Cwise_inverse.out * * \sa operator/(), operator*() */ EIGEN_DEVICE_FUNC inline const InverseReturnType inverse() const { return InverseReturnType(derived()); } /** \returns an expression of the coefficient-wise square of *this. * * Example: \include Cwise_square.cpp * Output: \verbinclude Cwise_square.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_squareE">Math functions</a>, abs2(), cube(), pow() */ EIGEN_DEVICE_FUNC inline const SquareReturnType square() const { return SquareReturnType(derived()); } /** \returns an expression of the coefficient-wise cube of *this. * * Example: \include Cwise_cube.cpp * Output: \verbinclude Cwise_cube.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_cube">Math functions</a>, square(), pow() */ EIGEN_DEVICE_FUNC inline const CubeReturnType cube() const { return CubeReturnType(derived()); } /** \returns an expression of the coefficient-wise round of *this. * * Example: \include Cwise_round.cpp * Output: \verbinclude Cwise_round.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_round">Math functions</a>, ceil(), floor() */ EIGEN_DEVICE_FUNC inline const RoundReturnType round() const { return RoundReturnType(derived()); } /** \returns an expression of the coefficient-wise floor of *this. * * Example: \include Cwise_floor.cpp * Output: \verbinclude Cwise_floor.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_floor">Math functions</a>, ceil(), round() */ EIGEN_DEVICE_FUNC inline const FloorReturnType floor() const { return FloorReturnType(derived()); } /** \returns an expression of the coefficient-wise ceil of *this. * * Example: \include Cwise_ceil.cpp * Output: \verbinclude Cwise_ceil.out * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_ceil">Math functions</a>, floor(), round() */ EIGEN_DEVICE_FUNC inline const CeilReturnType ceil() const { return CeilReturnType(derived()); } /** \returns an expression of the coefficient-wise isnan of *this. * * Example: \include Cwise_isNaN.cpp * Output: \verbinclude Cwise_isNaN.out * * \sa isfinite(), isinf() */ EIGEN_DEVICE_FUNC inline const IsNaNReturnType isNaN() const { return IsNaNReturnType(derived()); } /** \returns an expression of the coefficient-wise isinf of *this. * * Example: \include Cwise_isInf.cpp * Output: \verbinclude Cwise_isInf.out * * \sa isnan(), isfinite() */ EIGEN_DEVICE_FUNC inline const IsInfReturnType isInf() const { return IsInfReturnType(derived()); } /** \returns an expression of the coefficient-wise isfinite of *this. * * Example: \include Cwise_isFinite.cpp * Output: \verbinclude Cwise_isFinite.out * * \sa isnan(), isinf() */ EIGEN_DEVICE_FUNC inline const IsFiniteReturnType isFinite() const { return IsFiniteReturnType(derived()); } /** \returns an expression of the coefficient-wise ! operator of *this * * \warning this operator is for expression of bool only. 
* * Example: \include Cwise_boolean_not.cpp * Output: \verbinclude Cwise_boolean_not.out * * \sa operator!=() */ EIGEN_DEVICE_FUNC inline const BooleanNotReturnType operator!() const { EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value), THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL); return BooleanNotReturnType(derived()); } // --- SpecialFunctions module --- typedef CwiseUnaryOp<internal::scalar_lgamma_op<Scalar>, const Derived> LgammaReturnType; typedef CwiseUnaryOp<internal::scalar_digamma_op<Scalar>, const Derived> DigammaReturnType; typedef CwiseUnaryOp<internal::scalar_erf_op<Scalar>, const Derived> ErfReturnType; typedef CwiseUnaryOp<internal::scalar_erfc_op<Scalar>, const Derived> ErfcReturnType; /** \cpp11 \returns an expression of the coefficient-wise ln(|gamma(*this)|). * * \specialfunctions_module * * Example: \include Cwise_lgamma.cpp * Output: \verbinclude Cwise_lgamma.out * * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types, * or float/double in non c++11 mode, the user has to provide implementations of lgamma(T) for any scalar * type T to be supported. * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_lgamma">Math functions</a>, digamma() */ EIGEN_DEVICE_FUNC inline const LgammaReturnType lgamma() const { return LgammaReturnType(derived()); } /** \returns an expression of the coefficient-wise digamma (psi, derivative of lgamma). * * \specialfunctions_module * * \note This function supports only float and double scalar types. To support other scalar types, * the user has to provide implementations of digamma(T) for any scalar * type T to be supported. * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_digamma">Math functions</a>, Eigen::digamma(), Eigen::polygamma(), lgamma() */ EIGEN_DEVICE_FUNC inline const DigammaReturnType digamma() const { return DigammaReturnType(derived()); } /** \cpp11 \returns an expression of the coefficient-wise Gauss error * function of *this. * * \specialfunctions_module * * Example: \include Cwise_erf.cpp * Output: \verbinclude Cwise_erf.out * * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types, * or float/double in non c++11 mode, the user has to provide implementations of erf(T) for any scalar * type T to be supported. * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_erf">Math functions</a>, erfc() */ EIGEN_DEVICE_FUNC inline const ErfReturnType erf() const { return ErfReturnType(derived()); } /** \cpp11 \returns an expression of the coefficient-wise Complementary error * function of *this. * * \specialfunctions_module * * Example: \include Cwise_erfc.cpp * Output: \verbinclude Cwise_erfc.out * * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types, * or float/double in non c++11 mode, the user has to provide implementations of erfc(T) for any scalar * type T to be supported. * * \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_erfc">Math functions</a>, erf() */ EIGEN_DEVICE_FUNC inline const ErfcReturnType erfc() const { return ErfcReturnType(derived()); }
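The array plugin above declares a large family of coefficient-wise unary functions; a short sketch of a few of them follows. Illustrative only, not part of ArrayCwiseUnaryOps.h; it assumes Eigen/Dense and real (double) scalars.

// Illustrative usage of a few coefficient-wise unary array functions above.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::ArrayXd x(4);
  x << -1.0, 0.25, 2.0, 9.0;

  std::cout << x.abs().transpose()    << "\n"   // |x|
            << x.square().transpose() << "\n"   // x^2
            << x.sqrt().transpose()   << "\n"   // element-wise sqrt (NaN for -1)
            << x.exp().transpose()    << "\n"   // e^x
            << x.round().transpose()  << "\n";  // nearest integer

  // isNaN/isInf/isFinite return boolean arrays; any()/all() reduce them.
  std::cout << std::boolalpha << x.sqrt().isNaN().any() << "\n";
  return 0;
}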
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/plugins/CommonCwiseUnaryOps.h
.h
5,621
164
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // This file is a base class plugin containing common coefficient wise functions. #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal the return type of conjugate() */ typedef typename internal::conditional<NumTraits<Scalar>::IsComplex, const CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Derived>, const Derived& >::type ConjugateReturnType; /** \internal the return type of real() const */ typedef typename internal::conditional<NumTraits<Scalar>::IsComplex, const CwiseUnaryOp<internal::scalar_real_op<Scalar>, const Derived>, const Derived& >::type RealReturnType; /** \internal the return type of real() */ typedef typename internal::conditional<NumTraits<Scalar>::IsComplex, CwiseUnaryView<internal::scalar_real_ref_op<Scalar>, Derived>, Derived& >::type NonConstRealReturnType; /** \internal the return type of imag() const */ typedef CwiseUnaryOp<internal::scalar_imag_op<Scalar>, const Derived> ImagReturnType; /** \internal the return type of imag() */ typedef CwiseUnaryView<internal::scalar_imag_ref_op<Scalar>, Derived> NonConstImagReturnType; typedef CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const Derived> NegativeReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN /// \returns an expression of the opposite of \c *this /// EIGEN_DOC_UNARY_ADDONS(operator-,opposite) /// EIGEN_DEVICE_FUNC inline const NegativeReturnType operator-() const { return NegativeReturnType(derived()); } template<class NewType> struct CastXpr { typedef typename internal::cast_return_type<Derived,const CwiseUnaryOp<internal::scalar_cast_op<Scalar, NewType>, const Derived> >::type Type; }; /// \returns an expression of \c *this with the \a Scalar type casted to /// \a NewScalar. /// /// The template parameter \a NewScalar is the type we are casting the scalars to. /// EIGEN_DOC_UNARY_ADDONS(cast,conversion function) /// /// \sa class CwiseUnaryOp /// template<typename NewType> EIGEN_DEVICE_FUNC typename CastXpr<NewType>::Type cast() const { return typename CastXpr<NewType>::Type(derived()); } /// \returns an expression of the complex conjugate of \c *this. /// EIGEN_DOC_UNARY_ADDONS(conjugate,complex conjugate) /// /// \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_conj">Math functions</a>, MatrixBase::adjoint() EIGEN_DEVICE_FUNC inline ConjugateReturnType conjugate() const { return ConjugateReturnType(derived()); } /// \returns a read-only expression of the real part of \c *this. /// EIGEN_DOC_UNARY_ADDONS(real,real part function) /// /// \sa imag() EIGEN_DEVICE_FUNC inline RealReturnType real() const { return RealReturnType(derived()); } /// \returns an read-only expression of the imaginary part of \c *this. 
/// EIGEN_DOC_UNARY_ADDONS(imag,imaginary part function) /// /// \sa real() EIGEN_DEVICE_FUNC inline const ImagReturnType imag() const { return ImagReturnType(derived()); } /// \brief Apply a unary operator coefficient-wise /// \param[in] func Functor implementing the unary operator /// \tparam CustomUnaryOp Type of \a func /// \returns An expression of a custom coefficient-wise unary operator \a func of *this /// /// The function \c ptr_fun() from the C++ standard library can be used to make functors out of normal functions. /// /// Example: /// \include class_CwiseUnaryOp_ptrfun.cpp /// Output: \verbinclude class_CwiseUnaryOp_ptrfun.out /// /// Genuine functors allow for more possibilities, for instance it may contain a state. /// /// Example: /// \include class_CwiseUnaryOp.cpp /// Output: \verbinclude class_CwiseUnaryOp.out /// EIGEN_DOC_UNARY_ADDONS(unaryExpr,unary function) /// /// \sa unaryViewExpr, binaryExpr, class CwiseUnaryOp /// template<typename CustomUnaryOp> EIGEN_DEVICE_FUNC inline const CwiseUnaryOp<CustomUnaryOp, const Derived> unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const { return CwiseUnaryOp<CustomUnaryOp, const Derived>(derived(), func); } /// \returns an expression of a custom coefficient-wise unary operator \a func of *this /// /// The template parameter \a CustomUnaryOp is the type of the functor /// of the custom unary operator. /// /// Example: /// \include class_CwiseUnaryOp.cpp /// Output: \verbinclude class_CwiseUnaryOp.out /// EIGEN_DOC_UNARY_ADDONS(unaryViewExpr,unary function) /// /// \sa unaryExpr, binaryExpr class CwiseUnaryOp /// template<typename CustomViewOp> EIGEN_DEVICE_FUNC inline const CwiseUnaryView<CustomViewOp, const Derived> unaryViewExpr(const CustomViewOp& func = CustomViewOp()) const { return CwiseUnaryView<CustomViewOp, const Derived>(derived(), func); } /// \returns a non const expression of the real part of \c *this. /// EIGEN_DOC_UNARY_ADDONS(real,real part function) /// /// \sa imag() EIGEN_DEVICE_FUNC inline NonConstRealReturnType real() { return NonConstRealReturnType(derived()); } /// \returns a non const expression of the imaginary part of \c *this. /// EIGEN_DOC_UNARY_ADDONS(imag,imaginary part function) /// /// \sa real() EIGEN_DEVICE_FUNC inline NonConstImagReturnType imag() { return NonConstImagReturnType(derived()); }
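The common unary plugin above covers negation, scalar casts, conjugation, real/imaginary views, and custom functors via unaryExpr. A minimal sketch, assuming Eigen/Dense and a C++11 compiler (for the lambda passed to unaryExpr); it is not part of CommonCwiseUnaryOps.h.

// Illustrative usage of the common coefficient-wise unary expressions above.
#include <Eigen/Dense>
#include <complex>
#include <iostream>

int main()
{
  Eigen::Matrix2cd c;
  c << std::complex<double>(1, 2), std::complex<double>(0, -1),
       std::complex<double>(3, 0), std::complex<double>(-2, 4);

  std::cout << (-c)          << "\n\n"   // opposite
            << c.conjugate() << "\n\n"   // complex conjugate
            << c.real()      << "\n\n"   // read-only real part
            << c.imag()      << "\n\n";  // read-only imaginary part

  Eigen::Matrix2d d = c.real();
  // cast<> converts the scalar type; unaryExpr applies a custom functor coefficient-wise.
  std::cout << d.cast<float>() << "\n\n"
            << d.unaryExpr([](double v) { return v * v; }) << "\n";
  return 0;
}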
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Jacobi/Jacobi.h
.h
15,902
463
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_JACOBI_H #define EIGEN_JACOBI_H namespace Eigen { /** \ingroup Jacobi_Module * \jacobi_module * \class JacobiRotation * \brief Rotation given by a cosine-sine pair. * * This class represents a Jacobi or Givens rotation. * This is a 2D rotation in the plane \c J of angle \f$ \theta \f$ defined by * its cosine \c c and sine \c s as follow: * \f$ J = \left ( \begin{array}{cc} c & \overline s \\ -s & \overline c \end{array} \right ) \f$ * * You can apply the respective counter-clockwise rotation to a column vector \c v by * applying its adjoint on the left: \f$ v = J^* v \f$ that translates to the following Eigen code: * \code * v.applyOnTheLeft(J.adjoint()); * \endcode * * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ template<typename Scalar> class JacobiRotation { public: typedef typename NumTraits<Scalar>::Real RealScalar; /** Default constructor without any initialization. */ JacobiRotation() {} /** Construct a planar rotation from a cosine-sine pair (\a c, \c s). */ JacobiRotation(const Scalar& c, const Scalar& s) : m_c(c), m_s(s) {} Scalar& c() { return m_c; } Scalar c() const { return m_c; } Scalar& s() { return m_s; } Scalar s() const { return m_s; } /** Concatenates two planar rotation */ JacobiRotation operator*(const JacobiRotation& other) { using numext::conj; return JacobiRotation(m_c * other.m_c - conj(m_s) * other.m_s, conj(m_c * conj(other.m_s) + conj(m_s) * conj(other.m_c))); } /** Returns the transposed transformation */ JacobiRotation transpose() const { using numext::conj; return JacobiRotation(m_c, -conj(m_s)); } /** Returns the adjoint transformation */ JacobiRotation adjoint() const { using numext::conj; return JacobiRotation(conj(m_c), -m_s); } template<typename Derived> bool makeJacobi(const MatrixBase<Derived>&, Index p, Index q); bool makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z); void makeGivens(const Scalar& p, const Scalar& q, Scalar* r=0); protected: void makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type); void makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type); Scalar m_c, m_s; }; /** Makes \c *this as a Jacobi rotation \a J such that applying \a J on both the right and left sides of the selfadjoint 2x2 matrix * \f$ B = \left ( \begin{array}{cc} x & y \\ \overline y & z \end{array} \right )\f$ yields a diagonal matrix \f$ A = J^* B J \f$ * * \sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ template<typename Scalar> bool JacobiRotation<Scalar>::makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z) { using std::sqrt; using std::abs; RealScalar deno = RealScalar(2)*abs(y); if(deno < (std::numeric_limits<RealScalar>::min)()) { m_c = Scalar(1); m_s = Scalar(0); return false; } else { RealScalar tau = (x-z)/deno; RealScalar w = sqrt(numext::abs2(tau) + RealScalar(1)); RealScalar t; if(tau>RealScalar(0)) { t = RealScalar(1) / (tau + w); } else { t = RealScalar(1) / (tau - w); } RealScalar sign_t = t > RealScalar(0) ? 
RealScalar(1) : RealScalar(-1); RealScalar n = RealScalar(1) / sqrt(numext::abs2(t)+RealScalar(1)); m_s = - sign_t * (numext::conj(y) / abs(y)) * abs(t) * n; m_c = n; return true; } } /** Makes \c *this as a Jacobi rotation \c J such that applying \a J on both the right and left sides of the 2x2 selfadjoint matrix * \f$ B = \left ( \begin{array}{cc} \text{this}_{pp} & \text{this}_{pq} \\ (\text{this}_{pq})^* & \text{this}_{qq} \end{array} \right )\f$ yields * a diagonal matrix \f$ A = J^* B J \f$ * * Example: \include Jacobi_makeJacobi.cpp * Output: \verbinclude Jacobi_makeJacobi.out * * \sa JacobiRotation::makeJacobi(RealScalar, Scalar, RealScalar), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ template<typename Scalar> template<typename Derived> inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, Index p, Index q) { return makeJacobi(numext::real(m.coeff(p,p)), m.coeff(p,q), numext::real(m.coeff(q,q))); } /** Makes \c *this as a Givens rotation \c G such that applying \f$ G^* \f$ to the left of the vector * \f$ V = \left ( \begin{array}{c} p \\ q \end{array} \right )\f$ yields: * \f$ G^* V = \left ( \begin{array}{c} r \\ 0 \end{array} \right )\f$. * * The value of \a r is returned if \a r is not null (the default is null). * Also note that G is built such that the cosine is always real. * * Example: \include Jacobi_makeGivens.cpp * Output: \verbinclude Jacobi_makeGivens.out * * This function implements the continuous Givens rotation generation algorithm * found in Anderson (2000), Discontinuous Plane Rotations and the Symmetric Eigenvalue Problem. * LAPACK Working Note 150, University of Tennessee, UT-CS-00-454, December 4, 2000. * * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ template<typename Scalar> void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r) { makeGivens(p, q, r, typename internal::conditional<NumTraits<Scalar>::IsComplex, internal::true_type, internal::false_type>::type()); } // specialization for complexes template<typename Scalar> void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type) { using std::sqrt; using std::abs; using numext::conj; if(q==Scalar(0)) { m_c = numext::real(p)<0 ? Scalar(-1) : Scalar(1); m_s = 0; if(r) *r = m_c * p; } else if(p==Scalar(0)) { m_c = 0; m_s = -q/abs(q); if(r) *r = abs(q); } else { RealScalar p1 = numext::norm1(p); RealScalar q1 = numext::norm1(q); if(p1>=q1) { Scalar ps = p / p1; RealScalar p2 = numext::abs2(ps); Scalar qs = q / p1; RealScalar q2 = numext::abs2(qs); RealScalar u = sqrt(RealScalar(1) + q2/p2); if(numext::real(p)<RealScalar(0)) u = -u; m_c = Scalar(1)/u; m_s = -qs*conj(ps)*(m_c/p2); if(r) *r = p * u; } else { Scalar ps = p / q1; RealScalar p2 = numext::abs2(ps); Scalar qs = q / q1; RealScalar q2 = numext::abs2(qs); RealScalar u = q1 * sqrt(p2 + q2); if(numext::real(p)<RealScalar(0)) u = -u; p1 = abs(p); ps = p/p1; m_c = p1/u; m_s = -conj(ps) * (q/u); if(r) *r = ps * u; } } } // specialization for reals template<typename Scalar> void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type) { using std::sqrt; using std::abs; if(q==Scalar(0)) { m_c = p<Scalar(0) ? Scalar(-1) : Scalar(1); m_s = Scalar(0); if(r) *r = abs(p); } else if(p==Scalar(0)) { m_c = Scalar(0); m_s = q<Scalar(0) ? 
Scalar(1) : Scalar(-1); if(r) *r = abs(q); } else if(abs(p) > abs(q)) { Scalar t = q/p; Scalar u = sqrt(Scalar(1) + numext::abs2(t)); if(p<Scalar(0)) u = -u; m_c = Scalar(1)/u; m_s = -t * m_c; if(r) *r = p * u; } else { Scalar t = p/q; Scalar u = sqrt(Scalar(1) + numext::abs2(t)); if(q<Scalar(0)) u = -u; m_s = -Scalar(1)/u; m_c = -t * m_s; if(r) *r = q * u; } } /**************************************************************************************** * Implementation of MatrixBase methods ****************************************************************************************/ namespace internal { /** \jacobi_module * Applies the clock wise 2D rotation \a j to the set of 2D vectors of cordinates \a x and \a y: * \f$ \left ( \begin{array}{cc} x \\ y \end{array} \right ) = J \left ( \begin{array}{cc} x \\ y \end{array} \right ) \f$ * * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ template<typename VectorX, typename VectorY, typename OtherScalar> void apply_rotation_in_the_plane(DenseBase<VectorX>& xpr_x, DenseBase<VectorY>& xpr_y, const JacobiRotation<OtherScalar>& j); } /** \jacobi_module * Applies the rotation in the plane \a j to the rows \a p and \a q of \c *this, i.e., it computes B = J * B, * with \f$ B = \left ( \begin{array}{cc} \text{*this.row}(p) \\ \text{*this.row}(q) \end{array} \right ) \f$. * * \sa class JacobiRotation, MatrixBase::applyOnTheRight(), internal::apply_rotation_in_the_plane() */ template<typename Derived> template<typename OtherScalar> inline void MatrixBase<Derived>::applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j) { RowXpr x(this->row(p)); RowXpr y(this->row(q)); internal::apply_rotation_in_the_plane(x, y, j); } /** \ingroup Jacobi_Module * Applies the rotation in the plane \a j to the columns \a p and \a q of \c *this, i.e., it computes B = B * J * with \f$ B = \left ( \begin{array}{cc} \text{*this.col}(p) & \text{*this.col}(q) \end{array} \right ) \f$. 
* * \sa class JacobiRotation, MatrixBase::applyOnTheLeft(), internal::apply_rotation_in_the_plane() */ template<typename Derived> template<typename OtherScalar> inline void MatrixBase<Derived>::applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j) { ColXpr x(this->col(p)); ColXpr y(this->col(q)); internal::apply_rotation_in_the_plane(x, y, j.transpose()); } namespace internal { template<typename Scalar, typename OtherScalar, int SizeAtCompileTime, int MinAlignment, bool Vectorizable> struct apply_rotation_in_the_plane_selector { static inline void run(Scalar *x, Index incrx, Scalar *y, Index incry, Index size, OtherScalar c, OtherScalar s) { for(Index i=0; i<size; ++i) { Scalar xi = *x; Scalar yi = *y; *x = c * xi + numext::conj(s) * yi; *y = -s * xi + numext::conj(c) * yi; x += incrx; y += incry; } } }; template<typename Scalar, typename OtherScalar, int SizeAtCompileTime, int MinAlignment> struct apply_rotation_in_the_plane_selector<Scalar,OtherScalar,SizeAtCompileTime,MinAlignment,true /* vectorizable */> { static inline void run(Scalar *x, Index incrx, Scalar *y, Index incry, Index size, OtherScalar c, OtherScalar s) { enum { PacketSize = packet_traits<Scalar>::size, OtherPacketSize = packet_traits<OtherScalar>::size }; typedef typename packet_traits<Scalar>::type Packet; typedef typename packet_traits<OtherScalar>::type OtherPacket; /*** dynamic-size vectorized paths ***/ if(SizeAtCompileTime == Dynamic && ((incrx==1 && incry==1) || PacketSize == 1)) { // both vectors are sequentially stored in memory => vectorization enum { Peeling = 2 }; Index alignedStart = internal::first_default_aligned(y, size); Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize; const OtherPacket pc = pset1<OtherPacket>(c); const OtherPacket ps = pset1<OtherPacket>(s); conj_helper<OtherPacket,Packet,NumTraits<OtherScalar>::IsComplex,false> pcj; conj_helper<OtherPacket,Packet,false,false> pm; for(Index i=0; i<alignedStart; ++i) { Scalar xi = x[i]; Scalar yi = y[i]; x[i] = c * xi + numext::conj(s) * yi; y[i] = -s * xi + numext::conj(c) * yi; } Scalar* EIGEN_RESTRICT px = x + alignedStart; Scalar* EIGEN_RESTRICT py = y + alignedStart; if(internal::first_default_aligned(x, size)==alignedStart) { for(Index i=alignedStart; i<alignedEnd; i+=PacketSize) { Packet xi = pload<Packet>(px); Packet yi = pload<Packet>(py); pstore(px, padd(pm.pmul(pc,xi),pcj.pmul(ps,yi))); pstore(py, psub(pcj.pmul(pc,yi),pm.pmul(ps,xi))); px += PacketSize; py += PacketSize; } } else { Index peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize); for(Index i=alignedStart; i<peelingEnd; i+=Peeling*PacketSize) { Packet xi = ploadu<Packet>(px); Packet xi1 = ploadu<Packet>(px+PacketSize); Packet yi = pload <Packet>(py); Packet yi1 = pload <Packet>(py+PacketSize); pstoreu(px, padd(pm.pmul(pc,xi),pcj.pmul(ps,yi))); pstoreu(px+PacketSize, padd(pm.pmul(pc,xi1),pcj.pmul(ps,yi1))); pstore (py, psub(pcj.pmul(pc,yi),pm.pmul(ps,xi))); pstore (py+PacketSize, psub(pcj.pmul(pc,yi1),pm.pmul(ps,xi1))); px += Peeling*PacketSize; py += Peeling*PacketSize; } if(alignedEnd!=peelingEnd) { Packet xi = ploadu<Packet>(x+peelingEnd); Packet yi = pload <Packet>(y+peelingEnd); pstoreu(x+peelingEnd, padd(pm.pmul(pc,xi),pcj.pmul(ps,yi))); pstore (y+peelingEnd, psub(pcj.pmul(pc,yi),pm.pmul(ps,xi))); } } for(Index i=alignedEnd; i<size; ++i) { Scalar xi = x[i]; Scalar yi = y[i]; x[i] = c * xi + numext::conj(s) * yi; y[i] = -s * xi + numext::conj(c) * yi; } } /*** fixed-size vectorized path ***/ 
else if(SizeAtCompileTime != Dynamic && MinAlignment>0) // FIXME should be compared to the required alignment { const OtherPacket pc = pset1<OtherPacket>(c); const OtherPacket ps = pset1<OtherPacket>(s); conj_helper<OtherPacket,Packet,NumTraits<OtherPacket>::IsComplex,false> pcj; conj_helper<OtherPacket,Packet,false,false> pm; Scalar* EIGEN_RESTRICT px = x; Scalar* EIGEN_RESTRICT py = y; for(Index i=0; i<size; i+=PacketSize) { Packet xi = pload<Packet>(px); Packet yi = pload<Packet>(py); pstore(px, padd(pm.pmul(pc,xi),pcj.pmul(ps,yi))); pstore(py, psub(pcj.pmul(pc,yi),pm.pmul(ps,xi))); px += PacketSize; py += PacketSize; } } /*** non-vectorized path ***/ else { apply_rotation_in_the_plane_selector<Scalar,OtherScalar,SizeAtCompileTime,MinAlignment,false>::run(x,incrx,y,incry,size,c,s); } } }; template<typename VectorX, typename VectorY, typename OtherScalar> void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(DenseBase<VectorX>& xpr_x, DenseBase<VectorY>& xpr_y, const JacobiRotation<OtherScalar>& j) { typedef typename VectorX::Scalar Scalar; const bool Vectorizable = (VectorX::Flags & VectorY::Flags & PacketAccessBit) && (int(packet_traits<Scalar>::size) == int(packet_traits<OtherScalar>::size)); eigen_assert(xpr_x.size() == xpr_y.size()); Index size = xpr_x.size(); Index incrx = xpr_x.derived().innerStride(); Index incry = xpr_y.derived().innerStride(); Scalar* EIGEN_RESTRICT x = &xpr_x.derived().coeffRef(0); Scalar* EIGEN_RESTRICT y = &xpr_y.derived().coeffRef(0); OtherScalar c = j.c(); OtherScalar s = j.s(); if (c==OtherScalar(1) && s==OtherScalar(0)) return; apply_rotation_in_the_plane_selector< Scalar,OtherScalar, VectorX::SizeAtCompileTime, EIGEN_PLAIN_ENUM_MIN(evaluator<VectorX>::Alignment, evaluator<VectorY>::Alignment), Vectorizable>::run(x,incrx,y,incry,size,c,s); } } // end namespace internal } // end namespace Eigen #endif // EIGEN_JACOBI_H
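The Jacobi module above documents the makeGivens()/applyOnTheLeft() workflow; the following sketch builds a Givens rotation that zeroes the second entry of a 2-vector. Illustrative only, assuming Eigen/Dense (which pulls in the Jacobi module).

// Illustrative usage of JacobiRotation::makeGivens and the apply* helpers above.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Vector2d v(3.0, 4.0);

  // Build G such that G^* * (p, q)^T = (r, 0)^T; r is returned on request.
  Eigen::JacobiRotation<double> G;
  double r;
  G.makeGivens(v.x(), v.y(), &r);

  // applyOnTheLeft(p, q, J) rotates rows p and q of the expression,
  // so applying G.adjoint() zeroes the second entry: v becomes (5, 0).
  v.applyOnTheLeft(0, 1, G.adjoint());
  std::cout << "r = " << r << ", rotated v = " << v.transpose() << "\n";

  // The same rotation can act on columns p and q via applyOnTheRight.
  Eigen::Matrix2d m = Eigen::Matrix2d::Identity();
  m.applyOnTheRight(0, 1, G);
  std::cout << m << "\n";
  return 0;
}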
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Geometry/Homogeneous.h
.h
20,539
498
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_HOMOGENEOUS_H #define EIGEN_HOMOGENEOUS_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class Homogeneous * * \brief Expression of one (or a set of) homogeneous vector(s) * * \param MatrixType the type of the object in which we are making homogeneous * * This class represents an expression of one (or a set of) homogeneous vector(s). * It is the return type of MatrixBase::homogeneous() and most of the time * this is the only way it is used. * * \sa MatrixBase::homogeneous() */ namespace internal { template<typename MatrixType,int Direction> struct traits<Homogeneous<MatrixType,Direction> > : traits<MatrixType> { typedef typename traits<MatrixType>::StorageKind StorageKind; typedef typename ref_selector<MatrixType>::type MatrixTypeNested; typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested; enum { RowsPlusOne = (MatrixType::RowsAtCompileTime != Dynamic) ? int(MatrixType::RowsAtCompileTime) + 1 : Dynamic, ColsPlusOne = (MatrixType::ColsAtCompileTime != Dynamic) ? int(MatrixType::ColsAtCompileTime) + 1 : Dynamic, RowsAtCompileTime = Direction==Vertical ? RowsPlusOne : MatrixType::RowsAtCompileTime, ColsAtCompileTime = Direction==Horizontal ? ColsPlusOne : MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = RowsAtCompileTime, MaxColsAtCompileTime = ColsAtCompileTime, TmpFlags = _MatrixTypeNested::Flags & HereditaryBits, Flags = ColsAtCompileTime==1 ? (TmpFlags & ~RowMajorBit) : RowsAtCompileTime==1 ? (TmpFlags | RowMajorBit) : TmpFlags }; }; template<typename MatrixType,typename Lhs> struct homogeneous_left_product_impl; template<typename MatrixType,typename Rhs> struct homogeneous_right_product_impl; } // end namespace internal template<typename MatrixType,int _Direction> class Homogeneous : public MatrixBase<Homogeneous<MatrixType,_Direction> >, internal::no_assignment_operator { public: typedef MatrixType NestedExpression; enum { Direction = _Direction }; typedef MatrixBase<Homogeneous> Base; EIGEN_DENSE_PUBLIC_INTERFACE(Homogeneous) EIGEN_DEVICE_FUNC explicit inline Homogeneous(const MatrixType& matrix) : m_matrix(matrix) {} EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.rows() + (int(Direction)==Vertical ? 1 : 0); } EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 
1 : 0); } EIGEN_DEVICE_FUNC const NestedExpression& nestedExpression() const { return m_matrix; } template<typename Rhs> EIGEN_DEVICE_FUNC inline const Product<Homogeneous,Rhs> operator* (const MatrixBase<Rhs>& rhs) const { eigen_assert(int(Direction)==Horizontal); return Product<Homogeneous,Rhs>(*this,rhs.derived()); } template<typename Lhs> friend EIGEN_DEVICE_FUNC inline const Product<Lhs,Homogeneous> operator* (const MatrixBase<Lhs>& lhs, const Homogeneous& rhs) { eigen_assert(int(Direction)==Vertical); return Product<Lhs,Homogeneous>(lhs.derived(),rhs); } template<typename Scalar, int Dim, int Mode, int Options> friend EIGEN_DEVICE_FUNC inline const Product<Transform<Scalar,Dim,Mode,Options>, Homogeneous > operator* (const Transform<Scalar,Dim,Mode,Options>& lhs, const Homogeneous& rhs) { eigen_assert(int(Direction)==Vertical); return Product<Transform<Scalar,Dim,Mode,Options>, Homogeneous>(lhs,rhs); } template<typename Func> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::result_of<Func(Scalar,Scalar)>::type redux(const Func& func) const { return func(m_matrix.redux(func), Scalar(1)); } protected: typename MatrixType::Nested m_matrix; }; /** \geometry_module \ingroup Geometry_Module * * \returns a vector expression that is one longer than the vector argument, with the value 1 symbolically appended as the last coefficient. * * This can be used to convert affine coordinates to homogeneous coordinates. * * \only_for_vectors * * Example: \include MatrixBase_homogeneous.cpp * Output: \verbinclude MatrixBase_homogeneous.out * * \sa VectorwiseOp::homogeneous(), class Homogeneous */ template<typename Derived> EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::HomogeneousReturnType MatrixBase<Derived>::homogeneous() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); return HomogeneousReturnType(derived()); } /** \geometry_module \ingroup Geometry_Module * * \returns an expression where the value 1 is symbolically appended as the final coefficient to each column (or row) of the matrix. * * This can be used to convert affine coordinates to homogeneous coordinates. * * Example: \include VectorwiseOp_homogeneous.cpp * Output: \verbinclude VectorwiseOp_homogeneous.out * * \sa MatrixBase::homogeneous(), class Homogeneous */ template<typename ExpressionType, int Direction> EIGEN_DEVICE_FUNC inline Homogeneous<ExpressionType,Direction> VectorwiseOp<ExpressionType,Direction>::homogeneous() const { return HomogeneousReturnType(_expression()); } /** \geometry_module \ingroup Geometry_Module * * \brief homogeneous normalization * * \returns a vector expression of the N-1 first coefficients of \c *this divided by that last coefficient. * * This can be used to convert homogeneous coordinates to affine coordinates. 
* * It is essentially a shortcut for: * \code this->head(this->size()-1)/this->coeff(this->size()-1); \endcode * * Example: \include MatrixBase_hnormalized.cpp * Output: \verbinclude MatrixBase_hnormalized.out * * \sa VectorwiseOp::hnormalized() */ template<typename Derived> EIGEN_DEVICE_FUNC inline const typename MatrixBase<Derived>::HNormalizedReturnType MatrixBase<Derived>::hnormalized() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); return ConstStartMinusOne(derived(),0,0, ColsAtCompileTime==1?size()-1:1, ColsAtCompileTime==1?1:size()-1) / coeff(size()-1); } /** \geometry_module \ingroup Geometry_Module * * \brief column or row-wise homogeneous normalization * * \returns an expression of the first N-1 coefficients of each column (or row) of \c *this divided by the last coefficient of each column (or row). * * This can be used to convert homogeneous coordinates to affine coordinates. * * It is conceptually equivalent to calling MatrixBase::hnormalized() to each column (or row) of \c *this. * * Example: \include DirectionWise_hnormalized.cpp * Output: \verbinclude DirectionWise_hnormalized.out * * \sa MatrixBase::hnormalized() */ template<typename ExpressionType, int Direction> EIGEN_DEVICE_FUNC inline const typename VectorwiseOp<ExpressionType,Direction>::HNormalizedReturnType VectorwiseOp<ExpressionType,Direction>::hnormalized() const { return HNormalized_Block(_expression(),0,0, Direction==Vertical ? _expression().rows()-1 : _expression().rows(), Direction==Horizontal ? _expression().cols()-1 : _expression().cols()).cwiseQuotient( Replicate<HNormalized_Factors, Direction==Vertical ? HNormalized_SizeMinusOne : 1, Direction==Horizontal ? HNormalized_SizeMinusOne : 1> (HNormalized_Factors(_expression(), Direction==Vertical ? _expression().rows()-1:0, Direction==Horizontal ? _expression().cols()-1:0, Direction==Vertical ? 1 : _expression().rows(), Direction==Horizontal ? 1 : _expression().cols()), Direction==Vertical ? _expression().rows()-1 : 1, Direction==Horizontal ? 
_expression().cols()-1 : 1)); } namespace internal { template<typename MatrixOrTransformType> struct take_matrix_for_product { typedef MatrixOrTransformType type; EIGEN_DEVICE_FUNC static const type& run(const type &x) { return x; } }; template<typename Scalar, int Dim, int Mode,int Options> struct take_matrix_for_product<Transform<Scalar, Dim, Mode, Options> > { typedef Transform<Scalar, Dim, Mode, Options> TransformType; typedef typename internal::add_const<typename TransformType::ConstAffinePart>::type type; EIGEN_DEVICE_FUNC static type run (const TransformType& x) { return x.affine(); } }; template<typename Scalar, int Dim, int Options> struct take_matrix_for_product<Transform<Scalar, Dim, Projective, Options> > { typedef Transform<Scalar, Dim, Projective, Options> TransformType; typedef typename TransformType::MatrixType type; EIGEN_DEVICE_FUNC static const type& run (const TransformType& x) { return x.matrix(); } }; template<typename MatrixType,typename Lhs> struct traits<homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs> > { typedef typename take_matrix_for_product<Lhs>::type LhsMatrixType; typedef typename remove_all<MatrixType>::type MatrixTypeCleaned; typedef typename remove_all<LhsMatrixType>::type LhsMatrixTypeCleaned; typedef typename make_proper_matrix_type< typename traits<MatrixTypeCleaned>::Scalar, LhsMatrixTypeCleaned::RowsAtCompileTime, MatrixTypeCleaned::ColsAtCompileTime, MatrixTypeCleaned::PlainObject::Options, LhsMatrixTypeCleaned::MaxRowsAtCompileTime, MatrixTypeCleaned::MaxColsAtCompileTime>::type ReturnType; }; template<typename MatrixType,typename Lhs> struct homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs> : public ReturnByValue<homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs> > { typedef typename traits<homogeneous_left_product_impl>::LhsMatrixType LhsMatrixType; typedef typename remove_all<LhsMatrixType>::type LhsMatrixTypeCleaned; typedef typename remove_all<typename LhsMatrixTypeCleaned::Nested>::type LhsMatrixTypeNested; EIGEN_DEVICE_FUNC homogeneous_left_product_impl(const Lhs& lhs, const MatrixType& rhs) : m_lhs(take_matrix_for_product<Lhs>::run(lhs)), m_rhs(rhs) {} EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); } EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); } template<typename Dest> EIGEN_DEVICE_FUNC void evalTo(Dest& dst) const { // FIXME investigate how to allow lazy evaluation of this product when possible dst = Block<const LhsMatrixTypeNested, LhsMatrixTypeNested::RowsAtCompileTime, LhsMatrixTypeNested::ColsAtCompileTime==Dynamic?Dynamic:LhsMatrixTypeNested::ColsAtCompileTime-1> (m_lhs,0,0,m_lhs.rows(),m_lhs.cols()-1) * m_rhs; dst += m_lhs.col(m_lhs.cols()-1).rowwise() .template replicate<MatrixType::ColsAtCompileTime>(m_rhs.cols()); } typename LhsMatrixTypeCleaned::Nested m_lhs; typename MatrixType::Nested m_rhs; }; template<typename MatrixType,typename Rhs> struct traits<homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> > { typedef typename make_proper_matrix_type<typename traits<MatrixType>::Scalar, MatrixType::RowsAtCompileTime, Rhs::ColsAtCompileTime, MatrixType::PlainObject::Options, MatrixType::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime>::type ReturnType; }; template<typename MatrixType,typename Rhs> struct homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> : public ReturnByValue<homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> > { typedef typename remove_all<typename 
Rhs::Nested>::type RhsNested; EIGEN_DEVICE_FUNC homogeneous_right_product_impl(const MatrixType& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) {} EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); } EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); } template<typename Dest> EIGEN_DEVICE_FUNC void evalTo(Dest& dst) const { // FIXME investigate how to allow lazy evaluation of this product when possible dst = m_lhs * Block<const RhsNested, RhsNested::RowsAtCompileTime==Dynamic?Dynamic:RhsNested::RowsAtCompileTime-1, RhsNested::ColsAtCompileTime> (m_rhs,0,0,m_rhs.rows()-1,m_rhs.cols()); dst += m_rhs.row(m_rhs.rows()-1).colwise() .template replicate<MatrixType::RowsAtCompileTime>(m_lhs.rows()); } typename MatrixType::Nested m_lhs; typename Rhs::Nested m_rhs; }; template<typename ArgType,int Direction> struct evaluator_traits<Homogeneous<ArgType,Direction> > { typedef typename storage_kind_to_evaluator_kind<typename ArgType::StorageKind>::Kind Kind; typedef HomogeneousShape Shape; }; template<> struct AssignmentKind<DenseShape,HomogeneousShape> { typedef Dense2Dense Kind; }; template<typename ArgType,int Direction> struct unary_evaluator<Homogeneous<ArgType,Direction>, IndexBased> : evaluator<typename Homogeneous<ArgType,Direction>::PlainObject > { typedef Homogeneous<ArgType,Direction> XprType; typedef typename XprType::PlainObject PlainObject; typedef evaluator<PlainObject> Base; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op) : Base(), m_temp(op) { ::new (static_cast<Base*>(this)) Base(m_temp); } protected: PlainObject m_temp; }; // dense = homogeneous template< typename DstXprType, typename ArgType, typename Scalar> struct Assignment<DstXprType, Homogeneous<ArgType,Vertical>, internal::assign_op<Scalar,typename ArgType::Scalar>, Dense2Dense> { typedef Homogeneous<ArgType,Vertical> SrcXprType; EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename ArgType::Scalar> &) { Index dstRows = src.rows(); Index dstCols = src.cols(); if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) dst.resize(dstRows, dstCols); dst.template topRows<ArgType::RowsAtCompileTime>(src.nestedExpression().rows()) = src.nestedExpression(); dst.row(dst.rows()-1).setOnes(); } }; // dense = homogeneous template< typename DstXprType, typename ArgType, typename Scalar> struct Assignment<DstXprType, Homogeneous<ArgType,Horizontal>, internal::assign_op<Scalar,typename ArgType::Scalar>, Dense2Dense> { typedef Homogeneous<ArgType,Horizontal> SrcXprType; EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename ArgType::Scalar> &) { Index dstRows = src.rows(); Index dstCols = src.cols(); if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) dst.resize(dstRows, dstCols); dst.template leftCols<ArgType::ColsAtCompileTime>(src.nestedExpression().cols()) = src.nestedExpression(); dst.col(dst.cols()-1).setOnes(); } }; template<typename LhsArg, typename Rhs, int ProductTag> struct generic_product_impl<Homogeneous<LhsArg,Horizontal>, Rhs, HomogeneousShape, DenseShape, ProductTag> { template<typename Dest> EIGEN_DEVICE_FUNC static void evalTo(Dest& dst, const Homogeneous<LhsArg,Horizontal>& lhs, const Rhs& rhs) { homogeneous_right_product_impl<Homogeneous<LhsArg,Horizontal>, Rhs>(lhs.nestedExpression(), rhs).evalTo(dst); } }; template<typename Lhs,typename Rhs> struct homogeneous_right_product_refactoring_helper { enum { Dim = Lhs::ColsAtCompileTime, Rows = Lhs::RowsAtCompileTime }; 
typedef typename Rhs::template ConstNRowsBlockXpr<Dim>::Type LinearBlockConst; typedef typename remove_const<LinearBlockConst>::type LinearBlock; typedef typename Rhs::ConstRowXpr ConstantColumn; typedef Replicate<const ConstantColumn,Rows,1> ConstantBlock; typedef Product<Lhs,LinearBlock,LazyProduct> LinearProduct; typedef CwiseBinaryOp<internal::scalar_sum_op<typename Lhs::Scalar,typename Rhs::Scalar>, const LinearProduct, const ConstantBlock> Xpr; }; template<typename Lhs, typename Rhs, int ProductTag> struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, HomogeneousShape, DenseShape> : public evaluator<typename homogeneous_right_product_refactoring_helper<typename Lhs::NestedExpression,Rhs>::Xpr> { typedef Product<Lhs, Rhs, LazyProduct> XprType; typedef homogeneous_right_product_refactoring_helper<typename Lhs::NestedExpression,Rhs> helper; typedef typename helper::ConstantBlock ConstantBlock; typedef typename helper::Xpr RefactoredXpr; typedef evaluator<RefactoredXpr> Base; EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr) : Base( xpr.lhs().nestedExpression() .lazyProduct( xpr.rhs().template topRows<helper::Dim>(xpr.lhs().nestedExpression().cols()) ) + ConstantBlock(xpr.rhs().row(xpr.rhs().rows()-1),xpr.lhs().rows(), 1) ) {} }; template<typename Lhs, typename RhsArg, int ProductTag> struct generic_product_impl<Lhs, Homogeneous<RhsArg,Vertical>, DenseShape, HomogeneousShape, ProductTag> { template<typename Dest> EIGEN_DEVICE_FUNC static void evalTo(Dest& dst, const Lhs& lhs, const Homogeneous<RhsArg,Vertical>& rhs) { homogeneous_left_product_impl<Homogeneous<RhsArg,Vertical>, Lhs>(lhs, rhs.nestedExpression()).evalTo(dst); } }; // TODO: the following specialization is to address a regression from 3.2 to 3.3 // In the future, this path should be optimized. 
template<typename Lhs, typename RhsArg, int ProductTag> struct generic_product_impl<Lhs, Homogeneous<RhsArg,Vertical>, TriangularShape, HomogeneousShape, ProductTag> { template<typename Dest> static void evalTo(Dest& dst, const Lhs& lhs, const Homogeneous<RhsArg,Vertical>& rhs) { dst.noalias() = lhs * rhs.eval(); } }; template<typename Lhs,typename Rhs> struct homogeneous_left_product_refactoring_helper { enum { Dim = Rhs::RowsAtCompileTime, Cols = Rhs::ColsAtCompileTime }; typedef typename Lhs::template ConstNColsBlockXpr<Dim>::Type LinearBlockConst; typedef typename remove_const<LinearBlockConst>::type LinearBlock; typedef typename Lhs::ConstColXpr ConstantColumn; typedef Replicate<const ConstantColumn,1,Cols> ConstantBlock; typedef Product<LinearBlock,Rhs,LazyProduct> LinearProduct; typedef CwiseBinaryOp<internal::scalar_sum_op<typename Lhs::Scalar,typename Rhs::Scalar>, const LinearProduct, const ConstantBlock> Xpr; }; template<typename Lhs, typename Rhs, int ProductTag> struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape, HomogeneousShape> : public evaluator<typename homogeneous_left_product_refactoring_helper<Lhs,typename Rhs::NestedExpression>::Xpr> { typedef Product<Lhs, Rhs, LazyProduct> XprType; typedef homogeneous_left_product_refactoring_helper<Lhs,typename Rhs::NestedExpression> helper; typedef typename helper::ConstantBlock ConstantBlock; typedef typename helper::Xpr RefactoredXpr; typedef evaluator<RefactoredXpr> Base; EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr) : Base( xpr.lhs().template leftCols<helper::Dim>(xpr.rhs().nestedExpression().rows()) .lazyProduct( xpr.rhs().nestedExpression() ) + ConstantBlock(xpr.lhs().col(xpr.lhs().cols()-1),1,xpr.rhs().cols()) ) {} }; template<typename Scalar, int Dim, int Mode,int Options, typename RhsArg, int ProductTag> struct generic_product_impl<Transform<Scalar,Dim,Mode,Options>, Homogeneous<RhsArg,Vertical>, DenseShape, HomogeneousShape, ProductTag> { typedef Transform<Scalar,Dim,Mode,Options> TransformType; template<typename Dest> EIGEN_DEVICE_FUNC static void evalTo(Dest& dst, const TransformType& lhs, const Homogeneous<RhsArg,Vertical>& rhs) { homogeneous_left_product_impl<Homogeneous<RhsArg,Vertical>, TransformType>(lhs, rhs.nestedExpression()).evalTo(dst); } }; template<typename ExpressionType, int Side, bool Transposed> struct permutation_matrix_product<ExpressionType, Side, Transposed, HomogeneousShape> : public permutation_matrix_product<ExpressionType, Side, Transposed, DenseShape> {}; } // end namespace internal } // end namespace Eigen #endif // EIGEN_HOMOGENEOUS_H
Unknown
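The Homogeneous.h evaluators dumped above implement products between homogeneous expressions and dense matrices or transforms. Below is a minimal, self-contained sketch of the user-facing behaviour they provide; the driver code (main(), the sample point, the translation value and the expected output in the comment) is illustrative only, while homogeneous() and hnormalized() are the public entry points declared in this header.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  // A 3D point and its homogeneous lift (appends a trailing 1).
  Eigen::Vector3d p(1.0, 2.0, 3.0);

  // A 4x4 transform acting on homogeneous coordinates:
  // identity plus a translation of 5 along x.
  Eigen::Matrix4d T = Eigen::Matrix4d::Identity();
  T(0, 3) = 5.0;

  // dense * Homogeneous product, then renormalization back to 3D.
  Eigen::Vector3d q = (T * p.homogeneous()).hnormalized();

  std::cout << q.transpose() << std::endl;   // expected: 6 2 3
  return 0;
}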
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Geometry/RotationBase.h
.h
8,063
207
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ROTATIONBASE_H #define EIGEN_ROTATIONBASE_H namespace Eigen { // forward declaration namespace internal { template<typename RotationDerived, typename MatrixType, bool IsVector=MatrixType::IsVectorAtCompileTime> struct rotation_base_generic_product_selector; } /** \class RotationBase * * \brief Common base class for compact rotation representations * * \tparam Derived is the derived type, i.e., a rotation type * \tparam _Dim the dimension of the space */ template<typename Derived, int _Dim> class RotationBase { public: enum { Dim = _Dim }; /** the scalar type of the coefficients */ typedef typename internal::traits<Derived>::Scalar Scalar; /** corresponding linear transformation matrix type */ typedef Matrix<Scalar,Dim,Dim> RotationMatrixType; typedef Matrix<Scalar,Dim,1> VectorType; public: EIGEN_DEVICE_FUNC inline const Derived& derived() const { return *static_cast<const Derived*>(this); } EIGEN_DEVICE_FUNC inline Derived& derived() { return *static_cast<Derived*>(this); } /** \returns an equivalent rotation matrix */ EIGEN_DEVICE_FUNC inline RotationMatrixType toRotationMatrix() const { return derived().toRotationMatrix(); } /** \returns an equivalent rotation matrix * This function is added to be conform with the Transform class' naming scheme. */ EIGEN_DEVICE_FUNC inline RotationMatrixType matrix() const { return derived().toRotationMatrix(); } /** \returns the inverse rotation */ EIGEN_DEVICE_FUNC inline Derived inverse() const { return derived().inverse(); } /** \returns the concatenation of the rotation \c *this with a translation \a t */ EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Isometry> operator*(const Translation<Scalar,Dim>& t) const { return Transform<Scalar,Dim,Isometry>(*this) * t; } /** \returns the concatenation of the rotation \c *this with a uniform scaling \a s */ EIGEN_DEVICE_FUNC inline RotationMatrixType operator*(const UniformScaling<Scalar>& s) const { return toRotationMatrix() * s.factor(); } /** \returns the concatenation of the rotation \c *this with a generic expression \a e * \a e can be: * - a DimxDim linear transformation matrix * - a DimxDim diagonal matrix (axis aligned scaling) * - a vector of size Dim */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::rotation_base_generic_product_selector<Derived,OtherDerived,OtherDerived::IsVectorAtCompileTime>::ReturnType operator*(const EigenBase<OtherDerived>& e) const { return internal::rotation_base_generic_product_selector<Derived,OtherDerived>::run(derived(), e.derived()); } /** \returns the concatenation of a linear transformation \a l with the rotation \a r */ template<typename OtherDerived> friend EIGEN_DEVICE_FUNC inline RotationMatrixType operator*(const EigenBase<OtherDerived>& l, const Derived& r) { return l.derived() * r.toRotationMatrix(); } /** \returns the concatenation of a scaling \a l with the rotation \a r */ EIGEN_DEVICE_FUNC friend inline Transform<Scalar,Dim,Affine> operator*(const DiagonalMatrix<Scalar,Dim>& l, const Derived& r) { Transform<Scalar,Dim,Affine> res(r); res.linear().applyOnTheLeft(l); return res; } /** \returns the concatenation of the rotation \c *this with a 
transformation \a t */ template<int Mode, int Options> EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode> operator*(const Transform<Scalar,Dim,Mode,Options>& t) const { return toRotationMatrix() * t; } template<typename OtherVectorType> EIGEN_DEVICE_FUNC inline VectorType _transformVector(const OtherVectorType& v) const { return toRotationMatrix() * v; } }; namespace internal { // implementation of the generic product rotation * matrix template<typename RotationDerived, typename MatrixType> struct rotation_base_generic_product_selector<RotationDerived,MatrixType,false> { enum { Dim = RotationDerived::Dim }; typedef Matrix<typename RotationDerived::Scalar,Dim,Dim> ReturnType; EIGEN_DEVICE_FUNC static inline ReturnType run(const RotationDerived& r, const MatrixType& m) { return r.toRotationMatrix() * m; } }; template<typename RotationDerived, typename Scalar, int Dim, int MaxDim> struct rotation_base_generic_product_selector< RotationDerived, DiagonalMatrix<Scalar,Dim,MaxDim>, false > { typedef Transform<Scalar,Dim,Affine> ReturnType; EIGEN_DEVICE_FUNC static inline ReturnType run(const RotationDerived& r, const DiagonalMatrix<Scalar,Dim,MaxDim>& m) { ReturnType res(r); res.linear() *= m; return res; } }; template<typename RotationDerived,typename OtherVectorType> struct rotation_base_generic_product_selector<RotationDerived,OtherVectorType,true> { enum { Dim = RotationDerived::Dim }; typedef Matrix<typename RotationDerived::Scalar,Dim,1> ReturnType; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE ReturnType run(const RotationDerived& r, const OtherVectorType& v) { return r._transformVector(v); } }; } // end namespace internal /** \geometry_module * * \brief Constructs a Dim x Dim rotation matrix from the rotation \a r */ template<typename _Scalar, int _Rows, int _Cols, int _Storage, int _MaxRows, int _MaxCols> template<typename OtherDerived> EIGEN_DEVICE_FUNC Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols> ::Matrix(const RotationBase<OtherDerived,ColsAtCompileTime>& r) { EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim)) *this = r.toRotationMatrix(); } /** \geometry_module * * \brief Set a Dim x Dim rotation matrix from the rotation \a r */ template<typename _Scalar, int _Rows, int _Cols, int _Storage, int _MaxRows, int _MaxCols> template<typename OtherDerived> EIGEN_DEVICE_FUNC Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>& Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols> ::operator=(const RotationBase<OtherDerived,ColsAtCompileTime>& r) { EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim)) return *this = r.toRotationMatrix(); } namespace internal { /** \internal * * Helper function to return an arbitrary rotation object to a rotation matrix. * * \tparam Scalar the numeric type of the matrix coefficients * \tparam Dim the dimension of the current space * * It returns a Dim x Dim fixed size matrix. * * Default specializations are provided for: * - any scalar type (2D), * - any matrix expression, * - any type based on RotationBase (e.g., Quaternion, AngleAxis, Rotation2D) * * Currently toRotationMatrix is only used by Transform. 
 *
 * \sa class Transform, class Rotation2D, class Quaternion, class AngleAxis
 */
template<typename Scalar, int Dim>
EIGEN_DEVICE_FUNC static inline Matrix<Scalar,2,2> toRotationMatrix(const Scalar& s)
{
  EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
  return Rotation2D<Scalar>(s).toRotationMatrix();
}

template<typename Scalar, int Dim, typename OtherDerived>
EIGEN_DEVICE_FUNC static inline Matrix<Scalar,Dim,Dim> toRotationMatrix(const RotationBase<OtherDerived,Dim>& r)
{
  return r.toRotationMatrix();
}

template<typename Scalar, int Dim, typename OtherDerived>
EIGEN_DEVICE_FUNC static inline const MatrixBase<OtherDerived>& toRotationMatrix(const MatrixBase<OtherDerived>& mat)
{
  EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim,
    YOU_MADE_A_PROGRAMMING_MISTAKE)
  return mat;
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_ROTATIONBASE_H
Unknown
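RotationBase.h above provides the generic operator* overloads shared by AngleAxis, Rotation2D and Quaternion, plus the Matrix constructor and assignment from a rotation. The sketch below exercises those paths with an AngleAxisd; the angle, axis and printed checks are illustrative assumptions, not part of the header.

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  // AngleAxisd derives from RotationBase<AngleAxisd,3>, so the generic
  // rotation * vector and rotation * matrix overloads above apply to it.
  Eigen::AngleAxisd rot(EIGEN_PI / 2, Eigen::Vector3d::UnitZ());

  // rotation * vector goes through _transformVector().
  Eigen::Vector3d v = rot * Eigen::Vector3d::UnitX();     // ~ (0, 1, 0)

  // rotation * matrix returns a plain Dim x Dim matrix.
  Eigen::Matrix3d M = rot * Eigen::Matrix3d::Identity();

  // A Matrix can be constructed directly from any RotationBase (see above).
  Eigen::Matrix3d R(rot);

  std::cout << v.transpose() << "\n" << (M - R).norm() << std::endl;  // ~0
  return 0;
}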
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Geometry/Quaternion.h
.h
32,704
833
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2009 Mathieu Gautier <mathieu.gautier@cea.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_QUATERNION_H #define EIGEN_QUATERNION_H namespace Eigen { /*************************************************************************** * Definition of QuaternionBase<Derived> * The implementation is at the end of the file ***************************************************************************/ namespace internal { template<typename Other, int OtherRows=Other::RowsAtCompileTime, int OtherCols=Other::ColsAtCompileTime> struct quaternionbase_assign_impl; } /** \geometry_module \ingroup Geometry_Module * \class QuaternionBase * \brief Base class for quaternion expressions * \tparam Derived derived type (CRTP) * \sa class Quaternion */ template<class Derived> class QuaternionBase : public RotationBase<Derived, 3> { public: typedef RotationBase<Derived, 3> Base; using Base::operator*; using Base::derived; typedef typename internal::traits<Derived>::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename internal::traits<Derived>::Coefficients Coefficients; typedef typename Coefficients::CoeffReturnType CoeffReturnType; typedef typename internal::conditional<bool(internal::traits<Derived>::Flags&LvalueBit), Scalar&, CoeffReturnType>::type NonConstCoeffReturnType; enum { Flags = Eigen::internal::traits<Derived>::Flags }; // typedef typename Matrix<Scalar,4,1> Coefficients; /** the type of a 3D vector */ typedef Matrix<Scalar,3,1> Vector3; /** the equivalent rotation matrix type */ typedef Matrix<Scalar,3,3> Matrix3; /** the equivalent angle-axis type */ typedef AngleAxis<Scalar> AngleAxisType; /** \returns the \c x coefficient */ EIGEN_DEVICE_FUNC inline CoeffReturnType x() const { return this->derived().coeffs().coeff(0); } /** \returns the \c y coefficient */ EIGEN_DEVICE_FUNC inline CoeffReturnType y() const { return this->derived().coeffs().coeff(1); } /** \returns the \c z coefficient */ EIGEN_DEVICE_FUNC inline CoeffReturnType z() const { return this->derived().coeffs().coeff(2); } /** \returns the \c w coefficient */ EIGEN_DEVICE_FUNC inline CoeffReturnType w() const { return this->derived().coeffs().coeff(3); } /** \returns a reference to the \c x coefficient (if Derived is a non-const lvalue) */ EIGEN_DEVICE_FUNC inline NonConstCoeffReturnType x() { return this->derived().coeffs().x(); } /** \returns a reference to the \c y coefficient (if Derived is a non-const lvalue) */ EIGEN_DEVICE_FUNC inline NonConstCoeffReturnType y() { return this->derived().coeffs().y(); } /** \returns a reference to the \c z coefficient (if Derived is a non-const lvalue) */ EIGEN_DEVICE_FUNC inline NonConstCoeffReturnType z() { return this->derived().coeffs().z(); } /** \returns a reference to the \c w coefficient (if Derived is a non-const lvalue) */ EIGEN_DEVICE_FUNC inline NonConstCoeffReturnType w() { return this->derived().coeffs().w(); } /** \returns a read-only vector expression of the imaginary part (x,y,z) */ EIGEN_DEVICE_FUNC inline const VectorBlock<const Coefficients,3> vec() const { return coeffs().template head<3>(); } /** \returns a vector expression of the imaginary part (x,y,z) */ EIGEN_DEVICE_FUNC inline 
VectorBlock<Coefficients,3> vec() { return coeffs().template head<3>(); } /** \returns a read-only vector expression of the coefficients (x,y,z,w) */ EIGEN_DEVICE_FUNC inline const typename internal::traits<Derived>::Coefficients& coeffs() const { return derived().coeffs(); } /** \returns a vector expression of the coefficients (x,y,z,w) */ EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Coefficients& coeffs() { return derived().coeffs(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE QuaternionBase<Derived>& operator=(const QuaternionBase<Derived>& other); template<class OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const QuaternionBase<OtherDerived>& other); // disabled this copy operator as it is giving very strange compilation errors when compiling // test_stdvector with GCC 4.4.2. This looks like a GCC bug though, so feel free to re-enable it if it's // useful; however notice that we already have the templated operator= above and e.g. in MatrixBase // we didn't have to add, in addition to templated operator=, such a non-templated copy operator. // Derived& operator=(const QuaternionBase& other) // { return operator=<Derived>(other); } EIGEN_DEVICE_FUNC Derived& operator=(const AngleAxisType& aa); template<class OtherDerived> EIGEN_DEVICE_FUNC Derived& operator=(const MatrixBase<OtherDerived>& m); /** \returns a quaternion representing an identity rotation * \sa MatrixBase::Identity() */ EIGEN_DEVICE_FUNC static inline Quaternion<Scalar> Identity() { return Quaternion<Scalar>(Scalar(1), Scalar(0), Scalar(0), Scalar(0)); } /** \sa QuaternionBase::Identity(), MatrixBase::setIdentity() */ EIGEN_DEVICE_FUNC inline QuaternionBase& setIdentity() { coeffs() << Scalar(0), Scalar(0), Scalar(0), Scalar(1); return *this; } /** \returns the squared norm of the quaternion's coefficients * \sa QuaternionBase::norm(), MatrixBase::squaredNorm() */ EIGEN_DEVICE_FUNC inline Scalar squaredNorm() const { return coeffs().squaredNorm(); } /** \returns the norm of the quaternion's coefficients * \sa QuaternionBase::squaredNorm(), MatrixBase::norm() */ EIGEN_DEVICE_FUNC inline Scalar norm() const { return coeffs().norm(); } /** Normalizes the quaternion \c *this * \sa normalized(), MatrixBase::normalize() */ EIGEN_DEVICE_FUNC inline void normalize() { coeffs().normalize(); } /** \returns a normalized copy of \c *this * \sa normalize(), MatrixBase::normalized() */ EIGEN_DEVICE_FUNC inline Quaternion<Scalar> normalized() const { return Quaternion<Scalar>(coeffs().normalized()); } /** \returns the dot product of \c *this and \a other * Geometrically speaking, the dot product of two unit quaternions * corresponds to the cosine of half the angle between the two rotations. 
* \sa angularDistance() */ template<class OtherDerived> EIGEN_DEVICE_FUNC inline Scalar dot(const QuaternionBase<OtherDerived>& other) const { return coeffs().dot(other.coeffs()); } template<class OtherDerived> EIGEN_DEVICE_FUNC Scalar angularDistance(const QuaternionBase<OtherDerived>& other) const; /** \returns an equivalent 3x3 rotation matrix */ EIGEN_DEVICE_FUNC Matrix3 toRotationMatrix() const; /** \returns the quaternion which transform \a a into \a b through a rotation */ template<typename Derived1, typename Derived2> EIGEN_DEVICE_FUNC Derived& setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b); template<class OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Quaternion<Scalar> operator* (const QuaternionBase<OtherDerived>& q) const; template<class OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*= (const QuaternionBase<OtherDerived>& q); /** \returns the quaternion describing the inverse rotation */ EIGEN_DEVICE_FUNC Quaternion<Scalar> inverse() const; /** \returns the conjugated quaternion */ EIGEN_DEVICE_FUNC Quaternion<Scalar> conjugate() const; template<class OtherDerived> EIGEN_DEVICE_FUNC Quaternion<Scalar> slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const; /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ template<class OtherDerived> EIGEN_DEVICE_FUNC bool isApprox(const QuaternionBase<OtherDerived>& other, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const { return coeffs().isApprox(other.coeffs(), prec); } /** return the result vector of \a v through the rotation*/ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Vector3 _transformVector(const Vector3& v) const; #ifdef EIGEN_PARSED_BY_DOXYGEN /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template<typename NewScalarType> EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type cast() const; #else template<typename NewScalarType> EIGEN_DEVICE_FUNC inline typename internal::enable_if<internal::is_same<Scalar,NewScalarType>::value,const Derived&>::type cast() const { return derived(); } template<typename NewScalarType> EIGEN_DEVICE_FUNC inline typename internal::enable_if<!internal::is_same<Scalar,NewScalarType>::value,Quaternion<NewScalarType> >::type cast() const { return Quaternion<NewScalarType>(coeffs().template cast<NewScalarType>()); } #endif #ifdef EIGEN_QUATERNIONBASE_PLUGIN # include EIGEN_QUATERNIONBASE_PLUGIN #endif protected: EIGEN_DEFAULT_COPY_CONSTRUCTOR(QuaternionBase) EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(QuaternionBase) }; /*************************************************************************** * Definition/implementation of Quaternion<Scalar> ***************************************************************************/ /** \geometry_module \ingroup Geometry_Module * * \class Quaternion * * \brief The quaternion class used to represent 3D orientations and rotations * * \tparam _Scalar the scalar type, i.e., the type of the coefficients * \tparam _Options controls the memory alignment of the coefficients. Can be \# AutoAlign or \# DontAlign. Default is AutoAlign. 
* * This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of * orientations and rotations of objects in three dimensions. Compared to other representations * like Euler angles or 3x3 matrices, quaternions offer the following advantages: * \li \b compact storage (4 scalars) * \li \b efficient to compose (28 flops), * \li \b stable spherical interpolation * * The following two typedefs are provided for convenience: * \li \c Quaternionf for \c float * \li \c Quaterniond for \c double * * \warning Operations interpreting the quaternion as rotation have undefined behavior if the quaternion is not normalized. * * \sa class AngleAxis, class Transform */ namespace internal { template<typename _Scalar,int _Options> struct traits<Quaternion<_Scalar,_Options> > { typedef Quaternion<_Scalar,_Options> PlainObject; typedef _Scalar Scalar; typedef Matrix<_Scalar,4,1,_Options> Coefficients; enum{ Alignment = internal::traits<Coefficients>::Alignment, Flags = LvalueBit }; }; } template<typename _Scalar, int _Options> class Quaternion : public QuaternionBase<Quaternion<_Scalar,_Options> > { public: typedef QuaternionBase<Quaternion<_Scalar,_Options> > Base; enum { NeedsAlignment = internal::traits<Quaternion>::Alignment>0 }; typedef _Scalar Scalar; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Quaternion) using Base::operator*=; typedef typename internal::traits<Quaternion>::Coefficients Coefficients; typedef typename Base::AngleAxisType AngleAxisType; /** Default constructor leaving the quaternion uninitialized. */ EIGEN_DEVICE_FUNC inline Quaternion() {} /** Constructs and initializes the quaternion \f$ w+xi+yj+zk \f$ from * its four coefficients \a w, \a x, \a y and \a z. * * \warning Note the order of the arguments: the real \a w coefficient first, * while internally the coefficients are stored in the following order: * [\c x, \c y, \c z, \c w] */ EIGEN_DEVICE_FUNC inline Quaternion(const Scalar& w, const Scalar& x, const Scalar& y, const Scalar& z) : m_coeffs(x, y, z, w){} /** Constructs and initialize a quaternion from the array data */ EIGEN_DEVICE_FUNC explicit inline Quaternion(const Scalar* data) : m_coeffs(data) {} /** Copy constructor */ template<class Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Quaternion(const QuaternionBase<Derived>& other) { this->Base::operator=(other); } /** Constructs and initializes a quaternion from the angle-axis \a aa */ EIGEN_DEVICE_FUNC explicit inline Quaternion(const AngleAxisType& aa) { *this = aa; } /** Constructs and initializes a quaternion from either: * - a rotation matrix expression, * - a 4D vector expression representing quaternion coefficients. 
*/ template<typename Derived> EIGEN_DEVICE_FUNC explicit inline Quaternion(const MatrixBase<Derived>& other) { *this = other; } /** Explicit copy constructor with scalar conversion */ template<typename OtherScalar, int OtherOptions> EIGEN_DEVICE_FUNC explicit inline Quaternion(const Quaternion<OtherScalar, OtherOptions>& other) { m_coeffs = other.coeffs().template cast<Scalar>(); } EIGEN_DEVICE_FUNC static Quaternion UnitRandom(); template<typename Derived1, typename Derived2> EIGEN_DEVICE_FUNC static Quaternion FromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b); EIGEN_DEVICE_FUNC inline Coefficients& coeffs() { return m_coeffs;} EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs;} EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(NeedsAlignment)) #ifdef EIGEN_QUATERNION_PLUGIN # include EIGEN_QUATERNION_PLUGIN #endif protected: Coefficients m_coeffs; #ifndef EIGEN_PARSED_BY_DOXYGEN static EIGEN_STRONG_INLINE void _check_template_params() { EIGEN_STATIC_ASSERT( (_Options & DontAlign) == _Options, INVALID_MATRIX_TEMPLATE_PARAMETERS) } #endif }; /** \ingroup Geometry_Module * single precision quaternion type */ typedef Quaternion<float> Quaternionf; /** \ingroup Geometry_Module * double precision quaternion type */ typedef Quaternion<double> Quaterniond; /*************************************************************************** * Specialization of Map<Quaternion<Scalar>> ***************************************************************************/ namespace internal { template<typename _Scalar, int _Options> struct traits<Map<Quaternion<_Scalar>, _Options> > : traits<Quaternion<_Scalar, (int(_Options)&Aligned)==Aligned ? AutoAlign : DontAlign> > { typedef Map<Matrix<_Scalar,4,1>, _Options> Coefficients; }; } namespace internal { template<typename _Scalar, int _Options> struct traits<Map<const Quaternion<_Scalar>, _Options> > : traits<Quaternion<_Scalar, (int(_Options)&Aligned)==Aligned ? AutoAlign : DontAlign> > { typedef Map<const Matrix<_Scalar,4,1>, _Options> Coefficients; typedef traits<Quaternion<_Scalar, (int(_Options)&Aligned)==Aligned ? AutoAlign : DontAlign> > TraitsBase; enum { Flags = TraitsBase::Flags & ~LvalueBit }; }; } /** \ingroup Geometry_Module * \brief Quaternion expression mapping a constant memory buffer * * \tparam _Scalar the type of the Quaternion coefficients * \tparam _Options see class Map * * This is a specialization of class Map for Quaternion. This class allows to view * a 4 scalar memory buffer as an Eigen's Quaternion object. * * \sa class Map, class Quaternion, class QuaternionBase */ template<typename _Scalar, int _Options> class Map<const Quaternion<_Scalar>, _Options > : public QuaternionBase<Map<const Quaternion<_Scalar>, _Options> > { public: typedef QuaternionBase<Map<const Quaternion<_Scalar>, _Options> > Base; typedef _Scalar Scalar; typedef typename internal::traits<Map>::Coefficients Coefficients; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map) using Base::operator*=; /** Constructs a Mapped Quaternion object from the pointer \a coeffs * * The pointer \a coeffs must reference the four coefficients of Quaternion in the following order: * \code *coeffs == {x, y, z, w} \endcode * * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. 
*/ EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE Map(const Scalar* coeffs) : m_coeffs(coeffs) {} EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs;} protected: const Coefficients m_coeffs; }; /** \ingroup Geometry_Module * \brief Expression of a quaternion from a memory buffer * * \tparam _Scalar the type of the Quaternion coefficients * \tparam _Options see class Map * * This is a specialization of class Map for Quaternion. This class allows to view * a 4 scalar memory buffer as an Eigen's Quaternion object. * * \sa class Map, class Quaternion, class QuaternionBase */ template<typename _Scalar, int _Options> class Map<Quaternion<_Scalar>, _Options > : public QuaternionBase<Map<Quaternion<_Scalar>, _Options> > { public: typedef QuaternionBase<Map<Quaternion<_Scalar>, _Options> > Base; typedef _Scalar Scalar; typedef typename internal::traits<Map>::Coefficients Coefficients; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map) using Base::operator*=; /** Constructs a Mapped Quaternion object from the pointer \a coeffs * * The pointer \a coeffs must reference the four coefficients of Quaternion in the following order: * \code *coeffs == {x, y, z, w} \endcode * * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */ EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE Map(Scalar* coeffs) : m_coeffs(coeffs) {} EIGEN_DEVICE_FUNC inline Coefficients& coeffs() { return m_coeffs; } EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs; } protected: Coefficients m_coeffs; }; /** \ingroup Geometry_Module * Map an unaligned array of single precision scalars as a quaternion */ typedef Map<Quaternion<float>, 0> QuaternionMapf; /** \ingroup Geometry_Module * Map an unaligned array of double precision scalars as a quaternion */ typedef Map<Quaternion<double>, 0> QuaternionMapd; /** \ingroup Geometry_Module * Map a 16-byte aligned array of single precision scalars as a quaternion */ typedef Map<Quaternion<float>, Aligned> QuaternionMapAlignedf; /** \ingroup Geometry_Module * Map a 16-byte aligned array of double precision scalars as a quaternion */ typedef Map<Quaternion<double>, Aligned> QuaternionMapAlignedd; /*************************************************************************** * Implementation of QuaternionBase methods ***************************************************************************/ // Generic Quaternion * Quaternion product // This product can be specialized for a given architecture via the Arch template argument. 
namespace internal { template<int Arch, class Derived1, class Derived2, typename Scalar> struct quat_product { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Quaternion<Scalar> run(const QuaternionBase<Derived1>& a, const QuaternionBase<Derived2>& b){ return Quaternion<Scalar> ( a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(), a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(), a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(), a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x() ); } }; } /** \returns the concatenation of two rotations as a quaternion-quaternion product */ template <class Derived> template <class OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Quaternion<typename internal::traits<Derived>::Scalar> QuaternionBase<Derived>::operator* (const QuaternionBase<OtherDerived>& other) const { EIGEN_STATIC_ASSERT((internal::is_same<typename Derived::Scalar, typename OtherDerived::Scalar>::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) return internal::quat_product<Architecture::Target, Derived, OtherDerived, typename internal::traits<Derived>::Scalar>::run(*this, other); } /** \sa operator*(Quaternion) */ template <class Derived> template <class OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator*= (const QuaternionBase<OtherDerived>& other) { derived() = derived() * other.derived(); return derived(); } /** Rotation of a vector by a quaternion. * \remarks If the quaternion is used to rotate several points (>1) * then it is much more efficient to first convert it to a 3x3 Matrix. * Comparison of the operation cost for n transformations: * - Quaternion2: 30n * - Via a Matrix3: 24 + 15n */ template <class Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename QuaternionBase<Derived>::Vector3 QuaternionBase<Derived>::_transformVector(const Vector3& v) const { // Note that this algorithm comes from the optimization by hand // of the conversion to a Matrix followed by a Matrix/Vector product. // It appears to be much faster than the common algorithm found // in the literature (30 versus 39 flops). It also requires two // Vector3 as temporaries. 
Vector3 uv = this->vec().cross(v); uv += uv; return v + this->w() * uv + this->vec().cross(uv); } template<class Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE QuaternionBase<Derived>& QuaternionBase<Derived>::operator=(const QuaternionBase<Derived>& other) { coeffs() = other.coeffs(); return derived(); } template<class Derived> template<class OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const QuaternionBase<OtherDerived>& other) { coeffs() = other.coeffs(); return derived(); } /** Set \c *this from an angle-axis \a aa and returns a reference to \c *this */ template<class Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const AngleAxisType& aa) { EIGEN_USING_STD_MATH(cos) EIGEN_USING_STD_MATH(sin) Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings this->w() = cos(ha); this->vec() = sin(ha) * aa.axis(); return derived(); } /** Set \c *this from the expression \a xpr: * - if \a xpr is a 4x1 vector, then \a xpr is assumed to be a quaternion * - if \a xpr is a 3x3 matrix, then \a xpr is assumed to be rotation matrix * and \a xpr is converted to a quaternion */ template<class Derived> template<class MatrixDerived> EIGEN_DEVICE_FUNC inline Derived& QuaternionBase<Derived>::operator=(const MatrixBase<MatrixDerived>& xpr) { EIGEN_STATIC_ASSERT((internal::is_same<typename Derived::Scalar, typename MatrixDerived::Scalar>::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) internal::quaternionbase_assign_impl<MatrixDerived>::run(*this, xpr.derived()); return derived(); } /** Convert the quaternion to a 3x3 rotation matrix. The quaternion is required to * be normalized, otherwise the result is undefined. */ template<class Derived> EIGEN_DEVICE_FUNC inline typename QuaternionBase<Derived>::Matrix3 QuaternionBase<Derived>::toRotationMatrix(void) const { // NOTE if inlined, then gcc 4.2 and 4.4 get rid of the temporary (not gcc 4.3 !!) // if not inlined then the cost of the return by value is huge ~ +35%, // however, not inlining this function is an order of magnitude slower, so // it has to be inlined, and so the return by value is not an issue Matrix3 res; const Scalar tx = Scalar(2)*this->x(); const Scalar ty = Scalar(2)*this->y(); const Scalar tz = Scalar(2)*this->z(); const Scalar twx = tx*this->w(); const Scalar twy = ty*this->w(); const Scalar twz = tz*this->w(); const Scalar txx = tx*this->x(); const Scalar txy = ty*this->x(); const Scalar txz = tz*this->x(); const Scalar tyy = ty*this->y(); const Scalar tyz = tz*this->y(); const Scalar tzz = tz*this->z(); res.coeffRef(0,0) = Scalar(1)-(tyy+tzz); res.coeffRef(0,1) = txy-twz; res.coeffRef(0,2) = txz+twy; res.coeffRef(1,0) = txy+twz; res.coeffRef(1,1) = Scalar(1)-(txx+tzz); res.coeffRef(1,2) = tyz-twx; res.coeffRef(2,0) = txz-twy; res.coeffRef(2,1) = tyz+twx; res.coeffRef(2,2) = Scalar(1)-(txx+tyy); return res; } /** Sets \c *this to be a quaternion representing a rotation between * the two arbitrary vectors \a a and \a b. In other words, the built * rotation represent a rotation sending the line of direction \a a * to the line of direction \a b, both lines passing through the origin. * * \returns a reference to \c *this. * * Note that the two input vectors do \b not have to be normalized, and * do not need to have the same norm. 
*/ template<class Derived> template<typename Derived1, typename Derived2> EIGEN_DEVICE_FUNC inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b) { EIGEN_USING_STD_MATH(sqrt) Vector3 v0 = a.normalized(); Vector3 v1 = b.normalized(); Scalar c = v1.dot(v0); // if dot == -1, vectors are nearly opposites // => accurately compute the rotation axis by computing the // intersection of the two planes. This is done by solving: // x^T v0 = 0 // x^T v1 = 0 // under the constraint: // ||x|| = 1 // which yields a singular value problem if (c < Scalar(-1)+NumTraits<Scalar>::dummy_precision()) { c = numext::maxi(c,Scalar(-1)); Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose(); JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV); Vector3 axis = svd.matrixV().col(2); Scalar w2 = (Scalar(1)+c)*Scalar(0.5); this->w() = sqrt(w2); this->vec() = axis * sqrt(Scalar(1) - w2); return derived(); } Vector3 axis = v0.cross(v1); Scalar s = sqrt((Scalar(1)+c)*Scalar(2)); Scalar invs = Scalar(1)/s; this->vec() = axis * invs; this->w() = s * Scalar(0.5); return derived(); } /** \returns a random unit quaternion following a uniform distribution law on SO(3) * * \note The implementation is based on http://planning.cs.uiuc.edu/node198.html */ template<typename Scalar, int Options> EIGEN_DEVICE_FUNC Quaternion<Scalar,Options> Quaternion<Scalar,Options>::UnitRandom() { EIGEN_USING_STD_MATH(sqrt) EIGEN_USING_STD_MATH(sin) EIGEN_USING_STD_MATH(cos) const Scalar u1 = internal::random<Scalar>(0, 1), u2 = internal::random<Scalar>(0, 2*EIGEN_PI), u3 = internal::random<Scalar>(0, 2*EIGEN_PI); const Scalar a = sqrt(1 - u1), b = sqrt(u1); return Quaternion (a * sin(u2), a * cos(u2), b * sin(u3), b * cos(u3)); } /** Returns a quaternion representing a rotation between * the two arbitrary vectors \a a and \a b. In other words, the built * rotation represent a rotation sending the line of direction \a a * to the line of direction \a b, both lines passing through the origin. * * \returns resulting quaternion * * Note that the two input vectors do \b not have to be normalized, and * do not need to have the same norm. */ template<typename Scalar, int Options> template<typename Derived1, typename Derived2> EIGEN_DEVICE_FUNC Quaternion<Scalar,Options> Quaternion<Scalar,Options>::FromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b) { Quaternion quat; quat.setFromTwoVectors(a, b); return quat; } /** \returns the multiplicative inverse of \c *this * Note that in most cases, i.e., if you simply want the opposite rotation, * and/or the quaternion is normalized, then it is enough to use the conjugate. * * \sa QuaternionBase::conjugate() */ template <class Derived> EIGEN_DEVICE_FUNC inline Quaternion<typename internal::traits<Derived>::Scalar> QuaternionBase<Derived>::inverse() const { // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite() ?? 
Scalar n2 = this->squaredNorm(); if (n2 > Scalar(0)) return Quaternion<Scalar>(conjugate().coeffs() / n2); else { // return an invalid result to flag the error return Quaternion<Scalar>(Coefficients::Zero()); } } // Generic conjugate of a Quaternion namespace internal { template<int Arch, class Derived, typename Scalar> struct quat_conj { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Quaternion<Scalar> run(const QuaternionBase<Derived>& q){ return Quaternion<Scalar>(q.w(),-q.x(),-q.y(),-q.z()); } }; } /** \returns the conjugate of the \c *this which is equal to the multiplicative inverse * if the quaternion is normalized. * The conjugate of a quaternion represents the opposite rotation. * * \sa Quaternion2::inverse() */ template <class Derived> EIGEN_DEVICE_FUNC inline Quaternion<typename internal::traits<Derived>::Scalar> QuaternionBase<Derived>::conjugate() const { return internal::quat_conj<Architecture::Target, Derived, typename internal::traits<Derived>::Scalar>::run(*this); } /** \returns the angle (in radian) between two rotations * \sa dot() */ template <class Derived> template <class OtherDerived> EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const { EIGEN_USING_STD_MATH(atan2) Quaternion<Scalar> d = (*this) * other.conjugate(); return Scalar(2) * atan2( d.vec().norm(), numext::abs(d.w()) ); } /** \returns the spherical linear interpolation between the two quaternions * \c *this and \a other at the parameter \a t in [0;1]. * * This represents an interpolation for a constant motion between \c *this and \a other, * see also http://en.wikipedia.org/wiki/Slerp. */ template <class Derived> template <class OtherDerived> EIGEN_DEVICE_FUNC Quaternion<typename internal::traits<Derived>::Scalar> QuaternionBase<Derived>::slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const { EIGEN_USING_STD_MATH(acos) EIGEN_USING_STD_MATH(sin) const Scalar one = Scalar(1) - NumTraits<Scalar>::epsilon(); Scalar d = this->dot(other); Scalar absD = numext::abs(d); Scalar scale0; Scalar scale1; if(absD>=one) { scale0 = Scalar(1) - t; scale1 = t; } else { // theta is the angle between the 2 quaternions Scalar theta = acos(absD); Scalar sinTheta = sin(theta); scale0 = sin( ( Scalar(1) - t ) * theta) / sinTheta; scale1 = sin( ( t * theta) ) / sinTheta; } if(d<Scalar(0)) scale1 = -scale1; return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs()); } namespace internal { // set from a rotation matrix template<typename Other> struct quaternionbase_assign_impl<Other,3,3> { typedef typename Other::Scalar Scalar; template<class Derived> EIGEN_DEVICE_FUNC static inline void run(QuaternionBase<Derived>& q, const Other& a_mat) { const typename internal::nested_eval<Other,2>::type mat(a_mat); EIGEN_USING_STD_MATH(sqrt) // This algorithm comes from "Quaternion Calculus and Fast Animation", // Ken Shoemake, 1987 SIGGRAPH course notes Scalar t = mat.trace(); if (t > Scalar(0)) { t = sqrt(t + Scalar(1.0)); q.w() = Scalar(0.5)*t; t = Scalar(0.5)/t; q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t; q.y() = (mat.coeff(0,2) - mat.coeff(2,0)) * t; q.z() = (mat.coeff(1,0) - mat.coeff(0,1)) * t; } else { Index i = 0; if (mat.coeff(1,1) > mat.coeff(0,0)) i = 1; if (mat.coeff(2,2) > mat.coeff(i,i)) i = 2; Index j = (i+1)%3; Index k = (j+1)%3; t = sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0)); q.coeffs().coeffRef(i) = Scalar(0.5) * t; t = Scalar(0.5)/t; q.w() = 
(mat.coeff(k,j)-mat.coeff(j,k))*t; q.coeffs().coeffRef(j) = (mat.coeff(j,i)+mat.coeff(i,j))*t; q.coeffs().coeffRef(k) = (mat.coeff(k,i)+mat.coeff(i,k))*t; } } }; // set from a vector of coefficients assumed to be a quaternion template<typename Other> struct quaternionbase_assign_impl<Other,4,1> { typedef typename Other::Scalar Scalar; template<class Derived> EIGEN_DEVICE_FUNC static inline void run(QuaternionBase<Derived>& q, const Other& vec) { q.coeffs() = vec; } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_QUATERNION_H
Unknown
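Quaternion.h above defines the full quaternion API: construction from an angle-axis or from two vectors, composition, vector rotation, slerp and conversion to a rotation matrix. The snippet below strings those calls together; the chosen vectors, the interpolation parameter and the expected values in the comments are illustrative assumptions.

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using Eigen::Quaterniond;
  using Eigen::Vector3d;

  // Unit quaternion sending UnitX onto UnitY (setFromTwoVectors path),
  // i.e. a 90 degree rotation about z.
  Quaterniond q = Quaterniond::FromTwoVectors(Vector3d::UnitX(), Vector3d::UnitY());

  // Rotating a single vector uses the optimized _transformVector().
  Vector3d r = q * Vector3d::UnitX();                 // ~ (0, 1, 0)

  // For many points, converting to a matrix first is cheaper
  // (see the flop-count remark above).
  Eigen::Matrix3d R = q.toRotationMatrix();

  // Spherical interpolation halfway between the identity and q.
  Quaterniond h = Quaterniond::Identity().slerp(0.5, q);

  std::cout << r.transpose() << "\n"
            << (R * Vector3d::UnitX() - r).norm() << "\n"   // ~0
            << h.angularDistance(q) << std::endl;           // ~pi/4
  return 0;
}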
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Geometry/Scaling.h
.h
6,331
171
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SCALING_H #define EIGEN_SCALING_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class UniformScaling * * \brief Represents a generic uniform scaling transformation * * \tparam _Scalar the scalar type, i.e., the type of the coefficients. * * This class represent a uniform scaling transformation. It is the return * type of Scaling(Scalar), and most of the time this is the only way it * is used. In particular, this class is not aimed to be used to store a scaling transformation, * but rather to make easier the constructions and updates of Transform objects. * * To represent an axis aligned scaling, use the DiagonalMatrix class. * * \sa Scaling(), class DiagonalMatrix, MatrixBase::asDiagonal(), class Translation, class Transform */ template<typename _Scalar> class UniformScaling { public: /** the scalar type of the coefficients */ typedef _Scalar Scalar; protected: Scalar m_factor; public: /** Default constructor without initialization. */ UniformScaling() {} /** Constructs and initialize a uniform scaling transformation */ explicit inline UniformScaling(const Scalar& s) : m_factor(s) {} inline const Scalar& factor() const { return m_factor; } inline Scalar& factor() { return m_factor; } /** Concatenates two uniform scaling */ inline UniformScaling operator* (const UniformScaling& other) const { return UniformScaling(m_factor * other.factor()); } /** Concatenates a uniform scaling and a translation */ template<int Dim> inline Transform<Scalar,Dim,Affine> operator* (const Translation<Scalar,Dim>& t) const; /** Concatenates a uniform scaling and an affine transformation */ template<int Dim, int Mode, int Options> inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> operator* (const Transform<Scalar,Dim, Mode, Options>& t) const { Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> res = t; res.prescale(factor()); return res; } /** Concatenates a uniform scaling and a linear transformation matrix */ // TODO returns an expression template<typename Derived> inline typename internal::plain_matrix_type<Derived>::type operator* (const MatrixBase<Derived>& other) const { return other * m_factor; } template<typename Derived,int Dim> inline Matrix<Scalar,Dim,Dim> operator*(const RotationBase<Derived,Dim>& r) const { return r.toRotationMatrix() * m_factor; } /** \returns the inverse scaling */ inline UniformScaling inverse() const { return UniformScaling(Scalar(1)/m_factor); } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template<typename NewScalarType> inline UniformScaling<NewScalarType> cast() const { return UniformScaling<NewScalarType>(NewScalarType(m_factor)); } /** Copy constructor with scalar type conversion */ template<typename OtherScalarType> inline explicit UniformScaling(const UniformScaling<OtherScalarType>& other) { m_factor = Scalar(other.factor()); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. 
* * \sa MatrixBase::isApprox() */ bool isApprox(const UniformScaling& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const { return internal::isApprox(m_factor, other.factor(), prec); } }; /** \addtogroup Geometry_Module */ //@{ /** Concatenates a linear transformation matrix and a uniform scaling * \relates UniformScaling */ // NOTE this operator is defiend in MatrixBase and not as a friend function // of UniformScaling to fix an internal crash of Intel's ICC template<typename Derived,typename Scalar> EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,Scalar,product) operator*(const MatrixBase<Derived>& matrix, const UniformScaling<Scalar>& s) { return matrix.derived() * s.factor(); } /** Constructs a uniform scaling from scale factor \a s */ inline UniformScaling<float> Scaling(float s) { return UniformScaling<float>(s); } /** Constructs a uniform scaling from scale factor \a s */ inline UniformScaling<double> Scaling(double s) { return UniformScaling<double>(s); } /** Constructs a uniform scaling from scale factor \a s */ template<typename RealScalar> inline UniformScaling<std::complex<RealScalar> > Scaling(const std::complex<RealScalar>& s) { return UniformScaling<std::complex<RealScalar> >(s); } /** Constructs a 2D axis aligned scaling */ template<typename Scalar> inline DiagonalMatrix<Scalar,2> Scaling(const Scalar& sx, const Scalar& sy) { return DiagonalMatrix<Scalar,2>(sx, sy); } /** Constructs a 3D axis aligned scaling */ template<typename Scalar> inline DiagonalMatrix<Scalar,3> Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz) { return DiagonalMatrix<Scalar,3>(sx, sy, sz); } /** Constructs an axis aligned scaling expression from vector expression \a coeffs * This is an alias for coeffs.asDiagonal() */ template<typename Derived> inline const DiagonalWrapper<const Derived> Scaling(const MatrixBase<Derived>& coeffs) { return coeffs.asDiagonal(); } /** \deprecated */ typedef DiagonalMatrix<float, 2> AlignedScaling2f; /** \deprecated */ typedef DiagonalMatrix<double,2> AlignedScaling2d; /** \deprecated */ typedef DiagonalMatrix<float, 3> AlignedScaling3f; /** \deprecated */ typedef DiagonalMatrix<double,3> AlignedScaling3d; //@} template<typename Scalar> template<int Dim> inline Transform<Scalar,Dim,Affine> UniformScaling<Scalar>::operator* (const Translation<Scalar,Dim>& t) const { Transform<Scalar,Dim,Affine> res; res.matrix().setZero(); res.linear().diagonal().fill(factor()); res.translation() = factor() * t.vector(); res(Dim,Dim) = Scalar(1); return res; } } // end namespace Eigen #endif // EIGEN_SCALING_H
Unknown
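Scaling.h above provides UniformScaling together with the Scaling() helper functions for uniform, 2D and 3D axis-aligned scalings. A short sketch of how they compose with translations and act on vectors follows; the factors, the sample points and the expected results in the comments are illustrative assumptions derived from the operator* definitions in this header.

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  // Uniform scaling by 2 and a 2D axis-aligned scaling with factors 3 and 4.
  UniformScaling<double> s = Scaling(2.0);
  DiagonalMatrix<double, 2> d = Scaling(3.0, 4.0);

  // UniformScaling * Translation yields an Affine transform (operator* above),
  // which maps x to factor*x + factor*t.
  Affine3d T = s * Translation3d(1.0, 0.0, 0.0);
  Vector3d p = T * Vector3d(1.0, 1.0, 1.0);      // expected: (4, 2, 2)

  // Axis-aligned scaling is just a diagonal matrix product.
  Vector2d w = d * Vector2d(1.0, 1.0);           // expected: (3, 4)

  std::cout << p.transpose() << "\n" << w.transpose() << std::endl;
  return 0;
}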
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Geometry/ParametrizedLine.h
.h
8,308
196
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PARAMETRIZEDLINE_H #define EIGEN_PARAMETRIZEDLINE_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class ParametrizedLine * * \brief A parametrized line * * A parametrized line is defined by an origin point \f$ \mathbf{o} \f$ and a unit * direction vector \f$ \mathbf{d} \f$ such that the line corresponds to * the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ t \in \mathbf{R} \f$. * * \tparam _Scalar the scalar type, i.e., the type of the coefficients * \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. */ template <typename _Scalar, int _AmbientDim, int _Options> class ParametrizedLine { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) enum { AmbientDimAtCompileTime = _AmbientDim, Options = _Options }; typedef _Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef Matrix<Scalar,AmbientDimAtCompileTime,1,Options> VectorType; /** Default constructor without initialization */ EIGEN_DEVICE_FUNC inline ParametrizedLine() {} template<int OtherOptions> EIGEN_DEVICE_FUNC ParametrizedLine(const ParametrizedLine<Scalar,AmbientDimAtCompileTime,OtherOptions>& other) : m_origin(other.origin()), m_direction(other.direction()) {} /** Constructs a dynamic-size line with \a _dim the dimension * of the ambient space */ EIGEN_DEVICE_FUNC inline explicit ParametrizedLine(Index _dim) : m_origin(_dim), m_direction(_dim) {} /** Initializes a parametrized line of direction \a direction and origin \a origin. * \warning the vector direction is assumed to be normalized. */ EIGEN_DEVICE_FUNC ParametrizedLine(const VectorType& origin, const VectorType& direction) : m_origin(origin), m_direction(direction) {} template <int OtherOptions> EIGEN_DEVICE_FUNC explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane); /** Constructs a parametrized line going from \a p0 to \a p1. */ EIGEN_DEVICE_FUNC static inline ParametrizedLine Through(const VectorType& p0, const VectorType& p1) { return ParametrizedLine(p0, (p1-p0).normalized()); } EIGEN_DEVICE_FUNC ~ParametrizedLine() {} /** \returns the dimension in which the line holds */ EIGEN_DEVICE_FUNC inline Index dim() const { return m_direction.size(); } EIGEN_DEVICE_FUNC const VectorType& origin() const { return m_origin; } EIGEN_DEVICE_FUNC VectorType& origin() { return m_origin; } EIGEN_DEVICE_FUNC const VectorType& direction() const { return m_direction; } EIGEN_DEVICE_FUNC VectorType& direction() { return m_direction; } /** \returns the squared distance of a point \a p to its projection onto the line \c *this. * \sa distance() */ EIGEN_DEVICE_FUNC RealScalar squaredDistance(const VectorType& p) const { VectorType diff = p - origin(); return (diff - direction().dot(diff) * direction()).squaredNorm(); } /** \returns the distance of a point \a p to its projection onto the line \c *this. 
* \sa squaredDistance() */ EIGEN_DEVICE_FUNC RealScalar distance(const VectorType& p) const { EIGEN_USING_STD_MATH(sqrt) return sqrt(squaredDistance(p)); } /** \returns the projection of a point \a p onto the line \c *this. */ EIGEN_DEVICE_FUNC VectorType projection(const VectorType& p) const { return origin() + direction().dot(p-origin()) * direction(); } EIGEN_DEVICE_FUNC VectorType pointAt(const Scalar& t) const; template <int OtherOptions> EIGEN_DEVICE_FUNC Scalar intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const; template <int OtherOptions> EIGEN_DEVICE_FUNC Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const; template <int OtherOptions> EIGEN_DEVICE_FUNC VectorType intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const; /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template<typename NewScalarType> EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<ParametrizedLine, ParametrizedLine<NewScalarType,AmbientDimAtCompileTime,Options> >::type cast() const { return typename internal::cast_return_type<ParametrizedLine, ParametrizedLine<NewScalarType,AmbientDimAtCompileTime,Options> >::type(*this); } /** Copy constructor with scalar type conversion */ template<typename OtherScalarType,int OtherOptions> EIGEN_DEVICE_FUNC inline explicit ParametrizedLine(const ParametrizedLine<OtherScalarType,AmbientDimAtCompileTime,OtherOptions>& other) { m_origin = other.origin().template cast<Scalar>(); m_direction = other.direction().template cast<Scalar>(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. 
* * \sa MatrixBase::isApprox() */ EIGEN_DEVICE_FUNC bool isApprox(const ParametrizedLine& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const { return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); } protected: VectorType m_origin, m_direction; }; /** Constructs a parametrized line from a 2D hyperplane * * \warning the ambient space must have dimension 2 such that the hyperplane actually describes a line */ template <typename _Scalar, int _AmbientDim, int _Options> template <int OtherOptions> EIGEN_DEVICE_FUNC inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim,OtherOptions>& hyperplane) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2) direction() = hyperplane.normal().unitOrthogonal(); origin() = -hyperplane.normal()*hyperplane.offset(); } /** \returns the point at \a t along this line */ template <typename _Scalar, int _AmbientDim, int _Options> EIGEN_DEVICE_FUNC inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType ParametrizedLine<_Scalar, _AmbientDim,_Options>::pointAt(const _Scalar& t) const { return origin() + (direction()*t); } /** \returns the parameter value of the intersection between \c *this and the given \a hyperplane */ template <typename _Scalar, int _AmbientDim, int _Options> template <int OtherOptions> EIGEN_DEVICE_FUNC inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const { return -(hyperplane.offset()+hyperplane.normal().dot(origin())) / hyperplane.normal().dot(direction()); } /** \deprecated use intersectionParameter() * \returns the parameter value of the intersection between \c *this and the given \a hyperplane */ template <typename _Scalar, int _AmbientDim, int _Options> template <int OtherOptions> EIGEN_DEVICE_FUNC inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const { return intersectionParameter(hyperplane); } /** \returns the point of the intersection between \c *this and the given hyperplane */ template <typename _Scalar, int _AmbientDim, int _Options> template <int OtherOptions> EIGEN_DEVICE_FUNC inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const { return pointAt(intersectionParameter(hyperplane)); } } // end namespace Eigen #endif // EIGEN_PARAMETRIZEDLINE_H
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Geometry/Umeyama.h
.h
6,190
167
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Hauke Heibel <hauke.heibel@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_UMEYAMA_H #define EIGEN_UMEYAMA_H // This file requires the user to include // * Eigen/Core // * Eigen/LU // * Eigen/SVD // * Eigen/Array namespace Eigen { #ifndef EIGEN_PARSED_BY_DOXYGEN // These helpers are required since they allow mixed types to be used as parameters // for the Umeyama. The problem with mixed parameters is that the return type // cannot trivially be deduced when float and double types are mixed. namespace internal { // Compile time return type deduction for different MatrixBase types. // Different here means different alignment and parameters but the same underlying // real scalar type. template<typename MatrixType, typename OtherMatrixType> struct umeyama_transform_matrix_type { enum { MinRowsAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime), // When possible we want to choose some small fixed size value since the result // is likely to fit on the stack. So here, EIGEN_SIZE_MIN_PREFER_DYNAMIC is not what we want. HomogeneousDimension = int(MinRowsAtCompileTime) == Dynamic ? Dynamic : int(MinRowsAtCompileTime)+1 }; typedef Matrix<typename traits<MatrixType>::Scalar, HomogeneousDimension, HomogeneousDimension, AutoAlign | (traits<MatrixType>::Flags & RowMajorBit ? RowMajor : ColMajor), HomogeneousDimension, HomogeneousDimension > type; }; } #endif /** * \geometry_module \ingroup Geometry_Module * * \brief Returns the transformation between two point sets. * * The algorithm is based on: * "Least-squares estimation of transformation parameters between two point patterns", * Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573 * * It estimates parameters \f$ c, \mathbf{R}, \f$ and \f$ \mathbf{t} \f$ such that * \f{align*} * \frac{1}{n} \sum_{i=1}^n \vert\vert y_i - (c\mathbf{R}x_i + \mathbf{t}) \vert\vert_2^2 * \f} * is minimized. * * The algorithm is based on the analysis of the covariance matrix * \f$ \Sigma_{\mathbf{x}\mathbf{y}} \in \mathbb{R}^{d \times d} \f$ * of the input point sets \f$ \mathbf{x} \f$ and \f$ \mathbf{y} \f$ where * \f$d\f$ corresponds to the dimension (which is typically small). * The analysis involves an SVD with a complexity of \f$O(d^3)\f$, * though the actual computational effort lies in the covariance * matrix computation, which has an asymptotic lower bound of \f$O(dm)\f$ when * the input point sets have dimension \f$d \times m\f$. * * Currently the method works only for floating point matrices. * * \todo Should the return type of umeyama() become a Transform? * * \param src Source points \f$ \mathbf{x} = \left( x_1, \hdots, x_n \right) \f$. * \param dst Destination points \f$ \mathbf{y} = \left( y_1, \hdots, y_n \right) \f$. * \param with_scaling Sets \f$ c=1 \f$ when <code>false</code> is passed. * \return The homogeneous transformation * \f{align*} * T = \begin{bmatrix} c\mathbf{R} & \mathbf{t} \\ \mathbf{0} & 1 \end{bmatrix} * \f} * minimizing the residual above. This transformation is always returned as an * Eigen::Matrix.
*/ template <typename Derived, typename OtherDerived> typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, bool with_scaling = true) { typedef typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type TransformationMatrixType; typedef typename internal::traits<TransformationMatrixType>::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL) EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename internal::traits<OtherDerived>::Scalar>::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) enum { Dimension = EIGEN_SIZE_MIN_PREFER_DYNAMIC(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) }; typedef Matrix<Scalar, Dimension, 1> VectorType; typedef Matrix<Scalar, Dimension, Dimension> MatrixType; typedef typename internal::plain_matrix_type_row_major<Derived>::type RowMajorMatrixType; const Index m = src.rows(); // dimension const Index n = src.cols(); // number of measurements // required for demeaning ... const RealScalar one_over_n = RealScalar(1) / static_cast<RealScalar>(n); // computation of mean const VectorType src_mean = src.rowwise().sum() * one_over_n; const VectorType dst_mean = dst.rowwise().sum() * one_over_n; // demeaning of src and dst points const RowMajorMatrixType src_demean = src.colwise() - src_mean; const RowMajorMatrixType dst_demean = dst.colwise() - dst_mean; // Eq. (36)-(37) const Scalar src_var = src_demean.rowwise().squaredNorm().sum() * one_over_n; // Eq. (38) const MatrixType sigma = one_over_n * dst_demean * src_demean.transpose(); JacobiSVD<MatrixType> svd(sigma, ComputeFullU | ComputeFullV); // Initialize the resulting transformation with an identity matrix... TransformationMatrixType Rt = TransformationMatrixType::Identity(m+1,m+1); // Eq. (39) VectorType S = VectorType::Ones(m); if ( svd.matrixU().determinant() * svd.matrixV().determinant() < 0 ) S(m-1) = -1; // Eq. (40) and (43) Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose(); if (with_scaling) { // Eq. (42) const Scalar c = Scalar(1)/src_var * svd.singularValues().dot(S); // Eq. (41) Rt.col(m).head(m) = dst_mean; Rt.col(m).head(m).noalias() -= c*Rt.topLeftCorner(m,m)*src_mean; Rt.block(0,0,m,m) *= c; } else { Rt.col(m).head(m) = dst_mean; Rt.col(m).head(m).noalias() -= Rt.topLeftCorner(m,m)*src_mean; } return Rt; } } // end namespace Eigen #endif // EIGEN_UMEYAMA_H
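// A minimal usage sketch for umeyama() above: recover a known 3D similarity transform from
// noiseless point correspondences. The transform T is just an example input; with noise-free
// data the returned matrix should match T.matrix() up to numerical precision.
#include <Eigen/Dense>
#include <iostream>

inline void umeyama_example()
{
  using namespace Eigen;
  Matrix3Xd src = Matrix3Xd::Random(3, 20);                  // 20 source points, one per column
  Affine3d T = Translation3d(1.0, -2.0, 0.5)
             * AngleAxisd(0.3, Vector3d::UnitZ())
             * Scaling(1.7);                                 // y = c*R*x + t, with c = 1.7
  Matrix3Xd dst = T * src;                                   // corresponding destination points
  Matrix4d Rt = umeyama(src, dst, /*with_scaling=*/true);    // 4x4 homogeneous [cR t; 0 1]
  std::cout << "recovery error: " << (Rt - T.matrix()).norm() << "\n";
}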
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Geometry/Transform.h
.h
60,555
1,543
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_TRANSFORM_H #define EIGEN_TRANSFORM_H namespace Eigen { namespace internal { template<typename Transform> struct transform_traits { enum { Dim = Transform::Dim, HDim = Transform::HDim, Mode = Transform::Mode, IsProjective = (int(Mode)==int(Projective)) }; }; template< typename TransformType, typename MatrixType, int Case = transform_traits<TransformType>::IsProjective ? 0 : int(MatrixType::RowsAtCompileTime) == int(transform_traits<TransformType>::HDim) ? 1 : 2, int RhsCols = MatrixType::ColsAtCompileTime> struct transform_right_product_impl; template< typename Other, int Mode, int Options, int Dim, int HDim, int OtherRows=Other::RowsAtCompileTime, int OtherCols=Other::ColsAtCompileTime> struct transform_left_product_impl; template< typename Lhs, typename Rhs, bool AnyProjective = transform_traits<Lhs>::IsProjective || transform_traits<Rhs>::IsProjective> struct transform_transform_product_impl; template< typename Other, int Mode, int Options, int Dim, int HDim, int OtherRows=Other::RowsAtCompileTime, int OtherCols=Other::ColsAtCompileTime> struct transform_construct_from_matrix; template<typename TransformType> struct transform_take_affine_part; template<typename _Scalar, int _Dim, int _Mode, int _Options> struct traits<Transform<_Scalar,_Dim,_Mode,_Options> > { typedef _Scalar Scalar; typedef Eigen::Index StorageIndex; typedef Dense StorageKind; enum { Dim1 = _Dim==Dynamic ? _Dim : _Dim + 1, RowsAtCompileTime = _Mode==Projective ? Dim1 : _Dim, ColsAtCompileTime = Dim1, MaxRowsAtCompileTime = RowsAtCompileTime, MaxColsAtCompileTime = ColsAtCompileTime, Flags = 0 }; }; template<int Mode> struct transform_make_affine; } // end namespace internal /** \geometry_module \ingroup Geometry_Module * * \class Transform * * \brief Represents an homogeneous transformation in a N dimensional space * * \tparam _Scalar the scalar type, i.e., the type of the coefficients * \tparam _Dim the dimension of the space * \tparam _Mode the type of the transformation. Can be: * - #Affine: the transformation is stored as a (Dim+1)^2 matrix, * where the last row is assumed to be [0 ... 0 1]. * - #AffineCompact: the transformation is stored as a (Dim)x(Dim+1) matrix. * - #Projective: the transformation is stored as a (Dim+1)^2 matrix * without any assumption. * \tparam _Options has the same meaning as in class Matrix. It allows to specify DontAlign and/or RowMajor. * These Options are passed directly to the underlying matrix type. * * The homography is internally represented and stored by a matrix which * is available through the matrix() method. To understand the behavior of * this class you have to think a Transform object as its internal * matrix representation. The chosen convention is right multiply: * * \code v' = T * v \endcode * * Therefore, an affine transformation matrix M is shaped like this: * * \f$ \left( \begin{array}{cc} * linear & translation\\ * 0 ... 
0 & 1 * \end{array} \right) \f$ * * Note that for a projective transformation the last row can be anything, * and then the interpretation of different parts might be slightly different. * * However, unlike a plain matrix, the Transform class provides many features * simplifying both its assembly and usage. In particular, it can be composed * with any other transformations (Transform,Translation,RotationBase,DiagonalMatrix) * and can be directly used to transform implicit homogeneous vectors. All these * operations are handled via the operator*. For the composition of transformations, * the principle is to first convert the right/left hand sides of the product * to a compatible (Dim+1)^2 matrix and then perform a pure matrix product. * Of course, internally, operator* tries to perform the minimal number of operations * according to the nature of each term. Likewise, when applying the transform * to points, the latter are automatically promoted to homogeneous vectors * before doing the matrix product. The conversions to homogeneous representations * are performed as follows: * * \b Translation t (Dim)x(1): * \f$ \left( \begin{array}{cc} * I & t \\ * 0\,...\,0 & 1 * \end{array} \right) \f$ * * \b Rotation R (Dim)x(Dim): * \f$ \left( \begin{array}{cc} * R & 0\\ * 0\,...\,0 & 1 * \end{array} \right) \f$ *<!-- * \b Linear \b Matrix L (Dim)x(Dim): * \f$ \left( \begin{array}{cc} * L & 0\\ * 0\,...\,0 & 1 * \end{array} \right) \f$ * * \b Affine \b Matrix A (Dim)x(Dim+1): * \f$ \left( \begin{array}{c} * A\\ * 0\,...\,0\,1 * \end{array} \right) \f$ *--> * \b Scaling \b DiagonalMatrix S (Dim)x(Dim): * \f$ \left( \begin{array}{cc} * S & 0\\ * 0\,...\,0 & 1 * \end{array} \right) \f$ * * \b Column \b point v (Dim)x(1): * \f$ \left( \begin{array}{c} * v\\ * 1 * \end{array} \right) \f$ * * \b Set \b of \b column \b points V1...Vn (Dim)x(n): * \f$ \left( \begin{array}{ccc} * v_1 & ... & v_n\\ * 1 & ... & 1 * \end{array} \right) \f$ * * The concatenation of a Transform object with any kind of other transformation * always returns a Transform object. * * A little exception to the "as pure matrix product" rule is the case of the * transformation of non homogeneous vectors by an affine transformation. In * that case the last matrix row can be ignored, and the product returns non * homogeneous vectors. * * Since, for instance, a Dim x Dim matrix is interpreted as a linear transformation, * it is not possible to directly transform Dim vectors stored in a Dim x Dim matrix. * The solution is either to use a Dim x Dynamic matrix or explicitly request a * vector transformation by making the vector homogeneous: * \code * m' = T * m.colwise().homogeneous(); * \endcode * Note that there is zero overhead. * * Conversion methods from/to Qt's QMatrix and QTransform are available if the * preprocessor token EIGEN_QT_SUPPORT is defined. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_TRANSFORM_PLUGIN. * * \sa class Matrix, class Quaternion */ template<typename _Scalar, int _Dim, int _Mode, int _Options> class Transform { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1)) enum { Mode = _Mode, Options = _Options, Dim = _Dim, ///< space dimension in which the transformation holds HDim = _Dim+1, ///< size of a respective homogeneous vector Rows = int(Mode)==(AffineCompact) ?
Dim : HDim }; /** the scalar type of the coefficients */ typedef _Scalar Scalar; typedef Eigen::Index StorageIndex; typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 /** type of the matrix used to represent the transformation */ typedef typename internal::make_proper_matrix_type<Scalar,Rows,HDim,Options>::type MatrixType; /** constified MatrixType */ typedef const MatrixType ConstMatrixType; /** type of the matrix used to represent the linear part of the transformation */ typedef Matrix<Scalar,Dim,Dim,Options> LinearMatrixType; /** type of read/write reference to the linear part of the transformation */ typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> LinearPart; /** type of read reference to the linear part of the transformation */ typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> ConstLinearPart; /** type of read/write reference to the affine part of the transformation */ typedef typename internal::conditional<int(Mode)==int(AffineCompact), MatrixType&, Block<MatrixType,Dim,HDim> >::type AffinePart; /** type of read reference to the affine part of the transformation */ typedef typename internal::conditional<int(Mode)==int(AffineCompact), const MatrixType&, const Block<const MatrixType,Dim,HDim> >::type ConstAffinePart; /** type of a vector */ typedef Matrix<Scalar,Dim,1> VectorType; /** type of a read/write reference to the translation part of the rotation */ typedef Block<MatrixType,Dim,1,!(internal::traits<MatrixType>::Flags & RowMajorBit)> TranslationPart; /** type of a read reference to the translation part of the rotation */ typedef const Block<ConstMatrixType,Dim,1,!(internal::traits<MatrixType>::Flags & RowMajorBit)> ConstTranslationPart; /** corresponding translation type */ typedef Translation<Scalar,Dim> TranslationType; // this intermediate enum is needed to avoid an ICE with gcc 3.4 and 4.0 enum { TransformTimeDiagonalMode = ((Mode==int(Isometry))?Affine:int(Mode)) }; /** The return type of the product between a diagonal matrix and a transform */ typedef Transform<Scalar,Dim,TransformTimeDiagonalMode> TransformTimeDiagonalReturnType; protected: MatrixType m_matrix; public: /** Default constructor without initialization of the meaningful coefficients. * If Mode==Affine or Mode==Isometry, then the last row is set to [0 ... 0 1] */ EIGEN_DEVICE_FUNC inline Transform() { check_template_params(); internal::transform_make_affine<(int(Mode)==Affine || int(Mode)==Isometry) ? Affine : AffineCompact>::run(m_matrix); } EIGEN_DEVICE_FUNC inline Transform(const Transform& other) { check_template_params(); m_matrix = other.m_matrix; } EIGEN_DEVICE_FUNC inline explicit Transform(const TranslationType& t) { check_template_params(); *this = t; } EIGEN_DEVICE_FUNC inline explicit Transform(const UniformScaling<Scalar>& s) { check_template_params(); *this = s; } template<typename Derived> EIGEN_DEVICE_FUNC inline explicit Transform(const RotationBase<Derived, Dim>& r) { check_template_params(); *this = r; } EIGEN_DEVICE_FUNC inline Transform& operator=(const Transform& other) { m_matrix = other.m_matrix; return *this; } typedef internal::transform_take_affine_part<Transform> take_affine_part; /** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. 
*/ template<typename OtherDerived> EIGEN_DEVICE_FUNC inline explicit Transform(const EigenBase<OtherDerived>& other) { EIGEN_STATIC_ASSERT((internal::is_same<Scalar,typename OtherDerived::Scalar>::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY); check_template_params(); internal::transform_construct_from_matrix<OtherDerived,Mode,Options,Dim,HDim>::run(this, other.derived()); } /** Set \c *this from a Dim^2 or (Dim+1)^2 matrix. */ template<typename OtherDerived> EIGEN_DEVICE_FUNC inline Transform& operator=(const EigenBase<OtherDerived>& other) { EIGEN_STATIC_ASSERT((internal::is_same<Scalar,typename OtherDerived::Scalar>::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY); internal::transform_construct_from_matrix<OtherDerived,Mode,Options,Dim,HDim>::run(this, other.derived()); return *this; } template<int OtherOptions> EIGEN_DEVICE_FUNC inline Transform(const Transform<Scalar,Dim,Mode,OtherOptions>& other) { check_template_params(); // only the options change, we can directly copy the matrices m_matrix = other.matrix(); } template<int OtherMode,int OtherOptions> EIGEN_DEVICE_FUNC inline Transform(const Transform<Scalar,Dim,OtherMode,OtherOptions>& other) { check_template_params(); // prevent conversions as: // Affine | AffineCompact | Isometry = Projective EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Projective), Mode==int(Projective)), YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION) // prevent conversions as: // Isometry = Affine | AffineCompact EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Affine)||OtherMode==int(AffineCompact), Mode!=int(Isometry)), YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION) enum { ModeIsAffineCompact = Mode == int(AffineCompact), OtherModeIsAffineCompact = OtherMode == int(AffineCompact) }; if(ModeIsAffineCompact == OtherModeIsAffineCompact) { // We need the block expression because the code is compiled for all // combinations of transformations and will trigger a compile time error // if one tries to assign the matrices directly m_matrix.template block<Dim,Dim+1>(0,0) = other.matrix().template block<Dim,Dim+1>(0,0); makeAffine(); } else if(OtherModeIsAffineCompact) { typedef typename Transform<Scalar,Dim,OtherMode,OtherOptions>::MatrixType OtherMatrixType; internal::transform_construct_from_matrix<OtherMatrixType,Mode,Options,Dim,HDim>::run(this, other.matrix()); } else { // here we know that Mode == AffineCompact and OtherMode != AffineCompact. // if OtherMode were Projective, the static assert above would already have caught it. // So the only possibility is that OtherMode == Affine linear() = other.linear(); translation() = other.translation(); } } template<typename OtherDerived> EIGEN_DEVICE_FUNC Transform(const ReturnByValue<OtherDerived>& other) { check_template_params(); other.evalTo(*this); } template<typename OtherDerived> EIGEN_DEVICE_FUNC Transform& operator=(const ReturnByValue<OtherDerived>& other) { other.evalTo(*this); return *this; } #ifdef EIGEN_QT_SUPPORT inline Transform(const QMatrix& other); inline Transform& operator=(const QMatrix& other); inline QMatrix toQMatrix(void) const; inline Transform(const QTransform& other); inline Transform& operator=(const QTransform& other); inline QTransform toQTransform(void) const; #endif EIGEN_DEVICE_FUNC Index rows() const { return int(Mode)==int(Projective) ? 
m_matrix.cols() : (m_matrix.cols()-1); } EIGEN_DEVICE_FUNC Index cols() const { return m_matrix.cols(); } /** shortcut for m_matrix(row,col); * \sa MatrixBase::operator(Index,Index) const */ EIGEN_DEVICE_FUNC inline Scalar operator() (Index row, Index col) const { return m_matrix(row,col); } /** shortcut for m_matrix(row,col); * \sa MatrixBase::operator(Index,Index) */ EIGEN_DEVICE_FUNC inline Scalar& operator() (Index row, Index col) { return m_matrix(row,col); } /** \returns a read-only expression of the transformation matrix */ EIGEN_DEVICE_FUNC inline const MatrixType& matrix() const { return m_matrix; } /** \returns a writable expression of the transformation matrix */ EIGEN_DEVICE_FUNC inline MatrixType& matrix() { return m_matrix; } /** \returns a read-only expression of the linear part of the transformation */ EIGEN_DEVICE_FUNC inline ConstLinearPart linear() const { return ConstLinearPart(m_matrix,0,0); } /** \returns a writable expression of the linear part of the transformation */ EIGEN_DEVICE_FUNC inline LinearPart linear() { return LinearPart(m_matrix,0,0); } /** \returns a read-only expression of the Dim x HDim affine part of the transformation */ EIGEN_DEVICE_FUNC inline ConstAffinePart affine() const { return take_affine_part::run(m_matrix); } /** \returns a writable expression of the Dim x HDim affine part of the transformation */ EIGEN_DEVICE_FUNC inline AffinePart affine() { return take_affine_part::run(m_matrix); } /** \returns a read-only expression of the translation vector of the transformation */ EIGEN_DEVICE_FUNC inline ConstTranslationPart translation() const { return ConstTranslationPart(m_matrix,0,Dim); } /** \returns a writable expression of the translation vector of the transformation */ EIGEN_DEVICE_FUNC inline TranslationPart translation() { return TranslationPart(m_matrix,0,Dim); } /** \returns an expression of the product between the transform \c *this and a matrix expression \a other. * * The right-hand-side \a other can be either: * \li an homogeneous vector of size Dim+1, * \li a set of homogeneous vectors of size Dim+1 x N, * \li a transformation matrix of size Dim+1 x Dim+1. * * Moreover, if \c *this represents an affine transformation (i.e., Mode!=Projective), then \a other can also be: * \li a point of size Dim (computes: \code this->linear() * other + this->translation()\endcode), * \li a set of N points as a Dim x N matrix (computes: \code (this->linear() * other).colwise() + this->translation()\endcode), * * In all cases, the return type is a matrix or vector of same sizes as the right-hand-side \a other. * * If you want to interpret \a other as a linear or affine transformation, then first convert it to a Transform<> type, * or do your own cooking. 
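*
* For instance, a minimal sketch using the standard 2D typedefs (all names below are local to this example):
* \code
* Affine2d T = Translation2d(1.0, 2.0) * Rotation2Dd(0.3);
* Vector2d p(0.5, 0.5);
* Vector2d q = T * p;                        // a single point: linear() * p + translation()
* Matrix2Xd pts = Matrix2Xd::Random(2, 10);
* Matrix2Xd out = T * pts;                   // a set of 10 points, transformed column-wise
* \endcode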
* * Finally, if you want to apply Affine transformations to vectors, then explicitly apply the linear part only: * \code * Affine3f A; * Vector3f v1, v2; * v2 = A.linear() * v1; * \endcode * */ // note: this function is defined here because some compilers cannot find the respective declaration template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename internal::transform_right_product_impl<Transform, OtherDerived>::ResultType operator * (const EigenBase<OtherDerived> &other) const { return internal::transform_right_product_impl<Transform, OtherDerived>::run(*this,other.derived()); } /** \returns the product expression of a transformation matrix \a a times a transform \a b * * The left hand side \a other can be either: * \li a linear transformation matrix of size Dim x Dim, * \li an affine transformation matrix of size Dim x Dim+1, * \li a general transformation matrix of size Dim+1 x Dim+1. */ template<typename OtherDerived> friend EIGEN_DEVICE_FUNC inline const typename internal::transform_left_product_impl<OtherDerived,Mode,Options,_Dim,_Dim+1>::ResultType operator * (const EigenBase<OtherDerived> &a, const Transform &b) { return internal::transform_left_product_impl<OtherDerived,Mode,Options,Dim,HDim>::run(a.derived(),b); } /** \returns The product expression of a transform \a a times a diagonal matrix \a b * * The rhs diagonal matrix is interpreted as an affine scaling transformation. The * product results in a Transform of the same type (mode) as the lhs only if the lhs * mode is no isometry. In that case, the returned transform is an affinity. */ template<typename DiagonalDerived> EIGEN_DEVICE_FUNC inline const TransformTimeDiagonalReturnType operator * (const DiagonalBase<DiagonalDerived> &b) const { TransformTimeDiagonalReturnType res(*this); res.linearExt() *= b; return res; } /** \returns The product expression of a diagonal matrix \a a times a transform \a b * * The lhs diagonal matrix is interpreted as an affine scaling transformation. The * product results in a Transform of the same type (mode) as the lhs only if the lhs * mode is no isometry. In that case, the returned transform is an affinity. 
*/ template<typename DiagonalDerived> EIGEN_DEVICE_FUNC friend inline TransformTimeDiagonalReturnType operator * (const DiagonalBase<DiagonalDerived> &a, const Transform &b) { TransformTimeDiagonalReturnType res; res.linear().noalias() = a*b.linear(); res.translation().noalias() = a*b.translation(); if (Mode!=int(AffineCompact)) res.matrix().row(Dim) = b.matrix().row(Dim); return res; } template<typename OtherDerived> EIGEN_DEVICE_FUNC inline Transform& operator*=(const EigenBase<OtherDerived>& other) { return *this = *this * other; } /** Concatenates two transformations */ EIGEN_DEVICE_FUNC inline const Transform operator * (const Transform& other) const { return internal::transform_transform_product_impl<Transform,Transform>::run(*this,other); } #if EIGEN_COMP_ICC private: // this intermediate structure permits to workaround a bug in ICC 11: // error: template instantiation resulted in unexpected function type of "Eigen::Transform<double, 3, 32, 0> // (const Eigen::Transform<double, 3, 2, 0> &) const" // (the meaning of a name may have changed since the template declaration -- the type of the template is: // "Eigen::internal::transform_transform_product_impl<Eigen::Transform<double, 3, 32, 0>, // Eigen::Transform<double, 3, Mode, Options>, <expression>>::ResultType (const Eigen::Transform<double, 3, Mode, Options> &) const") // template<int OtherMode,int OtherOptions> struct icc_11_workaround { typedef internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> > ProductType; typedef typename ProductType::ResultType ResultType; }; public: /** Concatenates two different transformations */ template<int OtherMode,int OtherOptions> inline typename icc_11_workaround<OtherMode,OtherOptions>::ResultType operator * (const Transform<Scalar,Dim,OtherMode,OtherOptions>& other) const { typedef typename icc_11_workaround<OtherMode,OtherOptions>::ProductType ProductType; return ProductType::run(*this,other); } #else /** Concatenates two different transformations */ template<int OtherMode,int OtherOptions> EIGEN_DEVICE_FUNC inline typename internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> >::ResultType operator * (const Transform<Scalar,Dim,OtherMode,OtherOptions>& other) const { return internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> >::run(*this,other); } #endif /** \sa MatrixBase::setIdentity() */ EIGEN_DEVICE_FUNC void setIdentity() { m_matrix.setIdentity(); } /** * \brief Returns an identity transformation. * \todo In the future this function should be returning a Transform expression. 
*/ EIGEN_DEVICE_FUNC static const Transform Identity() { return Transform(MatrixType::Identity()); } template<typename OtherDerived> EIGEN_DEVICE_FUNC inline Transform& scale(const MatrixBase<OtherDerived> &other); template<typename OtherDerived> EIGEN_DEVICE_FUNC inline Transform& prescale(const MatrixBase<OtherDerived> &other); EIGEN_DEVICE_FUNC inline Transform& scale(const Scalar& s); EIGEN_DEVICE_FUNC inline Transform& prescale(const Scalar& s); template<typename OtherDerived> EIGEN_DEVICE_FUNC inline Transform& translate(const MatrixBase<OtherDerived> &other); template<typename OtherDerived> EIGEN_DEVICE_FUNC inline Transform& pretranslate(const MatrixBase<OtherDerived> &other); template<typename RotationType> EIGEN_DEVICE_FUNC inline Transform& rotate(const RotationType& rotation); template<typename RotationType> EIGEN_DEVICE_FUNC inline Transform& prerotate(const RotationType& rotation); EIGEN_DEVICE_FUNC Transform& shear(const Scalar& sx, const Scalar& sy); EIGEN_DEVICE_FUNC Transform& preshear(const Scalar& sx, const Scalar& sy); EIGEN_DEVICE_FUNC inline Transform& operator=(const TranslationType& t); EIGEN_DEVICE_FUNC inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); } EIGEN_DEVICE_FUNC inline Transform operator*(const TranslationType& t) const; EIGEN_DEVICE_FUNC inline Transform& operator=(const UniformScaling<Scalar>& t); EIGEN_DEVICE_FUNC inline Transform& operator*=(const UniformScaling<Scalar>& s) { return scale(s.factor()); } EIGEN_DEVICE_FUNC inline TransformTimeDiagonalReturnType operator*(const UniformScaling<Scalar>& s) const { TransformTimeDiagonalReturnType res = *this; res.scale(s.factor()); return res; } EIGEN_DEVICE_FUNC inline Transform& operator*=(const DiagonalMatrix<Scalar,Dim>& s) { linearExt() *= s; return *this; } template<typename Derived> EIGEN_DEVICE_FUNC inline Transform& operator=(const RotationBase<Derived,Dim>& r); template<typename Derived> EIGEN_DEVICE_FUNC inline Transform& operator*=(const RotationBase<Derived,Dim>& r) { return rotate(r.toRotationMatrix()); } template<typename Derived> EIGEN_DEVICE_FUNC inline Transform operator*(const RotationBase<Derived,Dim>& r) const; EIGEN_DEVICE_FUNC const LinearMatrixType rotation() const; template<typename RotationMatrixType, typename ScalingMatrixType> EIGEN_DEVICE_FUNC void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const; template<typename ScalingMatrixType, typename RotationMatrixType> EIGEN_DEVICE_FUNC void computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const; template<typename PositionDerived, typename OrientationType, typename ScaleDerived> EIGEN_DEVICE_FUNC Transform& fromPositionOrientationScale(const MatrixBase<PositionDerived> &position, const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale); EIGEN_DEVICE_FUNC inline Transform inverse(TransformTraits traits = (TransformTraits)Mode) const; /** \returns a const pointer to the column major internal matrix */ EIGEN_DEVICE_FUNC const Scalar* data() const { return m_matrix.data(); } /** \returns a non-const pointer to the column major internal matrix */ EIGEN_DEVICE_FUNC Scalar* data() { return m_matrix.data(); } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. 
*/ template<typename NewScalarType> EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim,Mode,Options> >::type cast() const { return typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim,Mode,Options> >::type(*this); } /** Copy constructor with scalar type conversion */ template<typename OtherScalarType> EIGEN_DEVICE_FUNC inline explicit Transform(const Transform<OtherScalarType,Dim,Mode,Options>& other) { check_template_params(); m_matrix = other.matrix().template cast<Scalar>(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ EIGEN_DEVICE_FUNC bool isApprox(const Transform& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const { return m_matrix.isApprox(other.m_matrix, prec); } /** Sets the last row to [0 ... 0 1] */ EIGEN_DEVICE_FUNC void makeAffine() { internal::transform_make_affine<int(Mode)>::run(m_matrix); } /** \internal * \returns the Dim x Dim linear part if the transformation is affine, * and the HDim x Dim part for projective transformations. */ EIGEN_DEVICE_FUNC inline Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,Dim> linearExt() { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,Dim>(0,0); } /** \internal * \returns the Dim x Dim linear part if the transformation is affine, * and the HDim x Dim part for projective transformations. */ EIGEN_DEVICE_FUNC inline const Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,Dim> linearExt() const { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,Dim>(0,0); } /** \internal * \returns the translation part if the transformation is affine, * and the last column for projective transformations. */ EIGEN_DEVICE_FUNC inline Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,1> translationExt() { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,1>(0,Dim); } /** \internal * \returns the translation part if the transformation is affine, * and the last column for projective transformations. 
*/ EIGEN_DEVICE_FUNC inline const Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,1> translationExt() const { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,1>(0,Dim); } #ifdef EIGEN_TRANSFORM_PLUGIN #include EIGEN_TRANSFORM_PLUGIN #endif protected: #ifndef EIGEN_PARSED_BY_DOXYGEN EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void check_template_params() { EIGEN_STATIC_ASSERT((Options & (DontAlign|RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS) } #endif }; /** \ingroup Geometry_Module */ typedef Transform<float,2,Isometry> Isometry2f; /** \ingroup Geometry_Module */ typedef Transform<float,3,Isometry> Isometry3f; /** \ingroup Geometry_Module */ typedef Transform<double,2,Isometry> Isometry2d; /** \ingroup Geometry_Module */ typedef Transform<double,3,Isometry> Isometry3d; /** \ingroup Geometry_Module */ typedef Transform<float,2,Affine> Affine2f; /** \ingroup Geometry_Module */ typedef Transform<float,3,Affine> Affine3f; /** \ingroup Geometry_Module */ typedef Transform<double,2,Affine> Affine2d; /** \ingroup Geometry_Module */ typedef Transform<double,3,Affine> Affine3d; /** \ingroup Geometry_Module */ typedef Transform<float,2,AffineCompact> AffineCompact2f; /** \ingroup Geometry_Module */ typedef Transform<float,3,AffineCompact> AffineCompact3f; /** \ingroup Geometry_Module */ typedef Transform<double,2,AffineCompact> AffineCompact2d; /** \ingroup Geometry_Module */ typedef Transform<double,3,AffineCompact> AffineCompact3d; /** \ingroup Geometry_Module */ typedef Transform<float,2,Projective> Projective2f; /** \ingroup Geometry_Module */ typedef Transform<float,3,Projective> Projective3f; /** \ingroup Geometry_Module */ typedef Transform<double,2,Projective> Projective2d; /** \ingroup Geometry_Module */ typedef Transform<double,3,Projective> Projective3d; /************************** *** Optional QT support *** **************************/ #ifdef EIGEN_QT_SUPPORT /** Initializes \c *this from a QMatrix assuming the dimension is 2. * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ template<typename Scalar, int Dim, int Mode,int Options> Transform<Scalar,Dim,Mode,Options>::Transform(const QMatrix& other) { check_template_params(); *this = other; } /** Set \c *this from a QMatrix assuming the dimension is 2. * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ template<typename Scalar, int Dim, int Mode,int Options> Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const QMatrix& other) { EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) if (Mode == int(AffineCompact)) m_matrix << other.m11(), other.m21(), other.dx(), other.m12(), other.m22(), other.dy(); else m_matrix << other.m11(), other.m21(), other.dx(), other.m12(), other.m22(), other.dy(), 0, 0, 1; return *this; } /** \returns a QMatrix from \c *this assuming the dimension is 2. * * \warning this conversion might loss data if \c *this is not affine * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ template<typename Scalar, int Dim, int Mode, int Options> QMatrix Transform<Scalar,Dim,Mode,Options>::toQMatrix(void) const { check_template_params(); EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(0,2), m_matrix.coeff(1,2)); } /** Initializes \c *this from a QTransform assuming the dimension is 2. 
* * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ template<typename Scalar, int Dim, int Mode,int Options> Transform<Scalar,Dim,Mode,Options>::Transform(const QTransform& other) { check_template_params(); *this = other; } /** Set \c *this from a QTransform assuming the dimension is 2. * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ template<typename Scalar, int Dim, int Mode, int Options> Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const QTransform& other) { check_template_params(); EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) if (Mode == int(AffineCompact)) m_matrix << other.m11(), other.m21(), other.dx(), other.m12(), other.m22(), other.dy(); else m_matrix << other.m11(), other.m21(), other.dx(), other.m12(), other.m22(), other.dy(), other.m13(), other.m23(), other.m33(); return *this; } /** \returns a QTransform from \c *this assuming the dimension is 2. * * This function is available only if the token EIGEN_QT_SUPPORT is defined. */ template<typename Scalar, int Dim, int Mode, int Options> QTransform Transform<Scalar,Dim,Mode,Options>::toQTransform(void) const { EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) if (Mode == int(AffineCompact)) return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(0,2), m_matrix.coeff(1,2)); else return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(2,0), m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(2,1), m_matrix.coeff(0,2), m_matrix.coeff(1,2), m_matrix.coeff(2,2)); } #endif /********************* *** Procedural API *** *********************/ /** Applies on the right the non uniform scale transformation represented * by the vector \a other to \c *this and returns a reference to \c *this. * \sa prescale() */ template<typename Scalar, int Dim, int Mode, int Options> template<typename OtherDerived> EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::scale(const MatrixBase<OtherDerived> &other) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) linearExt().noalias() = (linearExt() * other.asDiagonal()); return *this; } /** Applies on the right a uniform scale of a factor \a c to \c *this * and returns a reference to \c *this. * \sa prescale(Scalar) */ template<typename Scalar, int Dim, int Mode, int Options> EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::scale(const Scalar& s) { EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) linearExt() *= s; return *this; } /** Applies on the left the non uniform scale transformation represented * by the vector \a other to \c *this and returns a reference to \c *this. * \sa scale() */ template<typename Scalar, int Dim, int Mode, int Options> template<typename OtherDerived> EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::prescale(const MatrixBase<OtherDerived> &other) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) affine().noalias() = (other.asDiagonal() * affine()); return *this; } /** Applies on the left a uniform scale of a factor \a c to \c *this * and returns a reference to \c *this. 
* \sa scale(Scalar) */ template<typename Scalar, int Dim, int Mode, int Options> EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::prescale(const Scalar& s) { EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) m_matrix.template topRows<Dim>() *= s; return *this; } /** Applies on the right the translation matrix represented by the vector \a other * to \c *this and returns a reference to \c *this. * \sa pretranslate() */ template<typename Scalar, int Dim, int Mode, int Options> template<typename OtherDerived> EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::translate(const MatrixBase<OtherDerived> &other) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) translationExt() += linearExt() * other; return *this; } /** Applies on the left the translation matrix represented by the vector \a other * to \c *this and returns a reference to \c *this. * \sa translate() */ template<typename Scalar, int Dim, int Mode, int Options> template<typename OtherDerived> EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::pretranslate(const MatrixBase<OtherDerived> &other) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) if(int(Mode)==int(Projective)) affine() += other * m_matrix.row(Dim); else translation() += other; return *this; } /** Applies on the right the rotation represented by the rotation \a rotation * to \c *this and returns a reference to \c *this. * * The template parameter \a RotationType is the type of the rotation which * must be known by internal::toRotationMatrix<>. * * Natively supported types includes: * - any scalar (2D), * - a Dim x Dim matrix expression, * - a Quaternion (3D), * - a AngleAxis (3D) * * This mechanism is easily extendable to support user types such as Euler angles, * or a pair of Quaternion for 4D rotations. * * \sa rotate(Scalar), class Quaternion, class AngleAxis, prerotate(RotationType) */ template<typename Scalar, int Dim, int Mode, int Options> template<typename RotationType> EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::rotate(const RotationType& rotation) { linearExt() *= internal::toRotationMatrix<Scalar,Dim>(rotation); return *this; } /** Applies on the left the rotation represented by the rotation \a rotation * to \c *this and returns a reference to \c *this. * * See rotate() for further details. * * \sa rotate() */ template<typename Scalar, int Dim, int Mode, int Options> template<typename RotationType> EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::prerotate(const RotationType& rotation) { m_matrix.template block<Dim,HDim>(0,0) = internal::toRotationMatrix<Scalar,Dim>(rotation) * m_matrix.template block<Dim,HDim>(0,0); return *this; } /** Applies on the right the shear transformation represented * by the vector \a other to \c *this and returns a reference to \c *this. * \warning 2D only. 
* \sa preshear() */ template<typename Scalar, int Dim, int Mode, int Options> EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::shear(const Scalar& sx, const Scalar& sy) { EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) VectorType tmp = linear().col(0)*sy + linear().col(1); linear() << linear().col(0) + linear().col(1)*sx, tmp; return *this; } /** Applies on the left the shear transformation represented * by the vector \a other to \c *this and returns a reference to \c *this. * \warning 2D only. * \sa shear() */ template<typename Scalar, int Dim, int Mode, int Options> EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::preshear(const Scalar& sx, const Scalar& sy) { EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE) EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS) m_matrix.template block<Dim,HDim>(0,0) = LinearMatrixType(1, sx, sy, 1) * m_matrix.template block<Dim,HDim>(0,0); return *this; } /****************************************************** *** Scaling, Translation and Rotation compatibility *** ******************************************************/ template<typename Scalar, int Dim, int Mode, int Options> EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const TranslationType& t) { linear().setIdentity(); translation() = t.vector(); makeAffine(); return *this; } template<typename Scalar, int Dim, int Mode, int Options> EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim,Mode,Options>::operator*(const TranslationType& t) const { Transform res = *this; res.translate(t.vector()); return res; } template<typename Scalar, int Dim, int Mode, int Options> EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const UniformScaling<Scalar>& s) { m_matrix.setZero(); linear().diagonal().fill(s.factor()); makeAffine(); return *this; } template<typename Scalar, int Dim, int Mode, int Options> template<typename Derived> EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const RotationBase<Derived,Dim>& r) { linear() = internal::toRotationMatrix<Scalar,Dim>(r); translation().setZero(); makeAffine(); return *this; } template<typename Scalar, int Dim, int Mode, int Options> template<typename Derived> EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim,Mode,Options>::operator*(const RotationBase<Derived,Dim>& r) const { Transform res = *this; res.rotate(r.derived()); return res; } /************************ *** Special functions *** ************************/ /** \returns the rotation part of the transformation * * * \svd_module * * \sa computeRotationScaling(), computeScalingRotation(), class SVD */ template<typename Scalar, int Dim, int Mode, int Options> EIGEN_DEVICE_FUNC const typename Transform<Scalar,Dim,Mode,Options>::LinearMatrixType Transform<Scalar,Dim,Mode,Options>::rotation() const { LinearMatrixType result; computeRotationScaling(&result, (LinearMatrixType*)0); return result; } /** decomposes the linear part of the transformation as a product rotation x scaling, the scaling being * not necessarily positive. * * If either pointer is zero, the corresponding computation is skipped. 
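*
* A minimal sketch (the matrices \c R and \c S are names local to this example):
* \code
* Affine3d T(AngleAxisd(0.3, Vector3d::UnitZ()));
* T.scale(Vector3d(2.0, 1.0, 0.5));
* Matrix3d R, S;
* T.computeRotationScaling(&R, &S);          // afterwards T.linear() is (approximately) R * S
* \endcode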
* * * * \svd_module * * \sa computeScalingRotation(), rotation(), class SVD */ template<typename Scalar, int Dim, int Mode, int Options> template<typename RotationMatrixType, typename ScalingMatrixType> EIGEN_DEVICE_FUNC void Transform<Scalar,Dim,Mode,Options>::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const { JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV); Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1 VectorType sv(svd.singularValues()); sv.coeffRef(0) *= x; if(scaling) scaling->lazyAssign(svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint()); if(rotation) { LinearMatrixType m(svd.matrixU()); m.col(0) /= x; rotation->lazyAssign(m * svd.matrixV().adjoint()); } } /** decomposes the linear part of the transformation as a product scaling x rotation, the scaling being * not necessarily positive. * * If either pointer is zero, the corresponding computation is skipped. * * * * \svd_module * * \sa computeRotationScaling(), rotation(), class SVD */ template<typename Scalar, int Dim, int Mode, int Options> template<typename ScalingMatrixType, typename RotationMatrixType> EIGEN_DEVICE_FUNC void Transform<Scalar,Dim,Mode,Options>::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const { JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV); Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1 VectorType sv(svd.singularValues()); sv.coeffRef(0) *= x; if(scaling) scaling->lazyAssign(svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint()); if(rotation) { LinearMatrixType m(svd.matrixU()); m.col(0) /= x; rotation->lazyAssign(m * svd.matrixV().adjoint()); } } /** Convenient method to set \c *this from a position, orientation and scale * of a 3D object. */ template<typename Scalar, int Dim, int Mode, int Options> template<typename PositionDerived, typename OrientationType, typename ScaleDerived> EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::fromPositionOrientationScale(const MatrixBase<PositionDerived> &position, const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale) { linear() = internal::toRotationMatrix<Scalar,Dim>(orientation); linear() *= scale.asDiagonal(); translation() = position; makeAffine(); return *this; } namespace internal { template<int Mode> struct transform_make_affine { template<typename MatrixType> EIGEN_DEVICE_FUNC static void run(MatrixType &mat) { static const int Dim = MatrixType::ColsAtCompileTime-1; mat.template block<1,Dim>(Dim,0).setZero(); mat.coeffRef(Dim,Dim) = typename MatrixType::Scalar(1); } }; template<> struct transform_make_affine<AffineCompact> { template<typename MatrixType> EIGEN_DEVICE_FUNC static void run(MatrixType &) { } }; // selector needed to avoid taking the inverse of a 3x4 matrix template<typename TransformType, int Mode=TransformType::Mode> struct projective_transform_inverse { EIGEN_DEVICE_FUNC static inline void run(const TransformType&, TransformType&) {} }; template<typename TransformType> struct projective_transform_inverse<TransformType, Projective> { EIGEN_DEVICE_FUNC static inline void run(const TransformType& m, TransformType& res) { res.matrix() = m.matrix().inverse(); } }; } // end namespace internal /** * * \returns the inverse transformation according to some given knowledge * on \c *this. 
* * \param hint allows to optimize the inversion process when the transformation * is known to be not a general transformation (optional). The possible values are: * - #Projective if the transformation is not necessarily affine, i.e., if the * last row is not guaranteed to be [0 ... 0 1] * - #Affine if the last row can be assumed to be [0 ... 0 1] * - #Isometry if the transformation is only a concatenations of translations * and rotations. * The default is the template class parameter \c Mode. * * \warning unless \a traits is always set to NoShear or NoScaling, this function * requires the generic inverse method of MatrixBase defined in the LU module. If * you forget to include this module, then you will get hard to debug linking errors. * * \sa MatrixBase::inverse() */ template<typename Scalar, int Dim, int Mode, int Options> EIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim,Mode,Options>::inverse(TransformTraits hint) const { Transform res; if (hint == Projective) { internal::projective_transform_inverse<Transform>::run(*this, res); } else { if (hint == Isometry) { res.matrix().template topLeftCorner<Dim,Dim>() = linear().transpose(); } else if(hint&Affine) { res.matrix().template topLeftCorner<Dim,Dim>() = linear().inverse(); } else { eigen_assert(false && "Invalid transform traits in Transform::Inverse"); } // translation and remaining parts res.matrix().template topRightCorner<Dim,1>() = - res.matrix().template topLeftCorner<Dim,Dim>() * translation(); res.makeAffine(); // we do need this, because in the beginning res is uninitialized } return res; } namespace internal { /***************************************************** *** Specializations of take affine part *** *****************************************************/ template<typename TransformType> struct transform_take_affine_part { typedef typename TransformType::MatrixType MatrixType; typedef typename TransformType::AffinePart AffinePart; typedef typename TransformType::ConstAffinePart ConstAffinePart; static inline AffinePart run(MatrixType& m) { return m.template block<TransformType::Dim,TransformType::HDim>(0,0); } static inline ConstAffinePart run(const MatrixType& m) { return m.template block<TransformType::Dim,TransformType::HDim>(0,0); } }; template<typename Scalar, int Dim, int Options> struct transform_take_affine_part<Transform<Scalar,Dim,AffineCompact, Options> > { typedef typename Transform<Scalar,Dim,AffineCompact,Options>::MatrixType MatrixType; static inline MatrixType& run(MatrixType& m) { return m; } static inline const MatrixType& run(const MatrixType& m) { return m; } }; /***************************************************** *** Specializations of construct from matrix *** *****************************************************/ template<typename Other, int Mode, int Options, int Dim, int HDim> struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, Dim,Dim> { static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other) { transform->linear() = other; transform->translation().setZero(); transform->makeAffine(); } }; template<typename Other, int Mode, int Options, int Dim, int HDim> struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, Dim,HDim> { static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other) { transform->affine() = other; transform->makeAffine(); } }; template<typename Other, int Mode, int Options, int Dim, int HDim> struct 
transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, HDim,HDim> { static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other) { transform->matrix() = other; } }; template<typename Other, int Options, int Dim, int HDim> struct transform_construct_from_matrix<Other, AffineCompact,Options,Dim,HDim, HDim,HDim> { static inline void run(Transform<typename Other::Scalar,Dim,AffineCompact,Options> *transform, const Other& other) { transform->matrix() = other.template block<Dim,HDim>(0,0); } }; /********************************************************** *** Specializations of operator* with rhs EigenBase *** **********************************************************/ template<int LhsMode,int RhsMode> struct transform_product_result { enum { Mode = (LhsMode == (int)Projective || RhsMode == (int)Projective ) ? Projective : (LhsMode == (int)Affine || RhsMode == (int)Affine ) ? Affine : (LhsMode == (int)AffineCompact || RhsMode == (int)AffineCompact ) ? AffineCompact : (LhsMode == (int)Isometry || RhsMode == (int)Isometry ) ? Isometry : Projective }; }; template< typename TransformType, typename MatrixType, int RhsCols> struct transform_right_product_impl< TransformType, MatrixType, 0, RhsCols> { typedef typename MatrixType::PlainObject ResultType; static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other) { return T.matrix() * other; } }; template< typename TransformType, typename MatrixType, int RhsCols> struct transform_right_product_impl< TransformType, MatrixType, 1, RhsCols> { enum { Dim = TransformType::Dim, HDim = TransformType::HDim, OtherRows = MatrixType::RowsAtCompileTime, OtherCols = MatrixType::ColsAtCompileTime }; typedef typename MatrixType::PlainObject ResultType; static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other) { EIGEN_STATIC_ASSERT(OtherRows==HDim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES); typedef Block<ResultType, Dim, OtherCols, int(MatrixType::RowsAtCompileTime)==Dim> TopLeftLhs; ResultType res(other.rows(),other.cols()); TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() = T.affine() * other; res.row(OtherRows-1) = other.row(OtherRows-1); return res; } }; template< typename TransformType, typename MatrixType, int RhsCols> struct transform_right_product_impl< TransformType, MatrixType, 2, RhsCols> { enum { Dim = TransformType::Dim, HDim = TransformType::HDim, OtherRows = MatrixType::RowsAtCompileTime, OtherCols = MatrixType::ColsAtCompileTime }; typedef typename MatrixType::PlainObject ResultType; static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other) { EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES); typedef Block<ResultType, Dim, OtherCols, true> TopLeftLhs; ResultType res(Replicate<typename TransformType::ConstTranslationPart, 1, OtherCols>(T.translation(),1,other.cols())); TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() += T.linear() * other; return res; } }; template< typename TransformType, typename MatrixType > struct transform_right_product_impl< TransformType, MatrixType, 2, 1> // rhs is a vector of size Dim { typedef typename TransformType::MatrixType TransformMatrix; enum { Dim = TransformType::Dim, HDim = TransformType::HDim, OtherRows = MatrixType::RowsAtCompileTime, WorkingRows = EIGEN_PLAIN_ENUM_MIN(TransformMatrix::RowsAtCompileTime,HDim) }; typedef typename MatrixType::PlainObject ResultType; static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& 
other) { EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES); Matrix<typename ResultType::Scalar, Dim+1, 1> rhs; rhs.template head<Dim>() = other; rhs[Dim] = typename ResultType::Scalar(1); Matrix<typename ResultType::Scalar, WorkingRows, 1> res(T.matrix() * rhs); return res.template head<Dim>(); } }; /********************************************************** *** Specializations of operator* with lhs EigenBase *** **********************************************************/ // generic HDim x HDim matrix * T => Projective template<typename Other,int Mode, int Options, int Dim, int HDim> struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, HDim,HDim> { typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType; typedef typename TransformType::MatrixType MatrixType; typedef Transform<typename Other::Scalar,Dim,Projective,Options> ResultType; static ResultType run(const Other& other,const TransformType& tr) { return ResultType(other * tr.matrix()); } }; // generic HDim x HDim matrix * AffineCompact => Projective template<typename Other, int Options, int Dim, int HDim> struct transform_left_product_impl<Other,AffineCompact,Options,Dim,HDim, HDim,HDim> { typedef Transform<typename Other::Scalar,Dim,AffineCompact,Options> TransformType; typedef typename TransformType::MatrixType MatrixType; typedef Transform<typename Other::Scalar,Dim,Projective,Options> ResultType; static ResultType run(const Other& other,const TransformType& tr) { ResultType res; res.matrix().noalias() = other.template block<HDim,Dim>(0,0) * tr.matrix(); res.matrix().col(Dim) += other.col(Dim); return res; } }; // affine matrix * T template<typename Other,int Mode, int Options, int Dim, int HDim> struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, Dim,HDim> { typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType; typedef typename TransformType::MatrixType MatrixType; typedef TransformType ResultType; static ResultType run(const Other& other,const TransformType& tr) { ResultType res; res.affine().noalias() = other * tr.matrix(); res.matrix().row(Dim) = tr.matrix().row(Dim); return res; } }; // affine matrix * AffineCompact template<typename Other, int Options, int Dim, int HDim> struct transform_left_product_impl<Other,AffineCompact,Options,Dim,HDim, Dim,HDim> { typedef Transform<typename Other::Scalar,Dim,AffineCompact,Options> TransformType; typedef typename TransformType::MatrixType MatrixType; typedef TransformType ResultType; static ResultType run(const Other& other,const TransformType& tr) { ResultType res; res.matrix().noalias() = other.template block<Dim,Dim>(0,0) * tr.matrix(); res.translation() += other.col(Dim); return res; } }; // linear matrix * T template<typename Other,int Mode, int Options, int Dim, int HDim> struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, Dim,Dim> { typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType; typedef typename TransformType::MatrixType MatrixType; typedef TransformType ResultType; static ResultType run(const Other& other, const TransformType& tr) { TransformType res; if(Mode!=int(AffineCompact)) res.matrix().row(Dim) = tr.matrix().row(Dim); res.matrix().template topRows<Dim>().noalias() = other * tr.matrix().template topRows<Dim>(); return res; } }; /********************************************************** *** Specializations of operator* with another Transform *** **********************************************************/ template<typename Scalar, int Dim, int LhsMode, 
int LhsOptions, int RhsMode, int RhsOptions> struct transform_transform_product_impl<Transform<Scalar,Dim,LhsMode,LhsOptions>,Transform<Scalar,Dim,RhsMode,RhsOptions>,false > { enum { ResultMode = transform_product_result<LhsMode,RhsMode>::Mode }; typedef Transform<Scalar,Dim,LhsMode,LhsOptions> Lhs; typedef Transform<Scalar,Dim,RhsMode,RhsOptions> Rhs; typedef Transform<Scalar,Dim,ResultMode,LhsOptions> ResultType; static ResultType run(const Lhs& lhs, const Rhs& rhs) { ResultType res; res.linear() = lhs.linear() * rhs.linear(); res.translation() = lhs.linear() * rhs.translation() + lhs.translation(); res.makeAffine(); return res; } }; template<typename Scalar, int Dim, int LhsMode, int LhsOptions, int RhsMode, int RhsOptions> struct transform_transform_product_impl<Transform<Scalar,Dim,LhsMode,LhsOptions>,Transform<Scalar,Dim,RhsMode,RhsOptions>,true > { typedef Transform<Scalar,Dim,LhsMode,LhsOptions> Lhs; typedef Transform<Scalar,Dim,RhsMode,RhsOptions> Rhs; typedef Transform<Scalar,Dim,Projective> ResultType; static ResultType run(const Lhs& lhs, const Rhs& rhs) { return ResultType( lhs.matrix() * rhs.matrix() ); } }; template<typename Scalar, int Dim, int LhsOptions, int RhsOptions> struct transform_transform_product_impl<Transform<Scalar,Dim,AffineCompact,LhsOptions>,Transform<Scalar,Dim,Projective,RhsOptions>,true > { typedef Transform<Scalar,Dim,AffineCompact,LhsOptions> Lhs; typedef Transform<Scalar,Dim,Projective,RhsOptions> Rhs; typedef Transform<Scalar,Dim,Projective> ResultType; static ResultType run(const Lhs& lhs, const Rhs& rhs) { ResultType res; res.matrix().template topRows<Dim>() = lhs.matrix() * rhs.matrix(); res.matrix().row(Dim) = rhs.matrix().row(Dim); return res; } }; template<typename Scalar, int Dim, int LhsOptions, int RhsOptions> struct transform_transform_product_impl<Transform<Scalar,Dim,Projective,LhsOptions>,Transform<Scalar,Dim,AffineCompact,RhsOptions>,true > { typedef Transform<Scalar,Dim,Projective,LhsOptions> Lhs; typedef Transform<Scalar,Dim,AffineCompact,RhsOptions> Rhs; typedef Transform<Scalar,Dim,Projective> ResultType; static ResultType run(const Lhs& lhs, const Rhs& rhs) { ResultType res(lhs.matrix().template leftCols<Dim>() * rhs.matrix()); res.matrix().col(Dim) += lhs.matrix().col(Dim); return res; } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_TRANSFORM_H
Unknown
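The operator* specializations above are easier to follow with a concrete use. The sketch below is not part of the repository: it assumes Eigen 3.3 is on the include path, and all variable names are illustrative. It composes transforms of different modes and applies them to a vector, which exercises both transform_right_product_impl and transform_transform_product_impl.

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  // Translation * rotation builds an Isometry; multiplying by a uniform
  // scaling promotes the result to an Affine transform.
  Transform<double, 3, Isometry> iso =
      Translation3d(1, 2, 3) * AngleAxisd(EIGEN_PI / 4, Vector3d::UnitZ());
  Transform<double, 3, Affine> aff = Scaling(2.0) * iso;

  // Transform * vector applies the full transform (rotation, scaling, translation).
  Vector3d p(1, 0, 0);
  std::cout << "iso * p = " << (iso * p).transpose() << "\n";
  std::cout << "aff * p = " << (aff * p).transpose() << "\n";

  // Transform * Transform: the result mode follows transform_product_result,
  // e.g. Affine * Isometry yields an Affine transform.
  Transform<double, 3, Affine> composed = aff * iso;
  std::cout << "composed:\n" << composed.matrix() << "\n";
  return 0;
}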
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Geometry/AlignedBox.h
.h
14,815
393
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ALIGNEDBOX_H #define EIGEN_ALIGNEDBOX_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * * \class AlignedBox * * \brief An axis aligned box * * \tparam _Scalar the type of the scalar coefficients * \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. * * This class represents an axis aligned box as a pair of the minimal and maximal corners. * \warning The result of most methods is undefined when applied to an empty box. You can check for empty boxes using isEmpty(). * \sa alignedboxtypedefs */ template <typename _Scalar, int _AmbientDim> class AlignedBox { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) enum { AmbientDimAtCompileTime = _AmbientDim }; typedef _Scalar Scalar; typedef NumTraits<Scalar> ScalarTraits; typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef typename ScalarTraits::Real RealScalar; typedef typename ScalarTraits::NonInteger NonInteger; typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType; typedef CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const VectorType, const VectorType> VectorTypeSum; /** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */ enum CornerType { /** 1D names @{ */ Min=0, Max=1, /** @} */ /** Identifier for 2D corner @{ */ BottomLeft=0, BottomRight=1, TopLeft=2, TopRight=3, /** @} */ /** Identifier for 3D corner @{ */ BottomLeftFloor=0, BottomRightFloor=1, TopLeftFloor=2, TopRightFloor=3, BottomLeftCeil=4, BottomRightCeil=5, TopLeftCeil=6, TopRightCeil=7 /** @} */ }; /** Default constructor initializing a null box. */ EIGEN_DEVICE_FUNC inline AlignedBox() { if (AmbientDimAtCompileTime!=Dynamic) setEmpty(); } /** Constructs a null box with \a _dim the dimension of the ambient space. */ EIGEN_DEVICE_FUNC inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim) { setEmpty(); } /** Constructs a box with extremities \a _min and \a _max. * \warning If either component of \a _min is larger than the same component of \a _max, the constructed box is empty. */ template<typename OtherVectorType1, typename OtherVectorType2> EIGEN_DEVICE_FUNC inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {} /** Constructs a box containing a single point \a p. */ template<typename Derived> EIGEN_DEVICE_FUNC inline explicit AlignedBox(const MatrixBase<Derived>& p) : m_min(p), m_max(m_min) { } EIGEN_DEVICE_FUNC ~AlignedBox() {} /** \returns the dimension in which the box holds */ EIGEN_DEVICE_FUNC inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); } /** \deprecated use isEmpty() */ EIGEN_DEVICE_FUNC inline bool isNull() const { return isEmpty(); } /** \deprecated use setEmpty() */ EIGEN_DEVICE_FUNC inline void setNull() { setEmpty(); } /** \returns true if the box is empty. * \sa setEmpty */ EIGEN_DEVICE_FUNC inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); } /** Makes \c *this an empty box. 
* \sa isEmpty */ EIGEN_DEVICE_FUNC inline void setEmpty() { m_min.setConstant( ScalarTraits::highest() ); m_max.setConstant( ScalarTraits::lowest() ); } /** \returns the minimal corner */ EIGEN_DEVICE_FUNC inline const VectorType& (min)() const { return m_min; } /** \returns a non const reference to the minimal corner */ EIGEN_DEVICE_FUNC inline VectorType& (min)() { return m_min; } /** \returns the maximal corner */ EIGEN_DEVICE_FUNC inline const VectorType& (max)() const { return m_max; } /** \returns a non const reference to the maximal corner */ EIGEN_DEVICE_FUNC inline VectorType& (max)() { return m_max; } /** \returns the center of the box */ EIGEN_DEVICE_FUNC inline const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(VectorTypeSum, RealScalar, quotient) center() const { return (m_min+m_max)/RealScalar(2); } /** \returns the lengths of the sides of the bounding box. * Note that this function does not get the same * result for integral or floating scalar types: see */ EIGEN_DEVICE_FUNC inline const CwiseBinaryOp< internal::scalar_difference_op<Scalar,Scalar>, const VectorType, const VectorType> sizes() const { return m_max - m_min; } /** \returns the volume of the bounding box */ EIGEN_DEVICE_FUNC inline Scalar volume() const { return sizes().prod(); } /** \returns an expression for the bounding box diagonal vector * if the length of the diagonal is needed: diagonal().norm() * will provide it. */ EIGEN_DEVICE_FUNC inline CwiseBinaryOp< internal::scalar_difference_op<Scalar,Scalar>, const VectorType, const VectorType> diagonal() const { return sizes(); } /** \returns the vertex of the bounding box at the corner defined by * the corner-id corner. It works only for a 1D, 2D or 3D bounding box. * For 1D bounding boxes corners are named by 2 enum constants: * BottomLeft and BottomRight. * For 2D bounding boxes, corners are named by 4 enum constants: * BottomLeft, BottomRight, TopLeft, TopRight. * For 3D bounding boxes, the following names are added: * BottomLeftCeil, BottomRightCeil, TopLeftCeil, TopRightCeil. */ EIGEN_DEVICE_FUNC inline VectorType corner(CornerType corner) const { EIGEN_STATIC_ASSERT(_AmbientDim <= 3, THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE); VectorType res; Index mult = 1; for(Index d=0; d<dim(); ++d) { if( mult & corner ) res[d] = m_max[d]; else res[d] = m_min[d]; mult *= 2; } return res; } /** \returns a random point inside the bounding box sampled with * a uniform distribution */ EIGEN_DEVICE_FUNC inline VectorType sample() const { VectorType r(dim()); for(Index d=0; d<dim(); ++d) { if(!ScalarTraits::IsInteger) { r[d] = m_min[d] + (m_max[d]-m_min[d]) * internal::random<Scalar>(Scalar(0), Scalar(1)); } else r[d] = internal::random(m_min[d], m_max[d]); } return r; } /** \returns true if the point \a p is inside the box \c *this. */ template<typename Derived> EIGEN_DEVICE_FUNC inline bool contains(const MatrixBase<Derived>& p) const { typename internal::nested_eval<Derived,2>::type p_n(p.derived()); return (m_min.array()<=p_n.array()).all() && (p_n.array()<=m_max.array()).all(); } /** \returns true if the box \a b is entirely inside the box \c *this. */ EIGEN_DEVICE_FUNC inline bool contains(const AlignedBox& b) const { return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); } /** \returns true if the box \a b is intersecting the box \c *this. 
* \sa intersection, clamp */ EIGEN_DEVICE_FUNC inline bool intersects(const AlignedBox& b) const { return (m_min.array()<=(b.max)().array()).all() && ((b.min)().array()<=m_max.array()).all(); } /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. * \sa extend(const AlignedBox&) */ template<typename Derived> EIGEN_DEVICE_FUNC inline AlignedBox& extend(const MatrixBase<Derived>& p) { typename internal::nested_eval<Derived,2>::type p_n(p.derived()); m_min = m_min.cwiseMin(p_n); m_max = m_max.cwiseMax(p_n); return *this; } /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. * \sa merged, extend(const MatrixBase&) */ EIGEN_DEVICE_FUNC inline AlignedBox& extend(const AlignedBox& b) { m_min = m_min.cwiseMin(b.m_min); m_max = m_max.cwiseMax(b.m_max); return *this; } /** Clamps \c *this by the box \a b and returns a reference to \c *this. * \note If the boxes don't intersect, the resulting box is empty. * \sa intersection(), intersects() */ EIGEN_DEVICE_FUNC inline AlignedBox& clamp(const AlignedBox& b) { m_min = m_min.cwiseMax(b.m_min); m_max = m_max.cwiseMin(b.m_max); return *this; } /** Returns an AlignedBox that is the intersection of \a b and \c *this * \note If the boxes don't intersect, the resulting box is empty. * \sa intersects(), clamp, contains() */ EIGEN_DEVICE_FUNC inline AlignedBox intersection(const AlignedBox& b) const {return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); } /** Returns an AlignedBox that is the union of \a b and \c *this. * \note Merging with an empty box may result in a box bigger than \c *this. * \sa extend(const AlignedBox&) */ EIGEN_DEVICE_FUNC inline AlignedBox merged(const AlignedBox& b) const { return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); } /** Translate \c *this by the vector \a t and returns a reference to \c *this. */ template<typename Derived> EIGEN_DEVICE_FUNC inline AlignedBox& translate(const MatrixBase<Derived>& a_t) { const typename internal::nested_eval<Derived,2>::type t(a_t.derived()); m_min += t; m_max += t; return *this; } /** \returns the squared distance between the point \a p and the box \c *this, * and zero if \a p is inside the box. * \sa exteriorDistance(const MatrixBase&), squaredExteriorDistance(const AlignedBox&) */ template<typename Derived> EIGEN_DEVICE_FUNC inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& p) const; /** \returns the squared distance between the boxes \a b and \c *this, * and zero if the boxes intersect. * \sa exteriorDistance(const AlignedBox&), squaredExteriorDistance(const MatrixBase&) */ EIGEN_DEVICE_FUNC inline Scalar squaredExteriorDistance(const AlignedBox& b) const; /** \returns the distance between the point \a p and the box \c *this, * and zero if \a p is inside the box. * \sa squaredExteriorDistance(const MatrixBase&), exteriorDistance(const AlignedBox&) */ template<typename Derived> EIGEN_DEVICE_FUNC inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const { EIGEN_USING_STD_MATH(sqrt) return sqrt(NonInteger(squaredExteriorDistance(p))); } /** \returns the distance between the boxes \a b and \c *this, * and zero if the boxes intersect. 
* \sa squaredExteriorDistance(const AlignedBox&), exteriorDistance(const MatrixBase&) */ EIGEN_DEVICE_FUNC inline NonInteger exteriorDistance(const AlignedBox& b) const { EIGEN_USING_STD_MATH(sqrt) return sqrt(NonInteger(squaredExteriorDistance(b))); } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template<typename NewScalarType> EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<AlignedBox, AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type cast() const { return typename internal::cast_return_type<AlignedBox, AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type(*this); } /** Copy constructor with scalar type conversion */ template<typename OtherScalarType> EIGEN_DEVICE_FUNC inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other) { m_min = (other.min)().template cast<Scalar>(); m_max = (other.max)().template cast<Scalar>(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ EIGEN_DEVICE_FUNC bool isApprox(const AlignedBox& other, const RealScalar& prec = ScalarTraits::dummy_precision()) const { return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); } protected: VectorType m_min, m_max; }; template<typename Scalar,int AmbientDim> template<typename Derived> EIGEN_DEVICE_FUNC inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const MatrixBase<Derived>& a_p) const { typename internal::nested_eval<Derived,2*AmbientDim>::type p(a_p.derived()); Scalar dist2(0); Scalar aux; for (Index k=0; k<dim(); ++k) { if( m_min[k] > p[k] ) { aux = m_min[k] - p[k]; dist2 += aux*aux; } else if( p[k] > m_max[k] ) { aux = p[k] - m_max[k]; dist2 += aux*aux; } } return dist2; } template<typename Scalar,int AmbientDim> EIGEN_DEVICE_FUNC inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const AlignedBox& b) const { Scalar dist2(0); Scalar aux; for (Index k=0; k<dim(); ++k) { if( m_min[k] > b.m_max[k] ) { aux = m_min[k] - b.m_max[k]; dist2 += aux*aux; } else if( b.m_min[k] > m_max[k] ) { aux = b.m_min[k] - m_max[k]; dist2 += aux*aux; } } return dist2; } /** \defgroup alignedboxtypedefs Global aligned box typedefs * * \ingroup Geometry_Module * * Eigen defines several typedef shortcuts for most common aligned box types. * * The general patterns are the following: * * \c AlignedBoxSizeType where \c Size can be \c 1, \c 2,\c 3,\c 4 for fixed size boxes or \c X for dynamic size, * and where \c Type can be \c i for integer, \c f for float, \c d for double. * * For example, \c AlignedBox3d is a fixed-size 3x3 aligned box type of doubles, and \c AlignedBoxXf is a dynamic-size aligned box of floats. 
* * \sa class AlignedBox */ #define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ /** \ingroup alignedboxtypedefs */ \ typedef AlignedBox<Type, Size> AlignedBox##SizeSuffix##TypeSuffix; #define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 1, 1) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int, i) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d) #undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES #undef EIGEN_MAKE_TYPEDEFS } // end namespace Eigen #endif // EIGEN_ALIGNEDBOX_H
Unknown
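A brief usage sketch for the AlignedBox interface listed above. It is not part of the repository; it assumes Eigen 3.3 and the box coordinates are arbitrary illustrative values.

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  // Grow an initially empty box so that it contains a set of points.
  AlignedBox3d box;                       // default-constructed fixed-size boxes are empty
  box.extend(Vector3d(0, 0, 0));
  box.extend(Vector3d(1, 2, 3));

  std::cout << "sizes   : " << box.sizes().transpose() << "\n";
  std::cout << "center  : " << box.center().transpose() << "\n";
  std::cout << "contains: " << box.contains(Vector3d(0.5, 1.0, 1.5)) << "\n";

  // Intersection / union with another box, and exterior distance to a point.
  AlignedBox3d other(Vector3d(0.5, 0.5, 0.5), Vector3d(2, 2, 2));
  AlignedBox3d inter = box.intersection(other);
  AlignedBox3d uni   = box.merged(other);
  std::cout << "intersection min: " << (inter.min)().transpose() << "\n";
  std::cout << "union max       : " << (uni.max)().transpose()   << "\n";
  std::cout << "exterior dist   : " << box.exteriorDistance(Vector3d(5, 0, 0)) << "\n";
  return 0;
}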
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Geometry/OrthoMethods.h
.h
8,949
235
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ORTHOMETHODS_H #define EIGEN_ORTHOMETHODS_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \returns the cross product of \c *this and \a other * * Here is a very good explanation of cross-product: http://xkcd.com/199/ * * With complex numbers, the cross product is implemented as * \f$ (\mathbf{a}+i\mathbf{b}) \times (\mathbf{c}+i\mathbf{d}) = (\mathbf{a} \times \mathbf{c} - \mathbf{b} \times \mathbf{d}) - i(\mathbf{a} \times \mathbf{d} - \mathbf{b} \times \mathbf{c})\f$ * * \sa MatrixBase::cross3() */ template<typename Derived> template<typename OtherDerived> #ifndef EIGEN_PARSED_BY_DOXYGEN EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::template cross_product_return_type<OtherDerived>::type #else inline typename MatrixBase<Derived>::PlainObject #endif MatrixBase<Derived>::cross(const MatrixBase<OtherDerived>& other) const { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,3) EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3) // Note that there is no need for an expression here since the compiler // optimize such a small temporary very well (even within a complex expression) typename internal::nested_eval<Derived,2>::type lhs(derived()); typename internal::nested_eval<OtherDerived,2>::type rhs(other.derived()); return typename cross_product_return_type<OtherDerived>::type( numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)), numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)), numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)) ); } namespace internal { template< int Arch,typename VectorLhs,typename VectorRhs, typename Scalar = typename VectorLhs::Scalar, bool Vectorizable = bool((VectorLhs::Flags&VectorRhs::Flags)&PacketAccessBit)> struct cross3_impl { EIGEN_DEVICE_FUNC static inline typename internal::plain_matrix_type<VectorLhs>::type run(const VectorLhs& lhs, const VectorRhs& rhs) { return typename internal::plain_matrix_type<VectorLhs>::type( numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)), numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)), numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)), 0 ); } }; } /** \geometry_module \ingroup Geometry_Module * * \returns the cross product of \c *this and \a other using only the x, y, and z coefficients * * The size of \c *this and \a other must be four. This function is especially useful * when using 4D vectors instead of 3D ones to get advantage of SSE/AltiVec vectorization. 
* * \sa MatrixBase::cross() */ template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::PlainObject MatrixBase<Derived>::cross3(const MatrixBase<OtherDerived>& other) const { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,4) EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,4) typedef typename internal::nested_eval<Derived,2>::type DerivedNested; typedef typename internal::nested_eval<OtherDerived,2>::type OtherDerivedNested; DerivedNested lhs(derived()); OtherDerivedNested rhs(other.derived()); return internal::cross3_impl<Architecture::Target, typename internal::remove_all<DerivedNested>::type, typename internal::remove_all<OtherDerivedNested>::type>::run(lhs,rhs); } /** \geometry_module \ingroup Geometry_Module * * \returns a matrix expression of the cross product of each column or row * of the referenced expression with the \a other vector. * * The referenced matrix must have one dimension equal to 3. * The result matrix has the same dimensions than the referenced one. * * \sa MatrixBase::cross() */ template<typename ExpressionType, int Direction> template<typename OtherDerived> EIGEN_DEVICE_FUNC const typename VectorwiseOp<ExpressionType,Direction>::CrossReturnType VectorwiseOp<ExpressionType,Direction>::cross(const MatrixBase<OtherDerived>& other) const { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3) EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) typename internal::nested_eval<ExpressionType,2>::type mat(_expression()); typename internal::nested_eval<OtherDerived,2>::type vec(other.derived()); CrossReturnType res(_expression().rows(),_expression().cols()); if(Direction==Vertical) { eigen_assert(CrossReturnType::RowsAtCompileTime==3 && "the matrix must have exactly 3 rows"); res.row(0) = (mat.row(1) * vec.coeff(2) - mat.row(2) * vec.coeff(1)).conjugate(); res.row(1) = (mat.row(2) * vec.coeff(0) - mat.row(0) * vec.coeff(2)).conjugate(); res.row(2) = (mat.row(0) * vec.coeff(1) - mat.row(1) * vec.coeff(0)).conjugate(); } else { eigen_assert(CrossReturnType::ColsAtCompileTime==3 && "the matrix must have exactly 3 columns"); res.col(0) = (mat.col(1) * vec.coeff(2) - mat.col(2) * vec.coeff(1)).conjugate(); res.col(1) = (mat.col(2) * vec.coeff(0) - mat.col(0) * vec.coeff(2)).conjugate(); res.col(2) = (mat.col(0) * vec.coeff(1) - mat.col(1) * vec.coeff(0)).conjugate(); } return res; } namespace internal { template<typename Derived, int Size = Derived::SizeAtCompileTime> struct unitOrthogonal_selector { typedef typename plain_matrix_type<Derived>::type VectorType; typedef typename traits<Derived>::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef Matrix<Scalar,2,1> Vector2; EIGEN_DEVICE_FUNC static inline VectorType run(const Derived& src) { VectorType perp = VectorType::Zero(src.size()); Index maxi = 0; Index sndi = 0; src.cwiseAbs().maxCoeff(&maxi); if (maxi==0) sndi = 1; RealScalar invnm = RealScalar(1)/(Vector2() << src.coeff(sndi),src.coeff(maxi)).finished().norm(); perp.coeffRef(maxi) = -numext::conj(src.coeff(sndi)) * invnm; perp.coeffRef(sndi) = numext::conj(src.coeff(maxi)) * invnm; return perp; } }; template<typename Derived> struct unitOrthogonal_selector<Derived,3> { typedef typename plain_matrix_type<Derived>::type VectorType; typedef typename traits<Derived>::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; 
EIGEN_DEVICE_FUNC static inline VectorType run(const Derived& src) { VectorType perp; /* Let us compute the crossed product of *this with a vector * that is not too close to being colinear to *this. */ /* unless the x and y coords are both close to zero, we can * simply take ( -y, x, 0 ) and normalize it. */ if((!isMuchSmallerThan(src.x(), src.z())) || (!isMuchSmallerThan(src.y(), src.z()))) { RealScalar invnm = RealScalar(1)/src.template head<2>().norm(); perp.coeffRef(0) = -numext::conj(src.y())*invnm; perp.coeffRef(1) = numext::conj(src.x())*invnm; perp.coeffRef(2) = 0; } /* if both x and y are close to zero, then the vector is close * to the z-axis, so it's far from colinear to the x-axis for instance. * So we take the crossed product with (1,0,0) and normalize it. */ else { RealScalar invnm = RealScalar(1)/src.template tail<2>().norm(); perp.coeffRef(0) = 0; perp.coeffRef(1) = -numext::conj(src.z())*invnm; perp.coeffRef(2) = numext::conj(src.y())*invnm; } return perp; } }; template<typename Derived> struct unitOrthogonal_selector<Derived,2> { typedef typename plain_matrix_type<Derived>::type VectorType; EIGEN_DEVICE_FUNC static inline VectorType run(const Derived& src) { return VectorType(-numext::conj(src.y()), numext::conj(src.x())).normalized(); } }; } // end namespace internal /** \geometry_module \ingroup Geometry_Module * * \returns a unit vector which is orthogonal to \c *this * * The size of \c *this must be at least 2. If the size is exactly 2, * then the returned vector is a counter clock wise rotation of \c *this, i.e., (-y,x).normalized(). * * \sa cross() */ template<typename Derived> EIGEN_DEVICE_FUNC typename MatrixBase<Derived>::PlainObject MatrixBase<Derived>::unitOrthogonal() const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return internal::unitOrthogonal_selector<Derived>::run(derived()); } } // end namespace Eigen #endif // EIGEN_ORTHOMETHODS_H
Unknown
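The cross(), cross3(), unitOrthogonal() and column-wise cross entry points above can be exercised as follows. This is an illustrative sketch, not repository code, and assumes Eigen 3.3.

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  Vector3d a(1, 0, 0), b(0, 1, 0);
  std::cout << "a x b          = " << a.cross(b).transpose() << "\n";   // (0, 0, 1)

  // cross3() works on 4D vectors, using only the x, y, z coefficients.
  Vector4d a4(1, 0, 0, 0), b4(0, 1, 0, 0);
  std::cout << "cross3         = " << a4.cross3(b4).transpose() << "\n";

  // unitOrthogonal() returns some unit vector orthogonal to its argument;
  // in 2D it is the counter-clockwise rotation (-y, x), normalized.
  Vector3d n(0, 0, 2);
  std::cout << "unitOrthogonal = " << n.unitOrthogonal().transpose() << "\n";
  std::cout << "dot check      = " << n.dot(n.unitOrthogonal()) << "\n"; // ~0

  // VectorwiseOp::cross(): cross each column of a 3xN matrix with a fixed vector.
  Matrix3d M = Matrix3d::Identity();
  std::cout << "colwise cross:\n" << M.colwise().cross(Vector3d(0, 0, 1)) << "\n";
  return 0;
}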
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Geometry/AngleAxis.h
.h
8,423
248
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ANGLEAXIS_H #define EIGEN_ANGLEAXIS_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class AngleAxis * * \brief Represents a 3D rotation as a rotation angle around an arbitrary 3D axis * * \param _Scalar the scalar type, i.e., the type of the coefficients. * * \warning When setting up an AngleAxis object, the axis vector \b must \b be \b normalized. * * The following two typedefs are provided for convenience: * \li \c AngleAxisf for \c float * \li \c AngleAxisd for \c double * * Combined with MatrixBase::Unit{X,Y,Z}, AngleAxis can be used to easily * mimic Euler-angles. Here is an example: * \include AngleAxis_mimic_euler.cpp * Output: \verbinclude AngleAxis_mimic_euler.out * * \note This class is not aimed to be used to store a rotation transformation, * but rather to make easier the creation of other rotation (Quaternion, rotation Matrix) * and transformation objects. * * \sa class Quaternion, class Transform, MatrixBase::UnitX() */ namespace internal { template<typename _Scalar> struct traits<AngleAxis<_Scalar> > { typedef _Scalar Scalar; }; } template<typename _Scalar> class AngleAxis : public RotationBase<AngleAxis<_Scalar>,3> { typedef RotationBase<AngleAxis<_Scalar>,3> Base; public: using Base::operator*; enum { Dim = 3 }; /** the scalar type of the coefficients */ typedef _Scalar Scalar; typedef Matrix<Scalar,3,3> Matrix3; typedef Matrix<Scalar,3,1> Vector3; typedef Quaternion<Scalar> QuaternionType; protected: Vector3 m_axis; Scalar m_angle; public: /** Default constructor without initialization. */ EIGEN_DEVICE_FUNC AngleAxis() {} /** Constructs and initialize the angle-axis rotation from an \a angle in radian * and an \a axis which \b must \b be \b normalized. * * \warning If the \a axis vector is not normalized, then the angle-axis object * represents an invalid rotation. */ template<typename Derived> EIGEN_DEVICE_FUNC inline AngleAxis(const Scalar& angle, const MatrixBase<Derived>& axis) : m_axis(axis), m_angle(angle) {} /** Constructs and initialize the angle-axis rotation from a quaternion \a q. * This function implicitly normalizes the quaternion \a q. */ template<typename QuatDerived> EIGEN_DEVICE_FUNC inline explicit AngleAxis(const QuaternionBase<QuatDerived>& q) { *this = q; } /** Constructs and initialize the angle-axis rotation from a 3x3 rotation matrix. */ template<typename Derived> EIGEN_DEVICE_FUNC inline explicit AngleAxis(const MatrixBase<Derived>& m) { *this = m; } /** \returns the value of the rotation angle in radian */ EIGEN_DEVICE_FUNC Scalar angle() const { return m_angle; } /** \returns a read-write reference to the stored angle in radian */ EIGEN_DEVICE_FUNC Scalar& angle() { return m_angle; } /** \returns the rotation axis */ EIGEN_DEVICE_FUNC const Vector3& axis() const { return m_axis; } /** \returns a read-write reference to the stored rotation axis. * * \warning The rotation axis must remain a \b unit vector. 
*/ EIGEN_DEVICE_FUNC Vector3& axis() { return m_axis; } /** Concatenates two rotations */ EIGEN_DEVICE_FUNC inline QuaternionType operator* (const AngleAxis& other) const { return QuaternionType(*this) * QuaternionType(other); } /** Concatenates two rotations */ EIGEN_DEVICE_FUNC inline QuaternionType operator* (const QuaternionType& other) const { return QuaternionType(*this) * other; } /** Concatenates two rotations */ friend EIGEN_DEVICE_FUNC inline QuaternionType operator* (const QuaternionType& a, const AngleAxis& b) { return a * QuaternionType(b); } /** \returns the inverse rotation, i.e., an angle-axis with opposite rotation angle */ EIGEN_DEVICE_FUNC AngleAxis inverse() const { return AngleAxis(-m_angle, m_axis); } template<class QuatDerived> EIGEN_DEVICE_FUNC AngleAxis& operator=(const QuaternionBase<QuatDerived>& q); template<typename Derived> EIGEN_DEVICE_FUNC AngleAxis& operator=(const MatrixBase<Derived>& m); template<typename Derived> EIGEN_DEVICE_FUNC AngleAxis& fromRotationMatrix(const MatrixBase<Derived>& m); EIGEN_DEVICE_FUNC Matrix3 toRotationMatrix(void) const; /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template<typename NewScalarType> EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type cast() const { return typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type(*this); } /** Copy constructor with scalar type conversion */ template<typename OtherScalarType> EIGEN_DEVICE_FUNC inline explicit AngleAxis(const AngleAxis<OtherScalarType>& other) { m_axis = other.axis().template cast<Scalar>(); m_angle = Scalar(other.angle()); } EIGEN_DEVICE_FUNC static inline const AngleAxis Identity() { return AngleAxis(Scalar(0), Vector3::UnitX()); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ EIGEN_DEVICE_FUNC bool isApprox(const AngleAxis& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const { return m_axis.isApprox(other.m_axis, prec) && internal::isApprox(m_angle,other.m_angle, prec); } }; /** \ingroup Geometry_Module * single precision angle-axis type */ typedef AngleAxis<float> AngleAxisf; /** \ingroup Geometry_Module * double precision angle-axis type */ typedef AngleAxis<double> AngleAxisd; /** Set \c *this from a \b unit quaternion. * * The resulting axis is normalized, and the computed angle is in the [0,pi] range. * * This function implicitly normalizes the quaternion \a q. */ template<typename Scalar> template<typename QuatDerived> EIGEN_DEVICE_FUNC AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived>& q) { EIGEN_USING_STD_MATH(atan2) EIGEN_USING_STD_MATH(abs) Scalar n = q.vec().norm(); if(n<NumTraits<Scalar>::epsilon()) n = q.vec().stableNorm(); if (n != Scalar(0)) { m_angle = Scalar(2)*atan2(n, abs(q.w())); if(q.w() < Scalar(0)) n = -n; m_axis = q.vec() / n; } else { m_angle = Scalar(0); m_axis << Scalar(1), Scalar(0), Scalar(0); } return *this; } /** Set \c *this from a 3x3 rotation matrix \a mat. 
*/ template<typename Scalar> template<typename Derived> EIGEN_DEVICE_FUNC AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const MatrixBase<Derived>& mat) { // Since a direct conversion would not be really faster, // let's use the robust Quaternion implementation: return *this = QuaternionType(mat); } /** * \brief Sets \c *this from a 3x3 rotation matrix. **/ template<typename Scalar> template<typename Derived> EIGEN_DEVICE_FUNC AngleAxis<Scalar>& AngleAxis<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat) { return *this = QuaternionType(mat); } /** Constructs and \returns an equivalent 3x3 rotation matrix. */ template<typename Scalar> typename AngleAxis<Scalar>::Matrix3 EIGEN_DEVICE_FUNC AngleAxis<Scalar>::toRotationMatrix(void) const { EIGEN_USING_STD_MATH(sin) EIGEN_USING_STD_MATH(cos) Matrix3 res; Vector3 sin_axis = sin(m_angle) * m_axis; Scalar c = cos(m_angle); Vector3 cos1_axis = (Scalar(1)-c) * m_axis; Scalar tmp; tmp = cos1_axis.x() * m_axis.y(); res.coeffRef(0,1) = tmp - sin_axis.z(); res.coeffRef(1,0) = tmp + sin_axis.z(); tmp = cos1_axis.x() * m_axis.z(); res.coeffRef(0,2) = tmp + sin_axis.y(); res.coeffRef(2,0) = tmp - sin_axis.y(); tmp = cos1_axis.y() * m_axis.z(); res.coeffRef(1,2) = tmp - sin_axis.x(); res.coeffRef(2,1) = tmp + sin_axis.x(); res.diagonal() = (cos1_axis.cwiseProduct(m_axis)).array() + c; return res; } } // end namespace Eigen #endif // EIGEN_ANGLEAXIS_H
Unknown
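The Euler-angle-style composition mentioned in the AngleAxis documentation looks like this in practice; a sketch assuming Eigen 3.3, not repository code, with arbitrary angles.

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  // Euler-like composition via MatrixBase::Unit{X,Y,Z}, as the class docs suggest.
  Matrix3d R;
  R = AngleAxisd(0.25 * EIGEN_PI, Vector3d::UnitX())
    * AngleAxisd(0.50 * EIGEN_PI, Vector3d::UnitY())
    * AngleAxisd(0.33 * EIGEN_PI, Vector3d::UnitZ());

  // Round-trip through the other rotation representations.
  Quaterniond q(R);    // rotation matrix -> quaternion
  AngleAxisd  aa(q);   // quaternion -> angle-axis (angle in [0, pi], axis normalized)

  std::cout << "angle (rad): " << aa.angle() << "\n";
  std::cout << "axis       : " << aa.axis().transpose() << "\n";
  std::cout << "back to matrix:\n" << aa.toRotationMatrix() << "\n";
  return 0;
}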
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Geometry/Translation.h
.h
7,664
203
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_TRANSLATION_H #define EIGEN_TRANSLATION_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class Translation * * \brief Represents a translation transformation * * \tparam _Scalar the scalar type, i.e., the type of the coefficients. * \tparam _Dim the dimension of the space, can be a compile time value or Dynamic * * \note This class is not aimed to be used to store a translation transformation, * but rather to make easier the constructions and updates of Transform objects. * * \sa class Scaling, class Transform */ template<typename _Scalar, int _Dim> class Translation { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim) /** dimension of the space */ enum { Dim = _Dim }; /** the scalar type of the coefficients */ typedef _Scalar Scalar; /** corresponding vector type */ typedef Matrix<Scalar,Dim,1> VectorType; /** corresponding linear transformation matrix type */ typedef Matrix<Scalar,Dim,Dim> LinearMatrixType; /** corresponding affine transformation type */ typedef Transform<Scalar,Dim,Affine> AffineTransformType; /** corresponding isometric transformation type */ typedef Transform<Scalar,Dim,Isometry> IsometryTransformType; protected: VectorType m_coeffs; public: /** Default constructor without initialization. */ EIGEN_DEVICE_FUNC Translation() {} /** */ EIGEN_DEVICE_FUNC inline Translation(const Scalar& sx, const Scalar& sy) { eigen_assert(Dim==2); m_coeffs.x() = sx; m_coeffs.y() = sy; } /** */ EIGEN_DEVICE_FUNC inline Translation(const Scalar& sx, const Scalar& sy, const Scalar& sz) { eigen_assert(Dim==3); m_coeffs.x() = sx; m_coeffs.y() = sy; m_coeffs.z() = sz; } /** Constructs and initialize the translation transformation from a vector of translation coefficients */ EIGEN_DEVICE_FUNC explicit inline Translation(const VectorType& vector) : m_coeffs(vector) {} /** \brief Retruns the x-translation by value. **/ EIGEN_DEVICE_FUNC inline Scalar x() const { return m_coeffs.x(); } /** \brief Retruns the y-translation by value. **/ EIGEN_DEVICE_FUNC inline Scalar y() const { return m_coeffs.y(); } /** \brief Retruns the z-translation by value. **/ EIGEN_DEVICE_FUNC inline Scalar z() const { return m_coeffs.z(); } /** \brief Retruns the x-translation as a reference. **/ EIGEN_DEVICE_FUNC inline Scalar& x() { return m_coeffs.x(); } /** \brief Retruns the y-translation as a reference. **/ EIGEN_DEVICE_FUNC inline Scalar& y() { return m_coeffs.y(); } /** \brief Retruns the z-translation as a reference. 
**/ EIGEN_DEVICE_FUNC inline Scalar& z() { return m_coeffs.z(); } EIGEN_DEVICE_FUNC const VectorType& vector() const { return m_coeffs; } EIGEN_DEVICE_FUNC VectorType& vector() { return m_coeffs; } EIGEN_DEVICE_FUNC const VectorType& translation() const { return m_coeffs; } EIGEN_DEVICE_FUNC VectorType& translation() { return m_coeffs; } /** Concatenates two translation */ EIGEN_DEVICE_FUNC inline Translation operator* (const Translation& other) const { return Translation(m_coeffs + other.m_coeffs); } /** Concatenates a translation and a uniform scaling */ EIGEN_DEVICE_FUNC inline AffineTransformType operator* (const UniformScaling<Scalar>& other) const; /** Concatenates a translation and a linear transformation */ template<typename OtherDerived> EIGEN_DEVICE_FUNC inline AffineTransformType operator* (const EigenBase<OtherDerived>& linear) const; /** Concatenates a translation and a rotation */ template<typename Derived> EIGEN_DEVICE_FUNC inline IsometryTransformType operator*(const RotationBase<Derived,Dim>& r) const { return *this * IsometryTransformType(r); } /** \returns the concatenation of a linear transformation \a l with the translation \a t */ // its a nightmare to define a templated friend function outside its declaration template<typename OtherDerived> friend EIGEN_DEVICE_FUNC inline AffineTransformType operator*(const EigenBase<OtherDerived>& linear, const Translation& t) { AffineTransformType res; res.matrix().setZero(); res.linear() = linear.derived(); res.translation() = linear.derived() * t.m_coeffs; res.matrix().row(Dim).setZero(); res(Dim,Dim) = Scalar(1); return res; } /** Concatenates a translation and a transformation */ template<int Mode, int Options> EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode> operator* (const Transform<Scalar,Dim,Mode,Options>& t) const { Transform<Scalar,Dim,Mode> res = t; res.pretranslate(m_coeffs); return res; } /** Applies translation to vector */ template<typename Derived> inline typename internal::enable_if<Derived::IsVectorAtCompileTime,VectorType>::type operator* (const MatrixBase<Derived>& vec) const { return m_coeffs + vec.derived(); } /** \returns the inverse translation (opposite) */ Translation inverse() const { return Translation(-m_coeffs); } static const Translation Identity() { return Translation(VectorType::Zero()); } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template<typename NewScalarType> EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type cast() const { return typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type(*this); } /** Copy constructor with scalar type conversion */ template<typename OtherScalarType> EIGEN_DEVICE_FUNC inline explicit Translation(const Translation<OtherScalarType,Dim>& other) { m_coeffs = other.vector().template cast<Scalar>(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. 
* * \sa MatrixBase::isApprox() */ EIGEN_DEVICE_FUNC bool isApprox(const Translation& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const { return m_coeffs.isApprox(other.m_coeffs, prec); } }; /** \addtogroup Geometry_Module */ //@{ typedef Translation<float, 2> Translation2f; typedef Translation<double,2> Translation2d; typedef Translation<float, 3> Translation3f; typedef Translation<double,3> Translation3d; //@} template<typename Scalar, int Dim> EIGEN_DEVICE_FUNC inline typename Translation<Scalar,Dim>::AffineTransformType Translation<Scalar,Dim>::operator* (const UniformScaling<Scalar>& other) const { AffineTransformType res; res.matrix().setZero(); res.linear().diagonal().fill(other.factor()); res.translation() = m_coeffs; res(Dim,Dim) = Scalar(1); return res; } template<typename Scalar, int Dim> template<typename OtherDerived> EIGEN_DEVICE_FUNC inline typename Translation<Scalar,Dim>::AffineTransformType Translation<Scalar,Dim>::operator* (const EigenBase<OtherDerived>& linear) const { AffineTransformType res; res.matrix().setZero(); res.linear() = linear.derived(); res.translation() = m_coeffs; res.matrix().row(Dim).setZero(); res(Dim,Dim) = Scalar(1); return res; } } // end namespace Eigen #endif // EIGEN_TRANSLATION_H
Unknown
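A short sketch of how Translation composes with rotations and scalings into Transform objects, as the operators above define. Illustrative only, assuming Eigen 3.3.

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  Translation3d t(1.0, 2.0, 3.0);
  AngleAxisd    r(0.5 * EIGEN_PI, Vector3d::UnitZ());

  // Translation * rotation yields an Isometry transform,
  // Translation * uniform scaling yields an Affine transform.
  Transform<double, 3, Isometry> iso = t * r;
  Transform<double, 3, Affine>   aff = t * Scaling(2.0);

  // Applying a translation directly to a vector just adds the offset.
  std::cout << "t * v   = " << (t * Vector3d(1, 1, 1)).transpose() << "\n";
  std::cout << "iso * v = " << (iso * Vector3d(1, 0, 0)).transpose() << "\n";
  std::cout << "aff:\n" << aff.matrix() << "\n";

  // inverse() is simply the opposite translation.
  std::cout << "t.inverse() * t * v = "
            << (t.inverse() * (t * Vector3d(4, 5, 6))).transpose() << "\n";
  return 0;
}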
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Geometry/Hyperplane.h
.h
11,961
283
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_HYPERPLANE_H #define EIGEN_HYPERPLANE_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class Hyperplane * * \brief A hyperplane * * A hyperplane is an affine subspace of dimension n-1 in a space of dimension n. * For example, a hyperplane in a plane is a line; a hyperplane in 3-space is a plane. * * \tparam _Scalar the scalar type, i.e., the type of the coefficients * \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. * Notice that the dimension of the hyperplane is _AmbientDim-1. * * This class represents an hyperplane as the zero set of the implicit equation * \f$ n \cdot x + d = 0 \f$ where \f$ n \f$ is a unit normal vector of the plane (linear part) * and \f$ d \f$ is the distance (offset) to the origin. */ template <typename _Scalar, int _AmbientDim, int _Options> class Hyperplane { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1) enum { AmbientDimAtCompileTime = _AmbientDim, Options = _Options }; typedef _Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType; typedef Matrix<Scalar,Index(AmbientDimAtCompileTime)==Dynamic ? Dynamic : Index(AmbientDimAtCompileTime)+1,1,Options> Coefficients; typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType; typedef const Block<const Coefficients,AmbientDimAtCompileTime,1> ConstNormalReturnType; /** Default constructor without initialization */ EIGEN_DEVICE_FUNC inline Hyperplane() {} template<int OtherOptions> EIGEN_DEVICE_FUNC Hyperplane(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other) : m_coeffs(other.coeffs()) {} /** Constructs a dynamic-size hyperplane with \a _dim the dimension * of the ambient space */ EIGEN_DEVICE_FUNC inline explicit Hyperplane(Index _dim) : m_coeffs(_dim+1) {} /** Construct a plane from its normal \a n and a point \a e onto the plane. * \warning the vector normal is assumed to be normalized. */ EIGEN_DEVICE_FUNC inline Hyperplane(const VectorType& n, const VectorType& e) : m_coeffs(n.size()+1) { normal() = n; offset() = -n.dot(e); } /** Constructs a plane from its normal \a n and distance to the origin \a d * such that the algebraic equation of the plane is \f$ n \cdot x + d = 0 \f$. * \warning the vector normal is assumed to be normalized. */ EIGEN_DEVICE_FUNC inline Hyperplane(const VectorType& n, const Scalar& d) : m_coeffs(n.size()+1) { normal() = n; offset() = d; } /** Constructs a hyperplane passing through the two points. If the dimension of the ambient space * is greater than 2, then there isn't uniqueness, so an arbitrary choice is made. */ EIGEN_DEVICE_FUNC static inline Hyperplane Through(const VectorType& p0, const VectorType& p1) { Hyperplane result(p0.size()); result.normal() = (p1 - p0).unitOrthogonal(); result.offset() = -p0.dot(result.normal()); return result; } /** Constructs a hyperplane passing through the three points. 
The dimension of the ambient space * is required to be exactly 3. */ EIGEN_DEVICE_FUNC static inline Hyperplane Through(const VectorType& p0, const VectorType& p1, const VectorType& p2) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3) Hyperplane result(p0.size()); VectorType v0(p2 - p0), v1(p1 - p0); result.normal() = v0.cross(v1); RealScalar norm = result.normal().norm(); if(norm <= v0.norm() * v1.norm() * NumTraits<RealScalar>::epsilon()) { Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose(); JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV); result.normal() = svd.matrixV().col(2); } else result.normal() /= norm; result.offset() = -p0.dot(result.normal()); return result; } /** Constructs a hyperplane passing through the parametrized line \a parametrized. * If the dimension of the ambient space is greater than 2, then there isn't uniqueness, * so an arbitrary choice is made. */ // FIXME to be consitent with the rest this could be implemented as a static Through function ?? EIGEN_DEVICE_FUNC explicit Hyperplane(const ParametrizedLine<Scalar, AmbientDimAtCompileTime>& parametrized) { normal() = parametrized.direction().unitOrthogonal(); offset() = -parametrized.origin().dot(normal()); } EIGEN_DEVICE_FUNC ~Hyperplane() {} /** \returns the dimension in which the plane holds */ EIGEN_DEVICE_FUNC inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_coeffs.size()-1 : Index(AmbientDimAtCompileTime); } /** normalizes \c *this */ EIGEN_DEVICE_FUNC void normalize(void) { m_coeffs /= normal().norm(); } /** \returns the signed distance between the plane \c *this and a point \a p. * \sa absDistance() */ EIGEN_DEVICE_FUNC inline Scalar signedDistance(const VectorType& p) const { return normal().dot(p) + offset(); } /** \returns the absolute distance between the plane \c *this and a point \a p. * \sa signedDistance() */ EIGEN_DEVICE_FUNC inline Scalar absDistance(const VectorType& p) const { return numext::abs(signedDistance(p)); } /** \returns the projection of a point \a p onto the plane \c *this. */ EIGEN_DEVICE_FUNC inline VectorType projection(const VectorType& p) const { return p - signedDistance(p) * normal(); } /** \returns a constant reference to the unit normal vector of the plane, which corresponds * to the linear part of the implicit equation. */ EIGEN_DEVICE_FUNC inline ConstNormalReturnType normal() const { return ConstNormalReturnType(m_coeffs,0,0,dim(),1); } /** \returns a non-constant reference to the unit normal vector of the plane, which corresponds * to the linear part of the implicit equation. */ EIGEN_DEVICE_FUNC inline NormalReturnType normal() { return NormalReturnType(m_coeffs,0,0,dim(),1); } /** \returns the distance to the origin, which is also the "constant term" of the implicit equation * \warning the vector normal is assumed to be normalized. */ EIGEN_DEVICE_FUNC inline const Scalar& offset() const { return m_coeffs.coeff(dim()); } /** \returns a non-constant reference to the distance to the origin, which is also the constant part * of the implicit equation */ EIGEN_DEVICE_FUNC inline Scalar& offset() { return m_coeffs(dim()); } /** \returns a constant reference to the coefficients c_i of the plane equation: * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$ */ EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs; } /** \returns a non-constant reference to the coefficients c_i of the plane equation: * \f$ c_0*x_0 + ... 
+ c_{d-1}*x_{d-1} + c_d = 0 \f$ */ EIGEN_DEVICE_FUNC inline Coefficients& coeffs() { return m_coeffs; } /** \returns the intersection of *this with \a other. * * \warning The ambient space must be a plane, i.e. have dimension 2, so that \c *this and \a other are lines. * * \note If \a other is approximately parallel to *this, this method will return any point on *this. */ EIGEN_DEVICE_FUNC VectorType intersection(const Hyperplane& other) const { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2) Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0); // since the line equations ax+by=c are normalized with a^2+b^2=1, the following tests // whether the two lines are approximately parallel. if(internal::isMuchSmallerThan(det, Scalar(1))) { // special case where the two lines are approximately parallel. Pick any point on the first line. if(numext::abs(coeffs().coeff(1))>numext::abs(coeffs().coeff(0))) return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0)); else return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0)); } else { // general case Scalar invdet = Scalar(1) / det; return VectorType(invdet*(coeffs().coeff(1)*other.coeffs().coeff(2)-other.coeffs().coeff(1)*coeffs().coeff(2)), invdet*(other.coeffs().coeff(0)*coeffs().coeff(2)-coeffs().coeff(0)*other.coeffs().coeff(2))); } } /** Applies the transformation matrix \a mat to \c *this and returns a reference to \c *this. * * \param mat the Dim x Dim transformation matrix * \param traits specifies whether the matrix \a mat represents an #Isometry * or a more generic #Affine transformation. The default is #Affine. */ template<typename XprType> EIGEN_DEVICE_FUNC inline Hyperplane& transform(const MatrixBase<XprType>& mat, TransformTraits traits = Affine) { if (traits==Affine) { normal() = mat.inverse().transpose() * normal(); m_coeffs /= normal().norm(); } else if (traits==Isometry) normal() = mat * normal(); else { eigen_assert(0 && "invalid traits value in Hyperplane::transform()"); } return *this; } /** Applies the transformation \a t to \c *this and returns a reference to \c *this. * * \param t the transformation of dimension Dim * \param traits specifies whether the transformation \a t represents an #Isometry * or a more generic #Affine transformation. The default is #Affine. * Other kind of transformations are not supported. */ template<int TrOptions> EIGEN_DEVICE_FUNC inline Hyperplane& transform(const Transform<Scalar,AmbientDimAtCompileTime,Affine,TrOptions>& t, TransformTraits traits = Affine) { transform(t.linear(), traits); offset() -= normal().dot(t.translation()); return *this; } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. 
*/ template<typename NewScalarType> EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Hyperplane, Hyperplane<NewScalarType,AmbientDimAtCompileTime,Options> >::type cast() const { return typename internal::cast_return_type<Hyperplane, Hyperplane<NewScalarType,AmbientDimAtCompileTime,Options> >::type(*this); } /** Copy constructor with scalar type conversion */ template<typename OtherScalarType,int OtherOptions> EIGEN_DEVICE_FUNC inline explicit Hyperplane(const Hyperplane<OtherScalarType,AmbientDimAtCompileTime,OtherOptions>& other) { m_coeffs = other.coeffs().template cast<Scalar>(); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ template<int OtherOptions> EIGEN_DEVICE_FUNC bool isApprox(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const { return m_coeffs.isApprox(other.m_coeffs, prec); } protected: Coefficients m_coeffs; }; } // end namespace Eigen #endif // EIGEN_HYPERPLANE_H
Unknown
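The Hyperplane constructors and distance/intersection helpers above in use: a plane through three 3D points, then two 2D lines and their crossing point. An illustrative sketch assuming Eigen 3.3, not part of the repository.

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;

  // A plane in 3D through three points (the normal is computed and normalized).
  typedef Hyperplane<double, 3> Plane3d;
  Plane3d plane = Plane3d::Through(Vector3d(0, 0, 1), Vector3d(1, 0, 1), Vector3d(0, 1, 1));

  Vector3d p(0.3, 0.4, 5.0);
  std::cout << "abs distance: " << plane.absDistance(p) << "\n";            // distance to the plane z = 1
  std::cout << "projection  : " << plane.projection(p).transpose() << "\n"; // (0.3, 0.4, 1)

  // In 2D a hyperplane is a line; intersection() returns the crossing point.
  typedef Hyperplane<double, 2> Line2d;
  Line2d l1 = Line2d::Through(Vector2d(0, 0), Vector2d(1, 1));   // the line y = x
  Line2d l2 = Line2d::Through(Vector2d(0, 2), Vector2d(1, 2));   // the line y = 2
  std::cout << "intersection: " << l1.intersection(l2).transpose() << "\n"; // (2, 2)
  return 0;
}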
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Geometry/Rotation2D.h
.h
6,877
200
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ROTATION2D_H #define EIGEN_ROTATION2D_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * \class Rotation2D * * \brief Represents a rotation/orientation in a 2 dimensional space. * * \tparam _Scalar the scalar type, i.e., the type of the coefficients * * This class is equivalent to a single scalar representing a counter clock wise rotation * as a single angle in radian. It provides some additional features such as the automatic * conversion from/to a 2x2 rotation matrix. Moreover this class aims to provide a similar * interface to Quaternion in order to facilitate the writing of generic algorithms * dealing with rotations. * * \sa class Quaternion, class Transform */ namespace internal { template<typename _Scalar> struct traits<Rotation2D<_Scalar> > { typedef _Scalar Scalar; }; } // end namespace internal template<typename _Scalar> class Rotation2D : public RotationBase<Rotation2D<_Scalar>,2> { typedef RotationBase<Rotation2D<_Scalar>,2> Base; public: using Base::operator*; enum { Dim = 2 }; /** the scalar type of the coefficients */ typedef _Scalar Scalar; typedef Matrix<Scalar,2,1> Vector2; typedef Matrix<Scalar,2,2> Matrix2; protected: Scalar m_angle; public: /** Construct a 2D counter clock wise rotation from the angle \a a in radian. */ EIGEN_DEVICE_FUNC explicit inline Rotation2D(const Scalar& a) : m_angle(a) {} /** Default constructor wihtout initialization. The represented rotation is undefined. */ EIGEN_DEVICE_FUNC Rotation2D() {} /** Construct a 2D rotation from a 2x2 rotation matrix \a mat. * * \sa fromRotationMatrix() */ template<typename Derived> EIGEN_DEVICE_FUNC explicit Rotation2D(const MatrixBase<Derived>& m) { fromRotationMatrix(m.derived()); } /** \returns the rotation angle */ EIGEN_DEVICE_FUNC inline Scalar angle() const { return m_angle; } /** \returns a read-write reference to the rotation angle */ EIGEN_DEVICE_FUNC inline Scalar& angle() { return m_angle; } /** \returns the rotation angle in [0,2pi] */ EIGEN_DEVICE_FUNC inline Scalar smallestPositiveAngle() const { Scalar tmp = numext::fmod(m_angle,Scalar(2*EIGEN_PI)); return tmp<Scalar(0) ? 
tmp + Scalar(2*EIGEN_PI) : tmp; } /** \returns the rotation angle in [-pi,pi] */ EIGEN_DEVICE_FUNC inline Scalar smallestAngle() const { Scalar tmp = numext::fmod(m_angle,Scalar(2*EIGEN_PI)); if(tmp>Scalar(EIGEN_PI)) tmp -= Scalar(2*EIGEN_PI); else if(tmp<-Scalar(EIGEN_PI)) tmp += Scalar(2*EIGEN_PI); return tmp; } /** \returns the inverse rotation */ EIGEN_DEVICE_FUNC inline Rotation2D inverse() const { return Rotation2D(-m_angle); } /** Concatenates two rotations */ EIGEN_DEVICE_FUNC inline Rotation2D operator*(const Rotation2D& other) const { return Rotation2D(m_angle + other.m_angle); } /** Concatenates two rotations */ EIGEN_DEVICE_FUNC inline Rotation2D& operator*=(const Rotation2D& other) { m_angle += other.m_angle; return *this; } /** Applies the rotation to a 2D vector */ EIGEN_DEVICE_FUNC Vector2 operator* (const Vector2& vec) const { return toRotationMatrix() * vec; } template<typename Derived> EIGEN_DEVICE_FUNC Rotation2D& fromRotationMatrix(const MatrixBase<Derived>& m); EIGEN_DEVICE_FUNC Matrix2 toRotationMatrix() const; /** Set \c *this from a 2x2 rotation matrix \a mat. * In other words, this function extract the rotation angle from the rotation matrix. * * This method is an alias for fromRotationMatrix() * * \sa fromRotationMatrix() */ template<typename Derived> EIGEN_DEVICE_FUNC Rotation2D& operator=(const MatrixBase<Derived>& m) { return fromRotationMatrix(m.derived()); } /** \returns the spherical interpolation between \c *this and \a other using * parameter \a t. It is in fact equivalent to a linear interpolation. */ EIGEN_DEVICE_FUNC inline Rotation2D slerp(const Scalar& t, const Rotation2D& other) const { Scalar dist = Rotation2D(other.m_angle-m_angle).smallestAngle(); return Rotation2D(m_angle + dist*t); } /** \returns \c *this with scalar type casted to \a NewScalarType * * Note that if \a NewScalarType is equal to the current scalar type of \c *this * then this function smartly returns a const reference to \c *this. */ template<typename NewScalarType> EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type cast() const { return typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type(*this); } /** Copy constructor with scalar type conversion */ template<typename OtherScalarType> EIGEN_DEVICE_FUNC inline explicit Rotation2D(const Rotation2D<OtherScalarType>& other) { m_angle = Scalar(other.angle()); } EIGEN_DEVICE_FUNC static inline Rotation2D Identity() { return Rotation2D(0); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \sa MatrixBase::isApprox() */ EIGEN_DEVICE_FUNC bool isApprox(const Rotation2D& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const { return internal::isApprox(m_angle,other.m_angle, prec); } }; /** \ingroup Geometry_Module * single precision 2D rotation type */ typedef Rotation2D<float> Rotation2Df; /** \ingroup Geometry_Module * double precision 2D rotation type */ typedef Rotation2D<double> Rotation2Dd; /** Set \c *this from a 2x2 rotation matrix \a mat. * In other words, this function extract the rotation angle * from the rotation matrix. 
*/ template<typename Scalar> template<typename Derived> EIGEN_DEVICE_FUNC Rotation2D<Scalar>& Rotation2D<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat) { EIGEN_USING_STD_MATH(atan2) EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE) m_angle = atan2(mat.coeff(1,0), mat.coeff(0,0)); return *this; } /** Constructs and \returns an equivalent 2x2 rotation matrix. */ template<typename Scalar> typename Rotation2D<Scalar>::Matrix2 EIGEN_DEVICE_FUNC Rotation2D<Scalar>::toRotationMatrix(void) const { EIGEN_USING_STD_MATH(sin) EIGEN_USING_STD_MATH(cos) Scalar sinA = sin(m_angle); Scalar cosA = cos(m_angle); return (Matrix2() << cosA, -sinA, sinA, cosA).finished(); } } // end namespace Eigen #endif // EIGEN_ROTATION2D_H
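A short sketch of how Rotation2D is typically used, exercising the angle constructor, composition, matrix round trip, and slerp(); the angles are arbitrary example values:

#include <iostream>
#include <Eigen/Geometry>

int main()
{
  using Eigen::Rotation2Df;
  using Eigen::Vector2f;
  using Eigen::Matrix2f;

  Rotation2Df a(0.25f * float(EIGEN_PI));  // counter-clockwise rotation by pi/4
  Rotation2Df b(0.50f * float(EIGEN_PI));

  Rotation2Df c = a * b;                   // composing 2D rotations adds the angles
  Vector2f v = c * Vector2f::UnitX();      // apply via the 2x2 rotation matrix

  Matrix2f m = c.toRotationMatrix();
  Rotation2Df d(0.f);
  d.fromRotationMatrix(m);                 // recover the angle with atan2

  std::cout << v.transpose() << "\n";
  std::cout << c.smallestAngle() << " " << d.angle() << "\n";
  std::cout << a.slerp(0.5f, b).angle() << "\n";
  return 0;
}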
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Geometry/EulerAngles.h
.h
3,639
115
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_EULERANGLES_H #define EIGEN_EULERANGLES_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * * \returns the Euler-angles of the rotation matrix \c *this using the convention defined by the triplet (\a a0,\a a1,\a a2) * * Each of the three parameters \a a0,\a a1,\a a2 represents the respective rotation axis as an integer in {0,1,2}. * For instance, in: * \code Vector3f ea = mat.eulerAngles(2, 0, 2); \endcode * "2" represents the z axis and "0" the x axis, etc. The returned angles are such that * we have the following equality: * \code * mat == AngleAxisf(ea[0], Vector3f::UnitZ()) * * AngleAxisf(ea[1], Vector3f::UnitX()) * * AngleAxisf(ea[2], Vector3f::UnitZ()); \endcode * This corresponds to the right-multiply conventions (with right hand side frames). * * The returned angles are in the ranges [0:pi]x[-pi:pi]x[-pi:pi]. * * \sa class AngleAxis */ template<typename Derived> EIGEN_DEVICE_FUNC inline Matrix<typename MatrixBase<Derived>::Scalar,3,1> MatrixBase<Derived>::eulerAngles(Index a0, Index a1, Index a2) const { EIGEN_USING_STD_MATH(atan2) EIGEN_USING_STD_MATH(sin) EIGEN_USING_STD_MATH(cos) /* Implemented from Graphics Gems IV */ EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3) Matrix<Scalar,3,1> res; typedef Matrix<typename Derived::Scalar,2,1> Vector2; const Index odd = ((a0+1)%3 == a1) ? 0 : 1; const Index i = a0; const Index j = (a0 + 1 + odd)%3; const Index k = (a0 + 2 - odd)%3; if (a0==a2) { res[0] = atan2(coeff(j,i), coeff(k,i)); if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0))) { if(res[0] > Scalar(0)) { res[0] -= Scalar(EIGEN_PI); } else { res[0] += Scalar(EIGEN_PI); } Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm(); res[1] = -atan2(s2, coeff(i,i)); } else { Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm(); res[1] = atan2(s2, coeff(i,i)); } // With a=(0,1,0), we have i=0; j=1; k=2, and after computing the first two angles, // we can compute their respective rotation, and apply its inverse to M. Since the result must // be a rotation around x, we have: // // c2 s1.s2 c1.s2 1 0 0 // 0 c1 -s1 * M = 0 c3 s3 // -s2 s1.c2 c1.c2 0 -s3 c3 // // Thus: m11.c1 - m21.s1 = c3 & m12.c1 - m22.s1 = s3 Scalar s1 = sin(res[0]); Scalar c1 = cos(res[0]); res[2] = atan2(c1*coeff(j,k)-s1*coeff(k,k), c1*coeff(j,j) - s1 * coeff(k,j)); } else { res[0] = atan2(coeff(j,k), coeff(k,k)); Scalar c2 = Vector2(coeff(i,i), coeff(i,j)).norm(); if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0))) { if(res[0] > Scalar(0)) { res[0] -= Scalar(EIGEN_PI); } else { res[0] += Scalar(EIGEN_PI); } res[1] = atan2(-coeff(i,k), -c2); } else res[1] = atan2(-coeff(i,k), c2); Scalar s1 = sin(res[0]); Scalar c1 = cos(res[0]); res[2] = atan2(s1*coeff(k,i)-c1*coeff(j,i), c1*coeff(j,j) - s1 * coeff(k,j)); } if (!odd) res = -res; return res; } } // end namespace Eigen #endif // EIGEN_EULERANGLES_H
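A sketch of the eulerAngles() round trip described in the comment above, using the (2,0,2) = Z-X-Z convention; the input angles are arbitrary:

#include <iostream>
#include <Eigen/Geometry>

int main()
{
  using namespace Eigen;

  // Build a rotation from three known Z-X-Z angles.
  Matrix3f mat;
  mat = AngleAxisf(0.3f, Vector3f::UnitZ())
      * AngleAxisf(1.1f, Vector3f::UnitX())
      * AngleAxisf(-0.4f, Vector3f::UnitZ());

  // The triplet (2,0,2) requests the same Z-X-Z convention.
  Vector3f ea = mat.eulerAngles(2, 0, 2);

  // Rebuilding from the extracted angles reproduces the matrix.
  Matrix3f rebuilt;
  rebuilt = AngleAxisf(ea[0], Vector3f::UnitZ())
          * AngleAxisf(ea[1], Vector3f::UnitX())
          * AngleAxisf(ea[2], Vector3f::UnitZ());

  std::cout << ea.transpose() << "\n";
  std::cout << (mat - rebuilt).norm() << "\n";  // ~0 up to rounding
  return 0;
}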
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Geometry/arch/Geometry_SSE.h
.h
5,387
162
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Rohit Garg <rpg.314@gmail.com> // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GEOMETRY_SSE_H #define EIGEN_GEOMETRY_SSE_H namespace Eigen { namespace internal { template<class Derived, class OtherDerived> struct quat_product<Architecture::SSE, Derived, OtherDerived, float> { enum { AAlignment = traits<Derived>::Alignment, BAlignment = traits<OtherDerived>::Alignment, ResAlignment = traits<Quaternion<float> >::Alignment }; static inline Quaternion<float> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b) { Quaternion<float> res; const __m128 mask = _mm_setr_ps(0.f,0.f,0.f,-0.f); __m128 a = _a.coeffs().template packet<AAlignment>(0); __m128 b = _b.coeffs().template packet<BAlignment>(0); __m128 s1 = _mm_mul_ps(vec4f_swizzle1(a,1,2,0,2),vec4f_swizzle1(b,2,0,1,2)); __m128 s2 = _mm_mul_ps(vec4f_swizzle1(a,3,3,3,1),vec4f_swizzle1(b,0,1,2,1)); pstoret<float,Packet4f,ResAlignment>( &res.x(), _mm_add_ps(_mm_sub_ps(_mm_mul_ps(a,vec4f_swizzle1(b,3,3,3,3)), _mm_mul_ps(vec4f_swizzle1(a,2,0,1,0), vec4f_swizzle1(b,1,2,0,0))), _mm_xor_ps(mask,_mm_add_ps(s1,s2)))); return res; } }; template<class Derived> struct quat_conj<Architecture::SSE, Derived, float> { enum { ResAlignment = traits<Quaternion<float> >::Alignment }; static inline Quaternion<float> run(const QuaternionBase<Derived>& q) { Quaternion<float> res; const __m128 mask = _mm_setr_ps(-0.f,-0.f,-0.f,0.f); pstoret<float,Packet4f,ResAlignment>(&res.x(), _mm_xor_ps(mask, q.coeffs().template packet<traits<Derived>::Alignment>(0))); return res; } }; template<typename VectorLhs,typename VectorRhs> struct cross3_impl<Architecture::SSE,VectorLhs,VectorRhs,float,true> { enum { ResAlignment = traits<typename plain_matrix_type<VectorLhs>::type>::Alignment }; static inline typename plain_matrix_type<VectorLhs>::type run(const VectorLhs& lhs, const VectorRhs& rhs) { __m128 a = lhs.template packet<traits<VectorLhs>::Alignment>(0); __m128 b = rhs.template packet<traits<VectorRhs>::Alignment>(0); __m128 mul1=_mm_mul_ps(vec4f_swizzle1(a,1,2,0,3),vec4f_swizzle1(b,2,0,1,3)); __m128 mul2=_mm_mul_ps(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3)); typename plain_matrix_type<VectorLhs>::type res; pstoret<float,Packet4f,ResAlignment>(&res.x(),_mm_sub_ps(mul1,mul2)); return res; } }; template<class Derived, class OtherDerived> struct quat_product<Architecture::SSE, Derived, OtherDerived, double> { enum { BAlignment = traits<OtherDerived>::Alignment, ResAlignment = traits<Quaternion<double> >::Alignment }; static inline Quaternion<double> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b) { const Packet2d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0)); Quaternion<double> res; const double* a = _a.coeffs().data(); Packet2d b_xy = _b.coeffs().template packet<BAlignment>(0); Packet2d b_zw = _b.coeffs().template packet<BAlignment>(2); Packet2d a_xx = pset1<Packet2d>(a[0]); Packet2d a_yy = pset1<Packet2d>(a[1]); Packet2d a_zz = pset1<Packet2d>(a[2]); Packet2d a_ww = pset1<Packet2d>(a[3]); // two temporaries: Packet2d t1, t2; /* * t1 = ww*xy + yy*zw * t2 = zz*xy - xx*zw * res.xy = t1 +/- swap(t2) */ t1 = padd(pmul(a_ww, b_xy), pmul(a_yy, b_zw)); t2 = 
psub(pmul(a_zz, b_xy), pmul(a_xx, b_zw)); #ifdef EIGEN_VECTORIZE_SSE3 EIGEN_UNUSED_VARIABLE(mask) pstoret<double,Packet2d,ResAlignment>(&res.x(), _mm_addsub_pd(t1, preverse(t2))); #else pstoret<double,Packet2d,ResAlignment>(&res.x(), padd(t1, pxor(mask,preverse(t2)))); #endif /* * t1 = ww*zw - yy*xy * t2 = zz*zw + xx*xy * res.zw = t1 -/+ swap(t2) = swap( swap(t1) +/- t2) */ t1 = psub(pmul(a_ww, b_zw), pmul(a_yy, b_xy)); t2 = padd(pmul(a_zz, b_zw), pmul(a_xx, b_xy)); #ifdef EIGEN_VECTORIZE_SSE3 EIGEN_UNUSED_VARIABLE(mask) pstoret<double,Packet2d,ResAlignment>(&res.z(), preverse(_mm_addsub_pd(preverse(t1), t2))); #else pstoret<double,Packet2d,ResAlignment>(&res.z(), psub(t1, pxor(mask,preverse(t2)))); #endif return res; } }; template<class Derived> struct quat_conj<Architecture::SSE, Derived, double> { enum { ResAlignment = traits<Quaternion<double> >::Alignment }; static inline Quaternion<double> run(const QuaternionBase<Derived>& q) { Quaternion<double> res; const __m128d mask0 = _mm_setr_pd(-0.,-0.); const __m128d mask2 = _mm_setr_pd(-0.,0.); pstoret<double,Packet2d,ResAlignment>(&res.x(), _mm_xor_pd(mask0, q.coeffs().template packet<traits<Derived>::Alignment>(0))); pstoret<double,Packet2d,ResAlignment>(&res.z(), _mm_xor_pd(mask2, q.coeffs().template packet<traits<Derived>::Alignment>(2))); return res; } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_GEOMETRY_SSE_H
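These specializations are selected internally; whether the SSE kernels actually run depends on the build configuration (e.g. SSE/SSE3 being enabled). A minimal sketch of the user-facing calls they accelerate, with arbitrary sample values:

#include <iostream>
#include <Eigen/Geometry>

int main()
{
  using namespace Eigen;

  Quaternionf a(AngleAxisf(0.7f, Vector3f::UnitY()));
  Quaternionf b(AngleAxisf(-0.2f, Vector3f::UnitZ()));

  Quaternionf ab = a * b;          // quaternion product (quat_product kernel)
  Quaternionf ac = a.conjugate();  // conjugate (quat_conj kernel)

  // cross3() works on 4-component vectors; the last component is ignored.
  Vector4f u(1.f, 2.f, 3.f, 0.f), v(-1.f, 0.5f, 2.f, 0.f);
  Vector4f w = u.cross3(v);

  std::cout << ab.coeffs().transpose() << "\n"
            << ac.coeffs().transpose() << "\n"
            << w.transpose() << "\n";
  return 0;
}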
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Householder/Householder.h
.h
5,345
173
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_HOUSEHOLDER_H #define EIGEN_HOUSEHOLDER_H namespace Eigen { namespace internal { template<int n> struct decrement_size { enum { ret = n==Dynamic ? n : n-1 }; }; } /** Computes the elementary reflector H such that: * \f$ H *this = [ beta 0 ... 0]^T \f$ * where the transformation H is: * \f$ H = I - tau v v^*\f$ * and the vector v is: * \f$ v^T = [1 essential^T] \f$ * * The essential part of the vector \c v is stored in *this. * * On output: * \param tau the scaling factor of the Householder transformation * \param beta the result of H * \c *this * * \sa MatrixBase::makeHouseholder(), MatrixBase::applyHouseholderOnTheLeft(), * MatrixBase::applyHouseholderOnTheRight() */ template<typename Derived> void MatrixBase<Derived>::makeHouseholderInPlace(Scalar& tau, RealScalar& beta) { VectorBlock<Derived, internal::decrement_size<Base::SizeAtCompileTime>::ret> essentialPart(derived(), 1, size()-1); makeHouseholder(essentialPart, tau, beta); } /** Computes the elementary reflector H such that: * \f$ H *this = [ beta 0 ... 0]^T \f$ * where the transformation H is: * \f$ H = I - tau v v^*\f$ * and the vector v is: * \f$ v^T = [1 essential^T] \f$ * * On output: * \param essential the essential part of the vector \c v * \param tau the scaling factor of the Householder transformation * \param beta the result of H * \c *this * * \sa MatrixBase::makeHouseholderInPlace(), MatrixBase::applyHouseholderOnTheLeft(), * MatrixBase::applyHouseholderOnTheRight() */ template<typename Derived> template<typename EssentialPart> void MatrixBase<Derived>::makeHouseholder( EssentialPart& essential, Scalar& tau, RealScalar& beta) const { using std::sqrt; using numext::conj; EIGEN_STATIC_ASSERT_VECTOR_ONLY(EssentialPart) VectorBlock<const Derived, EssentialPart::SizeAtCompileTime> tail(derived(), 1, size()-1); RealScalar tailSqNorm = size()==1 ? RealScalar(0) : tail.squaredNorm(); Scalar c0 = coeff(0); const RealScalar tol = (std::numeric_limits<RealScalar>::min)(); if(tailSqNorm <= tol && numext::abs2(numext::imag(c0))<=tol) { tau = RealScalar(0); beta = numext::real(c0); essential.setZero(); } else { beta = sqrt(numext::abs2(c0) + tailSqNorm); if (numext::real(c0)>=RealScalar(0)) beta = -beta; essential = tail / (c0 - beta); tau = conj((beta - c0) / beta); } } /** Apply the elementary reflector H given by * \f$ H = I - tau v v^*\f$ * with * \f$ v^T = [1 essential^T] \f$ * from the left to a vector or matrix. 
* * On input: * \param essential the essential part of the vector \c v * \param tau the scaling factor of the Householder transformation * \param workspace a pointer to working space with at least * this->cols() * essential.size() entries * * \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(), * MatrixBase::applyHouseholderOnTheRight() */ template<typename Derived> template<typename EssentialPart> void MatrixBase<Derived>::applyHouseholderOnTheLeft( const EssentialPart& essential, const Scalar& tau, Scalar* workspace) { if(rows() == 1) { *this *= Scalar(1)-tau; } else if(tau!=Scalar(0)) { Map<typename internal::plain_row_type<PlainObject>::type> tmp(workspace,cols()); Block<Derived, EssentialPart::SizeAtCompileTime, Derived::ColsAtCompileTime> bottom(derived(), 1, 0, rows()-1, cols()); tmp.noalias() = essential.adjoint() * bottom; tmp += this->row(0); this->row(0) -= tau * tmp; bottom.noalias() -= tau * essential * tmp; } } /** Apply the elementary reflector H given by * \f$ H = I - tau v v^*\f$ * with * \f$ v^T = [1 essential^T] \f$ * from the right to a vector or matrix. * * On input: * \param essential the essential part of the vector \c v * \param tau the scaling factor of the Householder transformation * \param workspace a pointer to working space with at least * this->cols() * essential.size() entries * * \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(), * MatrixBase::applyHouseholderOnTheLeft() */ template<typename Derived> template<typename EssentialPart> void MatrixBase<Derived>::applyHouseholderOnTheRight( const EssentialPart& essential, const Scalar& tau, Scalar* workspace) { if(cols() == 1) { *this *= Scalar(1)-tau; } else if(tau!=Scalar(0)) { Map<typename internal::plain_col_type<PlainObject>::type> tmp(workspace,rows()); Block<Derived, Derived::RowsAtCompileTime, EssentialPart::SizeAtCompileTime> right(derived(), 0, 1, rows(), cols()-1); tmp.noalias() = right * essential.conjugate(); tmp += this->col(0); this->col(0) -= tau * tmp; right.noalias() -= tau * tmp * essential.transpose(); } } } // end namespace Eigen #endif // EIGEN_HOUSEHOLDER_H
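A sketch of makeHouseholder() and applyHouseholderOnTheLeft() on a small dynamic-size example; the vector entries and the 4x2 matrix are made up, and the workspace is sized to the number of columns:

#include <iostream>
#include <Eigen/Dense>

int main()
{
  using namespace Eigen;

  VectorXd v(4);
  v << 3.0, 1.0, -2.0, 0.5;

  // H * v = [beta, 0, 0, 0]^T with H = I - tau * w * w^*.
  double tau, beta;
  VectorXd essential(v.size() - 1);
  v.makeHouseholder(essential, tau, beta);

  // Apply the same reflector to a matrix whose first column is v.
  MatrixXd A(4, 2);
  A.col(0) = v;
  A.col(1) = VectorXd::LinSpaced(4, 0.0, 1.0);

  VectorXd workspace(A.cols());
  A.applyHouseholderOnTheLeft(essential, tau, workspace.data());

  std::cout << "beta = " << beta << "\n" << A << "\n";  // column 0 is now [beta 0 0 0]^T
  return 0;
}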
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Householder/BlockHouseholder.h
.h
4,481
104
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Vincent Lejeune // Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BLOCK_HOUSEHOLDER_H #define EIGEN_BLOCK_HOUSEHOLDER_H // This file contains some helper function to deal with block householder reflectors namespace Eigen { namespace internal { /** \internal */ // template<typename TriangularFactorType,typename VectorsType,typename CoeffsType> // void make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs) // { // typedef typename VectorsType::Scalar Scalar; // const Index nbVecs = vectors.cols(); // eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs); // // for(Index i = 0; i < nbVecs; i++) // { // Index rs = vectors.rows() - i; // // Warning, note that hCoeffs may alias with vectors. // // It is then necessary to copy it before modifying vectors(i,i). // typename CoeffsType::Scalar h = hCoeffs(i); // // This hack permits to pass trough nested Block<> and Transpose<> expressions. // Scalar *Vii_ptr = const_cast<Scalar*>(vectors.data() + vectors.outerStride()*i + vectors.innerStride()*i); // Scalar Vii = *Vii_ptr; // *Vii_ptr = Scalar(1); // triFactor.col(i).head(i).noalias() = -h * vectors.block(i, 0, rs, i).adjoint() // * vectors.col(i).tail(rs); // *Vii_ptr = Vii; // // FIXME add .noalias() once the triangular product can work inplace // triFactor.col(i).head(i) = triFactor.block(0,0,i,i).template triangularView<Upper>() // * triFactor.col(i).head(i); // triFactor(i,i) = hCoeffs(i); // } // } /** \internal */ // This variant avoid modifications in vectors template<typename TriangularFactorType,typename VectorsType,typename CoeffsType> void make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs) { const Index nbVecs = vectors.cols(); eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs); for(Index i = nbVecs-1; i >=0 ; --i) { Index rs = vectors.rows() - i - 1; Index rt = nbVecs-i-1; if(rt>0) { triFactor.row(i).tail(rt).noalias() = -hCoeffs(i) * vectors.col(i).tail(rs).adjoint() * vectors.bottomRightCorner(rs, rt).template triangularView<UnitLower>(); // FIXME add .noalias() once the triangular product can work inplace triFactor.row(i).tail(rt) = triFactor.row(i).tail(rt) * triFactor.bottomRightCorner(rt,rt).template triangularView<Upper>(); } triFactor(i,i) = hCoeffs(i); } } /** \internal * if forward then perform mat = H0 * H1 * H2 * mat * otherwise perform mat = H2 * H1 * H0 * mat */ template<typename MatrixType,typename VectorsType,typename CoeffsType> void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vectors, const CoeffsType& hCoeffs, bool forward) { enum { TFactorSize = MatrixType::ColsAtCompileTime }; Index nbVecs = vectors.cols(); Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize, RowMajor> T(nbVecs,nbVecs); if(forward) make_block_householder_triangular_factor(T, vectors, hCoeffs); else make_block_householder_triangular_factor(T, vectors, hCoeffs.conjugate()); const TriangularView<const VectorsType, UnitLower> V(vectors); // A -= V T V^* A Matrix<typename 
MatrixType::Scalar,VectorsType::ColsAtCompileTime,MatrixType::ColsAtCompileTime, (VectorsType::MaxColsAtCompileTime==1 && MatrixType::MaxColsAtCompileTime!=1)?RowMajor:ColMajor, VectorsType::MaxColsAtCompileTime,MatrixType::MaxColsAtCompileTime> tmp = V.adjoint() * mat; // FIXME add .noalias() once the triangular product can work inplace if(forward) tmp = T.template triangularView<Upper>() * tmp; else tmp = T.template triangularView<Upper>().adjoint() * tmp; mat.noalias() -= V * tmp; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_BLOCK_HOUSEHOLDER_H
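These block helpers live in the internal namespace; user code reaches them indirectly through HouseholderSequence when many reflectors are applied to a multi-column block. A sketch of such a call path, with sizes chosen only so that the blocked branch can trigger (whether it does is an internal detail):

#include <iostream>
#include <Eigen/Dense>

int main()
{
  using namespace Eigen;

  // 64 reflectors and a multi-column right-hand side.
  MatrixXd A = MatrixXd::Random(128, 64);
  HouseholderQR<MatrixXd> qr(A);

  MatrixXd B = MatrixXd::Random(128, 8);
  MatrixXd QtB = qr.householderQ().transpose() * B;  // applies the reflector sequence

  // Round trip: Q * (Q^T * B) recovers B.
  std::cout << (qr.householderQ() * QtB - B).norm() << "\n";
  return 0;
}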
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Householder/HouseholderSequence.h
.h
20,603
471
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_HOUSEHOLDER_SEQUENCE_H #define EIGEN_HOUSEHOLDER_SEQUENCE_H namespace Eigen { /** \ingroup Householder_Module * \householder_module * \class HouseholderSequence * \brief Sequence of Householder reflections acting on subspaces with decreasing size * \tparam VectorsType type of matrix containing the Householder vectors * \tparam CoeffsType type of vector containing the Householder coefficients * \tparam Side either OnTheLeft (the default) or OnTheRight * * This class represents a product sequence of Householder reflections where the first Householder reflection * acts on the whole space, the second Householder reflection leaves the one-dimensional subspace spanned by * the first unit vector invariant, the third Householder reflection leaves the two-dimensional subspace * spanned by the first two unit vectors invariant, and so on up to the last reflection which leaves all but * one dimensions invariant and acts only on the last dimension. Such sequences of Householder reflections * are used in several algorithms to zero out certain parts of a matrix. Indeed, the methods * HessenbergDecomposition::matrixQ(), Tridiagonalization::matrixQ(), HouseholderQR::householderQ(), * and ColPivHouseholderQR::householderQ() all return a %HouseholderSequence. * * More precisely, the class %HouseholderSequence represents an \f$ n \times n \f$ matrix \f$ H \f$ of the * form \f$ H = \prod_{i=0}^{n-1} H_i \f$ where the i-th Householder reflection is \f$ H_i = I - h_i v_i * v_i^* \f$. The i-th Householder coefficient \f$ h_i \f$ is a scalar and the i-th Householder vector \f$ * v_i \f$ is a vector of the form * \f[ * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ]. * \f] * The last \f$ n-i \f$ entries of \f$ v_i \f$ are called the essential part of the Householder vector. * * Typical usages are listed below, where H is a HouseholderSequence: * \code * A.applyOnTheRight(H); // A = A * H * A.applyOnTheLeft(H); // A = H * A * A.applyOnTheRight(H.adjoint()); // A = A * H^* * A.applyOnTheLeft(H.adjoint()); // A = H^* * A * MatrixXd Q = H; // conversion to a dense matrix * \endcode * In addition to the adjoint, you can also apply the inverse (=adjoint), the transpose, and the conjugate operators. * * See the documentation for HouseholderSequence(const VectorsType&, const CoeffsType&) for an example. * * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight() */ namespace internal { template<typename VectorsType, typename CoeffsType, int Side> struct traits<HouseholderSequence<VectorsType,CoeffsType,Side> > { typedef typename VectorsType::Scalar Scalar; typedef typename VectorsType::StorageIndex StorageIndex; typedef typename VectorsType::StorageKind StorageKind; enum { RowsAtCompileTime = Side==OnTheLeft ? traits<VectorsType>::RowsAtCompileTime : traits<VectorsType>::ColsAtCompileTime, ColsAtCompileTime = RowsAtCompileTime, MaxRowsAtCompileTime = Side==OnTheLeft ? 
traits<VectorsType>::MaxRowsAtCompileTime : traits<VectorsType>::MaxColsAtCompileTime, MaxColsAtCompileTime = MaxRowsAtCompileTime, Flags = 0 }; }; struct HouseholderSequenceShape {}; template<typename VectorsType, typename CoeffsType, int Side> struct evaluator_traits<HouseholderSequence<VectorsType,CoeffsType,Side> > : public evaluator_traits_base<HouseholderSequence<VectorsType,CoeffsType,Side> > { typedef HouseholderSequenceShape Shape; }; template<typename VectorsType, typename CoeffsType, int Side> struct hseq_side_dependent_impl { typedef Block<const VectorsType, Dynamic, 1> EssentialVectorType; typedef HouseholderSequence<VectorsType, CoeffsType, OnTheLeft> HouseholderSequenceType; static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) { Index start = k+1+h.m_shift; return Block<const VectorsType,Dynamic,1>(h.m_vectors, start, k, h.rows()-start, 1); } }; template<typename VectorsType, typename CoeffsType> struct hseq_side_dependent_impl<VectorsType, CoeffsType, OnTheRight> { typedef Transpose<Block<const VectorsType, 1, Dynamic> > EssentialVectorType; typedef HouseholderSequence<VectorsType, CoeffsType, OnTheRight> HouseholderSequenceType; static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k) { Index start = k+1+h.m_shift; return Block<const VectorsType,1,Dynamic>(h.m_vectors, k, start, 1, h.rows()-start).transpose(); } }; template<typename OtherScalarType, typename MatrixType> struct matrix_type_times_scalar_type { typedef typename ScalarBinaryOpTraits<OtherScalarType, typename MatrixType::Scalar>::ReturnType ResultScalar; typedef Matrix<ResultScalar, MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime> Type; }; } // end namespace internal template<typename VectorsType, typename CoeffsType, int Side> class HouseholderSequence : public EigenBase<HouseholderSequence<VectorsType,CoeffsType,Side> > { typedef typename internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::EssentialVectorType EssentialVectorType; public: enum { RowsAtCompileTime = internal::traits<HouseholderSequence>::RowsAtCompileTime, ColsAtCompileTime = internal::traits<HouseholderSequence>::ColsAtCompileTime, MaxRowsAtCompileTime = internal::traits<HouseholderSequence>::MaxRowsAtCompileTime, MaxColsAtCompileTime = internal::traits<HouseholderSequence>::MaxColsAtCompileTime }; typedef typename internal::traits<HouseholderSequence>::Scalar Scalar; typedef HouseholderSequence< typename internal::conditional<NumTraits<Scalar>::IsComplex, typename internal::remove_all<typename VectorsType::ConjugateReturnType>::type, VectorsType>::type, typename internal::conditional<NumTraits<Scalar>::IsComplex, typename internal::remove_all<typename CoeffsType::ConjugateReturnType>::type, CoeffsType>::type, Side > ConjugateReturnType; /** \brief Constructor. * \param[in] v %Matrix containing the essential parts of the Householder vectors * \param[in] h Vector containing the Householder coefficients * * Constructs the Householder sequence with coefficients given by \p h and vectors given by \p v. The * i-th Householder coefficient \f$ h_i \f$ is given by \p h(i) and the essential part of the i-th * Householder vector \f$ v_i \f$ is given by \p v(k,i) with \p k > \p i (the subdiagonal part of the * i-th column). If \p v has fewer columns than rows, then the Householder sequence contains as many * Householder reflections as there are columns. 
* * \note The %HouseholderSequence object stores \p v and \p h by reference. * * Example: \include HouseholderSequence_HouseholderSequence.cpp * Output: \verbinclude HouseholderSequence_HouseholderSequence.out * * \sa setLength(), setShift() */ HouseholderSequence(const VectorsType& v, const CoeffsType& h) : m_vectors(v), m_coeffs(h), m_trans(false), m_length(v.diagonalSize()), m_shift(0) { } /** \brief Copy constructor. */ HouseholderSequence(const HouseholderSequence& other) : m_vectors(other.m_vectors), m_coeffs(other.m_coeffs), m_trans(other.m_trans), m_length(other.m_length), m_shift(other.m_shift) { } /** \brief Number of rows of transformation viewed as a matrix. * \returns Number of rows * \details This equals the dimension of the space that the transformation acts on. */ Index rows() const { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); } /** \brief Number of columns of transformation viewed as a matrix. * \returns Number of columns * \details This equals the dimension of the space that the transformation acts on. */ Index cols() const { return rows(); } /** \brief Essential part of a Householder vector. * \param[in] k Index of Householder reflection * \returns Vector containing non-trivial entries of k-th Householder vector * * This function returns the essential part of the Householder vector \f$ v_i \f$. This is a vector of * length \f$ n-i \f$ containing the last \f$ n-i \f$ entries of the vector * \f[ * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ]. * \f] * The index \f$ i \f$ equals \p k + shift(), corresponding to the k-th column of the matrix \p v * passed to the constructor. * * \sa setShift(), shift() */ const EssentialVectorType essentialVector(Index k) const { eigen_assert(k >= 0 && k < m_length); return internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::essentialVector(*this, k); } /** \brief %Transpose of the Householder sequence. */ HouseholderSequence transpose() const { return HouseholderSequence(*this).setTrans(!m_trans); } /** \brief Complex conjugate of the Householder sequence. */ ConjugateReturnType conjugate() const { return ConjugateReturnType(m_vectors.conjugate(), m_coeffs.conjugate()) .setTrans(m_trans) .setLength(m_length) .setShift(m_shift); } /** \brief Adjoint (conjugate transpose) of the Householder sequence. */ ConjugateReturnType adjoint() const { return conjugate().setTrans(!m_trans); } /** \brief Inverse of the Householder sequence (equals the adjoint). 
*/ ConjugateReturnType inverse() const { return adjoint(); } /** \internal */ template<typename DestType> inline void evalTo(DestType& dst) const { Matrix<Scalar, DestType::RowsAtCompileTime, 1, AutoAlign|ColMajor, DestType::MaxRowsAtCompileTime, 1> workspace(rows()); evalTo(dst, workspace); } /** \internal */ template<typename Dest, typename Workspace> void evalTo(Dest& dst, Workspace& workspace) const { workspace.resize(rows()); Index vecs = m_length; if(internal::is_same_dense(dst,m_vectors)) { // in-place dst.diagonal().setOnes(); dst.template triangularView<StrictlyUpper>().setZero(); for(Index k = vecs-1; k >= 0; --k) { Index cornerSize = rows() - k - m_shift; if(m_trans) dst.bottomRightCorner(cornerSize, cornerSize) .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), workspace.data()); else dst.bottomRightCorner(cornerSize, cornerSize) .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), workspace.data()); // clear the off diagonal vector dst.col(k).tail(rows()-k-1).setZero(); } // clear the remaining columns if needed for(Index k = 0; k<cols()-vecs ; ++k) dst.col(k).tail(rows()-k-1).setZero(); } else { dst.setIdentity(rows(), rows()); for(Index k = vecs-1; k >= 0; --k) { Index cornerSize = rows() - k - m_shift; if(m_trans) dst.bottomRightCorner(cornerSize, cornerSize) .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0)); else dst.bottomRightCorner(cornerSize, cornerSize) .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0)); } } } /** \internal */ template<typename Dest> inline void applyThisOnTheRight(Dest& dst) const { Matrix<Scalar,1,Dest::RowsAtCompileTime,RowMajor,1,Dest::MaxRowsAtCompileTime> workspace(dst.rows()); applyThisOnTheRight(dst, workspace); } /** \internal */ template<typename Dest, typename Workspace> inline void applyThisOnTheRight(Dest& dst, Workspace& workspace) const { workspace.resize(dst.rows()); for(Index k = 0; k < m_length; ++k) { Index actual_k = m_trans ? m_length-k-1 : k; dst.rightCols(rows()-m_shift-actual_k) .applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data()); } } /** \internal */ template<typename Dest> inline void applyThisOnTheLeft(Dest& dst) const { Matrix<Scalar,1,Dest::ColsAtCompileTime,RowMajor,1,Dest::MaxColsAtCompileTime> workspace; applyThisOnTheLeft(dst, workspace); } /** \internal */ template<typename Dest, typename Workspace> inline void applyThisOnTheLeft(Dest& dst, Workspace& workspace) const { const Index BlockSize = 48; // if the entries are large enough, then apply the reflectors by block if(m_length>=BlockSize && dst.cols()>1) { for(Index i = 0; i < m_length; i+=BlockSize) { Index end = m_trans ? (std::min)(m_length,i+BlockSize) : m_length-i; Index k = m_trans ? i : (std::max)(Index(0),end-BlockSize); Index bs = end-k; Index start = k + m_shift; typedef Block<typename internal::remove_all<VectorsType>::type,Dynamic,Dynamic> SubVectorsType; SubVectorsType sub_vecs1(m_vectors.const_cast_derived(), Side==OnTheRight ? k : start, Side==OnTheRight ? start : k, Side==OnTheRight ? bs : m_vectors.rows()-start, Side==OnTheRight ? 
m_vectors.cols()-start : bs); typename internal::conditional<Side==OnTheRight, Transpose<SubVectorsType>, SubVectorsType&>::type sub_vecs(sub_vecs1); Block<Dest,Dynamic,Dynamic> sub_dst(dst,dst.rows()-rows()+m_shift+k,0, rows()-m_shift-k,dst.cols()); apply_block_householder_on_the_left(sub_dst, sub_vecs, m_coeffs.segment(k, bs), !m_trans); } } else { workspace.resize(dst.cols()); for(Index k = 0; k < m_length; ++k) { Index actual_k = m_trans ? k : m_length-k-1; dst.bottomRows(rows()-m_shift-actual_k) .applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data()); } } } /** \brief Computes the product of a Householder sequence with a matrix. * \param[in] other %Matrix being multiplied. * \returns Expression object representing the product. * * This function computes \f$ HM \f$ where \f$ H \f$ is the Householder sequence represented by \p *this * and \f$ M \f$ is the matrix \p other. */ template<typename OtherDerived> typename internal::matrix_type_times_scalar_type<Scalar, OtherDerived>::Type operator*(const MatrixBase<OtherDerived>& other) const { typename internal::matrix_type_times_scalar_type<Scalar, OtherDerived>::Type res(other.template cast<typename internal::matrix_type_times_scalar_type<Scalar,OtherDerived>::ResultScalar>()); applyThisOnTheLeft(res); return res; } template<typename _VectorsType, typename _CoeffsType, int _Side> friend struct internal::hseq_side_dependent_impl; /** \brief Sets the length of the Householder sequence. * \param [in] length New value for the length. * * By default, the length \f$ n \f$ of the Householder sequence \f$ H = H_0 H_1 \ldots H_{n-1} \f$ is set * to the number of columns of the matrix \p v passed to the constructor, or the number of rows if that * is smaller. After this function is called, the length equals \p length. * * \sa length() */ HouseholderSequence& setLength(Index length) { m_length = length; return *this; } /** \brief Sets the shift of the Householder sequence. * \param [in] shift New value for the shift. * * By default, a %HouseholderSequence object represents \f$ H = H_0 H_1 \ldots H_{n-1} \f$ and the i-th * column of the matrix \p v passed to the constructor corresponds to the i-th Householder * reflection. After this function is called, the object represents \f$ H = H_{\mathrm{shift}} * H_{\mathrm{shift}+1} \ldots H_{n-1} \f$ and the i-th column of \p v corresponds to the (shift+i)-th * Householder reflection. * * \sa shift() */ HouseholderSequence& setShift(Index shift) { m_shift = shift; return *this; } Index length() const { return m_length; } /**< \brief Returns the length of the Householder sequence. */ Index shift() const { return m_shift; } /**< \brief Returns the shift of the Householder sequence. */ /* Necessary for .adjoint() and .conjugate() */ template <typename VectorsType2, typename CoeffsType2, int Side2> friend class HouseholderSequence; protected: /** \brief Sets the transpose flag. * \param [in] trans New value of the transpose flag. * * By default, the transpose flag is not set. If the transpose flag is set, then this object represents * \f$ H^T = H_{n-1}^T \ldots H_1^T H_0^T \f$ instead of \f$ H = H_0 H_1 \ldots H_{n-1} \f$. * * \sa trans() */ HouseholderSequence& setTrans(bool trans) { m_trans = trans; return *this; } bool trans() const { return m_trans; } /**< \brief Returns the transpose flag. 
*/ typename VectorsType::Nested m_vectors; typename CoeffsType::Nested m_coeffs; bool m_trans; Index m_length; Index m_shift; }; /** \brief Computes the product of a matrix with a Householder sequence. * \param[in] other %Matrix being multiplied. * \param[in] h %HouseholderSequence being multiplied. * \returns Expression object representing the product. * * This function computes \f$ MH \f$ where \f$ M \f$ is the matrix \p other and \f$ H \f$ is the * Householder sequence represented by \p h. */ template<typename OtherDerived, typename VectorsType, typename CoeffsType, int Side> typename internal::matrix_type_times_scalar_type<typename VectorsType::Scalar,OtherDerived>::Type operator*(const MatrixBase<OtherDerived>& other, const HouseholderSequence<VectorsType,CoeffsType,Side>& h) { typename internal::matrix_type_times_scalar_type<typename VectorsType::Scalar,OtherDerived>::Type res(other.template cast<typename internal::matrix_type_times_scalar_type<typename VectorsType::Scalar,OtherDerived>::ResultScalar>()); h.applyThisOnTheRight(res); return res; } /** \ingroup Householder_Module \householder_module * \brief Convenience function for constructing a Householder sequence. * \returns A HouseholderSequence constructed from the specified arguments. */ template<typename VectorsType, typename CoeffsType> HouseholderSequence<VectorsType,CoeffsType> householderSequence(const VectorsType& v, const CoeffsType& h) { return HouseholderSequence<VectorsType,CoeffsType,OnTheLeft>(v, h); } /** \ingroup Householder_Module \householder_module * \brief Convenience function for constructing a Householder sequence. * \returns A HouseholderSequence constructed from the specified arguments. * \details This function differs from householderSequence() in that the template argument \p OnTheSide of * the constructed HouseholderSequence is set to OnTheRight, instead of the default OnTheLeft. */ template<typename VectorsType, typename CoeffsType> HouseholderSequence<VectorsType,CoeffsType,OnTheRight> rightHouseholderSequence(const VectorsType& v, const CoeffsType& h) { return HouseholderSequence<VectorsType,CoeffsType,OnTheRight>(v, h); } } // end namespace Eigen #endif // EIGEN_HOUSEHOLDER_SEQUENCE_H
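A sketch of wrapping packed Householder data in a sequence via householderSequence() and evaluating it to a dense Q, using a HouseholderQR factorization as the source of vectors and coefficients (real-valued here, so no conjugation is needed):

#include <iostream>
#include <Eigen/Dense>

int main()
{
  using namespace Eigen;

  MatrixXd A = MatrixXd::Random(5, 3);
  HouseholderQR<MatrixXd> qr(A);

  // Wrap the packed Householder vectors and coefficients into a sequence.
  HouseholderSequence<MatrixXd, VectorXd> H =
      householderSequence(qr.matrixQR(), qr.hCoeffs());

  MatrixXd Q = H;  // conversion evaluates the product of reflections
  MatrixXd R = qr.matrixQR().triangularView<Upper>();

  std::cout << (Q * R - A).norm() << "\n";  // ~0: A = Q R
  std::cout << (Q * Q.transpose() - MatrixXd::Identity(5, 5)).norm() << "\n";
  return 0;
}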
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/QR/ColPivHouseholderQR.h
.h
24,881
654
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COLPIVOTINGHOUSEHOLDERQR_H #define EIGEN_COLPIVOTINGHOUSEHOLDERQR_H namespace Eigen { namespace internal { template<typename _MatrixType> struct traits<ColPivHouseholderQR<_MatrixType> > : traits<_MatrixType> { enum { Flags = 0 }; }; } // end namespace internal /** \ingroup QR_Module * * \class ColPivHouseholderQR * * \brief Householder rank-revealing QR decomposition of a matrix with column-pivoting * * \tparam _MatrixType the type of the matrix of which we are computing the QR decomposition * * This class performs a rank-revealing QR decomposition of a matrix \b A into matrices \b P, \b Q and \b R * such that * \f[ * \mathbf{A} \, \mathbf{P} = \mathbf{Q} \, \mathbf{R} * \f] * by using Householder transformations. Here, \b P is a permutation matrix, \b Q a unitary matrix and \b R an * upper triangular matrix. * * This decomposition performs column pivoting in order to be rank-revealing and improve * numerical stability. It is slower than HouseholderQR, and faster than FullPivHouseholderQR. * * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. * * \sa MatrixBase::colPivHouseholderQr() */ template<typename _MatrixType> class ColPivHouseholderQR { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; // FIXME should be int typedef typename MatrixType::StorageIndex StorageIndex; typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType; typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType; typedef typename internal::plain_row_type<MatrixType, Index>::type IntRowVectorType; typedef typename internal::plain_row_type<MatrixType>::type RowVectorType; typedef typename internal::plain_row_type<MatrixType, RealScalar>::type RealRowVectorType; typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename HCoeffsType::ConjugateReturnType>::type> HouseholderSequenceType; typedef typename MatrixType::PlainObject PlainObject; private: typedef typename PermutationType::StorageIndex PermIndexType; public: /** * \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via ColPivHouseholderQR::compute(const MatrixType&). */ ColPivHouseholderQR() : m_qr(), m_hCoeffs(), m_colsPermutation(), m_colsTranspositions(), m_temp(), m_colNormsUpdated(), m_colNormsDirect(), m_isInitialized(false), m_usePrescribedThreshold(false) {} /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. 
* \sa ColPivHouseholderQR() */ ColPivHouseholderQR(Index rows, Index cols) : m_qr(rows, cols), m_hCoeffs((std::min)(rows,cols)), m_colsPermutation(PermIndexType(cols)), m_colsTranspositions(cols), m_temp(cols), m_colNormsUpdated(cols), m_colNormsDirect(cols), m_isInitialized(false), m_usePrescribedThreshold(false) {} /** \brief Constructs a QR factorization from a given matrix * * This constructor computes the QR factorization of the matrix \a matrix by calling * the method compute(). It is a short cut for: * * \code * ColPivHouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols()); * qr.compute(matrix); * \endcode * * \sa compute() */ template<typename InputType> explicit ColPivHouseholderQR(const EigenBase<InputType>& matrix) : m_qr(matrix.rows(), matrix.cols()), m_hCoeffs((std::min)(matrix.rows(),matrix.cols())), m_colsPermutation(PermIndexType(matrix.cols())), m_colsTranspositions(matrix.cols()), m_temp(matrix.cols()), m_colNormsUpdated(matrix.cols()), m_colNormsDirect(matrix.cols()), m_isInitialized(false), m_usePrescribedThreshold(false) { compute(matrix.derived()); } /** \brief Constructs a QR factorization from a given matrix * * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is a Eigen::Ref. * * \sa ColPivHouseholderQR(const EigenBase&) */ template<typename InputType> explicit ColPivHouseholderQR(EigenBase<InputType>& matrix) : m_qr(matrix.derived()), m_hCoeffs((std::min)(matrix.rows(),matrix.cols())), m_colsPermutation(PermIndexType(matrix.cols())), m_colsTranspositions(matrix.cols()), m_temp(matrix.cols()), m_colNormsUpdated(matrix.cols()), m_colNormsDirect(matrix.cols()), m_isInitialized(false), m_usePrescribedThreshold(false) { computeInPlace(); } /** This method finds a solution x to the equation Ax=b, where A is the matrix of which * *this is the QR decomposition, if any exists. * * \param b the right-hand-side of the equation to solve. * * \returns a solution. * * \note_about_checking_solutions * * \note_about_arbitrary_choice_of_solution * * Example: \include ColPivHouseholderQR_solve.cpp * Output: \verbinclude ColPivHouseholderQR_solve.out */ template<typename Rhs> inline const Solve<ColPivHouseholderQR, Rhs> solve(const MatrixBase<Rhs>& b) const { eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return Solve<ColPivHouseholderQR, Rhs>(*this, b.derived()); } HouseholderSequenceType householderQ() const; HouseholderSequenceType matrixQ() const { return householderQ(); } /** \returns a reference to the matrix where the Householder QR decomposition is stored */ const MatrixType& matrixQR() const { eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return m_qr; } /** \returns a reference to the matrix where the result Householder QR is stored * \warning The strict lower part of this matrix contains internal values. * Only the upper triangular part should be referenced. 
To get it, use * \code matrixR().template triangularView<Upper>() \endcode * For rank-deficient matrices, use * \code * matrixR().topLeftCorner(rank(), rank()).template triangularView<Upper>() * \endcode */ const MatrixType& matrixR() const { eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return m_qr; } template<typename InputType> ColPivHouseholderQR& compute(const EigenBase<InputType>& matrix); /** \returns a const reference to the column permutation matrix */ const PermutationType& colsPermutation() const { eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return m_colsPermutation; } /** \returns the absolute value of the determinant of the matrix of which * *this is the QR decomposition. It has only linear complexity * (that is, O(n) where n is the dimension of the square matrix) * as the QR decomposition has already been computed. * * \note This is only for square matrices. * * \warning a determinant can be very big or small, so for matrices * of large enough dimension, there is a risk of overflow/underflow. * One way to work around that is to use logAbsDeterminant() instead. * * \sa logAbsDeterminant(), MatrixBase::determinant() */ typename MatrixType::RealScalar absDeterminant() const; /** \returns the natural log of the absolute value of the determinant of the matrix of which * *this is the QR decomposition. It has only linear complexity * (that is, O(n) where n is the dimension of the square matrix) * as the QR decomposition has already been computed. * * \note This is only for square matrices. * * \note This method is useful to work around the risk of overflow/underflow that's inherent * to determinant computation. * * \sa absDeterminant(), MatrixBase::determinant() */ typename MatrixType::RealScalar logAbsDeterminant() const; /** \returns the rank of the matrix of which *this is the QR decomposition. * * \note This method has to determine which pivots should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ inline Index rank() const { using std::abs; eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold(); Index result = 0; for(Index i = 0; i < m_nonzero_pivots; ++i) result += (abs(m_qr.coeff(i,i)) > premultiplied_threshold); return result; } /** \returns the dimension of the kernel of the matrix of which *this is the QR decomposition. * * \note This method has to determine which pivots should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ inline Index dimensionOfKernel() const { eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return cols() - rank(); } /** \returns true if the matrix of which *this is the QR decomposition represents an injective * linear map, i.e. has trivial kernel; false otherwise. * * \note This method has to determine which pivots should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ inline bool isInjective() const { eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return rank() == cols(); } /** \returns true if the matrix of which *this is the QR decomposition represents a surjective * linear map; false otherwise. * * \note This method has to determine which pivots should be considered nonzero. 
* For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ inline bool isSurjective() const { eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return rank() == rows(); } /** \returns true if the matrix of which *this is the QR decomposition is invertible. * * \note This method has to determine which pivots should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ inline bool isInvertible() const { eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return isInjective() && isSurjective(); } /** \returns the inverse of the matrix of which *this is the QR decomposition. * * \note If this matrix is not invertible, the returned matrix has undefined coefficients. * Use isInvertible() to first determine whether this matrix is invertible. */ inline const Inverse<ColPivHouseholderQR> inverse() const { eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return Inverse<ColPivHouseholderQR>(*this); } inline Index rows() const { return m_qr.rows(); } inline Index cols() const { return m_qr.cols(); } /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q. * * For advanced uses only. */ const HCoeffsType& hCoeffs() const { return m_hCoeffs; } /** Allows to prescribe a threshold to be used by certain methods, such as rank(), * who need to determine when pivots are to be considered nonzero. This is not used for the * QR decomposition itself. * * When it needs to get the threshold value, Eigen calls threshold(). By default, this * uses a formula to automatically determine a reasonable threshold. * Once you have called the present method setThreshold(const RealScalar&), * your value is used instead. * * \param threshold The new value to use as the threshold. * * A pivot will be considered nonzero if its absolute value is strictly greater than * \f$ \vert pivot \vert \leqslant threshold \times \vert maxpivot \vert \f$ * where maxpivot is the biggest pivot. * * If you want to come back to the default behavior, call setThreshold(Default_t) */ ColPivHouseholderQR& setThreshold(const RealScalar& threshold) { m_usePrescribedThreshold = true; m_prescribedThreshold = threshold; return *this; } /** Allows to come back to the default behavior, letting Eigen use its default formula for * determining the threshold. * * You should pass the special object Eigen::Default as parameter here. * \code qr.setThreshold(Eigen::Default); \endcode * * See the documentation of setThreshold(const RealScalar&). */ ColPivHouseholderQR& setThreshold(Default_t) { m_usePrescribedThreshold = false; return *this; } /** Returns the threshold that will be used by certain methods such as rank(). * * See the documentation of setThreshold(const RealScalar&). */ RealScalar threshold() const { eigen_assert(m_isInitialized || m_usePrescribedThreshold); return m_usePrescribedThreshold ? m_prescribedThreshold // this formula comes from experimenting (see "LU precision tuning" thread on the list) // and turns out to be identical to Higham's formula used already in LDLt. : NumTraits<Scalar>::epsilon() * RealScalar(m_qr.diagonalSize()); } /** \returns the number of nonzero pivots in the QR decomposition. * Here nonzero is meant in the exact sense, not in a fuzzy sense. * So that notion isn't really intrinsically interesting, but it is * still useful when implementing algorithms. 
* * \sa rank() */ inline Index nonzeroPivots() const { eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return m_nonzero_pivots; } /** \returns the absolute value of the biggest pivot, i.e. the biggest * diagonal coefficient of R. */ RealScalar maxPivot() const { return m_maxpivot; } /** \brief Reports whether the QR factorization was succesful. * * \note This function always returns \c Success. It is provided for compatibility * with other factorization routines. * \returns \c Success */ ComputationInfo info() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return Success; } #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename RhsType, typename DstType> EIGEN_DEVICE_FUNC void _solve_impl(const RhsType &rhs, DstType &dst) const; #endif protected: friend class CompleteOrthogonalDecomposition<MatrixType>; static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); } void computeInPlace(); MatrixType m_qr; HCoeffsType m_hCoeffs; PermutationType m_colsPermutation; IntRowVectorType m_colsTranspositions; RowVectorType m_temp; RealRowVectorType m_colNormsUpdated; RealRowVectorType m_colNormsDirect; bool m_isInitialized, m_usePrescribedThreshold; RealScalar m_prescribedThreshold, m_maxpivot; Index m_nonzero_pivots; Index m_det_pq; }; template<typename MatrixType> typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::absDeterminant() const { using std::abs; eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); return abs(m_qr.diagonal().prod()); } template<typename MatrixType> typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::logAbsDeterminant() const { eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); return m_qr.diagonal().cwiseAbs().array().log().sum(); } /** Performs the QR factorization of the given matrix \a matrix. The result of * the factorization is stored into \c *this, and a reference to \c *this * is returned. * * \sa class ColPivHouseholderQR, ColPivHouseholderQR(const MatrixType&) */ template<typename MatrixType> template<typename InputType> ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const EigenBase<InputType>& matrix) { m_qr = matrix.derived(); computeInPlace(); return *this; } template<typename MatrixType> void ColPivHouseholderQR<MatrixType>::computeInPlace() { check_template_parameters(); // the column permutation is stored as int indices, so just to be sure: eigen_assert(m_qr.cols()<=NumTraits<int>::highest()); using std::abs; Index rows = m_qr.rows(); Index cols = m_qr.cols(); Index size = m_qr.diagonalSize(); m_hCoeffs.resize(size); m_temp.resize(cols); m_colsTranspositions.resize(m_qr.cols()); Index number_of_transpositions = 0; m_colNormsUpdated.resize(cols); m_colNormsDirect.resize(cols); for (Index k = 0; k < cols; ++k) { // colNormsDirect(k) caches the most recent directly computed norm of // column k. 
m_colNormsDirect.coeffRef(k) = m_qr.col(k).norm(); m_colNormsUpdated.coeffRef(k) = m_colNormsDirect.coeffRef(k); } RealScalar threshold_helper = numext::abs2<RealScalar>(m_colNormsUpdated.maxCoeff() * NumTraits<RealScalar>::epsilon()) / RealScalar(rows); RealScalar norm_downdate_threshold = numext::sqrt(NumTraits<RealScalar>::epsilon()); m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case) m_maxpivot = RealScalar(0); for(Index k = 0; k < size; ++k) { // first, we look up in our table m_colNormsUpdated which column has the biggest norm Index biggest_col_index; RealScalar biggest_col_sq_norm = numext::abs2(m_colNormsUpdated.tail(cols-k).maxCoeff(&biggest_col_index)); biggest_col_index += k; // Track the number of meaningful pivots but do not stop the decomposition to make // sure that the initial matrix is properly reproduced. See bug 941. if(m_nonzero_pivots==size && biggest_col_sq_norm < threshold_helper * RealScalar(rows-k)) m_nonzero_pivots = k; // apply the transposition to the columns m_colsTranspositions.coeffRef(k) = biggest_col_index; if(k != biggest_col_index) { m_qr.col(k).swap(m_qr.col(biggest_col_index)); std::swap(m_colNormsUpdated.coeffRef(k), m_colNormsUpdated.coeffRef(biggest_col_index)); std::swap(m_colNormsDirect.coeffRef(k), m_colNormsDirect.coeffRef(biggest_col_index)); ++number_of_transpositions; } // generate the householder vector, store it below the diagonal RealScalar beta; m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta); // apply the householder transformation to the diagonal coefficient m_qr.coeffRef(k,k) = beta; // remember the maximum absolute value of diagonal coefficients if(abs(beta) > m_maxpivot) m_maxpivot = abs(beta); // apply the householder transformation m_qr.bottomRightCorner(rows-k, cols-k-1) .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1)); // update our table of norms of the columns for (Index j = k + 1; j < cols; ++j) { // The following implements the stable norm downgrade step discussed in // http://www.netlib.org/lapack/lawnspdf/lawn176.pdf // and used in LAPACK routines xGEQPF and xGEQP3. // See lines 278-297 in http://www.netlib.org/lapack/explore-html/dc/df4/sgeqpf_8f_source.html if (m_colNormsUpdated.coeffRef(j) != RealScalar(0)) { RealScalar temp = abs(m_qr.coeffRef(k, j)) / m_colNormsUpdated.coeffRef(j); temp = (RealScalar(1) + temp) * (RealScalar(1) - temp); temp = temp < RealScalar(0) ? RealScalar(0) : temp; RealScalar temp2 = temp * numext::abs2<RealScalar>(m_colNormsUpdated.coeffRef(j) / m_colNormsDirect.coeffRef(j)); if (temp2 <= norm_downdate_threshold) { // The updated norm has become too inaccurate so re-compute the column // norm directly. m_colNormsDirect.coeffRef(j) = m_qr.col(j).tail(rows - k - 1).norm(); m_colNormsUpdated.coeffRef(j) = m_colNormsDirect.coeffRef(j); } else { m_colNormsUpdated.coeffRef(j) *= numext::sqrt(temp); } } } } m_colsPermutation.setIdentity(PermIndexType(cols)); for(PermIndexType k = 0; k < size/*m_nonzero_pivots*/; ++k) m_colsPermutation.applyTranspositionOnTheRight(k, PermIndexType(m_colsTranspositions.coeff(k))); m_det_pq = (number_of_transpositions%2) ? 
-1 : 1; m_isInitialized = true; } #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename _MatrixType> template<typename RhsType, typename DstType> void ColPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const { eigen_assert(rhs.rows() == rows()); const Index nonzero_pivots = nonzeroPivots(); if(nonzero_pivots == 0) { dst.setZero(); return; } typename RhsType::PlainObject c(rhs); // Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T c.applyOnTheLeft(householderSequence(m_qr, m_hCoeffs) .setLength(nonzero_pivots) .transpose() ); m_qr.topLeftCorner(nonzero_pivots, nonzero_pivots) .template triangularView<Upper>() .solveInPlace(c.topRows(nonzero_pivots)); for(Index i = 0; i < nonzero_pivots; ++i) dst.row(m_colsPermutation.indices().coeff(i)) = c.row(i); for(Index i = nonzero_pivots; i < cols(); ++i) dst.row(m_colsPermutation.indices().coeff(i)).setZero(); } #endif namespace internal { template<typename DstXprType, typename MatrixType> struct Assignment<DstXprType, Inverse<ColPivHouseholderQR<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename ColPivHouseholderQR<MatrixType>::Scalar>, Dense2Dense> { typedef ColPivHouseholderQR<MatrixType> QrType; typedef Inverse<QrType> SrcXprType; static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename QrType::Scalar> &) { dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols())); } }; } // end namespace internal /** \returns the matrix Q as a sequence of householder transformations. * You can extract the meaningful part only by using: * \code qr.householderQ().setLength(qr.nonzeroPivots()) \endcode*/ template<typename MatrixType> typename ColPivHouseholderQR<MatrixType>::HouseholderSequenceType ColPivHouseholderQR<MatrixType> ::householderQ() const { eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate()); } /** \return the column-pivoting Householder QR decomposition of \c *this. * * \sa class ColPivHouseholderQR */ template<typename Derived> const ColPivHouseholderQR<typename MatrixBase<Derived>::PlainObject> MatrixBase<Derived>::colPivHouseholderQr() const { return ColPivHouseholderQR<PlainObject>(eval()); } } // end namespace Eigen #endif // EIGEN_COLPIVOTINGHOUSEHOLDERQR_H
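The following is a minimal usage sketch for the ColPivHouseholderQR class defined above. It is not part of the Eigen sources; the matrix sizes and variable names are illustrative assumptions. It factors a rectangular matrix, solves a least-squares system, queries the rank, and reconstructs A from Q, R and the column permutation.

#include <Eigen/Dense>
#include <iostream>

int main() {
  // Illustrative 6x4 overdetermined system.
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);
  Eigen::VectorXd b = Eigen::VectorXd::Random(6);

  Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);

  // Least-squares solution of A x = b.
  Eigen::VectorXd x = qr.solve(b);

  // Rank is determined from the pivots and the (possibly user-set) threshold.
  std::cout << "rank(A) = " << qr.rank() << "\n";

  // Reconstruct A from the factors: A * P = Q * R, hence A = Q * R * P^{-1}.
  Eigen::MatrixXd Q = qr.householderQ();
  Eigen::MatrixXd R = qr.matrixQR().triangularView<Eigen::Upper>();
  Eigen::MatrixXd A_rec = Q * R * qr.colsPermutation().inverse();
  std::cout << "reconstruction error = " << (A - A_rec).norm() << "\n";
}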
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/QR/FullPivHouseholderQR.h
.h
25,478
677
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H #define EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H namespace Eigen { namespace internal { template<typename _MatrixType> struct traits<FullPivHouseholderQR<_MatrixType> > : traits<_MatrixType> { enum { Flags = 0 }; }; template<typename MatrixType> struct FullPivHouseholderQRMatrixQReturnType; template<typename MatrixType> struct traits<FullPivHouseholderQRMatrixQReturnType<MatrixType> > { typedef typename MatrixType::PlainObject ReturnType; }; } // end namespace internal /** \ingroup QR_Module * * \class FullPivHouseholderQR * * \brief Householder rank-revealing QR decomposition of a matrix with full pivoting * * \tparam _MatrixType the type of the matrix of which we are computing the QR decomposition * * This class performs a rank-revealing QR decomposition of a matrix \b A into matrices \b P, \b P', \b Q and \b R * such that * \f[ * \mathbf{P} \, \mathbf{A} \, \mathbf{P}' = \mathbf{Q} \, \mathbf{R} * \f] * by using Householder transformations. Here, \b P and \b P' are permutation matrices, \b Q a unitary matrix * and \b R an upper triangular matrix. * * This decomposition performs a very prudent full pivoting in order to be rank-revealing and achieve optimal * numerical stability. The trade-off is that it is slower than HouseholderQR and ColPivHouseholderQR. * * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. * * \sa MatrixBase::fullPivHouseholderQr() */ template<typename _MatrixType> class FullPivHouseholderQR { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; // FIXME should be int typedef typename MatrixType::StorageIndex StorageIndex; typedef internal::FullPivHouseholderQRMatrixQReturnType<MatrixType> MatrixQReturnType; typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType; typedef Matrix<StorageIndex, 1, EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime,RowsAtCompileTime), RowMajor, 1, EIGEN_SIZE_MIN_PREFER_FIXED(MaxColsAtCompileTime,MaxRowsAtCompileTime)> IntDiagSizeVectorType; typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType; typedef typename internal::plain_row_type<MatrixType>::type RowVectorType; typedef typename internal::plain_col_type<MatrixType>::type ColVectorType; typedef typename MatrixType::PlainObject PlainObject; /** \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via FullPivHouseholderQR::compute(const MatrixType&). 
*/ FullPivHouseholderQR() : m_qr(), m_hCoeffs(), m_rows_transpositions(), m_cols_transpositions(), m_cols_permutation(), m_temp(), m_isInitialized(false), m_usePrescribedThreshold(false) {} /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. * \sa FullPivHouseholderQR() */ FullPivHouseholderQR(Index rows, Index cols) : m_qr(rows, cols), m_hCoeffs((std::min)(rows,cols)), m_rows_transpositions((std::min)(rows,cols)), m_cols_transpositions((std::min)(rows,cols)), m_cols_permutation(cols), m_temp(cols), m_isInitialized(false), m_usePrescribedThreshold(false) {} /** \brief Constructs a QR factorization from a given matrix * * This constructor computes the QR factorization of the matrix \a matrix by calling * the method compute(). It is a short cut for: * * \code * FullPivHouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols()); * qr.compute(matrix); * \endcode * * \sa compute() */ template<typename InputType> explicit FullPivHouseholderQR(const EigenBase<InputType>& matrix) : m_qr(matrix.rows(), matrix.cols()), m_hCoeffs((std::min)(matrix.rows(), matrix.cols())), m_rows_transpositions((std::min)(matrix.rows(), matrix.cols())), m_cols_transpositions((std::min)(matrix.rows(), matrix.cols())), m_cols_permutation(matrix.cols()), m_temp(matrix.cols()), m_isInitialized(false), m_usePrescribedThreshold(false) { compute(matrix.derived()); } /** \brief Constructs a QR factorization from a given matrix * * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is a Eigen::Ref. * * \sa FullPivHouseholderQR(const EigenBase&) */ template<typename InputType> explicit FullPivHouseholderQR(EigenBase<InputType>& matrix) : m_qr(matrix.derived()), m_hCoeffs((std::min)(matrix.rows(), matrix.cols())), m_rows_transpositions((std::min)(matrix.rows(), matrix.cols())), m_cols_transpositions((std::min)(matrix.rows(), matrix.cols())), m_cols_permutation(matrix.cols()), m_temp(matrix.cols()), m_isInitialized(false), m_usePrescribedThreshold(false) { computeInPlace(); } /** This method finds a solution x to the equation Ax=b, where A is the matrix of which * \c *this is the QR decomposition. * * \param b the right-hand-side of the equation to solve. * * \returns the exact or least-square solution if the rank is greater or equal to the number of columns of A, * and an arbitrary solution otherwise. 
* * \note_about_checking_solutions * * \note_about_arbitrary_choice_of_solution * * Example: \include FullPivHouseholderQR_solve.cpp * Output: \verbinclude FullPivHouseholderQR_solve.out */ template<typename Rhs> inline const Solve<FullPivHouseholderQR, Rhs> solve(const MatrixBase<Rhs>& b) const { eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return Solve<FullPivHouseholderQR, Rhs>(*this, b.derived()); } /** \returns Expression object representing the matrix Q */ MatrixQReturnType matrixQ(void) const; /** \returns a reference to the matrix where the Householder QR decomposition is stored */ const MatrixType& matrixQR() const { eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return m_qr; } template<typename InputType> FullPivHouseholderQR& compute(const EigenBase<InputType>& matrix); /** \returns a const reference to the column permutation matrix */ const PermutationType& colsPermutation() const { eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return m_cols_permutation; } /** \returns a const reference to the vector of indices representing the rows transpositions */ const IntDiagSizeVectorType& rowsTranspositions() const { eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return m_rows_transpositions; } /** \returns the absolute value of the determinant of the matrix of which * *this is the QR decomposition. It has only linear complexity * (that is, O(n) where n is the dimension of the square matrix) * as the QR decomposition has already been computed. * * \note This is only for square matrices. * * \warning a determinant can be very big or small, so for matrices * of large enough dimension, there is a risk of overflow/underflow. * One way to work around that is to use logAbsDeterminant() instead. * * \sa logAbsDeterminant(), MatrixBase::determinant() */ typename MatrixType::RealScalar absDeterminant() const; /** \returns the natural log of the absolute value of the determinant of the matrix of which * *this is the QR decomposition. It has only linear complexity * (that is, O(n) where n is the dimension of the square matrix) * as the QR decomposition has already been computed. * * \note This is only for square matrices. * * \note This method is useful to work around the risk of overflow/underflow that's inherent * to determinant computation. * * \sa absDeterminant(), MatrixBase::determinant() */ typename MatrixType::RealScalar logAbsDeterminant() const; /** \returns the rank of the matrix of which *this is the QR decomposition. * * \note This method has to determine which pivots should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ inline Index rank() const { using std::abs; eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold(); Index result = 0; for(Index i = 0; i < m_nonzero_pivots; ++i) result += (abs(m_qr.coeff(i,i)) > premultiplied_threshold); return result; } /** \returns the dimension of the kernel of the matrix of which *this is the QR decomposition. * * \note This method has to determine which pivots should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). 
*/ inline Index dimensionOfKernel() const { eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return cols() - rank(); } /** \returns true if the matrix of which *this is the QR decomposition represents an injective * linear map, i.e. has trivial kernel; false otherwise. * * \note This method has to determine which pivots should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ inline bool isInjective() const { eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return rank() == cols(); } /** \returns true if the matrix of which *this is the QR decomposition represents a surjective * linear map; false otherwise. * * \note This method has to determine which pivots should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ inline bool isSurjective() const { eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return rank() == rows(); } /** \returns true if the matrix of which *this is the QR decomposition is invertible. * * \note This method has to determine which pivots should be considered nonzero. * For that, it uses the threshold value that you can control by calling * setThreshold(const RealScalar&). */ inline bool isInvertible() const { eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return isInjective() && isSurjective(); } /** \returns the inverse of the matrix of which *this is the QR decomposition. * * \note If this matrix is not invertible, the returned matrix has undefined coefficients. * Use isInvertible() to first determine whether this matrix is invertible. */ inline const Inverse<FullPivHouseholderQR> inverse() const { eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return Inverse<FullPivHouseholderQR>(*this); } inline Index rows() const { return m_qr.rows(); } inline Index cols() const { return m_qr.cols(); } /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q. * * For advanced uses only. */ const HCoeffsType& hCoeffs() const { return m_hCoeffs; } /** Allows to prescribe a threshold to be used by certain methods, such as rank(), * who need to determine when pivots are to be considered nonzero. This is not used for the * QR decomposition itself. * * When it needs to get the threshold value, Eigen calls threshold(). By default, this * uses a formula to automatically determine a reasonable threshold. * Once you have called the present method setThreshold(const RealScalar&), * your value is used instead. * * \param threshold The new value to use as the threshold. * * A pivot will be considered nonzero if its absolute value is strictly greater than * \f$ \vert pivot \vert \leqslant threshold \times \vert maxpivot \vert \f$ * where maxpivot is the biggest pivot. * * If you want to come back to the default behavior, call setThreshold(Default_t) */ FullPivHouseholderQR& setThreshold(const RealScalar& threshold) { m_usePrescribedThreshold = true; m_prescribedThreshold = threshold; return *this; } /** Allows to come back to the default behavior, letting Eigen use its default formula for * determining the threshold. * * You should pass the special object Eigen::Default as parameter here. * \code qr.setThreshold(Eigen::Default); \endcode * * See the documentation of setThreshold(const RealScalar&). 
*/ FullPivHouseholderQR& setThreshold(Default_t) { m_usePrescribedThreshold = false; return *this; } /** Returns the threshold that will be used by certain methods such as rank(). * * See the documentation of setThreshold(const RealScalar&). */ RealScalar threshold() const { eigen_assert(m_isInitialized || m_usePrescribedThreshold); return m_usePrescribedThreshold ? m_prescribedThreshold // this formula comes from experimenting (see "LU precision tuning" thread on the list) // and turns out to be identical to Higham's formula used already in LDLt. : NumTraits<Scalar>::epsilon() * RealScalar(m_qr.diagonalSize()); } /** \returns the number of nonzero pivots in the QR decomposition. * Here nonzero is meant in the exact sense, not in a fuzzy sense. * So that notion isn't really intrinsically interesting, but it is * still useful when implementing algorithms. * * \sa rank() */ inline Index nonzeroPivots() const { eigen_assert(m_isInitialized && "LU is not initialized."); return m_nonzero_pivots; } /** \returns the absolute value of the biggest pivot, i.e. the biggest * diagonal coefficient of U. */ RealScalar maxPivot() const { return m_maxpivot; } #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename RhsType, typename DstType> EIGEN_DEVICE_FUNC void _solve_impl(const RhsType &rhs, DstType &dst) const; #endif protected: static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); } void computeInPlace(); MatrixType m_qr; HCoeffsType m_hCoeffs; IntDiagSizeVectorType m_rows_transpositions; IntDiagSizeVectorType m_cols_transpositions; PermutationType m_cols_permutation; RowVectorType m_temp; bool m_isInitialized, m_usePrescribedThreshold; RealScalar m_prescribedThreshold, m_maxpivot; Index m_nonzero_pivots; RealScalar m_precision; Index m_det_pq; }; template<typename MatrixType> typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::absDeterminant() const { using std::abs; eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); return abs(m_qr.diagonal().prod()); } template<typename MatrixType> typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::logAbsDeterminant() const { eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); return m_qr.diagonal().cwiseAbs().array().log().sum(); } /** Performs the QR factorization of the given matrix \a matrix. The result of * the factorization is stored into \c *this, and a reference to \c *this * is returned. 
* * \sa class FullPivHouseholderQR, FullPivHouseholderQR(const MatrixType&) */ template<typename MatrixType> template<typename InputType> FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const EigenBase<InputType>& matrix) { m_qr = matrix.derived(); computeInPlace(); return *this; } template<typename MatrixType> void FullPivHouseholderQR<MatrixType>::computeInPlace() { check_template_parameters(); using std::abs; Index rows = m_qr.rows(); Index cols = m_qr.cols(); Index size = (std::min)(rows,cols); m_hCoeffs.resize(size); m_temp.resize(cols); m_precision = NumTraits<Scalar>::epsilon() * RealScalar(size); m_rows_transpositions.resize(size); m_cols_transpositions.resize(size); Index number_of_transpositions = 0; RealScalar biggest(0); m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case) m_maxpivot = RealScalar(0); for (Index k = 0; k < size; ++k) { Index row_of_biggest_in_corner, col_of_biggest_in_corner; typedef internal::scalar_score_coeff_op<Scalar> Scoring; typedef typename Scoring::result_type Score; Score score = m_qr.bottomRightCorner(rows-k, cols-k) .unaryExpr(Scoring()) .maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner); row_of_biggest_in_corner += k; col_of_biggest_in_corner += k; RealScalar biggest_in_corner = internal::abs_knowing_score<Scalar>()(m_qr(row_of_biggest_in_corner, col_of_biggest_in_corner), score); if(k==0) biggest = biggest_in_corner; // if the corner is negligible, then we have less than full rank, and we can finish early if(internal::isMuchSmallerThan(biggest_in_corner, biggest, m_precision)) { m_nonzero_pivots = k; for(Index i = k; i < size; i++) { m_rows_transpositions.coeffRef(i) = i; m_cols_transpositions.coeffRef(i) = i; m_hCoeffs.coeffRef(i) = Scalar(0); } break; } m_rows_transpositions.coeffRef(k) = row_of_biggest_in_corner; m_cols_transpositions.coeffRef(k) = col_of_biggest_in_corner; if(k != row_of_biggest_in_corner) { m_qr.row(k).tail(cols-k).swap(m_qr.row(row_of_biggest_in_corner).tail(cols-k)); ++number_of_transpositions; } if(k != col_of_biggest_in_corner) { m_qr.col(k).swap(m_qr.col(col_of_biggest_in_corner)); ++number_of_transpositions; } RealScalar beta; m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta); m_qr.coeffRef(k,k) = beta; // remember the maximum absolute value of diagonal coefficients if(abs(beta) > m_maxpivot) m_maxpivot = abs(beta); m_qr.bottomRightCorner(rows-k, cols-k-1) .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1)); } m_cols_permutation.setIdentity(cols); for(Index k = 0; k < size; ++k) m_cols_permutation.applyTranspositionOnTheRight(k, m_cols_transpositions.coeff(k)); m_det_pq = (number_of_transpositions%2) ? -1 : 1; m_isInitialized = true; } #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename _MatrixType> template<typename RhsType, typename DstType> void FullPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const { eigen_assert(rhs.rows() == rows()); const Index l_rank = rank(); // FIXME introduce nonzeroPivots() and use it here. and more generally, // make the same improvements in this dec as in FullPivLU. 
if(l_rank==0) { dst.setZero(); return; } typename RhsType::PlainObject c(rhs); Matrix<Scalar,1,RhsType::ColsAtCompileTime> temp(rhs.cols()); for (Index k = 0; k < l_rank; ++k) { Index remainingSize = rows()-k; c.row(k).swap(c.row(m_rows_transpositions.coeff(k))); c.bottomRightCorner(remainingSize, rhs.cols()) .applyHouseholderOnTheLeft(m_qr.col(k).tail(remainingSize-1), m_hCoeffs.coeff(k), &temp.coeffRef(0)); } m_qr.topLeftCorner(l_rank, l_rank) .template triangularView<Upper>() .solveInPlace(c.topRows(l_rank)); for(Index i = 0; i < l_rank; ++i) dst.row(m_cols_permutation.indices().coeff(i)) = c.row(i); for(Index i = l_rank; i < cols(); ++i) dst.row(m_cols_permutation.indices().coeff(i)).setZero(); } #endif namespace internal { template<typename DstXprType, typename MatrixType> struct Assignment<DstXprType, Inverse<FullPivHouseholderQR<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename FullPivHouseholderQR<MatrixType>::Scalar>, Dense2Dense> { typedef FullPivHouseholderQR<MatrixType> QrType; typedef Inverse<QrType> SrcXprType; static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename QrType::Scalar> &) { dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols())); } }; /** \ingroup QR_Module * * \brief Expression type for return value of FullPivHouseholderQR::matrixQ() * * \tparam MatrixType type of underlying dense matrix */ template<typename MatrixType> struct FullPivHouseholderQRMatrixQReturnType : public ReturnByValue<FullPivHouseholderQRMatrixQReturnType<MatrixType> > { public: typedef typename FullPivHouseholderQR<MatrixType>::IntDiagSizeVectorType IntDiagSizeVectorType; typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType; typedef Matrix<typename MatrixType::Scalar, 1, MatrixType::RowsAtCompileTime, RowMajor, 1, MatrixType::MaxRowsAtCompileTime> WorkVectorType; FullPivHouseholderQRMatrixQReturnType(const MatrixType& qr, const HCoeffsType& hCoeffs, const IntDiagSizeVectorType& rowsTranspositions) : m_qr(qr), m_hCoeffs(hCoeffs), m_rowsTranspositions(rowsTranspositions) {} template <typename ResultType> void evalTo(ResultType& result) const { const Index rows = m_qr.rows(); WorkVectorType workspace(rows); evalTo(result, workspace); } template <typename ResultType> void evalTo(ResultType& result, WorkVectorType& workspace) const { using numext::conj; // compute the product H'_0 H'_1 ... H'_n-1, // where H_k is the k-th Householder transformation I - h_k v_k v_k' // and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...] 
const Index rows = m_qr.rows(); const Index cols = m_qr.cols(); const Index size = (std::min)(rows, cols); workspace.resize(rows); result.setIdentity(rows, rows); for (Index k = size-1; k >= 0; k--) { result.block(k, k, rows-k, rows-k) .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), conj(m_hCoeffs.coeff(k)), &workspace.coeffRef(k)); result.row(k).swap(result.row(m_rowsTranspositions.coeff(k))); } } Index rows() const { return m_qr.rows(); } Index cols() const { return m_qr.rows(); } protected: typename MatrixType::Nested m_qr; typename HCoeffsType::Nested m_hCoeffs; typename IntDiagSizeVectorType::Nested m_rowsTranspositions; }; // template<typename MatrixType> // struct evaluator<FullPivHouseholderQRMatrixQReturnType<MatrixType> > // : public evaluator<ReturnByValue<FullPivHouseholderQRMatrixQReturnType<MatrixType> > > // {}; } // end namespace internal template<typename MatrixType> inline typename FullPivHouseholderQR<MatrixType>::MatrixQReturnType FullPivHouseholderQR<MatrixType>::matrixQ() const { eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return MatrixQReturnType(m_qr, m_hCoeffs, m_rows_transpositions); } /** \return the full-pivoting Householder QR decomposition of \c *this. * * \sa class FullPivHouseholderQR */ template<typename Derived> const FullPivHouseholderQR<typename MatrixBase<Derived>::PlainObject> MatrixBase<Derived>::fullPivHouseholderQr() const { return FullPivHouseholderQR<PlainObject>(eval()); } } // end namespace Eigen #endif // EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H
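A short sketch of how the FullPivHouseholderQR class above can be used for rank-revealing queries on a singular matrix. Not part of the Eigen sources; the rank-2 construction and sizes are assumptions made only for illustration.

#include <Eigen/Dense>
#include <iostream>

int main() {
  // Build a deliberately rank-deficient 4x4 matrix (rank 2).
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(4, 2);
  Eigen::MatrixXd A = B * B.transpose();

  Eigen::FullPivHouseholderQR<Eigen::MatrixXd> qr(A);

  std::cout << "rank          = " << qr.rank() << "\n";
  std::cout << "kernel dim    = " << qr.dimensionOfKernel() << "\n";
  std::cout << "isInvertible  = " << std::boolalpha << qr.isInvertible() << "\n";

  // Use a consistent right-hand side (b lies in the column space of A),
  // so the residual of the returned solution should be near zero even
  // though A is singular.
  Eigen::VectorXd x_true = Eigen::VectorXd::Random(4);
  Eigen::VectorXd b = A * x_true;
  Eigen::VectorXd x = qr.solve(b);
  std::cout << "|A x - b| = " << (A * x - b).norm() << "\n";
}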
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h
.h
4,662
98
/* Copyright (c) 2011, Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************** * Content : Eigen bindings to LAPACKe * Householder QR decomposition of a matrix with column pivoting based on * LAPACKE_?geqp3 function. ******************************************************************************** */ #ifndef EIGEN_COLPIVOTINGHOUSEHOLDERQR_LAPACKE_H #define EIGEN_COLPIVOTINGHOUSEHOLDERQR_LAPACKE_H namespace Eigen { /** \internal Specialization for the data types supported by LAPACKe */ #define EIGEN_LAPACKE_QR_COLPIV(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX, EIGCOLROW, LAPACKE_COLROW) \ template<> template<typename InputType> inline \ ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> >& \ ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> >::compute( \ const EigenBase<InputType>& matrix) \ \ { \ using std::abs; \ typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \ typedef MatrixType::RealScalar RealScalar; \ Index rows = matrix.rows();\ Index cols = matrix.cols();\ \ m_qr = matrix;\ Index size = m_qr.diagonalSize();\ m_hCoeffs.resize(size);\ \ m_colsTranspositions.resize(cols);\ /*Index number_of_transpositions = 0;*/ \ \ m_nonzero_pivots = 0; \ m_maxpivot = RealScalar(0);\ m_colsPermutation.resize(cols); \ m_colsPermutation.indices().setZero(); \ \ lapack_int lda = internal::convert_index<lapack_int,Index>(m_qr.outerStride()); \ lapack_int matrix_order = LAPACKE_COLROW; \ LAPACKE_##LAPACKE_PREFIX##geqp3( matrix_order, internal::convert_index<lapack_int,Index>(rows), internal::convert_index<lapack_int,Index>(cols), \ (LAPACKE_TYPE*)m_qr.data(), lda, (lapack_int*)m_colsPermutation.indices().data(), (LAPACKE_TYPE*)m_hCoeffs.data()); \ m_isInitialized = true; \ m_maxpivot=m_qr.diagonal().cwiseAbs().maxCoeff(); \ m_hCoeffs.adjointInPlace(); \ RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold(); \ lapack_int *perm = m_colsPermutation.indices().data(); \ for(Index i=0;i<size;i++) { \ m_nonzero_pivots += (abs(m_qr.coeff(i,i)) > 
premultiplied_threshold);\ } \ for(Index i=0;i<cols;i++) perm[i]--;\ \ /*m_det_pq = (number_of_transpositions%2) ? -1 : 1; // TODO: It's not needed now; fix upon availability in Eigen */ \ \ return *this; \ } EIGEN_LAPACKE_QR_COLPIV(double, double, d, ColMajor, LAPACK_COL_MAJOR) EIGEN_LAPACKE_QR_COLPIV(float, float, s, ColMajor, LAPACK_COL_MAJOR) EIGEN_LAPACKE_QR_COLPIV(dcomplex, lapack_complex_double, z, ColMajor, LAPACK_COL_MAJOR) EIGEN_LAPACKE_QR_COLPIV(scomplex, lapack_complex_float, c, ColMajor, LAPACK_COL_MAJOR) EIGEN_LAPACKE_QR_COLPIV(double, double, d, RowMajor, LAPACK_ROW_MAJOR) EIGEN_LAPACKE_QR_COLPIV(float, float, s, RowMajor, LAPACK_ROW_MAJOR) EIGEN_LAPACKE_QR_COLPIV(dcomplex, lapack_complex_double, z, RowMajor, LAPACK_ROW_MAJOR) EIGEN_LAPACKE_QR_COLPIV(scomplex, lapack_complex_float, c, RowMajor, LAPACK_ROW_MAJOR) } // end namespace Eigen #endif // EIGEN_COLPIVOTINGHOUSEHOLDERQR_LAPACKE_H
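For context, a sketch of the call site that this binding accelerates. If the project defines EIGEN_USE_LAPACKE and links a LAPACKE/BLAS implementation (build flags and library names are toolchain-dependent assumptions), the compute() specialization above dispatches to LAPACKE_?geqp3; the user-facing code is unchanged.

// #define EIGEN_USE_LAPACKE   // enable only when a LAPACKE library is linked
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXf A = Eigen::MatrixXf::Random(300, 200);
  Eigen::ColPivHouseholderQR<Eigen::MatrixXf> qr;
  qr.compute(A);   // routed through LAPACKE_sgeqp3 when EIGEN_USE_LAPACKE is defined
  std::cout << "rank = " << qr.rank() << ", maxPivot = " << qr.maxPivot() << "\n";
}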
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/QR/HouseholderQR.h
.h
14,022
410
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2010 Vincent Lejeune // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_QR_H #define EIGEN_QR_H namespace Eigen { /** \ingroup QR_Module * * * \class HouseholderQR * * \brief Householder QR decomposition of a matrix * * \tparam _MatrixType the type of the matrix of which we are computing the QR decomposition * * This class performs a QR decomposition of a matrix \b A into matrices \b Q and \b R * such that * \f[ * \mathbf{A} = \mathbf{Q} \, \mathbf{R} * \f] * by using Householder transformations. Here, \b Q a unitary matrix and \b R an upper triangular matrix. * The result is stored in a compact way compatible with LAPACK. * * Note that no pivoting is performed. This is \b not a rank-revealing decomposition. * If you want that feature, use FullPivHouseholderQR or ColPivHouseholderQR instead. * * This Householder QR decomposition is faster, but less numerically stable and less feature-full than * FullPivHouseholderQR or ColPivHouseholderQR. * * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. * * \sa MatrixBase::householderQr() */ template<typename _MatrixType> class HouseholderQR { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; // FIXME should be int typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, (MatrixType::Flags&RowMajorBit) ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType; typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType; typedef typename internal::plain_row_type<MatrixType>::type RowVectorType; typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename HCoeffsType::ConjugateReturnType>::type> HouseholderSequenceType; /** * \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via HouseholderQR::compute(const MatrixType&). */ HouseholderQR() : m_qr(), m_hCoeffs(), m_temp(), m_isInitialized(false) {} /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. * \sa HouseholderQR() */ HouseholderQR(Index rows, Index cols) : m_qr(rows, cols), m_hCoeffs((std::min)(rows,cols)), m_temp(cols), m_isInitialized(false) {} /** \brief Constructs a QR factorization from a given matrix * * This constructor computes the QR factorization of the matrix \a matrix by calling * the method compute(). 
It is a short cut for: * * \code * HouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols()); * qr.compute(matrix); * \endcode * * \sa compute() */ template<typename InputType> explicit HouseholderQR(const EigenBase<InputType>& matrix) : m_qr(matrix.rows(), matrix.cols()), m_hCoeffs((std::min)(matrix.rows(),matrix.cols())), m_temp(matrix.cols()), m_isInitialized(false) { compute(matrix.derived()); } /** \brief Constructs a QR factorization from a given matrix * * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when * \c MatrixType is a Eigen::Ref. * * \sa HouseholderQR(const EigenBase&) */ template<typename InputType> explicit HouseholderQR(EigenBase<InputType>& matrix) : m_qr(matrix.derived()), m_hCoeffs((std::min)(matrix.rows(),matrix.cols())), m_temp(matrix.cols()), m_isInitialized(false) { computeInPlace(); } /** This method finds a solution x to the equation Ax=b, where A is the matrix of which * *this is the QR decomposition, if any exists. * * \param b the right-hand-side of the equation to solve. * * \returns a solution. * * \note_about_checking_solutions * * \note_about_arbitrary_choice_of_solution * * Example: \include HouseholderQR_solve.cpp * Output: \verbinclude HouseholderQR_solve.out */ template<typename Rhs> inline const Solve<HouseholderQR, Rhs> solve(const MatrixBase<Rhs>& b) const { eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); return Solve<HouseholderQR, Rhs>(*this, b.derived()); } /** This method returns an expression of the unitary matrix Q as a sequence of Householder transformations. * * The returned expression can directly be used to perform matrix products. It can also be assigned to a dense Matrix object. * Here is an example showing how to recover the full or thin matrix Q, as well as how to perform matrix products using operator*: * * Example: \include HouseholderQR_householderQ.cpp * Output: \verbinclude HouseholderQR_householderQ.out */ HouseholderSequenceType householderQ() const { eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate()); } /** \returns a reference to the matrix where the Householder QR decomposition is stored * in a LAPACK-compatible way. */ const MatrixType& matrixQR() const { eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); return m_qr; } template<typename InputType> HouseholderQR& compute(const EigenBase<InputType>& matrix) { m_qr = matrix.derived(); computeInPlace(); return *this; } /** \returns the absolute value of the determinant of the matrix of which * *this is the QR decomposition. It has only linear complexity * (that is, O(n) where n is the dimension of the square matrix) * as the QR decomposition has already been computed. * * \note This is only for square matrices. * * \warning a determinant can be very big or small, so for matrices * of large enough dimension, there is a risk of overflow/underflow. * One way to work around that is to use logAbsDeterminant() instead. * * \sa logAbsDeterminant(), MatrixBase::determinant() */ typename MatrixType::RealScalar absDeterminant() const; /** \returns the natural log of the absolute value of the determinant of the matrix of which * *this is the QR decomposition. It has only linear complexity * (that is, O(n) where n is the dimension of the square matrix) * as the QR decomposition has already been computed. * * \note This is only for square matrices. 
* * \note This method is useful to work around the risk of overflow/underflow that's inherent * to determinant computation. * * \sa absDeterminant(), MatrixBase::determinant() */ typename MatrixType::RealScalar logAbsDeterminant() const; inline Index rows() const { return m_qr.rows(); } inline Index cols() const { return m_qr.cols(); } /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q. * * For advanced uses only. */ const HCoeffsType& hCoeffs() const { return m_hCoeffs; } #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename RhsType, typename DstType> EIGEN_DEVICE_FUNC void _solve_impl(const RhsType &rhs, DstType &dst) const; #endif protected: static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); } void computeInPlace(); MatrixType m_qr; HCoeffsType m_hCoeffs; RowVectorType m_temp; bool m_isInitialized; }; template<typename MatrixType> typename MatrixType::RealScalar HouseholderQR<MatrixType>::absDeterminant() const { using std::abs; eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); return abs(m_qr.diagonal().prod()); } template<typename MatrixType> typename MatrixType::RealScalar HouseholderQR<MatrixType>::logAbsDeterminant() const { eigen_assert(m_isInitialized && "HouseholderQR is not initialized."); eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!"); return m_qr.diagonal().cwiseAbs().array().log().sum(); } namespace internal { /** \internal */ template<typename MatrixQR, typename HCoeffs> void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename MatrixQR::Scalar* tempData = 0) { typedef typename MatrixQR::Scalar Scalar; typedef typename MatrixQR::RealScalar RealScalar; Index rows = mat.rows(); Index cols = mat.cols(); Index size = (std::min)(rows,cols); eigen_assert(hCoeffs.size() == size); typedef Matrix<Scalar,MatrixQR::ColsAtCompileTime,1> TempType; TempType tempVector; if(tempData==0) { tempVector.resize(cols); tempData = tempVector.data(); } for(Index k = 0; k < size; ++k) { Index remainingRows = rows - k; Index remainingCols = cols - k - 1; RealScalar beta; mat.col(k).tail(remainingRows).makeHouseholderInPlace(hCoeffs.coeffRef(k), beta); mat.coeffRef(k,k) = beta; // apply H to remaining part of m_qr from the left mat.bottomRightCorner(remainingRows, remainingCols) .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), hCoeffs.coeffRef(k), tempData+k+1); } } /** \internal */ template<typename MatrixQR, typename HCoeffs, typename MatrixQRScalar = typename MatrixQR::Scalar, bool InnerStrideIsOne = (MatrixQR::InnerStrideAtCompileTime == 1 && HCoeffs::InnerStrideAtCompileTime == 1)> struct householder_qr_inplace_blocked { // This is specialized for MKL-supported Scalar types in HouseholderQR_MKL.h static void run(MatrixQR& mat, HCoeffs& hCoeffs, Index maxBlockSize=32, typename MatrixQR::Scalar* tempData = 0) { typedef typename MatrixQR::Scalar Scalar; typedef Block<MatrixQR,Dynamic,Dynamic> BlockType; Index rows = mat.rows(); Index cols = mat.cols(); Index size = (std::min)(rows, cols); typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixQR::MaxColsAtCompileTime,1> TempType; TempType tempVector; if(tempData==0) { tempVector.resize(cols); tempData = tempVector.data(); } Index blockSize = (std::min)(maxBlockSize,size); Index k = 0; for (k = 0; k < size; k += blockSize) { Index bs = (std::min)(size-k,blockSize); // 
actual size of the block Index tcols = cols - k - bs; // trailing columns Index brows = rows-k; // rows of the block // partition the matrix: // A00 | A01 | A02 // mat = A10 | A11 | A12 // A20 | A21 | A22 // and performs the qr dec of [A11^T A12^T]^T // and update [A21^T A22^T]^T using level 3 operations. // Finally, the algorithm continue on A22 BlockType A11_21 = mat.block(k,k,brows,bs); Block<HCoeffs,Dynamic,1> hCoeffsSegment = hCoeffs.segment(k,bs); householder_qr_inplace_unblocked(A11_21, hCoeffsSegment, tempData); if(tcols) { BlockType A21_22 = mat.block(k,k+bs,brows,tcols); apply_block_householder_on_the_left(A21_22,A11_21,hCoeffsSegment, false); // false == backward } } } }; } // end namespace internal #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename _MatrixType> template<typename RhsType, typename DstType> void HouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const { const Index rank = (std::min)(rows(), cols()); eigen_assert(rhs.rows() == rows()); typename RhsType::PlainObject c(rhs); // Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T c.applyOnTheLeft(householderSequence( m_qr.leftCols(rank), m_hCoeffs.head(rank)).transpose() ); m_qr.topLeftCorner(rank, rank) .template triangularView<Upper>() .solveInPlace(c.topRows(rank)); dst.topRows(rank) = c.topRows(rank); dst.bottomRows(cols()-rank).setZero(); } #endif /** Performs the QR factorization of the given matrix \a matrix. The result of * the factorization is stored into \c *this, and a reference to \c *this * is returned. * * \sa class HouseholderQR, HouseholderQR(const MatrixType&) */ template<typename MatrixType> void HouseholderQR<MatrixType>::computeInPlace() { check_template_parameters(); Index rows = m_qr.rows(); Index cols = m_qr.cols(); Index size = (std::min)(rows,cols); m_hCoeffs.resize(size); m_temp.resize(cols); internal::householder_qr_inplace_blocked<MatrixType, HCoeffsType>::run(m_qr, m_hCoeffs, 48, m_temp.data()); m_isInitialized = true; } /** \return the Householder QR decomposition of \c *this. * * \sa class HouseholderQR */ template<typename Derived> const HouseholderQR<typename MatrixBase<Derived>::PlainObject> MatrixBase<Derived>::householderQr() const { return HouseholderQR<PlainObject>(eval()); } } // end namespace Eigen #endif // EIGEN_QR_H
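A minimal sketch of the unpivoted HouseholderQR class above, assuming a full-column-rank input (sizes and names are illustrative). It shows the least-squares solve and the usual way to obtain the full and thin Q factors from the Householder sequence.

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);
  Eigen::VectorXd b = Eigen::VectorXd::Random(5);

  Eigen::HouseholderQR<Eigen::MatrixXd> qr(A);

  // Least-squares solution; no pivoting, so A is assumed full column rank.
  Eigen::VectorXd x = qr.solve(b);

  // Full Q (5x5) and thin Q (5x3) from the Householder sequence.
  Eigen::MatrixXd Qfull = qr.householderQ();
  Eigen::MatrixXd Qthin = qr.householderQ() * Eigen::MatrixXd::Identity(5, 3);

  Eigen::MatrixXd R = qr.matrixQR().triangularView<Eigen::Upper>();
  std::cout << "|A - Q R|            = " << (A - Qfull * R).norm() << "\n";
  std::cout << "|Qthin^T Qthin - I|  = "
            << (Qthin.transpose() * Qthin - Eigen::MatrixXd::Identity(3, 3)).norm() << "\n";
}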
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/QR/CompleteOrthogonalDecomposition.h
.h
20,805
563
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Rasmus Munk Larsen <rmlarsen@google.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COMPLETEORTHOGONALDECOMPOSITION_H #define EIGEN_COMPLETEORTHOGONALDECOMPOSITION_H namespace Eigen { namespace internal { template <typename _MatrixType> struct traits<CompleteOrthogonalDecomposition<_MatrixType> > : traits<_MatrixType> { enum { Flags = 0 }; }; } // end namespace internal /** \ingroup QR_Module * * \class CompleteOrthogonalDecomposition * * \brief Complete orthogonal decomposition (COD) of a matrix. * * \param MatrixType the type of the matrix of which we are computing the COD. * * This class performs a rank-revealing complete orthogonal decomposition of a * matrix \b A into matrices \b P, \b Q, \b T, and \b Z such that * \f[ * \mathbf{A} \, \mathbf{P} = \mathbf{Q} \, * \begin{bmatrix} \mathbf{T} & \mathbf{0} \\ * \mathbf{0} & \mathbf{0} \end{bmatrix} \, \mathbf{Z} * \f] * by using Householder transformations. Here, \b P is a permutation matrix, * \b Q and \b Z are unitary matrices and \b T an upper triangular matrix of * size rank-by-rank. \b A may be rank deficient. * * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. * * \sa MatrixBase::completeOrthogonalDecomposition() */ template <typename _MatrixType> class CompleteOrthogonalDecomposition { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::StorageIndex StorageIndex; typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType; typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType; typedef typename internal::plain_row_type<MatrixType, Index>::type IntRowVectorType; typedef typename internal::plain_row_type<MatrixType>::type RowVectorType; typedef typename internal::plain_row_type<MatrixType, RealScalar>::type RealRowVectorType; typedef HouseholderSequence< MatrixType, typename internal::remove_all< typename HCoeffsType::ConjugateReturnType>::type> HouseholderSequenceType; typedef typename MatrixType::PlainObject PlainObject; private: typedef typename PermutationType::Index PermIndexType; public: /** * \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via * \c CompleteOrthogonalDecomposition::compute(const* MatrixType&). */ CompleteOrthogonalDecomposition() : m_cpqr(), m_zCoeffs(), m_temp() {} /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. * \sa CompleteOrthogonalDecomposition() */ CompleteOrthogonalDecomposition(Index rows, Index cols) : m_cpqr(rows, cols), m_zCoeffs((std::min)(rows, cols)), m_temp(cols) {} /** \brief Constructs a complete orthogonal decomposition from a given * matrix. * * This constructor computes the complete orthogonal decomposition of the * matrix \a matrix by calling the method compute(). 
The default * threshold for rank determination will be used. It is a short cut for: * * \code * CompleteOrthogonalDecomposition<MatrixType> cod(matrix.rows(), * matrix.cols()); * cod.setThreshold(Default); * cod.compute(matrix); * \endcode * * \sa compute() */ template <typename InputType> explicit CompleteOrthogonalDecomposition(const EigenBase<InputType>& matrix) : m_cpqr(matrix.rows(), matrix.cols()), m_zCoeffs((std::min)(matrix.rows(), matrix.cols())), m_temp(matrix.cols()) { compute(matrix.derived()); } /** \brief Constructs a complete orthogonal decomposition from a given matrix * * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is a Eigen::Ref. * * \sa CompleteOrthogonalDecomposition(const EigenBase&) */ template<typename InputType> explicit CompleteOrthogonalDecomposition(EigenBase<InputType>& matrix) : m_cpqr(matrix.derived()), m_zCoeffs((std::min)(matrix.rows(), matrix.cols())), m_temp(matrix.cols()) { computeInPlace(); } /** This method computes the minimum-norm solution X to a least squares * problem \f[\mathrm{minimize} \|A X - B\|, \f] where \b A is the matrix of * which \c *this is the complete orthogonal decomposition. * * \param b the right-hand sides of the problem to solve. * * \returns a solution. * */ template <typename Rhs> inline const Solve<CompleteOrthogonalDecomposition, Rhs> solve( const MatrixBase<Rhs>& b) const { eigen_assert(m_cpqr.m_isInitialized && "CompleteOrthogonalDecomposition is not initialized."); return Solve<CompleteOrthogonalDecomposition, Rhs>(*this, b.derived()); } HouseholderSequenceType householderQ(void) const; HouseholderSequenceType matrixQ(void) const { return m_cpqr.householderQ(); } /** \returns the matrix \b Z. */ MatrixType matrixZ() const { MatrixType Z = MatrixType::Identity(m_cpqr.cols(), m_cpqr.cols()); applyZAdjointOnTheLeftInPlace(Z); return Z.adjoint(); } /** \returns a reference to the matrix where the complete orthogonal * decomposition is stored */ const MatrixType& matrixQTZ() const { return m_cpqr.matrixQR(); } /** \returns a reference to the matrix where the complete orthogonal * decomposition is stored. * \warning The strict lower part and \code cols() - rank() \endcode right * columns of this matrix contains internal values. * Only the upper triangular part should be referenced. To get it, use * \code matrixT().template triangularView<Upper>() \endcode * For rank-deficient matrices, use * \code * matrixR().topLeftCorner(rank(), rank()).template triangularView<Upper>() * \endcode */ const MatrixType& matrixT() const { return m_cpqr.matrixQR(); } template <typename InputType> CompleteOrthogonalDecomposition& compute(const EigenBase<InputType>& matrix) { // Compute the column pivoted QR factorization A P = Q R. m_cpqr.compute(matrix); computeInPlace(); return *this; } /** \returns a const reference to the column permutation matrix */ const PermutationType& colsPermutation() const { return m_cpqr.colsPermutation(); } /** \returns the absolute value of the determinant of the matrix of which * *this is the complete orthogonal decomposition. It has only linear * complexity (that is, O(n) where n is the dimension of the square matrix) * as the complete orthogonal decomposition has already been computed. * * \note This is only for square matrices. * * \warning a determinant can be very big or small, so for matrices * of large enough dimension, there is a risk of overflow/underflow. * One way to work around that is to use logAbsDeterminant() instead. 
* * \sa logAbsDeterminant(), MatrixBase::determinant() */ typename MatrixType::RealScalar absDeterminant() const; /** \returns the natural log of the absolute value of the determinant of the * matrix of which *this is the complete orthogonal decomposition. It has * only linear complexity (that is, O(n) where n is the dimension of the * square matrix) as the complete orthogonal decomposition has already been * computed. * * \note This is only for square matrices. * * \note This method is useful to work around the risk of overflow/underflow * that's inherent to determinant computation. * * \sa absDeterminant(), MatrixBase::determinant() */ typename MatrixType::RealScalar logAbsDeterminant() const; /** \returns the rank of the matrix of which *this is the complete orthogonal * decomposition. * * \note This method has to determine which pivots should be considered * nonzero. For that, it uses the threshold value that you can control by * calling setThreshold(const RealScalar&). */ inline Index rank() const { return m_cpqr.rank(); } /** \returns the dimension of the kernel of the matrix of which *this is the * complete orthogonal decomposition. * * \note This method has to determine which pivots should be considered * nonzero. For that, it uses the threshold value that you can control by * calling setThreshold(const RealScalar&). */ inline Index dimensionOfKernel() const { return m_cpqr.dimensionOfKernel(); } /** \returns true if the matrix of which *this is the decomposition represents * an injective linear map, i.e. has trivial kernel; false otherwise. * * \note This method has to determine which pivots should be considered * nonzero. For that, it uses the threshold value that you can control by * calling setThreshold(const RealScalar&). */ inline bool isInjective() const { return m_cpqr.isInjective(); } /** \returns true if the matrix of which *this is the decomposition represents * a surjective linear map; false otherwise. * * \note This method has to determine which pivots should be considered * nonzero. For that, it uses the threshold value that you can control by * calling setThreshold(const RealScalar&). */ inline bool isSurjective() const { return m_cpqr.isSurjective(); } /** \returns true if the matrix of which *this is the complete orthogonal * decomposition is invertible. * * \note This method has to determine which pivots should be considered * nonzero. For that, it uses the threshold value that you can control by * calling setThreshold(const RealScalar&). */ inline bool isInvertible() const { return m_cpqr.isInvertible(); } /** \returns the pseudo-inverse of the matrix of which *this is the complete * orthogonal decomposition. * \warning: Do not compute \c this->pseudoInverse()*rhs to solve a linear systems. * It is more efficient and numerically stable to call \c this->solve(rhs). */ inline const Inverse<CompleteOrthogonalDecomposition> pseudoInverse() const { return Inverse<CompleteOrthogonalDecomposition>(*this); } inline Index rows() const { return m_cpqr.rows(); } inline Index cols() const { return m_cpqr.cols(); } /** \returns a const reference to the vector of Householder coefficients used * to represent the factor \c Q. * * For advanced uses only. */ inline const HCoeffsType& hCoeffs() const { return m_cpqr.hCoeffs(); } /** \returns a const reference to the vector of Householder coefficients * used to represent the factor \c Z. * * For advanced uses only. 
*/ const HCoeffsType& zCoeffs() const { return m_zCoeffs; } /** Allows to prescribe a threshold to be used by certain methods, such as * rank(), who need to determine when pivots are to be considered nonzero. * Most be called before calling compute(). * * When it needs to get the threshold value, Eigen calls threshold(). By * default, this uses a formula to automatically determine a reasonable * threshold. Once you have called the present method * setThreshold(const RealScalar&), your value is used instead. * * \param threshold The new value to use as the threshold. * * A pivot will be considered nonzero if its absolute value is strictly * greater than * \f$ \vert pivot \vert \leqslant threshold \times \vert maxpivot \vert \f$ * where maxpivot is the biggest pivot. * * If you want to come back to the default behavior, call * setThreshold(Default_t) */ CompleteOrthogonalDecomposition& setThreshold(const RealScalar& threshold) { m_cpqr.setThreshold(threshold); return *this; } /** Allows to come back to the default behavior, letting Eigen use its default * formula for determining the threshold. * * You should pass the special object Eigen::Default as parameter here. * \code qr.setThreshold(Eigen::Default); \endcode * * See the documentation of setThreshold(const RealScalar&). */ CompleteOrthogonalDecomposition& setThreshold(Default_t) { m_cpqr.setThreshold(Default); return *this; } /** Returns the threshold that will be used by certain methods such as rank(). * * See the documentation of setThreshold(const RealScalar&). */ RealScalar threshold() const { return m_cpqr.threshold(); } /** \returns the number of nonzero pivots in the complete orthogonal * decomposition. Here nonzero is meant in the exact sense, not in a * fuzzy sense. So that notion isn't really intrinsically interesting, * but it is still useful when implementing algorithms. * * \sa rank() */ inline Index nonzeroPivots() const { return m_cpqr.nonzeroPivots(); } /** \returns the absolute value of the biggest pivot, i.e. the biggest * diagonal coefficient of R. */ inline RealScalar maxPivot() const { return m_cpqr.maxPivot(); } /** \brief Reports whether the complete orthogonal decomposition was * succesful. * * \note This function always returns \c Success. It is provided for * compatibility * with other factorization routines. * \returns \c Success */ ComputationInfo info() const { eigen_assert(m_cpqr.m_isInitialized && "Decomposition is not initialized."); return Success; } #ifndef EIGEN_PARSED_BY_DOXYGEN template <typename RhsType, typename DstType> EIGEN_DEVICE_FUNC void _solve_impl(const RhsType& rhs, DstType& dst) const; #endif protected: static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); } void computeInPlace(); /** Overwrites \b rhs with \f$ \mathbf{Z}^* * \mathbf{rhs} \f$. */ template <typename Rhs> void applyZAdjointOnTheLeftInPlace(Rhs& rhs) const; ColPivHouseholderQR<MatrixType> m_cpqr; HCoeffsType m_zCoeffs; RowVectorType m_temp; }; template <typename MatrixType> typename MatrixType::RealScalar CompleteOrthogonalDecomposition<MatrixType>::absDeterminant() const { return m_cpqr.absDeterminant(); } template <typename MatrixType> typename MatrixType::RealScalar CompleteOrthogonalDecomposition<MatrixType>::logAbsDeterminant() const { return m_cpqr.logAbsDeterminant(); } /** Performs the complete orthogonal decomposition of the given matrix \a * matrix. The result of the factorization is stored into \c *this, and a * reference to \c *this is returned. 
* * \sa class CompleteOrthogonalDecomposition, * CompleteOrthogonalDecomposition(const MatrixType&) */ template <typename MatrixType> void CompleteOrthogonalDecomposition<MatrixType>::computeInPlace() { check_template_parameters(); // the column permutation is stored as int indices, so just to be sure: eigen_assert(m_cpqr.cols() <= NumTraits<int>::highest()); const Index rank = m_cpqr.rank(); const Index cols = m_cpqr.cols(); const Index rows = m_cpqr.rows(); m_zCoeffs.resize((std::min)(rows, cols)); m_temp.resize(cols); if (rank < cols) { // We have reduced the (permuted) matrix to the form // [R11 R12] // [ 0 R22] // where R11 is r-by-r (r = rank) upper triangular, R12 is // r-by-(n-r), and R22 is empty or the norm of R22 is negligible. // We now compute the complete orthogonal decomposition by applying // Householder transformations from the right to the upper trapezoidal // matrix X = [R11 R12] to zero out R12 and obtain the factorization // [R11 R12] = [T11 0] * Z, where T11 is r-by-r upper triangular and // Z = Z(0) * Z(1) ... Z(r-1) is an n-by-n orthogonal matrix. // We store the data representing Z in R12 and m_zCoeffs. for (Index k = rank - 1; k >= 0; --k) { if (k != rank - 1) { // Given the API for Householder reflectors, it is more convenient if // we swap the leading parts of columns k and r-1 (zero-based) to form // the matrix X_k = [X(0:k, k), X(0:k, r:n)] m_cpqr.m_qr.col(k).head(k + 1).swap( m_cpqr.m_qr.col(rank - 1).head(k + 1)); } // Construct Householder reflector Z(k) to zero out the last row of X_k, // i.e. choose Z(k) such that // [X(k, k), X(k, r:n)] * Z(k) = [beta, 0, .., 0]. RealScalar beta; m_cpqr.m_qr.row(k) .tail(cols - rank + 1) .makeHouseholderInPlace(m_zCoeffs(k), beta); m_cpqr.m_qr(k, rank - 1) = beta; if (k > 0) { // Apply Z(k) to the first k rows of X_k m_cpqr.m_qr.topRightCorner(k, cols - rank + 1) .applyHouseholderOnTheRight( m_cpqr.m_qr.row(k).tail(cols - rank).transpose(), m_zCoeffs(k), &m_temp(0)); } if (k != rank - 1) { // Swap X(0:k,k) back to its proper location. m_cpqr.m_qr.col(k).head(k + 1).swap( m_cpqr.m_qr.col(rank - 1).head(k + 1)); } } } } template <typename MatrixType> template <typename Rhs> void CompleteOrthogonalDecomposition<MatrixType>::applyZAdjointOnTheLeftInPlace( Rhs& rhs) const { const Index cols = this->cols(); const Index nrhs = rhs.cols(); const Index rank = this->rank(); Matrix<typename MatrixType::Scalar, Dynamic, 1> temp((std::max)(cols, nrhs)); for (Index k = 0; k < rank; ++k) { if (k != rank - 1) { rhs.row(k).swap(rhs.row(rank - 1)); } rhs.middleRows(rank - 1, cols - rank + 1) .applyHouseholderOnTheLeft( matrixQTZ().row(k).tail(cols - rank).adjoint(), zCoeffs()(k), &temp(0)); if (k != rank - 1) { rhs.row(k).swap(rhs.row(rank - 1)); } } } #ifndef EIGEN_PARSED_BY_DOXYGEN template <typename _MatrixType> template <typename RhsType, typename DstType> void CompleteOrthogonalDecomposition<_MatrixType>::_solve_impl( const RhsType& rhs, DstType& dst) const { eigen_assert(rhs.rows() == this->rows()); const Index rank = this->rank(); if (rank == 0) { dst.setZero(); return; } // Compute c = Q^* * rhs // Note that the matrix Q = H_0^* H_1^*... 
so its inverse is // Q^* = (H_0 H_1 ...)^T typename RhsType::PlainObject c(rhs); c.applyOnTheLeft( householderSequence(matrixQTZ(), hCoeffs()).setLength(rank).transpose()); // Solve T z = c(1:rank, :) dst.topRows(rank) = matrixT() .topLeftCorner(rank, rank) .template triangularView<Upper>() .solve(c.topRows(rank)); const Index cols = this->cols(); if (rank < cols) { // Compute y = Z^* * [ z ] // [ 0 ] dst.bottomRows(cols - rank).setZero(); applyZAdjointOnTheLeftInPlace(dst); } // Undo permutation to get x = P^{-1} * y. dst = colsPermutation() * dst; } #endif namespace internal { template<typename DstXprType, typename MatrixType> struct Assignment<DstXprType, Inverse<CompleteOrthogonalDecomposition<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename CompleteOrthogonalDecomposition<MatrixType>::Scalar>, Dense2Dense> { typedef CompleteOrthogonalDecomposition<MatrixType> CodType; typedef Inverse<CodType> SrcXprType; static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename CodType::Scalar> &) { dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.rows())); } }; } // end namespace internal /** \returns the matrix Q as a sequence of householder transformations */ template <typename MatrixType> typename CompleteOrthogonalDecomposition<MatrixType>::HouseholderSequenceType CompleteOrthogonalDecomposition<MatrixType>::householderQ() const { return m_cpqr.householderQ(); } /** \return the complete orthogonal decomposition of \c *this. * * \sa class CompleteOrthogonalDecomposition */ template <typename Derived> const CompleteOrthogonalDecomposition<typename MatrixBase<Derived>::PlainObject> MatrixBase<Derived>::completeOrthogonalDecomposition() const { return CompleteOrthogonalDecomposition<PlainObject>(eval()); } } // end namespace Eigen #endif // EIGEN_COMPLETEORTHOGONALDECOMPOSITION_H
Unknown
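// A minimal usage sketch of the CompleteOrthogonalDecomposition API documented above
// (setThreshold(), rank(), solve()). The matrix sizes, values, and the 1e-10 threshold
// are illustrative assumptions, not values taken from this file.
#include <Eigen/Dense>
#include <iostream>

int main() {
  // A deliberately rank-deficient 4x3 system: the third column duplicates the first.
  Eigen::MatrixXd A(4, 3);
  A << 1, 2, 1,
       3, 4, 3,
       5, 6, 5,
       7, 8, 7;
  Eigen::VectorXd b = Eigen::VectorXd::Ones(4);

  Eigen::CompleteOrthogonalDecomposition<Eigen::MatrixXd> cod;
  cod.setThreshold(1e-10);  // per the documentation above, set the pivot threshold before compute()
  cod.compute(A);

  std::cout << "rank: " << cod.rank() << "\n";   // 2 for this A
  Eigen::VectorXd x = cod.solve(b);              // minimum-norm least-squares solution
  std::cout << "residual: " << (A * x - b).norm() << "\n";
  return 0;
}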
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/QR/HouseholderQR_LAPACKE.h
.h
2,993
69
/* Copyright (c) 2011, Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************** * Content : Eigen bindings to LAPACKe * Householder QR decomposition of a matrix w/o pivoting based on * LAPACKE_?geqrf function. ******************************************************************************** */ #ifndef EIGEN_QR_LAPACKE_H #define EIGEN_QR_LAPACKE_H namespace Eigen { namespace internal { /** \internal Specialization for the data types supported by LAPACKe */ #define EIGEN_LAPACKE_QR_NOPIV(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX) \ template<typename MatrixQR, typename HCoeffs> \ struct householder_qr_inplace_blocked<MatrixQR, HCoeffs, EIGTYPE, true> \ { \ static void run(MatrixQR& mat, HCoeffs& hCoeffs, Index = 32, \ typename MatrixQR::Scalar* = 0) \ { \ lapack_int m = (lapack_int) mat.rows(); \ lapack_int n = (lapack_int) mat.cols(); \ lapack_int lda = (lapack_int) mat.outerStride(); \ lapack_int matrix_order = (MatrixQR::IsRowMajor) ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \ LAPACKE_##LAPACKE_PREFIX##geqrf( matrix_order, m, n, (LAPACKE_TYPE*)mat.data(), lda, (LAPACKE_TYPE*)hCoeffs.data()); \ hCoeffs.adjointInPlace(); \ } \ }; EIGEN_LAPACKE_QR_NOPIV(double, double, d) EIGEN_LAPACKE_QR_NOPIV(float, float, s) EIGEN_LAPACKE_QR_NOPIV(dcomplex, lapack_complex_double, z) EIGEN_LAPACKE_QR_NOPIV(scomplex, lapack_complex_float, c) } // end namespace internal } // end namespace Eigen #endif // EIGEN_QR_LAPACKE_H
Unknown
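// The LAPACKE binding above is transparent to user code: when Eigen is compiled with
// EIGEN_USE_LAPACKE and linked against a LAPACKE implementation, the ordinary
// HouseholderQR API dispatches to ?geqrf through the specialization above. A small
// sketch under that assumption; the random problem data is purely illustrative.
#include <Eigen/Dense>

void qr_solve_example() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);
  Eigen::VectorXd b = Eigen::VectorXd::Random(6);

  Eigen::HouseholderQR<Eigen::MatrixXd> qr(A);  // blocked factorization; LAPACKE path if enabled
  Eigen::VectorXd x = qr.solve(b);              // least-squares solution of A x ~= b
  (void)x;
}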
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/StlSupport/StdDeque.h
.h
5,279
129
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STDDEQUE_H #define EIGEN_STDDEQUE_H #include "details.h" /** * This section contains a convenience MACRO which allows an easy specialization of * std::deque such that for data types with alignment issues the correct allocator * is used automatically. */ #define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...) \ namespace std \ { \ template<> \ class deque<__VA_ARGS__, std::allocator<__VA_ARGS__> > \ : public deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \ { \ typedef deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > deque_base; \ public: \ typedef __VA_ARGS__ value_type; \ typedef deque_base::allocator_type allocator_type; \ typedef deque_base::size_type size_type; \ typedef deque_base::iterator iterator; \ explicit deque(const allocator_type& a = allocator_type()) : deque_base(a) {} \ template<typename InputIterator> \ deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : deque_base(first, last, a) {} \ deque(const deque& c) : deque_base(c) {} \ explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \ deque(iterator start, iterator end) : deque_base(start, end) {} \ deque& operator=(const deque& x) { \ deque_base::operator=(x); \ return *this; \ } \ }; \ } // check whether we really need the std::deque specialization #if !EIGEN_HAS_CXX11_CONTAINERS && !(defined(_GLIBCXX_DEQUE) && (!EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we already have: std::deque::resize(size_type,const T&). 
*/ namespace std { #define EIGEN_STD_DEQUE_SPECIALIZATION_BODY \ public: \ typedef T value_type; \ typedef typename deque_base::allocator_type allocator_type; \ typedef typename deque_base::size_type size_type; \ typedef typename deque_base::iterator iterator; \ typedef typename deque_base::const_iterator const_iterator; \ explicit deque(const allocator_type& a = allocator_type()) : deque_base(a) {} \ template<typename InputIterator> \ deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \ : deque_base(first, last, a) {} \ deque(const deque& c) : deque_base(c) {} \ explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \ deque(iterator start, iterator end) : deque_base(start, end) {} \ deque& operator=(const deque& x) { \ deque_base::operator=(x); \ return *this; \ } template<typename T> class deque<T,EIGEN_ALIGNED_ALLOCATOR<T> > : public deque<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T), Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > { typedef deque<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T), Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > deque_base; EIGEN_STD_DEQUE_SPECIALIZATION_BODY void resize(size_type new_size) { resize(new_size, T()); } #if defined(_DEQUE_) // workaround MSVC std::deque implementation void resize(size_type new_size, const value_type& x) { if (deque_base::size() < new_size) deque_base::_Insert_n(deque_base::end(), new_size - deque_base::size(), x); else if (new_size < deque_base::size()) deque_base::erase(deque_base::begin() + new_size, deque_base::end()); } void push_back(const value_type& x) { deque_base::push_back(x); } void push_front(const value_type& x) { deque_base::push_front(x); } using deque_base::insert; iterator insert(const_iterator position, const value_type& x) { return deque_base::insert(position,x); } void insert(const_iterator position, size_type new_size, const value_type& x) { deque_base::insert(position, new_size, x); } #elif defined(_GLIBCXX_DEQUE) && EIGEN_GNUC_AT_LEAST(4,2) && !EIGEN_GNUC_AT_LEAST(10, 1) // workaround GCC std::deque implementation // GCC 10.1 doesn't let us access _Deque_impl _M_impl anymore and we have to // fall-back to the default case void resize(size_type new_size, const value_type& x) { if (new_size < deque_base::size()) deque_base::_M_erase_at_end(this->_M_impl._M_start + new_size); else deque_base::insert(deque_base::end(), new_size - deque_base::size(), x); } #else // either non-GCC or GCC between 4.1 and 10.1 // default implementation which should always work. void resize(size_type new_size, const value_type& x) { if (new_size < deque_base::size()) deque_base::erase(deque_base::begin() + new_size, deque_base::end()); else if (new_size > deque_base::size()) deque_base::insert(deque_base::end(), new_size - deque_base::size(), x); } #endif }; } #endif // check whether specialization is actually required #endif // EIGEN_STDDEQUE_H
Unknown
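// A short sketch of how the macro above is intended to be used from client code; the
// Eigen::Vector4d element type is an arbitrary example of a fixed-size vectorizable type.
// Either invoke the macro once at namespace scope, or pass the aligned allocator explicitly.
#include <Eigen/Dense>
#include <Eigen/StdDeque>
#include <deque>

EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Eigen::Vector4d)

void deque_example() {
  std::deque<Eigen::Vector4d> path;  // picks up the specialization defined by the macro
  path.push_back(Eigen::Vector4d::Zero());

  // Equivalent without the macro: name the aligned allocator yourself.
  std::deque<Eigen::Vector4d, Eigen::aligned_allocator<Eigen::Vector4d> > path2;
  path2.push_back(Eigen::Vector4d::Ones());
}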
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/StlSupport/StdVector.h
.h
5,330
132
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STDVECTOR_H #define EIGEN_STDVECTOR_H #include "details.h" /** * This section contains a convenience MACRO which allows an easy specialization of * std::vector such that for data types with alignment issues the correct allocator * is used automatically. */ #define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) \ namespace std \ { \ template<> \ class vector<__VA_ARGS__, std::allocator<__VA_ARGS__> > \ : public vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \ { \ typedef vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > vector_base; \ public: \ typedef __VA_ARGS__ value_type; \ typedef vector_base::allocator_type allocator_type; \ typedef vector_base::size_type size_type; \ typedef vector_base::iterator iterator; \ explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \ template<typename InputIterator> \ vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : vector_base(first, last, a) {} \ vector(const vector& c) : vector_base(c) {} \ explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \ vector(iterator start, iterator end) : vector_base(start, end) {} \ vector& operator=(const vector& x) { \ vector_base::operator=(x); \ return *this; \ } \ }; \ } // Don't specialize if containers are implemented according to C++11 #if !EIGEN_HAS_CXX11_CONTAINERS namespace std { #define EIGEN_STD_VECTOR_SPECIALIZATION_BODY \ public: \ typedef T value_type; \ typedef typename vector_base::allocator_type allocator_type; \ typedef typename vector_base::size_type size_type; \ typedef typename vector_base::iterator iterator; \ typedef typename vector_base::const_iterator const_iterator; \ explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \ template<typename InputIterator> \ vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \ : vector_base(first, last, a) {} \ vector(const vector& c) : vector_base(c) {} \ explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \ vector(iterator start, iterator end) : vector_base(start, end) {} \ vector& operator=(const vector& x) { \ vector_base::operator=(x); \ return *this; \ } template<typename T> class vector<T,EIGEN_ALIGNED_ALLOCATOR<T> > : public vector<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T), Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > { typedef vector<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T), Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > vector_base; EIGEN_STD_VECTOR_SPECIALIZATION_BODY void resize(size_type new_size) { resize(new_size, T()); } #if defined(_VECTOR_) // workaround MSVC std::vector implementation void resize(size_type new_size, const value_type& x) { if (vector_base::size() < new_size) vector_base::_Insert_n(vector_base::end(), new_size - vector_base::size(), x); else if (new_size < vector_base::size()) vector_base::erase(vector_base::begin() + new_size, vector_base::end()); } void push_back(const value_type& x) { 
vector_base::push_back(x); } using vector_base::insert; iterator insert(const_iterator position, const value_type& x) { return vector_base::insert(position,x); } void insert(const_iterator position, size_type new_size, const value_type& x) { vector_base::insert(position, new_size, x); } #elif defined(_GLIBCXX_VECTOR) && (!(EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we already have: std::vector::resize(size_type,const T&). * However, this specialization is still needed to make the above EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION trick to work. */ void resize(size_type new_size, const value_type& x) { vector_base::resize(new_size,x); } #elif defined(_GLIBCXX_VECTOR) && EIGEN_GNUC_AT_LEAST(4,2) // workaround GCC std::vector implementation void resize(size_type new_size, const value_type& x) { if (new_size < vector_base::size()) vector_base::_M_erase_at_end(this->_M_impl._M_start + new_size); else vector_base::insert(vector_base::end(), new_size - vector_base::size(), x); } #else // either GCC 4.1 or non-GCC // default implementation which should always work. void resize(size_type new_size, const value_type& x) { if (new_size < vector_base::size()) vector_base::erase(vector_base::begin() + new_size, vector_base::end()); else if (new_size > vector_base::size()) vector_base::insert(vector_base::end(), new_size - vector_base::size(), x); } #endif }; } #endif // !EIGEN_HAS_CXX11_CONTAINERS #endif // EIGEN_STDVECTOR_H
Unknown
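// The std::vector counterpart follows the same pattern. With a C++11 standard library
// (EIGEN_HAS_CXX11_CONTAINERS) the workaround class above is skipped entirely, and
// spelling out Eigen::aligned_allocator is sufficient. The types below are illustrative.
#include <Eigen/Dense>
#include <Eigen/StdVector>
#include <vector>

void vector_example() {
  std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > frames;
  frames.resize(16, Eigen::Matrix4f::Identity());
  frames.push_back(Eigen::Matrix4f::Zero());
}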
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/StlSupport/details.h
.h
2,809
85
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STL_DETAILS_H #define EIGEN_STL_DETAILS_H #ifndef EIGEN_ALIGNED_ALLOCATOR #define EIGEN_ALIGNED_ALLOCATOR Eigen::aligned_allocator #endif namespace Eigen { // This one is needed to prevent reimplementing the whole std::vector. template <class T> class aligned_allocator_indirection : public EIGEN_ALIGNED_ALLOCATOR<T> { public: typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; typedef T* pointer; typedef const T* const_pointer; typedef T& reference; typedef const T& const_reference; typedef T value_type; template<class U> struct rebind { typedef aligned_allocator_indirection<U> other; }; aligned_allocator_indirection() {} aligned_allocator_indirection(const aligned_allocator_indirection& ) : EIGEN_ALIGNED_ALLOCATOR<T>() {} aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR<T>& ) {} template<class U> aligned_allocator_indirection(const aligned_allocator_indirection<U>& ) {} template<class U> aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR<U>& ) {} ~aligned_allocator_indirection() {} }; #if EIGEN_COMP_MSVC // sometimes, MSVC detects, at compile time, that the argument x // in std::vector::resize(size_t s,T x) won't be aligned and generate an error // even if this function is never called. Whence this little wrapper. #define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) \ typename Eigen::internal::conditional< \ Eigen::internal::is_arithmetic<T>::value, \ T, \ Eigen::internal::workaround_msvc_stl_support<T> \ >::type namespace internal { template<typename T> struct workaround_msvc_stl_support : public T { inline workaround_msvc_stl_support() : T() {} inline workaround_msvc_stl_support(const T& other) : T(other) {} inline operator T& () { return *static_cast<T*>(this); } inline operator const T& () const { return *static_cast<const T*>(this); } template<typename OtherT> inline T& operator=(const OtherT& other) { T::operator=(other); return *this; } inline workaround_msvc_stl_support& operator=(const workaround_msvc_stl_support& other) { T::operator=(other); return *this; } }; } #else #define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) T #endif } #endif // EIGEN_STL_DETAILS_H
Unknown
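// aligned_allocator_indirection above only exists to feed the container specializations;
// the underlying Eigen::aligned_allocator follows the standard allocator interface and can
// also be used on its own. A minimal sketch (the element type and count are arbitrary):
#include <Eigen/Core>
#include <new>

void allocator_example() {
  using Eigen::Vector4f;
  Eigen::aligned_allocator<Vector4f> alloc;
  Vector4f* buf = alloc.allocate(8);                      // suitably aligned uninitialized storage
  for (int i = 0; i < 8; ++i)
    new (buf + i) Vector4f(Vector4f::Zero());             // placement-construct the elements
  for (int i = 0; i < 8; ++i)
    buf[i].~Vector4f();                                   // destroy before releasing the storage
  alloc.deallocate(buf, 8);
}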
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/StlSupport/StdList.h
.h
4,147
107
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STDLIST_H #define EIGEN_STDLIST_H #include "details.h" /** * This section contains a convenience MACRO which allows an easy specialization of * std::list such that for data types with alignment issues the correct allocator * is used automatically. */ #define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...) \ namespace std \ { \ template<> \ class list<__VA_ARGS__, std::allocator<__VA_ARGS__> > \ : public list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \ { \ typedef list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > list_base; \ public: \ typedef __VA_ARGS__ value_type; \ typedef list_base::allocator_type allocator_type; \ typedef list_base::size_type size_type; \ typedef list_base::iterator iterator; \ explicit list(const allocator_type& a = allocator_type()) : list_base(a) {} \ template<typename InputIterator> \ list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : list_base(first, last, a) {} \ list(const list& c) : list_base(c) {} \ explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \ list(iterator start, iterator end) : list_base(start, end) {} \ list& operator=(const list& x) { \ list_base::operator=(x); \ return *this; \ } \ }; \ } // check whether we really need the std::list specialization #if !EIGEN_HAS_CXX11_CONTAINERS && !(defined(_GLIBCXX_LIST) && (!EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we already have: std::list::resize(size_type,const T&). 
*/ namespace std { #define EIGEN_STD_LIST_SPECIALIZATION_BODY \ public: \ typedef T value_type; \ typedef typename list_base::allocator_type allocator_type; \ typedef typename list_base::size_type size_type; \ typedef typename list_base::iterator iterator; \ typedef typename list_base::const_iterator const_iterator; \ explicit list(const allocator_type& a = allocator_type()) : list_base(a) {} \ template<typename InputIterator> \ list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \ : list_base(first, last, a) {} \ list(const list& c) : list_base(c) {} \ explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \ list(iterator start, iterator end) : list_base(start, end) {} \ list& operator=(const list& x) { \ list_base::operator=(x); \ return *this; \ } template<typename T> class list<T,EIGEN_ALIGNED_ALLOCATOR<T> > : public list<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T), Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > { typedef list<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T), Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > list_base; EIGEN_STD_LIST_SPECIALIZATION_BODY void resize(size_type new_size) { resize(new_size, T()); } void resize(size_type new_size, const value_type& x) { if (list_base::size() < new_size) list_base::insert(list_base::end(), new_size - list_base::size(), x); else while (new_size < list_base::size()) list_base::pop_back(); } #if defined(_LIST_) // workaround MSVC std::list implementation void push_back(const value_type& x) { list_base::push_back(x); } using list_base::insert; iterator insert(const_iterator position, const value_type& x) { return list_base::insert(position,x); } void insert(const_iterator position, size_type new_size, const value_type& x) { list_base::insert(position, new_size, x); } #endif }; } #endif // check whether specialization is actually required #endif // EIGEN_STDLIST_H
Unknown
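// The same pattern applies to std::list, shown with the macro route this time.
// Eigen::Vector2d is an arbitrary illustrative element type.
#include <Eigen/Dense>
#include <Eigen/StdList>
#include <list>

EIGEN_DEFINE_STL_LIST_SPECIALIZATION(Eigen::Vector2d)

void list_example() {
  std::list<Eigen::Vector2d> pts(4, Eigen::Vector2d::Zero());  // uses the specialization above
  pts.push_back(Eigen::Vector2d(1.0, 2.0));
}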
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/SuperLUSupport/SuperLUSupport.h
.h
34,345
1,028
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SUPERLUSUPPORT_H #define EIGEN_SUPERLUSUPPORT_H namespace Eigen { #if defined(SUPERLU_MAJOR_VERSION) && (SUPERLU_MAJOR_VERSION >= 5) #define DECL_GSSVX(PREFIX,FLOATTYPE,KEYTYPE) \ extern "C" { \ extern void PREFIX##gssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \ char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \ void *, int, SuperMatrix *, SuperMatrix *, \ FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, \ GlobalLU_t *, mem_usage_t *, SuperLUStat_t *, int *); \ } \ inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A, \ int *perm_c, int *perm_r, int *etree, char *equed, \ FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \ SuperMatrix *U, void *work, int lwork, \ SuperMatrix *B, SuperMatrix *X, \ FLOATTYPE *recip_pivot_growth, \ FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr, \ SuperLUStat_t *stats, int *info, KEYTYPE) { \ mem_usage_t mem_usage; \ GlobalLU_t gLU; \ PREFIX##gssvx(options, A, perm_c, perm_r, etree, equed, R, C, L, \ U, work, lwork, B, X, recip_pivot_growth, rcond, \ ferr, berr, &gLU, &mem_usage, stats, info); \ return mem_usage.for_lu; /* bytes used by the factor storage */ \ } #else // version < 5.0 #define DECL_GSSVX(PREFIX,FLOATTYPE,KEYTYPE) \ extern "C" { \ extern void PREFIX##gssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \ char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \ void *, int, SuperMatrix *, SuperMatrix *, \ FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, \ mem_usage_t *, SuperLUStat_t *, int *); \ } \ inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A, \ int *perm_c, int *perm_r, int *etree, char *equed, \ FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \ SuperMatrix *U, void *work, int lwork, \ SuperMatrix *B, SuperMatrix *X, \ FLOATTYPE *recip_pivot_growth, \ FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr, \ SuperLUStat_t *stats, int *info, KEYTYPE) { \ mem_usage_t mem_usage; \ PREFIX##gssvx(options, A, perm_c, perm_r, etree, equed, R, C, L, \ U, work, lwork, B, X, recip_pivot_growth, rcond, \ ferr, berr, &mem_usage, stats, info); \ return mem_usage.for_lu; /* bytes used by the factor storage */ \ } #endif DECL_GSSVX(s,float,float) DECL_GSSVX(c,float,std::complex<float>) DECL_GSSVX(d,double,double) DECL_GSSVX(z,double,std::complex<double>) #ifdef MILU_ALPHA #define EIGEN_SUPERLU_HAS_ILU #endif #ifdef EIGEN_SUPERLU_HAS_ILU // similarly for the incomplete factorization using gsisx #define DECL_GSISX(PREFIX,FLOATTYPE,KEYTYPE) \ extern "C" { \ extern void PREFIX##gsisx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \ char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \ void *, int, SuperMatrix *, SuperMatrix *, FLOATTYPE *, FLOATTYPE *, \ mem_usage_t *, SuperLUStat_t *, int *); \ } \ inline float SuperLU_gsisx(superlu_options_t *options, SuperMatrix *A, \ int *perm_c, int *perm_r, int *etree, char *equed, \ FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \ SuperMatrix *U, void *work, int lwork, \ SuperMatrix *B, SuperMatrix *X, \ FLOATTYPE *recip_pivot_growth, \ FLOATTYPE *rcond, \ SuperLUStat_t *stats, int *info, KEYTYPE) { \ mem_usage_t mem_usage; \ 
PREFIX##gsisx(options, A, perm_c, perm_r, etree, equed, R, C, L, \ U, work, lwork, B, X, recip_pivot_growth, rcond, \ &mem_usage, stats, info); \ return mem_usage.for_lu; /* bytes used by the factor storage */ \ } DECL_GSISX(s,float,float) DECL_GSISX(c,float,std::complex<float>) DECL_GSISX(d,double,double) DECL_GSISX(z,double,std::complex<double>) #endif template<typename MatrixType> struct SluMatrixMapHelper; /** \internal * * A wrapper class for SuperLU matrices. It supports only compressed sparse matrices * and dense matrices. Supernodal and other fancy format are not supported by this wrapper. * * This wrapper class mainly aims to avoids the need of dynamic allocation of the storage structure. */ struct SluMatrix : SuperMatrix { SluMatrix() { Store = &storage; } SluMatrix(const SluMatrix& other) : SuperMatrix(other) { Store = &storage; storage = other.storage; } SluMatrix& operator=(const SluMatrix& other) { SuperMatrix::operator=(static_cast<const SuperMatrix&>(other)); Store = &storage; storage = other.storage; return *this; } struct { union {int nnz;int lda;}; void *values; int *innerInd; int *outerInd; } storage; void setStorageType(Stype_t t) { Stype = t; if (t==SLU_NC || t==SLU_NR || t==SLU_DN) Store = &storage; else { eigen_assert(false && "storage type not supported"); Store = 0; } } template<typename Scalar> void setScalarType() { if (internal::is_same<Scalar,float>::value) Dtype = SLU_S; else if (internal::is_same<Scalar,double>::value) Dtype = SLU_D; else if (internal::is_same<Scalar,std::complex<float> >::value) Dtype = SLU_C; else if (internal::is_same<Scalar,std::complex<double> >::value) Dtype = SLU_Z; else { eigen_assert(false && "Scalar type not supported by SuperLU"); } } template<typename MatrixType> static SluMatrix Map(MatrixBase<MatrixType>& _mat) { MatrixType& mat(_mat.derived()); eigen_assert( ((MatrixType::Flags&RowMajorBit)!=RowMajorBit) && "row-major dense matrices are not supported by SuperLU"); SluMatrix res; res.setStorageType(SLU_DN); res.setScalarType<typename MatrixType::Scalar>(); res.Mtype = SLU_GE; res.nrow = internal::convert_index<int>(mat.rows()); res.ncol = internal::convert_index<int>(mat.cols()); res.storage.lda = internal::convert_index<int>(MatrixType::IsVectorAtCompileTime ? 
mat.size() : mat.outerStride()); res.storage.values = (void*)(mat.data()); return res; } template<typename MatrixType> static SluMatrix Map(SparseMatrixBase<MatrixType>& a_mat) { MatrixType &mat(a_mat.derived()); SluMatrix res; if ((MatrixType::Flags&RowMajorBit)==RowMajorBit) { res.setStorageType(SLU_NR); res.nrow = internal::convert_index<int>(mat.cols()); res.ncol = internal::convert_index<int>(mat.rows()); } else { res.setStorageType(SLU_NC); res.nrow = internal::convert_index<int>(mat.rows()); res.ncol = internal::convert_index<int>(mat.cols()); } res.Mtype = SLU_GE; res.storage.nnz = internal::convert_index<int>(mat.nonZeros()); res.storage.values = mat.valuePtr(); res.storage.innerInd = mat.innerIndexPtr(); res.storage.outerInd = mat.outerIndexPtr(); res.setScalarType<typename MatrixType::Scalar>(); // FIXME the following is not very accurate if (MatrixType::Flags & Upper) res.Mtype = SLU_TRU; if (MatrixType::Flags & Lower) res.Mtype = SLU_TRL; eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU"); return res; } }; template<typename Scalar, int Rows, int Cols, int Options, int MRows, int MCols> struct SluMatrixMapHelper<Matrix<Scalar,Rows,Cols,Options,MRows,MCols> > { typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType; static void run(MatrixType& mat, SluMatrix& res) { eigen_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices is not supported by SuperLU"); res.setStorageType(SLU_DN); res.setScalarType<Scalar>(); res.Mtype = SLU_GE; res.nrow = mat.rows(); res.ncol = mat.cols(); res.storage.lda = mat.outerStride(); res.storage.values = mat.data(); } }; template<typename Derived> struct SluMatrixMapHelper<SparseMatrixBase<Derived> > { typedef Derived MatrixType; static void run(MatrixType& mat, SluMatrix& res) { if ((MatrixType::Flags&RowMajorBit)==RowMajorBit) { res.setStorageType(SLU_NR); res.nrow = mat.cols(); res.ncol = mat.rows(); } else { res.setStorageType(SLU_NC); res.nrow = mat.rows(); res.ncol = mat.cols(); } res.Mtype = SLU_GE; res.storage.nnz = mat.nonZeros(); res.storage.values = mat.valuePtr(); res.storage.innerInd = mat.innerIndexPtr(); res.storage.outerInd = mat.outerIndexPtr(); res.setScalarType<typename MatrixType::Scalar>(); // FIXME the following is not very accurate if (MatrixType::Flags & Upper) res.Mtype = SLU_TRU; if (MatrixType::Flags & Lower) res.Mtype = SLU_TRL; eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU"); } }; namespace internal { template<typename MatrixType> SluMatrix asSluMatrix(MatrixType& mat) { return SluMatrix::Map(mat); } /** View a Super LU matrix as an Eigen expression */ template<typename Scalar, int Flags, typename Index> MappedSparseMatrix<Scalar,Flags,Index> map_superlu(SluMatrix& sluMat) { eigen_assert(((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR) || ((Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC)); Index outerSize = (Flags&RowMajor)==RowMajor ? 
sluMat.ncol : sluMat.nrow; return MappedSparseMatrix<Scalar,Flags,Index>( sluMat.nrow, sluMat.ncol, sluMat.storage.outerInd[outerSize], sluMat.storage.outerInd, sluMat.storage.innerInd, reinterpret_cast<Scalar*>(sluMat.storage.values) ); } } // end namespace internal /** \ingroup SuperLUSupport_Module * \class SuperLUBase * \brief The base class for the direct and incomplete LU factorization of SuperLU */ template<typename _MatrixType, typename Derived> class SuperLUBase : public SparseSolverBase<Derived> { protected: typedef SparseSolverBase<Derived> Base; using Base::derived; using Base::m_isInitialized; public: typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix<Scalar,Dynamic,1> Vector; typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType; typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType; typedef Map<PermutationMatrix<Dynamic,Dynamic,int> > PermutationMap; typedef SparseMatrix<Scalar> LUMatrixType; enum { ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; public: SuperLUBase() {} ~SuperLUBase() { clearFactors(); } inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } /** \returns a reference to the Super LU option object to configure the Super LU algorithms. */ inline superlu_options_t& options() { return m_sluOptions; } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the matrix.appears to be negative. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_info; } /** Computes the sparse Cholesky decomposition of \a matrix */ void compute(const MatrixType& matrix) { derived().analyzePattern(matrix); derived().factorize(matrix); } /** Performs a symbolic decomposition on the sparcity of \a matrix. * * This function is particularly useful when solving for several problems having the same structure. * * \sa factorize() */ void analyzePattern(const MatrixType& /*matrix*/) { m_isInitialized = true; m_info = Success; m_analysisIsOk = true; m_factorizationIsOk = false; } template<typename Stream> void dumpMemory(Stream& /*s*/) {} protected: void initFactorization(const MatrixType& a) { set_default_options(&this->m_sluOptions); const Index size = a.rows(); m_matrix = a; m_sluA = internal::asSluMatrix(m_matrix); clearFactors(); m_p.resize(size); m_q.resize(size); m_sluRscale.resize(size); m_sluCscale.resize(size); m_sluEtree.resize(size); // set empty B and X m_sluB.setStorageType(SLU_DN); m_sluB.setScalarType<Scalar>(); m_sluB.Mtype = SLU_GE; m_sluB.storage.values = 0; m_sluB.nrow = 0; m_sluB.ncol = 0; m_sluB.storage.lda = internal::convert_index<int>(size); m_sluX = m_sluB; m_extractedDataAreDirty = true; } void init() { m_info = InvalidInput; m_isInitialized = false; m_sluL.Store = 0; m_sluU.Store = 0; } void extractData() const; void clearFactors() { if(m_sluL.Store) Destroy_SuperNode_Matrix(&m_sluL); if(m_sluU.Store) Destroy_CompCol_Matrix(&m_sluU); m_sluL.Store = 0; m_sluU.Store = 0; memset(&m_sluL,0,sizeof m_sluL); memset(&m_sluU,0,sizeof m_sluU); } // cached data to reduce reallocation, etc. 
mutable LUMatrixType m_l; mutable LUMatrixType m_u; mutable IntColVectorType m_p; mutable IntRowVectorType m_q; mutable LUMatrixType m_matrix; // copy of the factorized matrix mutable SluMatrix m_sluA; mutable SuperMatrix m_sluL, m_sluU; mutable SluMatrix m_sluB, m_sluX; mutable SuperLUStat_t m_sluStat; mutable superlu_options_t m_sluOptions; mutable std::vector<int> m_sluEtree; mutable Matrix<RealScalar,Dynamic,1> m_sluRscale, m_sluCscale; mutable Matrix<RealScalar,Dynamic,1> m_sluFerr, m_sluBerr; mutable char m_sluEqued; mutable ComputationInfo m_info; int m_factorizationIsOk; int m_analysisIsOk; mutable bool m_extractedDataAreDirty; private: SuperLUBase(SuperLUBase& ) { } }; /** \ingroup SuperLUSupport_Module * \class SuperLU * \brief A sparse direct LU factorization and solver based on the SuperLU library * * This class allows to solve for A.X = B sparse linear problems via a direct LU factorization * using the SuperLU library. The sparse matrix A must be squared and invertible. The vectors or matrices * X and B can be either dense or sparse. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * * \warning This class is only for the 4.x versions of SuperLU. The 3.x and 5.x versions are not supported. * * \implsparsesolverconcept * * \sa \ref TutorialSparseSolverConcept, class SparseLU */ template<typename _MatrixType> class SuperLU : public SuperLUBase<_MatrixType,SuperLU<_MatrixType> > { public: typedef SuperLUBase<_MatrixType,SuperLU> Base; typedef _MatrixType MatrixType; typedef typename Base::Scalar Scalar; typedef typename Base::RealScalar RealScalar; typedef typename Base::StorageIndex StorageIndex; typedef typename Base::IntRowVectorType IntRowVectorType; typedef typename Base::IntColVectorType IntColVectorType; typedef typename Base::PermutationMap PermutationMap; typedef typename Base::LUMatrixType LUMatrixType; typedef TriangularView<LUMatrixType, Lower|UnitDiag> LMatrixType; typedef TriangularView<LUMatrixType, Upper> UMatrixType; public: using Base::_solve_impl; SuperLU() : Base() { init(); } explicit SuperLU(const MatrixType& matrix) : Base() { init(); Base::compute(matrix); } ~SuperLU() { } /** Performs a symbolic decomposition on the sparcity of \a matrix. * * This function is particularly useful when solving for several problems having the same structure. * * \sa factorize() */ void analyzePattern(const MatrixType& matrix) { m_info = InvalidInput; m_isInitialized = false; Base::analyzePattern(matrix); } /** Performs a numeric decomposition of \a matrix * * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. 
* * \sa analyzePattern() */ void factorize(const MatrixType& matrix); /** \internal */ template<typename Rhs,typename Dest> void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const; inline const LMatrixType& matrixL() const { if (m_extractedDataAreDirty) this->extractData(); return m_l; } inline const UMatrixType& matrixU() const { if (m_extractedDataAreDirty) this->extractData(); return m_u; } inline const IntColVectorType& permutationP() const { if (m_extractedDataAreDirty) this->extractData(); return m_p; } inline const IntRowVectorType& permutationQ() const { if (m_extractedDataAreDirty) this->extractData(); return m_q; } Scalar determinant() const; protected: using Base::m_matrix; using Base::m_sluOptions; using Base::m_sluA; using Base::m_sluB; using Base::m_sluX; using Base::m_p; using Base::m_q; using Base::m_sluEtree; using Base::m_sluEqued; using Base::m_sluRscale; using Base::m_sluCscale; using Base::m_sluL; using Base::m_sluU; using Base::m_sluStat; using Base::m_sluFerr; using Base::m_sluBerr; using Base::m_l; using Base::m_u; using Base::m_analysisIsOk; using Base::m_factorizationIsOk; using Base::m_extractedDataAreDirty; using Base::m_isInitialized; using Base::m_info; void init() { Base::init(); set_default_options(&this->m_sluOptions); m_sluOptions.PrintStat = NO; m_sluOptions.ConditionNumber = NO; m_sluOptions.Trans = NOTRANS; m_sluOptions.ColPerm = COLAMD; } private: SuperLU(SuperLU& ) { } }; template<typename MatrixType> void SuperLU<MatrixType>::factorize(const MatrixType& a) { eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); if(!m_analysisIsOk) { m_info = InvalidInput; return; } this->initFactorization(a); m_sluOptions.ColPerm = COLAMD; int info = 0; RealScalar recip_pivot_growth, rcond; RealScalar ferr, berr; StatInit(&m_sluStat); SuperLU_gssvx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0], &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0], &m_sluL, &m_sluU, NULL, 0, &m_sluB, &m_sluX, &recip_pivot_growth, &rcond, &ferr, &berr, &m_sluStat, &info, Scalar()); StatFree(&m_sluStat); m_extractedDataAreDirty = true; // FIXME how to better check for errors ??? m_info = info == 0 ? 
Success : NumericalIssue; m_factorizationIsOk = true; } template<typename MatrixType> template<typename Rhs,typename Dest> void SuperLU<MatrixType>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest>& x) const { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()"); const Index size = m_matrix.rows(); const Index rhsCols = b.cols(); eigen_assert(size==b.rows()); m_sluOptions.Trans = NOTRANS; m_sluOptions.Fact = FACTORED; m_sluOptions.IterRefine = NOREFINE; m_sluFerr.resize(rhsCols); m_sluBerr.resize(rhsCols); Ref<const Matrix<typename Rhs::Scalar,Dynamic,Dynamic,ColMajor> > b_ref(b); Ref<const Matrix<typename Dest::Scalar,Dynamic,Dynamic,ColMajor> > x_ref(x); m_sluB = SluMatrix::Map(b_ref.const_cast_derived()); m_sluX = SluMatrix::Map(x_ref.const_cast_derived()); typename Rhs::PlainObject b_cpy; if(m_sluEqued!='N') { b_cpy = b; m_sluB = SluMatrix::Map(b_cpy.const_cast_derived()); } StatInit(&m_sluStat); int info = 0; RealScalar recip_pivot_growth, rcond; SuperLU_gssvx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0], &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0], &m_sluL, &m_sluU, NULL, 0, &m_sluB, &m_sluX, &recip_pivot_growth, &rcond, &m_sluFerr[0], &m_sluBerr[0], &m_sluStat, &info, Scalar()); StatFree(&m_sluStat); if(x.derived().data() != x_ref.data()) x = x_ref; m_info = info==0 ? Success : NumericalIssue; } // the code of this extractData() function has been adapted from the SuperLU's Matlab support code, // // Copyright (c) 1994 by Xerox Corporation. All rights reserved. // // THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY // EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. // template<typename MatrixType, typename Derived> void SuperLUBase<MatrixType,Derived>::extractData() const { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for extracting factors, you must first call either compute() or analyzePattern()/factorize()"); if (m_extractedDataAreDirty) { int upper; int fsupc, istart, nsupr; int lastl = 0, lastu = 0; SCformat *Lstore = static_cast<SCformat*>(m_sluL.Store); NCformat *Ustore = static_cast<NCformat*>(m_sluU.Store); Scalar *SNptr; const Index size = m_matrix.rows(); m_l.resize(size,size); m_l.resizeNonZeros(Lstore->nnz); m_u.resize(size,size); m_u.resizeNonZeros(Ustore->nnz); int* Lcol = m_l.outerIndexPtr(); int* Lrow = m_l.innerIndexPtr(); Scalar* Lval = m_l.valuePtr(); int* Ucol = m_u.outerIndexPtr(); int* Urow = m_u.innerIndexPtr(); Scalar* Uval = m_u.valuePtr(); Ucol[0] = 0; Ucol[0] = 0; /* for each supernode */ for (int k = 0; k <= Lstore->nsuper; ++k) { fsupc = L_FST_SUPC(k); istart = L_SUB_START(fsupc); nsupr = L_SUB_START(fsupc+1) - istart; upper = 1; /* for each column in the supernode */ for (int j = fsupc; j < L_FST_SUPC(k+1); ++j) { SNptr = &((Scalar*)Lstore->nzval)[L_NZ_START(j)]; /* Extract U */ for (int i = U_NZ_START(j); i < U_NZ_START(j+1); ++i) { Uval[lastu] = ((Scalar*)Ustore->nzval)[i]; /* Matlab doesn't like explicit zero. */ if (Uval[lastu] != 0.0) Urow[lastu++] = U_SUB(i); } for (int i = 0; i < upper; ++i) { /* upper triangle in the supernode */ Uval[lastu] = SNptr[i]; /* Matlab doesn't like explicit zero. */ if (Uval[lastu] != 0.0) Urow[lastu++] = L_SUB(istart+i); } Ucol[j+1] = lastu; /* Extract L */ Lval[lastl] = 1.0; /* unit diagonal */ Lrow[lastl++] = L_SUB(istart + upper - 1); for (int i = upper; i < nsupr; ++i) { Lval[lastl] = SNptr[i]; /* Matlab doesn't like explicit zero. 
*/ if (Lval[lastl] != 0.0) Lrow[lastl++] = L_SUB(istart+i); } Lcol[j+1] = lastl; ++upper; } /* for j ... */ } /* for k ... */ // squeeze the matrices : m_l.resizeNonZeros(lastl); m_u.resizeNonZeros(lastu); m_extractedDataAreDirty = false; } } template<typename MatrixType> typename SuperLU<MatrixType>::Scalar SuperLU<MatrixType>::determinant() const { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for computing the determinant, you must first call either compute() or analyzePattern()/factorize()"); if (m_extractedDataAreDirty) this->extractData(); Scalar det = Scalar(1); for (int j=0; j<m_u.cols(); ++j) { if (m_u.outerIndexPtr()[j+1]-m_u.outerIndexPtr()[j] > 0) { int lastId = m_u.outerIndexPtr()[j+1]-1; eigen_assert(m_u.innerIndexPtr()[lastId]<=j); if (m_u.innerIndexPtr()[lastId]==j) det *= m_u.valuePtr()[lastId]; } } if(PermutationMap(m_p.data(),m_p.size()).determinant()*PermutationMap(m_q.data(),m_q.size()).determinant()<0) det = -det; if(m_sluEqued!='N') return det/m_sluRscale.prod()/m_sluCscale.prod(); else return det; } #ifdef EIGEN_PARSED_BY_DOXYGEN #define EIGEN_SUPERLU_HAS_ILU #endif #ifdef EIGEN_SUPERLU_HAS_ILU /** \ingroup SuperLUSupport_Module * \class SuperILU * \brief A sparse direct \b incomplete LU factorization and solver based on the SuperLU library * * This class allows to solve for an approximate solution of A.X = B sparse linear problems via an incomplete LU factorization * using the SuperLU library. This class is aimed to be used as a preconditioner of the iterative linear solvers. * * \warning This class is only for the 4.x versions of SuperLU. The 3.x and 5.x versions are not supported. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * * \implsparsesolverconcept * * \sa \ref TutorialSparseSolverConcept, class IncompleteLUT, class ConjugateGradient, class BiCGSTAB */ template<typename _MatrixType> class SuperILU : public SuperLUBase<_MatrixType,SuperILU<_MatrixType> > { public: typedef SuperLUBase<_MatrixType,SuperILU> Base; typedef _MatrixType MatrixType; typedef typename Base::Scalar Scalar; typedef typename Base::RealScalar RealScalar; public: using Base::_solve_impl; SuperILU() : Base() { init(); } SuperILU(const MatrixType& matrix) : Base() { init(); Base::compute(matrix); } ~SuperILU() { } /** Performs a symbolic decomposition on the sparcity of \a matrix. * * This function is particularly useful when solving for several problems having the same structure. * * \sa factorize() */ void analyzePattern(const MatrixType& matrix) { Base::analyzePattern(matrix); } /** Performs a numeric decomposition of \a matrix * * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. 
* * \sa analyzePattern() */ void factorize(const MatrixType& matrix); #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal */ template<typename Rhs,typename Dest> void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const; #endif // EIGEN_PARSED_BY_DOXYGEN protected: using Base::m_matrix; using Base::m_sluOptions; using Base::m_sluA; using Base::m_sluB; using Base::m_sluX; using Base::m_p; using Base::m_q; using Base::m_sluEtree; using Base::m_sluEqued; using Base::m_sluRscale; using Base::m_sluCscale; using Base::m_sluL; using Base::m_sluU; using Base::m_sluStat; using Base::m_sluFerr; using Base::m_sluBerr; using Base::m_l; using Base::m_u; using Base::m_analysisIsOk; using Base::m_factorizationIsOk; using Base::m_extractedDataAreDirty; using Base::m_isInitialized; using Base::m_info; void init() { Base::init(); ilu_set_default_options(&m_sluOptions); m_sluOptions.PrintStat = NO; m_sluOptions.ConditionNumber = NO; m_sluOptions.Trans = NOTRANS; m_sluOptions.ColPerm = MMD_AT_PLUS_A; // no attempt to preserve column sum m_sluOptions.ILU_MILU = SILU; // only basic ILU(k) support -- no direct control over memory consumption // better to use ILU_DropRule = DROP_BASIC | DROP_AREA // and set ILU_FillFactor to max memory growth m_sluOptions.ILU_DropRule = DROP_BASIC; m_sluOptions.ILU_DropTol = NumTraits<Scalar>::dummy_precision()*10; } private: SuperILU(SuperILU& ) { } }; template<typename MatrixType> void SuperILU<MatrixType>::factorize(const MatrixType& a) { eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); if(!m_analysisIsOk) { m_info = InvalidInput; return; } this->initFactorization(a); int info = 0; RealScalar recip_pivot_growth, rcond; StatInit(&m_sluStat); SuperLU_gsisx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0], &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0], &m_sluL, &m_sluU, NULL, 0, &m_sluB, &m_sluX, &recip_pivot_growth, &rcond, &m_sluStat, &info, Scalar()); StatFree(&m_sluStat); // FIXME how to better check for errors ??? m_info = info == 0 ? Success : NumericalIssue; m_factorizationIsOk = true; } #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename MatrixType> template<typename Rhs,typename Dest> void SuperILU<MatrixType>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest>& x) const { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()"); const int size = m_matrix.rows(); const int rhsCols = b.cols(); eigen_assert(size==b.rows()); m_sluOptions.Trans = NOTRANS; m_sluOptions.Fact = FACTORED; m_sluOptions.IterRefine = NOREFINE; m_sluFerr.resize(rhsCols); m_sluBerr.resize(rhsCols); Ref<const Matrix<typename Rhs::Scalar,Dynamic,Dynamic,ColMajor> > b_ref(b); Ref<const Matrix<typename Dest::Scalar,Dynamic,Dynamic,ColMajor> > x_ref(x); m_sluB = SluMatrix::Map(b_ref.const_cast_derived()); m_sluX = SluMatrix::Map(x_ref.const_cast_derived()); typename Rhs::PlainObject b_cpy; if(m_sluEqued!='N') { b_cpy = b; m_sluB = SluMatrix::Map(b_cpy.const_cast_derived()); } int info = 0; RealScalar recip_pivot_growth, rcond; StatInit(&m_sluStat); SuperLU_gsisx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0], &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0], &m_sluL, &m_sluU, NULL, 0, &m_sluB, &m_sluX, &recip_pivot_growth, &rcond, &m_sluStat, &info, Scalar()); StatFree(&m_sluStat); if(x.derived().data() != x_ref.data()) x = x_ref; m_info = info==0 ? 
Success : NumericalIssue; } #endif #endif } // end namespace Eigen #endif // EIGEN_SUPERLUSUPPORT_H
Unknown
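// A sketch of the user-facing side of the SuperLU wrapper above. It assumes Eigen was
// built with SuperLU 4.x headers available and the SuperLU library linked; the sparse
// system itself is left as an input parameter rather than invented here.
#include <Eigen/Sparse>
#include <Eigen/SuperLUSupport>

int superlu_solve(const Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b,
                  Eigen::VectorXd& x) {
  Eigen::SuperLU<Eigen::SparseMatrix<double> > solver;
  solver.compute(A);                  // analyzePattern() followed by factorize()
  if (solver.info() != Eigen::Success)
    return -1;                        // factorization failed (e.g. singular A)
  x = solver.solve(b);
  return solver.info() == Eigen::Success ? 0 : -1;
}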
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/ArrayWrapper.h
.h
6,775
210
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ARRAYWRAPPER_H #define EIGEN_ARRAYWRAPPER_H namespace Eigen { /** \class ArrayWrapper * \ingroup Core_Module * * \brief Expression of a mathematical vector or matrix as an array object * * This class is the return type of MatrixBase::array(), and most of the time * this is the only way it is use. * * \sa MatrixBase::array(), class MatrixWrapper */ namespace internal { template<typename ExpressionType> struct traits<ArrayWrapper<ExpressionType> > : public traits<typename remove_all<typename ExpressionType::Nested>::type > { typedef ArrayXpr XprKind; // Let's remove NestByRefBit enum { Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags, LvalueBitFlag = is_lvalue<ExpressionType>::value ? LvalueBit : 0, Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag }; }; } template<typename ExpressionType> class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> > { public: typedef ArrayBase<ArrayWrapper> Base; EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper) typedef typename internal::remove_all<ExpressionType>::type NestedExpression; typedef typename internal::conditional< internal::is_lvalue<ExpressionType>::value, Scalar, const Scalar >::type ScalarWithConstIfNotLvalue; typedef typename internal::ref_selector<ExpressionType>::non_const_type NestedExpressionType; using Base::coeffRef; EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {} EIGEN_DEVICE_FUNC inline Index rows() const { return m_expression.rows(); } EIGEN_DEVICE_FUNC inline Index cols() const { return m_expression.cols(); } EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_expression.outerStride(); } EIGEN_DEVICE_FUNC inline Index innerStride() const { return m_expression.innerStride(); } EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } EIGEN_DEVICE_FUNC inline const Scalar* data() const { return m_expression.data(); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { return m_expression.coeffRef(rowId, colId); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_expression.coeffRef(index); } template<typename Dest> EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const { dst = m_expression; } const typename internal::remove_all<NestedExpressionType>::type& EIGEN_DEVICE_FUNC nestedExpression() const { return m_expression; } /** Forwards the resizing request to the nested expression * \sa DenseBase::resize(Index) */ EIGEN_DEVICE_FUNC void resize(Index newSize) { m_expression.resize(newSize); } /** Forwards the resizing request to the nested expression * \sa DenseBase::resize(Index,Index)*/ EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { m_expression.resize(rows,cols); } protected: NestedExpressionType m_expression; }; /** \class MatrixWrapper * \ingroup Core_Module * * \brief Expression of an array as a mathematical vector or matrix * * This class is the return type of ArrayBase::matrix(), and most of the time * this is the only way it is use. 
* * \sa MatrixBase::matrix(), class ArrayWrapper */ namespace internal { template<typename ExpressionType> struct traits<MatrixWrapper<ExpressionType> > : public traits<typename remove_all<typename ExpressionType::Nested>::type > { typedef MatrixXpr XprKind; // Let's remove NestByRefBit enum { Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags, LvalueBitFlag = is_lvalue<ExpressionType>::value ? LvalueBit : 0, Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag }; }; } template<typename ExpressionType> class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> > { public: typedef MatrixBase<MatrixWrapper<ExpressionType> > Base; EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper) typedef typename internal::remove_all<ExpressionType>::type NestedExpression; typedef typename internal::conditional< internal::is_lvalue<ExpressionType>::value, Scalar, const Scalar >::type ScalarWithConstIfNotLvalue; typedef typename internal::ref_selector<ExpressionType>::non_const_type NestedExpressionType; using Base::coeffRef; EIGEN_DEVICE_FUNC explicit inline MatrixWrapper(ExpressionType& matrix) : m_expression(matrix) {} EIGEN_DEVICE_FUNC inline Index rows() const { return m_expression.rows(); } EIGEN_DEVICE_FUNC inline Index cols() const { return m_expression.cols(); } EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_expression.outerStride(); } EIGEN_DEVICE_FUNC inline Index innerStride() const { return m_expression.innerStride(); } EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } EIGEN_DEVICE_FUNC inline const Scalar* data() const { return m_expression.data(); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { return m_expression.derived().coeffRef(rowId, colId); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_expression.coeffRef(index); } EIGEN_DEVICE_FUNC const typename internal::remove_all<NestedExpressionType>::type& nestedExpression() const { return m_expression; } /** Forwards the resizing request to the nested expression * \sa DenseBase::resize(Index) */ EIGEN_DEVICE_FUNC void resize(Index newSize) { m_expression.resize(newSize); } /** Forwards the resizing request to the nested expression * \sa DenseBase::resize(Index,Index)*/ EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { m_expression.resize(rows,cols); } protected: NestedExpressionType m_expression; }; } // end namespace Eigen #endif // EIGEN_ARRAYWRAPPER_H
Unknown
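// What the two wrapper classes above enable at the user level: switching between
// linear-algebra and coefficient-wise semantics without copying. The data is arbitrary.
#include <Eigen/Dense>

void wrapper_example() {
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 3);

  m.array() += 1.0;                              // ArrayWrapper: coefficient-wise add
  Eigen::ArrayXXd sq = m.array() * m.array();    // coefficient-wise product

  Eigen::MatrixXd p = sq.matrix() * m;           // MatrixWrapper: back to a true matrix product
  (void)p;
}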
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Assign.h
.h
2,720
91
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2007 Michael Olbrich <michael.olbrich@gmx.net> // Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ASSIGN_H #define EIGEN_ASSIGN_H namespace Eigen { template<typename Derived> template<typename OtherDerived> EIGEN_STRONG_INLINE Derived& DenseBase<Derived> ::lazyAssign(const DenseBase<OtherDerived>& other) { enum{ SameType = internal::is_same<typename Derived::Scalar,typename OtherDerived::Scalar>::value }; EIGEN_STATIC_ASSERT_LVALUE(Derived) EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived) EIGEN_STATIC_ASSERT(SameType,YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) eigen_assert(rows() == other.rows() && cols() == other.cols()); internal::call_assignment_no_alias(derived(),other.derived()); return derived(); } template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase<OtherDerived>& other) { internal::call_assignment(derived(), other.derived()); return derived(); } template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase& other) { internal::call_assignment(derived(), other.derived()); return derived(); } template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const MatrixBase& other) { internal::call_assignment(derived(), other.derived()); return derived(); } template<typename Derived> template <typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const DenseBase<OtherDerived>& other) { internal::call_assignment(derived(), other.derived()); return derived(); } template<typename Derived> template <typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const EigenBase<OtherDerived>& other) { internal::call_assignment(derived(), other.derived()); return derived(); } template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other) { other.derived().evalTo(derived()); return derived(); } } // end namespace Eigen #endif // EIGEN_ASSIGN_H
C++
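A minimal sketch of the assignment paths implemented in Assign.h above, including the static assertion that forbids mixing scalar types without an explicit cast; it assumes Eigen 3.3 and is illustrative only.

#include <Eigen/Dense>

int main()
{
  Eigen::Matrix3f A = Eigen::Matrix3f::Identity();
  Eigen::Matrix3f B;

  B = A;                 // DenseBase::operator= dispatches to internal::call_assignment
  B = A + A;             // the expression is evaluated directly into B

  Eigen::Matrix3d D;
  // D = A;              // would trigger YOU_MIXED_DIFFERENT_NUMERIC_TYPES__...
  D = A.cast<double>();  // an explicit cast is required when scalar types differ
  return 0;
}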
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/DenseCoeffsBase.h
.h
24,212
682
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DENSECOEFFSBASE_H #define EIGEN_DENSECOEFFSBASE_H namespace Eigen { namespace internal { template<typename T> struct add_const_on_value_type_if_arithmetic { typedef typename conditional<is_arithmetic<T>::value, T, typename add_const_on_value_type<T>::type>::type type; }; } /** \brief Base class providing read-only coefficient access to matrices and arrays. * \ingroup Core_Module * \tparam Derived Type of the derived class * \tparam #ReadOnlyAccessors Constant indicating read-only access * * This class defines the \c operator() \c const function and friends, which can be used to read specific * entries of a matrix or array. * * \sa DenseCoeffsBase<Derived, WriteAccessors>, DenseCoeffsBase<Derived, DirectAccessors>, * \ref TopicClassHierarchy */ template<typename Derived> class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived> { public: typedef typename internal::traits<Derived>::StorageKind StorageKind; typedef typename internal::traits<Derived>::Scalar Scalar; typedef typename internal::packet_traits<Scalar>::type PacketScalar; // Explanation for this CoeffReturnType typedef. // - This is the return type of the coeff() method. // - The LvalueBit means exactly that we can offer a coeffRef() method, which means exactly that we can get references // to coeffs, which means exactly that we can have coeff() return a const reference (as opposed to returning a value). // - The is_artihmetic check is required since "const int", "const double", etc. will cause warnings on some systems // while the declaration of "const T", where T is a non arithmetic type does not. Always returning "const Scalar&" is // not possible, since the underlying expressions might not offer a valid address the reference could be referring to. typedef typename internal::conditional<bool(internal::traits<Derived>::Flags&LvalueBit), const Scalar&, typename internal::conditional<internal::is_arithmetic<Scalar>::value, Scalar, const Scalar>::type >::type CoeffReturnType; typedef typename internal::add_const_on_value_type_if_arithmetic< typename internal::packet_traits<Scalar>::type >::type PacketReturnType; typedef EigenBase<Derived> Base; using Base::rows; using Base::cols; using Base::size; using Base::derived; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const { return int(Derived::RowsAtCompileTime) == 1 ? 0 : int(Derived::ColsAtCompileTime) == 1 ? inner : int(Derived::Flags)&RowMajorBit ? outer : inner; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const { return int(Derived::ColsAtCompileTime) == 1 ? 0 : int(Derived::RowsAtCompileTime) == 1 ? inner : int(Derived::Flags)&RowMajorBit ? inner : outer; } /** Short version: don't use this function, use * \link operator()(Index,Index) const \endlink instead. * * Long version: this function is similar to * \link operator()(Index,Index) const \endlink, but without the assertion. * Use this for limiting the performance cost of debugging code when doing * repeated coefficient access. Only use this when it is guaranteed that the * parameters \a row and \a col are in range. 
* * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this * function equivalent to \link operator()(Index,Index) const \endlink. * * \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return internal::evaluator<Derived>(derived()).coeff(row,col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const { return coeff(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner)); } /** \returns the coefficient at given the given row and column. * * \sa operator()(Index,Index), operator[](Index) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType operator()(Index row, Index col) const { eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return coeff(row, col); } /** Short version: don't use this function, use * \link operator[](Index) const \endlink instead. * * Long version: this function is similar to * \link operator[](Index) const \endlink, but without the assertion. * Use this for limiting the performance cost of debugging code when doing * repeated coefficient access. Only use this when it is guaranteed that the * parameter \a index is in range. * * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this * function equivalent to \link operator[](Index) const \endlink. * * \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags & LinearAccessBit, THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS) eigen_internal_assert(index >= 0 && index < size()); return internal::evaluator<Derived>(derived()).coeff(index); } /** \returns the coefficient at given index. * * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. * * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const, * z() const, w() const */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType operator[](Index index) const { EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) eigen_assert(index >= 0 && index < size()); return coeff(index); } /** \returns the coefficient at given index. * * This is synonymous to operator[](Index) const. * * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. * * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const, * z() const, w() const */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType operator()(Index index) const { eigen_assert(index >= 0 && index < size()); return coeff(index); } /** equivalent to operator[](0). */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType x() const { return (*this)[0]; } /** equivalent to operator[](1). */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType y() const { EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=2, OUT_OF_RANGE_ACCESS); return (*this)[1]; } /** equivalent to operator[](2). 
*/ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType z() const { EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=3, OUT_OF_RANGE_ACCESS); return (*this)[2]; } /** equivalent to operator[](3). */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType w() const { EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=4, OUT_OF_RANGE_ACCESS); return (*this)[3]; } /** \internal * \returns the packet of coefficients starting at the given row and column. It is your responsibility * to ensure that a packet really starts there. This method is only available on expressions having the * PacketAccessBit. * * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets * starting at an address which is a multiple of the packet size. */ template<int LoadMode> EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const { typedef typename internal::packet_traits<Scalar>::type DefaultPacketType; eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return internal::evaluator<Derived>(derived()).template packet<LoadMode,DefaultPacketType>(row,col); } /** \internal */ template<int LoadMode> EIGEN_STRONG_INLINE PacketReturnType packetByOuterInner(Index outer, Index inner) const { return packet<LoadMode>(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner)); } /** \internal * \returns the packet of coefficients starting at the given index. It is your responsibility * to ensure that a packet really starts there. This method is only available on expressions having the * PacketAccessBit and the LinearAccessBit. * * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets * starting at an address which is a multiple of the packet size. */ template<int LoadMode> EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags & LinearAccessBit, THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS) typedef typename internal::packet_traits<Scalar>::type DefaultPacketType; eigen_internal_assert(index >= 0 && index < size()); return internal::evaluator<Derived>(derived()).template packet<LoadMode,DefaultPacketType>(index); } protected: // explanation: DenseBase is doing "using ..." on the methods from DenseCoeffsBase. // But some methods are only available in the DirectAccess case. // So we add dummy methods here with these names, so that "using... " doesn't fail. // It's not private so that the child class DenseBase can access them, and it's not public // either since it's an implementation detail, so has to be protected. void coeffRef(); void coeffRefByOuterInner(); void writePacket(); void writePacketByOuterInner(); void copyCoeff(); void copyCoeffByOuterInner(); void copyPacket(); void copyPacketByOuterInner(); void stride(); void innerStride(); void outerStride(); void rowStride(); void colStride(); }; /** \brief Base class providing read/write coefficient access to matrices and arrays. 
* \ingroup Core_Module * \tparam Derived Type of the derived class * \tparam #WriteAccessors Constant indicating read/write access * * This class defines the non-const \c operator() function and friends, which can be used to write specific * entries of a matrix or array. This class inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which * defines the const variant for reading specific entries. * * \sa DenseCoeffsBase<Derived, DirectAccessors>, \ref TopicClassHierarchy */ template<typename Derived> class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors> { public: typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base; typedef typename internal::traits<Derived>::StorageKind StorageKind; typedef typename internal::traits<Derived>::Scalar Scalar; typedef typename internal::packet_traits<Scalar>::type PacketScalar; typedef typename NumTraits<Scalar>::Real RealScalar; using Base::coeff; using Base::rows; using Base::cols; using Base::size; using Base::derived; using Base::rowIndexByOuterInner; using Base::colIndexByOuterInner; using Base::operator[]; using Base::operator(); using Base::x; using Base::y; using Base::z; using Base::w; /** Short version: don't use this function, use * \link operator()(Index,Index) \endlink instead. * * Long version: this function is similar to * \link operator()(Index,Index) \endlink, but without the assertion. * Use this for limiting the performance cost of debugging code when doing * repeated coefficient access. Only use this when it is guaranteed that the * parameters \a row and \a col are in range. * * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this * function equivalent to \link operator()(Index,Index) \endlink. * * \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return internal::evaluator<Derived>(derived()).coeffRef(row,col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRefByOuterInner(Index outer, Index inner) { return coeffRef(rowIndexByOuterInner(outer, inner), colIndexByOuterInner(outer, inner)); } /** \returns a reference to the coefficient at given the given row and column. * * \sa operator[](Index) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index row, Index col) { eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); return coeffRef(row, col); } /** Short version: don't use this function, use * \link operator[](Index) \endlink instead. * * Long version: this function is similar to * \link operator[](Index) \endlink, but without the assertion. * Use this for limiting the performance cost of debugging code when doing * repeated coefficient access. Only use this when it is guaranteed that the * parameters \a row and \a col are in range. * * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this * function equivalent to \link operator[](Index) \endlink. * * \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags & LinearAccessBit, THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS) eigen_internal_assert(index >= 0 && index < size()); return internal::evaluator<Derived>(derived()).coeffRef(index); } /** \returns a reference to the coefficient at given index. 
* * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. * * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator[](Index index) { EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) eigen_assert(index >= 0 && index < size()); return coeffRef(index); } /** \returns a reference to the coefficient at given index. * * This is synonymous to operator[](Index). * * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. * * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index index) { eigen_assert(index >= 0 && index < size()); return coeffRef(index); } /** equivalent to operator[](0). */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& x() { return (*this)[0]; } /** equivalent to operator[](1). */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& y() { EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=2, OUT_OF_RANGE_ACCESS); return (*this)[1]; } /** equivalent to operator[](2). */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& z() { EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=3, OUT_OF_RANGE_ACCESS); return (*this)[2]; } /** equivalent to operator[](3). */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& w() { EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=4, OUT_OF_RANGE_ACCESS); return (*this)[3]; } }; /** \brief Base class providing direct read-only coefficient access to matrices and arrays. * \ingroup Core_Module * \tparam Derived Type of the derived class * \tparam #DirectAccessors Constant indicating direct access * * This class defines functions to work with strides which can be used to access entries directly. This class * inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which defines functions to access entries read-only using * \c operator() . * * \sa \blank \ref TopicClassHierarchy */ template<typename Derived> class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors> { public: typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base; typedef typename internal::traits<Derived>::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; using Base::rows; using Base::cols; using Base::size; using Base::derived; /** \returns the pointer increment between two consecutive elements within a slice in the inner direction. * * \sa outerStride(), rowStride(), colStride() */ EIGEN_DEVICE_FUNC inline Index innerStride() const { return derived().innerStride(); } /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns * in a column-major matrix). * * \sa innerStride(), rowStride(), colStride() */ EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().outerStride(); } // FIXME shall we remove it ? inline Index stride() const { return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); } /** \returns the pointer increment between two consecutive rows. * * \sa innerStride(), outerStride(), colStride() */ EIGEN_DEVICE_FUNC inline Index rowStride() const { return Derived::IsRowMajor ? outerStride() : innerStride(); } /** \returns the pointer increment between two consecutive columns. 
* * \sa innerStride(), outerStride(), rowStride() */ EIGEN_DEVICE_FUNC inline Index colStride() const { return Derived::IsRowMajor ? innerStride() : outerStride(); } }; /** \brief Base class providing direct read/write coefficient access to matrices and arrays. * \ingroup Core_Module * \tparam Derived Type of the derived class * \tparam #DirectWriteAccessors Constant indicating direct access * * This class defines functions to work with strides which can be used to access entries directly. This class * inherits DenseCoeffsBase<Derived, WriteAccessors> which defines functions to access entries read/write using * \c operator(). * * \sa \blank \ref TopicClassHierarchy */ template<typename Derived> class DenseCoeffsBase<Derived, DirectWriteAccessors> : public DenseCoeffsBase<Derived, WriteAccessors> { public: typedef DenseCoeffsBase<Derived, WriteAccessors> Base; typedef typename internal::traits<Derived>::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; using Base::rows; using Base::cols; using Base::size; using Base::derived; /** \returns the pointer increment between two consecutive elements within a slice in the inner direction. * * \sa outerStride(), rowStride(), colStride() */ EIGEN_DEVICE_FUNC inline Index innerStride() const { return derived().innerStride(); } /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns * in a column-major matrix). * * \sa innerStride(), rowStride(), colStride() */ EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().outerStride(); } // FIXME shall we remove it ? inline Index stride() const { return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); } /** \returns the pointer increment between two consecutive rows. * * \sa innerStride(), outerStride(), colStride() */ EIGEN_DEVICE_FUNC inline Index rowStride() const { return Derived::IsRowMajor ? outerStride() : innerStride(); } /** \returns the pointer increment between two consecutive columns. * * \sa innerStride(), outerStride(), rowStride() */ EIGEN_DEVICE_FUNC inline Index colStride() const { return Derived::IsRowMajor ? innerStride() : outerStride(); } }; namespace internal { template<int Alignment, typename Derived, bool JustReturnZero> struct first_aligned_impl { static inline Index run(const Derived&) { return 0; } }; template<int Alignment, typename Derived> struct first_aligned_impl<Alignment, Derived, false> { static inline Index run(const Derived& m) { return internal::first_aligned<Alignment>(m.data(), m.size()); } }; /** \internal \returns the index of the first element of the array stored by \a m that is properly aligned with respect to \a Alignment for vectorization. * * \tparam Alignment requested alignment in Bytes. * * There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more * documentation. 
*/ template<int Alignment, typename Derived> static inline Index first_aligned(const DenseBase<Derived>& m) { enum { ReturnZero = (int(evaluator<Derived>::Alignment) >= Alignment) || !(Derived::Flags & DirectAccessBit) }; return first_aligned_impl<Alignment, Derived, ReturnZero>::run(m.derived()); } template<typename Derived> static inline Index first_default_aligned(const DenseBase<Derived>& m) { typedef typename Derived::Scalar Scalar; typedef typename packet_traits<Scalar>::type DefaultPacketType; return internal::first_aligned<int(unpacket_traits<DefaultPacketType>::alignment),Derived>(m); } template<typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret> struct inner_stride_at_compile_time { enum { ret = traits<Derived>::InnerStrideAtCompileTime }; }; template<typename Derived> struct inner_stride_at_compile_time<Derived, false> { enum { ret = 0 }; }; template<typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret> struct outer_stride_at_compile_time { enum { ret = traits<Derived>::OuterStrideAtCompileTime }; }; template<typename Derived> struct outer_stride_at_compile_time<Derived, false> { enum { ret = 0 }; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_DENSECOEFFSBASE_H
C++
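A minimal sketch of the coefficient-access API declared in DenseCoeffsBase.h above; it assumes Eigen 3.3 and is illustrative only.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix2d m;
  m(0, 0) = 1.0;          // read/write access via DenseCoeffsBase<..., WriteAccessors>
  m(0, 1) = 2.0;
  m(1, 0) = 3.0;
  m(1, 1) = m(0, 0) + m(1, 0);

  Eigen::Vector3d v(1.0, 2.0, 3.0);
  v[0] = v.y() + v.z();   // operator[] and the x()/y()/z()/w() shorthands are vectors-only

  // coeff()/coeffRef() skip the range assertion unless EIGEN_INTERNAL_DEBUGGING is defined.
  double d = m.coeff(1, 1);

  std::cout << m << "\n" << v.transpose() << "\n" << d << "\n";
  return 0;
}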
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/PermutationMatrix.h
.h
20,694
606
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2009-2015 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PERMUTATIONMATRIX_H #define EIGEN_PERMUTATIONMATRIX_H namespace Eigen { namespace internal { enum PermPermProduct_t {PermPermProduct}; } // end namespace internal /** \class PermutationBase * \ingroup Core_Module * * \brief Base class for permutations * * \tparam Derived the derived class * * This class is the base class for all expressions representing a permutation matrix, * internally stored as a vector of integers. * The convention followed here is that if \f$ \sigma \f$ is a permutation, the corresponding permutation matrix * \f$ P_\sigma \f$ is such that if \f$ (e_1,\ldots,e_p) \f$ is the canonical basis, we have: * \f[ P_\sigma(e_i) = e_{\sigma(i)}. \f] * This convention ensures that for any two permutations \f$ \sigma, \tau \f$, we have: * \f[ P_{\sigma\circ\tau} = P_\sigma P_\tau. \f] * * Permutation matrices are square and invertible. * * Notice that in addition to the member functions and operators listed here, there also are non-member * operator* to multiply any kind of permutation object with any kind of matrix expression (MatrixBase) * on either side. * * \sa class PermutationMatrix, class PermutationWrapper */ template<typename Derived> class PermutationBase : public EigenBase<Derived> { typedef internal::traits<Derived> Traits; typedef EigenBase<Derived> Base; public: #ifndef EIGEN_PARSED_BY_DOXYGEN typedef typename Traits::IndicesType IndicesType; enum { Flags = Traits::Flags, RowsAtCompileTime = Traits::RowsAtCompileTime, ColsAtCompileTime = Traits::ColsAtCompileTime, MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime, MaxColsAtCompileTime = Traits::MaxColsAtCompileTime }; typedef typename Traits::StorageIndex StorageIndex; typedef Matrix<StorageIndex,RowsAtCompileTime,ColsAtCompileTime,0,MaxRowsAtCompileTime,MaxColsAtCompileTime> DenseMatrixType; typedef PermutationMatrix<IndicesType::SizeAtCompileTime,IndicesType::MaxSizeAtCompileTime,StorageIndex> PlainPermutationType; typedef PlainPermutationType PlainObject; using Base::derived; typedef Inverse<Derived> InverseReturnType; typedef void Scalar; #endif /** Copies the other permutation into *this */ template<typename OtherDerived> Derived& operator=(const PermutationBase<OtherDerived>& other) { indices() = other.indices(); return derived(); } /** Assignment from the Transpositions \a tr */ template<typename OtherDerived> Derived& operator=(const TranspositionsBase<OtherDerived>& tr) { setIdentity(tr.size()); for(Index k=size()-1; k>=0; --k) applyTranspositionOnTheRight(k,tr.coeff(k)); return derived(); } /** \returns the number of rows */ inline Index rows() const { return Index(indices().size()); } /** \returns the number of columns */ inline Index cols() const { return Index(indices().size()); } /** \returns the size of a side of the respective square matrix, i.e., the number of indices */ inline Index size() const { return Index(indices().size()); } #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename DenseDerived> void evalTo(MatrixBase<DenseDerived>& other) const { other.setZero(); for (Index i=0; i<rows(); ++i) other.coeffRef(indices().coeff(i),i) = typename DenseDerived::Scalar(1); 
} #endif /** \returns a Matrix object initialized from this permutation matrix. Notice that it * is inefficient to return this Matrix object by value. For efficiency, favor using * the Matrix constructor taking EigenBase objects. */ DenseMatrixType toDenseMatrix() const { return derived(); } /** const version of indices(). */ const IndicesType& indices() const { return derived().indices(); } /** \returns a reference to the stored array representing the permutation. */ IndicesType& indices() { return derived().indices(); } /** Resizes to given size. */ inline void resize(Index newSize) { indices().resize(newSize); } /** Sets *this to be the identity permutation matrix */ void setIdentity() { StorageIndex n = StorageIndex(size()); for(StorageIndex i = 0; i < n; ++i) indices().coeffRef(i) = i; } /** Sets *this to be the identity permutation matrix of given size. */ void setIdentity(Index newSize) { resize(newSize); setIdentity(); } /** Multiplies *this by the transposition \f$(ij)\f$ on the left. * * \returns a reference to *this. * * \warning This is much slower than applyTranspositionOnTheRight(Index,Index): * this has linear complexity and requires a lot of branching. * * \sa applyTranspositionOnTheRight(Index,Index) */ Derived& applyTranspositionOnTheLeft(Index i, Index j) { eigen_assert(i>=0 && j>=0 && i<size() && j<size()); for(Index k = 0; k < size(); ++k) { if(indices().coeff(k) == i) indices().coeffRef(k) = StorageIndex(j); else if(indices().coeff(k) == j) indices().coeffRef(k) = StorageIndex(i); } return derived(); } /** Multiplies *this by the transposition \f$(ij)\f$ on the right. * * \returns a reference to *this. * * This is a fast operation, it only consists in swapping two indices. * * \sa applyTranspositionOnTheLeft(Index,Index) */ Derived& applyTranspositionOnTheRight(Index i, Index j) { eigen_assert(i>=0 && j>=0 && i<size() && j<size()); std::swap(indices().coeffRef(i), indices().coeffRef(j)); return derived(); } /** \returns the inverse permutation matrix. * * \note \blank \note_try_to_help_rvo */ inline InverseReturnType inverse() const { return InverseReturnType(derived()); } /** \returns the tranpose permutation matrix. * * \note \blank \note_try_to_help_rvo */ inline InverseReturnType transpose() const { return InverseReturnType(derived()); } /**** multiplication helpers to hopefully get RVO ****/ #ifndef EIGEN_PARSED_BY_DOXYGEN protected: template<typename OtherDerived> void assignTranspose(const PermutationBase<OtherDerived>& other) { for (Index i=0; i<rows();++i) indices().coeffRef(other.indices().coeff(i)) = i; } template<typename Lhs,typename Rhs> void assignProduct(const Lhs& lhs, const Rhs& rhs) { eigen_assert(lhs.cols() == rhs.rows()); for (Index i=0; i<rows();++i) indices().coeffRef(i) = lhs.indices().coeff(rhs.indices().coeff(i)); } #endif public: /** \returns the product permutation matrix. * * \note \blank \note_try_to_help_rvo */ template<typename Other> inline PlainPermutationType operator*(const PermutationBase<Other>& other) const { return PlainPermutationType(internal::PermPermProduct, derived(), other.derived()); } /** \returns the product of a permutation with another inverse permutation. * * \note \blank \note_try_to_help_rvo */ template<typename Other> inline PlainPermutationType operator*(const InverseImpl<Other,PermutationStorage>& other) const { return PlainPermutationType(internal::PermPermProduct, *this, other.eval()); } /** \returns the product of an inverse permutation with another permutation. 
* * \note \blank \note_try_to_help_rvo */ template<typename Other> friend inline PlainPermutationType operator*(const InverseImpl<Other, PermutationStorage>& other, const PermutationBase& perm) { return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); } /** \returns the determinant of the permutation matrix, which is either 1 or -1 depending on the parity of the permutation. * * This function is O(\c n) procedure allocating a buffer of \c n booleans. */ Index determinant() const { Index res = 1; Index n = size(); Matrix<bool,RowsAtCompileTime,1,0,MaxRowsAtCompileTime> mask(n); mask.fill(false); Index r = 0; while(r < n) { // search for the next seed while(r<n && mask[r]) r++; if(r>=n) break; // we got one, let's follow it until we are back to the seed Index k0 = r++; mask.coeffRef(k0) = true; for(Index k=indices().coeff(k0); k!=k0; k=indices().coeff(k)) { mask.coeffRef(k) = true; res = -res; } } return res; } protected: }; namespace internal { template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex> struct traits<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex> > : traits<Matrix<_StorageIndex,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> > { typedef PermutationStorage StorageKind; typedef Matrix<_StorageIndex, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType; typedef _StorageIndex StorageIndex; typedef void Scalar; }; } /** \class PermutationMatrix * \ingroup Core_Module * * \brief Permutation matrix * * \tparam SizeAtCompileTime the number of rows/cols, or Dynamic * \tparam MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it. * \tparam _StorageIndex the integer type of the indices * * This class represents a permutation matrix, internally stored as a vector of integers. * * \sa class PermutationBase, class PermutationWrapper, class DiagonalMatrix */ template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex> class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex> > { typedef PermutationBase<PermutationMatrix> Base; typedef internal::traits<PermutationMatrix> Traits; public: typedef const PermutationMatrix& Nested; #ifndef EIGEN_PARSED_BY_DOXYGEN typedef typename Traits::IndicesType IndicesType; typedef typename Traits::StorageIndex StorageIndex; #endif inline PermutationMatrix() {} /** Constructs an uninitialized permutation matrix of given size. */ explicit inline PermutationMatrix(Index size) : m_indices(size) { eigen_internal_assert(size <= NumTraits<StorageIndex>::highest()); } /** Copy constructor. */ template<typename OtherDerived> inline PermutationMatrix(const PermutationBase<OtherDerived>& other) : m_indices(other.indices()) {} /** Generic constructor from expression of the indices. The indices * array has the meaning that the permutations sends each integer i to indices[i]. * * \warning It is your responsibility to check that the indices array that you passes actually * describes a permutation, i.e., each value between 0 and n-1 occurs exactly once, where n is the * array's size. 
*/ template<typename Other> explicit inline PermutationMatrix(const MatrixBase<Other>& indices) : m_indices(indices) {} /** Convert the Transpositions \a tr to a permutation matrix */ template<typename Other> explicit PermutationMatrix(const TranspositionsBase<Other>& tr) : m_indices(tr.size()) { *this = tr; } /** Copies the other permutation into *this */ template<typename Other> PermutationMatrix& operator=(const PermutationBase<Other>& other) { m_indices = other.indices(); return *this; } /** Assignment from the Transpositions \a tr */ template<typename Other> PermutationMatrix& operator=(const TranspositionsBase<Other>& tr) { return Base::operator=(tr.derived()); } /** const version of indices(). */ const IndicesType& indices() const { return m_indices; } /** \returns a reference to the stored array representing the permutation. */ IndicesType& indices() { return m_indices; } /**** multiplication helpers to hopefully get RVO ****/ #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename Other> PermutationMatrix(const InverseImpl<Other,PermutationStorage>& other) : m_indices(other.derived().nestedExpression().size()) { eigen_internal_assert(m_indices.size() <= NumTraits<StorageIndex>::highest()); StorageIndex end = StorageIndex(m_indices.size()); for (StorageIndex i=0; i<end;++i) m_indices.coeffRef(other.derived().nestedExpression().indices().coeff(i)) = i; } template<typename Lhs,typename Rhs> PermutationMatrix(internal::PermPermProduct_t, const Lhs& lhs, const Rhs& rhs) : m_indices(lhs.indices().size()) { Base::assignProduct(lhs,rhs); } #endif protected: IndicesType m_indices; }; namespace internal { template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex, int _PacketAccess> struct traits<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex>,_PacketAccess> > : traits<Matrix<_StorageIndex,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> > { typedef PermutationStorage StorageKind; typedef Map<const Matrix<_StorageIndex, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1>, _PacketAccess> IndicesType; typedef _StorageIndex StorageIndex; typedef void Scalar; }; } template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex, int _PacketAccess> class Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex>,_PacketAccess> : public PermutationBase<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex>,_PacketAccess> > { typedef PermutationBase<Map> Base; typedef internal::traits<Map> Traits; public: #ifndef EIGEN_PARSED_BY_DOXYGEN typedef typename Traits::IndicesType IndicesType; typedef typename IndicesType::Scalar StorageIndex; #endif inline Map(const StorageIndex* indicesPtr) : m_indices(indicesPtr) {} inline Map(const StorageIndex* indicesPtr, Index size) : m_indices(indicesPtr,size) {} /** Copies the other permutation into *this */ template<typename Other> Map& operator=(const PermutationBase<Other>& other) { return Base::operator=(other.derived()); } /** Assignment from the Transpositions \a tr */ template<typename Other> Map& operator=(const TranspositionsBase<Other>& tr) { return Base::operator=(tr.derived()); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ Map& operator=(const Map& other) { m_indices = other.m_indices; return *this; } #endif /** const version of indices(). 
*/ const IndicesType& indices() const { return m_indices; } /** \returns a reference to the stored array representing the permutation. */ IndicesType& indices() { return m_indices; } protected: IndicesType m_indices; }; template<typename _IndicesType> class TranspositionsWrapper; namespace internal { template<typename _IndicesType> struct traits<PermutationWrapper<_IndicesType> > { typedef PermutationStorage StorageKind; typedef void Scalar; typedef typename _IndicesType::Scalar StorageIndex; typedef _IndicesType IndicesType; enum { RowsAtCompileTime = _IndicesType::SizeAtCompileTime, ColsAtCompileTime = _IndicesType::SizeAtCompileTime, MaxRowsAtCompileTime = IndicesType::MaxSizeAtCompileTime, MaxColsAtCompileTime = IndicesType::MaxSizeAtCompileTime, Flags = 0 }; }; } /** \class PermutationWrapper * \ingroup Core_Module * * \brief Class to view a vector of integers as a permutation matrix * * \tparam _IndicesType the type of the vector of integer (can be any compatible expression) * * This class allows to view any vector expression of integers as a permutation matrix. * * \sa class PermutationBase, class PermutationMatrix */ template<typename _IndicesType> class PermutationWrapper : public PermutationBase<PermutationWrapper<_IndicesType> > { typedef PermutationBase<PermutationWrapper> Base; typedef internal::traits<PermutationWrapper> Traits; public: #ifndef EIGEN_PARSED_BY_DOXYGEN typedef typename Traits::IndicesType IndicesType; #endif inline PermutationWrapper(const IndicesType& indices) : m_indices(indices) {} /** const version of indices(). */ const typename internal::remove_all<typename IndicesType::Nested>::type& indices() const { return m_indices; } protected: typename IndicesType::Nested m_indices; }; /** \returns the matrix with the permutation applied to the columns. */ template<typename MatrixDerived, typename PermutationDerived> EIGEN_DEVICE_FUNC const Product<MatrixDerived, PermutationDerived, AliasFreeProduct> operator*(const MatrixBase<MatrixDerived> &matrix, const PermutationBase<PermutationDerived>& permutation) { return Product<MatrixDerived, PermutationDerived, AliasFreeProduct> (matrix.derived(), permutation.derived()); } /** \returns the matrix with the permutation applied to the rows. 
*/ template<typename PermutationDerived, typename MatrixDerived> EIGEN_DEVICE_FUNC const Product<PermutationDerived, MatrixDerived, AliasFreeProduct> operator*(const PermutationBase<PermutationDerived> &permutation, const MatrixBase<MatrixDerived>& matrix) { return Product<PermutationDerived, MatrixDerived, AliasFreeProduct> (permutation.derived(), matrix.derived()); } template<typename PermutationType> class InverseImpl<PermutationType, PermutationStorage> : public EigenBase<Inverse<PermutationType> > { typedef typename PermutationType::PlainPermutationType PlainPermutationType; typedef internal::traits<PermutationType> PermTraits; protected: InverseImpl() {} public: typedef Inverse<PermutationType> InverseType; using EigenBase<Inverse<PermutationType> >::derived; #ifndef EIGEN_PARSED_BY_DOXYGEN typedef typename PermutationType::DenseMatrixType DenseMatrixType; enum { RowsAtCompileTime = PermTraits::RowsAtCompileTime, ColsAtCompileTime = PermTraits::ColsAtCompileTime, MaxRowsAtCompileTime = PermTraits::MaxRowsAtCompileTime, MaxColsAtCompileTime = PermTraits::MaxColsAtCompileTime }; #endif #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename DenseDerived> void evalTo(MatrixBase<DenseDerived>& other) const { other.setZero(); for (Index i=0; i<derived().rows();++i) other.coeffRef(i, derived().nestedExpression().indices().coeff(i)) = typename DenseDerived::Scalar(1); } #endif /** \return the equivalent permutation matrix */ PlainPermutationType eval() const { return derived(); } DenseMatrixType toDenseMatrix() const { return derived(); } /** \returns the matrix with the inverse permutation applied to the columns. */ template<typename OtherDerived> friend const Product<OtherDerived, InverseType, AliasFreeProduct> operator*(const MatrixBase<OtherDerived>& matrix, const InverseType& trPerm) { return Product<OtherDerived, InverseType, AliasFreeProduct>(matrix.derived(), trPerm.derived()); } /** \returns the matrix with the inverse permutation applied to the rows. */ template<typename OtherDerived> const Product<InverseType, OtherDerived, AliasFreeProduct> operator*(const MatrixBase<OtherDerived>& matrix) const { return Product<InverseType, OtherDerived, AliasFreeProduct>(derived(), matrix.derived()); } }; template<typename Derived> const PermutationWrapper<const Derived> MatrixBase<Derived>::asPermutation() const { return derived(); } namespace internal { template<> struct AssignmentKind<DenseShape,PermutationShape> { typedef EigenBase2EigenBase Kind; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_PERMUTATIONMATRIX_H
C++
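A minimal sketch of the permutation classes defined in PermutationMatrix.h above, following the convention P(e_i) = e_{sigma(i)} stated in its documentation; it assumes Eigen 3.3 and is illustrative only.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::PermutationMatrix<3> P;
  P.setIdentity();
  P.applyTranspositionOnTheRight(0, 2);   // cheap: just swaps two stored indices

  Eigen::Matrix3d A;
  A << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;

  Eigen::Matrix3d rowPermuted = P * A;    // permutation on the left permutes rows
  Eigen::Matrix3d colPermuted = A * P;    // permutation on the right permutes columns
  Eigen::Matrix3d restored    = P.inverse() * rowPermuted;

  std::cout << rowPermuted << "\n\n" << colPermuted << "\n\n"
            << restored << "\ndet(P) = " << P.determinant() << "\n";
  return 0;
}

Note that applyTranspositionOnTheRight only swaps two indices, whereas applyTranspositionOnTheLeft scans the whole index vector, as the documentation above warns.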
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/EigenBase.h
.h
5,619
160
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_EIGENBASE_H #define EIGEN_EIGENBASE_H namespace Eigen { /** \class EigenBase * \ingroup Core_Module * * Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T). * * In other words, an EigenBase object is an object that can be copied into a MatrixBase. * * Besides MatrixBase-derived classes, this also includes special matrix classes such as diagonal matrices, etc. * * Notice that this class is trivial, it is only used to disambiguate overloaded functions. * * \sa \blank \ref TopicClassHierarchy */ template<typename Derived> struct EigenBase { // typedef typename internal::plain_matrix_type<Derived>::type PlainObject; /** \brief The interface type of indices * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE. * \deprecated Since Eigen 3.3, its usage is deprecated. Use Eigen::Index instead. * \sa StorageIndex, \ref TopicPreprocessorDirectives. */ typedef Eigen::Index Index; // FIXME is it needed? typedef typename internal::traits<Derived>::StorageKind StorageKind; /** \returns a reference to the derived object */ EIGEN_DEVICE_FUNC Derived& derived() { return *static_cast<Derived*>(this); } /** \returns a const reference to the derived object */ EIGEN_DEVICE_FUNC const Derived& derived() const { return *static_cast<const Derived*>(this); } EIGEN_DEVICE_FUNC inline Derived& const_cast_derived() const { return *static_cast<Derived*>(const_cast<EigenBase*>(this)); } EIGEN_DEVICE_FUNC inline const Derived& const_derived() const { return *static_cast<const Derived*>(this); } /** \returns the number of rows. \sa cols(), RowsAtCompileTime */ EIGEN_DEVICE_FUNC inline Index rows() const { return derived().rows(); } /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ EIGEN_DEVICE_FUNC inline Index cols() const { return derived().cols(); } /** \returns the number of coefficients, which is rows()*cols(). * \sa rows(), cols(), SizeAtCompileTime. */ EIGEN_DEVICE_FUNC inline Index size() const { return rows() * cols(); } /** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */ template<typename Dest> EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const { derived().evalTo(dst); } /** \internal Don't use it, but do the equivalent: \code dst += *this; \endcode */ template<typename Dest> EIGEN_DEVICE_FUNC inline void addTo(Dest& dst) const { // This is the default implementation, // derived class can reimplement it in a more optimized way. typename Dest::PlainObject res(rows(),cols()); evalTo(res); dst += res; } /** \internal Don't use it, but do the equivalent: \code dst -= *this; \endcode */ template<typename Dest> EIGEN_DEVICE_FUNC inline void subTo(Dest& dst) const { // This is the default implementation, // derived class can reimplement it in a more optimized way. 
typename Dest::PlainObject res(rows(),cols()); evalTo(res); dst -= res; } /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheRight(*this); \endcode */ template<typename Dest> EIGEN_DEVICE_FUNC inline void applyThisOnTheRight(Dest& dst) const { // This is the default implementation, // derived class can reimplement it in a more optimized way. dst = dst * this->derived(); } /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheLeft(*this); \endcode */ template<typename Dest> EIGEN_DEVICE_FUNC inline void applyThisOnTheLeft(Dest& dst) const { // This is the default implementation, // derived class can reimplement it in a more optimized way. dst = this->derived() * dst; } }; /*************************************************************************** * Implementation of matrix base methods ***************************************************************************/ /** \brief Copies the generic expression \a other into *this. * * \details The expression must provide a (templated) evalTo(Derived& dst) const * function which does the actual job. In practice, this allows any user to write * its own special matrix without having to modify MatrixBase * * \returns a reference to *this. */ template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC Derived& DenseBase<Derived>::operator=(const EigenBase<OtherDerived> &other) { call_assignment(derived(), other.derived()); return derived(); } template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC Derived& DenseBase<Derived>::operator+=(const EigenBase<OtherDerived> &other) { call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>()); return derived(); } template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other) { call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>()); return derived(); } } // end namespace Eigen #endif // EIGEN_EIGENBASE_H
C++
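A minimal sketch of EigenBase as the common parameter type for anything that can be copied into a dense matrix, including special matrix classes such as diagonal matrices; it assumes Eigen 3.3 and is illustrative only.

#include <Eigen/Dense>
#include <iostream>

// Accepts any EigenBase-derived object; size() forwards rows()*cols() to the derived type.
template <typename Derived>
Eigen::Index countCoefficients(const Eigen::EigenBase<Derived>& x)
{
  return x.size();
}

int main()
{
  Eigen::Matrix3d dense = Eigen::Matrix3d::Zero();
  Eigen::DiagonalMatrix<double, 3> diag(1.0, 2.0, 3.0);

  std::cout << countCoefficients(dense) << "\n"   // 9
            << countCoefficients(diag)  << "\n";  // 9
  return 0;
}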
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/ConditionEstimator.h
.h
6,990
176
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Rasmus Munk Larsen (rmlarsen@google.com) // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CONDITIONESTIMATOR_H #define EIGEN_CONDITIONESTIMATOR_H namespace Eigen { namespace internal { template <typename Vector, typename RealVector, bool IsComplex> struct rcond_compute_sign { static inline Vector run(const Vector& v) { const RealVector v_abs = v.cwiseAbs(); return (v_abs.array() == static_cast<typename Vector::RealScalar>(0)) .select(Vector::Ones(v.size()), v.cwiseQuotient(v_abs)); } }; // Partial specialization to avoid elementwise division for real vectors. template <typename Vector> struct rcond_compute_sign<Vector, Vector, false> { static inline Vector run(const Vector& v) { return (v.array() < static_cast<typename Vector::RealScalar>(0)) .select(-Vector::Ones(v.size()), Vector::Ones(v.size())); } }; /** * \returns an estimate of ||inv(matrix)||_1 given a decomposition of * \a matrix that implements .solve() and .adjoint().solve() methods. * * This function implements Algorithms 4.1 and 5.1 from * http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf * which also forms the basis for the condition number estimators in * LAPACK. Since at most 10 calls to the solve method of dec are * performed, the total cost is O(dims^2), as opposed to O(dims^3) * needed to compute the inverse matrix explicitly. * * The most common usage is in estimating the condition number * ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be * computed directly in O(n^2) operations. * * Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and * LLT. * * \sa FullPivLU, PartialPivLU, LDLT, LLT. */ template <typename Decomposition> typename Decomposition::RealScalar rcond_invmatrix_L1_norm_estimate(const Decomposition& dec) { typedef typename Decomposition::MatrixType MatrixType; typedef typename Decomposition::Scalar Scalar; typedef typename Decomposition::RealScalar RealScalar; typedef typename internal::plain_col_type<MatrixType>::type Vector; typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVector; const bool is_complex = (NumTraits<Scalar>::IsComplex != 0); eigen_assert(dec.rows() == dec.cols()); const Index n = dec.rows(); if (n == 0) return 0; // Disable Index to float conversion warning #ifdef __INTEL_COMPILER #pragma warning push #pragma warning ( disable : 2259 ) #endif Vector v = dec.solve(Vector::Ones(n) / Scalar(n)); #ifdef __INTEL_COMPILER #pragma warning pop #endif // lower_bound is a lower bound on // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1 // and is the objective maximized by the ("super-") gradient ascent // algorithm below. RealScalar lower_bound = v.template lpNorm<1>(); if (n == 1) return lower_bound; // Gradient ascent algorithm follows: We know that the optimum is achieved at // one of the simplices v = e_i, so in each iteration we follow a // super-gradient to move towards the optimal one. 
RealScalar old_lower_bound = lower_bound; Vector sign_vector(n); Vector old_sign_vector; Index v_max_abs_index = -1; Index old_v_max_abs_index = v_max_abs_index; for (int k = 0; k < 4; ++k) { sign_vector = internal::rcond_compute_sign<Vector, RealVector, is_complex>::run(v); if (k > 0 && !is_complex && sign_vector == old_sign_vector) { // Break if the solution stagnated. break; } // v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )| v = dec.adjoint().solve(sign_vector); v.real().cwiseAbs().maxCoeff(&v_max_abs_index); if (v_max_abs_index == old_v_max_abs_index) { // Break if the solution stagnated. break; } // Move to the new simplex e_j, where j = v_max_abs_index. v = dec.solve(Vector::Unit(n, v_max_abs_index)); // v = inv(matrix) * e_j. lower_bound = v.template lpNorm<1>(); if (lower_bound <= old_lower_bound) { // Break if the gradient step did not increase the lower_bound. break; } if (!is_complex) { old_sign_vector = sign_vector; } old_v_max_abs_index = v_max_abs_index; old_lower_bound = lower_bound; } // The following calculates an independent estimate of ||matrix||_1 by // multiplying matrix by a vector with entries of slowly increasing // magnitude and alternating sign: // v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1. // This improvement to Hager's algorithm above is due to Higham. It was // added to make the algorithm more robust in certain corner cases where // large elements in the matrix might otherwise escape detection due to // exact cancellation (especially when op and op_adjoint correspond to a // sequence of backsubstitutions and permutations), which could cause // Hager's algorithm to vastly underestimate ||matrix||_1. Scalar alternating_sign(RealScalar(1)); for (Index i = 0; i < n; ++i) { // The static_cast is needed when Scalar is a complex and RealScalar implements expression templates v[i] = alternating_sign * static_cast<RealScalar>(RealScalar(1) + (RealScalar(i) / (RealScalar(n - 1)))); alternating_sign = -alternating_sign; } v = dec.solve(v); const RealScalar alternate_lower_bound = (2 * v.template lpNorm<1>()) / (3 * RealScalar(n)); return numext::maxi(lower_bound, alternate_lower_bound); } /** \brief Reciprocal condition number estimator. * * Computing a decomposition of a dense matrix takes O(n^3) operations, while * this method estimates the condition number quickly and reliably in O(n^2) * operations. * * \returns an estimate of the reciprocal condition number * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and * its decomposition. Supports the following decompositions: FullPivLU, * PartialPivLU, LDLT, and LLT. * * \sa FullPivLU, PartialPivLU, LDLT, LLT. */ template <typename Decomposition> typename Decomposition::RealScalar rcond_estimate_helper(typename Decomposition::RealScalar matrix_norm, const Decomposition& dec) { typedef typename Decomposition::RealScalar RealScalar; eigen_assert(dec.rows() == dec.cols()); if (dec.rows() == 0) return NumTraits<RealScalar>::infinity(); if (matrix_norm == RealScalar(0)) return RealScalar(0); if (dec.rows() == 1) return RealScalar(1); const RealScalar inverse_matrix_norm = rcond_invmatrix_L1_norm_estimate(dec); return (inverse_matrix_norm == RealScalar(0) ? RealScalar(0) : (RealScalar(1) / inverse_matrix_norm) / matrix_norm); } } // namespace internal } // namespace Eigen #endif
C++
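A minimal sketch of how the O(n^2) condition estimator above is typically reached from user code; it assumes Eigen 3.3, where (to the best of our understanding) the supported decompositions expose it through their rcond() member, and is illustrative only.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(50, 50);
  A.diagonal().array() += 10.0;   // keep the matrix comfortably well conditioned

  Eigen::PartialPivLU<Eigen::MatrixXd> lu(A);
  double rc = lu.rcond();         // estimate of 1 / (||A||_1 * ||inv(A)||_1), O(n^2)

  std::cout << "reciprocal condition estimate: " << rc << "\n";
  if (rc < 1e-12)
    std::cout << "matrix is numerically close to singular\n";
  return 0;
}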
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Random.h
.h
6,379
183
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_RANDOM_H #define EIGEN_RANDOM_H namespace Eigen { namespace internal { template<typename Scalar> struct scalar_random_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_random_op) inline const Scalar operator() () const { return random<Scalar>(); } }; template<typename Scalar> struct functor_traits<scalar_random_op<Scalar> > { enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false, IsRepeatable = false }; }; } // end namespace internal /** \returns a random matrix expression * * Numbers are uniformly spread through their whole definition range for integer types, * and in the [-1:1] range for floating point scalar types. * * The parameters \a rows and \a cols are the number of rows and of columns of * the returned matrix. Must be compatible with this MatrixBase type. * * \not_reentrant * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, * it is redundant to pass \a rows and \a cols as arguments, so Random() should be used * instead. * * * Example: \include MatrixBase_random_int_int.cpp * Output: \verbinclude MatrixBase_random_int_int.out * * This expression has the "evaluate before nesting" flag so that it will be evaluated into * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected * behavior with expressions involving random matrices. * * See DenseBase::NullaryExpr(Index, const CustomNullaryOp&) for an example using C++11 random generators. * * \sa DenseBase::setRandom(), DenseBase::Random(Index), DenseBase::Random() */ template<typename Derived> inline const typename DenseBase<Derived>::RandomReturnType DenseBase<Derived>::Random(Index rows, Index cols) { return NullaryExpr(rows, cols, internal::scalar_random_op<Scalar>()); } /** \returns a random vector expression * * Numbers are uniformly spread through their whole definition range for integer types, * and in the [-1:1] range for floating point scalar types. * * The parameter \a size is the size of the returned vector. * Must be compatible with this MatrixBase type. * * \only_for_vectors * \not_reentrant * * This variant is meant to be used for dynamic-size vector types. For fixed-size types, * it is redundant to pass \a size as argument, so Random() should be used * instead. * * Example: \include MatrixBase_random_int.cpp * Output: \verbinclude MatrixBase_random_int.out * * This expression has the "evaluate before nesting" flag so that it will be evaluated into * a temporary vector whenever it is nested in a larger expression. This prevents unexpected * behavior with expressions involving random matrices. * * \sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random() */ template<typename Derived> inline const typename DenseBase<Derived>::RandomReturnType DenseBase<Derived>::Random(Index size) { return NullaryExpr(size, internal::scalar_random_op<Scalar>()); } /** \returns a fixed-size random matrix or vector expression * * Numbers are uniformly spread through their whole definition range for integer types, * and in the [-1:1] range for floating point scalar types. * * This variant is only for fixed-size MatrixBase types. 
For dynamic-size types, you * need to use the variants taking size arguments. * * Example: \include MatrixBase_random.cpp * Output: \verbinclude MatrixBase_random.out * * This expression has the "evaluate before nesting" flag so that it will be evaluated into * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected * behavior with expressions involving random matrices. * * \not_reentrant * * \sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random(Index) */ template<typename Derived> inline const typename DenseBase<Derived>::RandomReturnType DenseBase<Derived>::Random() { return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_random_op<Scalar>()); } /** Sets all coefficients in this expression to random values. * * Numbers are uniformly spread through their whole definition range for integer types, * and in the [-1:1] range for floating point scalar types. * * \not_reentrant * * Example: \include MatrixBase_setRandom.cpp * Output: \verbinclude MatrixBase_setRandom.out * * \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index) */ template<typename Derived> inline Derived& DenseBase<Derived>::setRandom() { return *this = Random(rows(), cols()); } /** Resizes to the given \a newSize, and sets all coefficients in this expression to random values. * * Numbers are uniformly spread through their whole definition range for integer types, * and in the [-1:1] range for floating point scalar types. * * \only_for_vectors * \not_reentrant * * Example: \include Matrix_setRandom_int.cpp * Output: \verbinclude Matrix_setRandom_int.out * * \sa DenseBase::setRandom(), setRandom(Index,Index), class CwiseNullaryOp, DenseBase::Random() */ template<typename Derived> EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setRandom(Index newSize) { resize(newSize); return setRandom(); } /** Resizes to the given size, and sets all coefficients in this expression to random values. * * Numbers are uniformly spread through their whole definition range for integer types, * and in the [-1:1] range for floating point scalar types. * * \not_reentrant * * \param rows the new number of rows * \param cols the new number of columns * * Example: \include Matrix_setRandom_int_int.cpp * Output: \verbinclude Matrix_setRandom_int_int.out * * \sa DenseBase::setRandom(), setRandom(Index), class CwiseNullaryOp, DenseBase::Random() */ template<typename Derived> EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setRandom(Index rows, Index cols) { resize(rows, cols); return setRandom(); } } // end namespace Eigen #endif // EIGEN_RANDOM_H
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Transpose.h
.h
14,856
406
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_TRANSPOSE_H #define EIGEN_TRANSPOSE_H namespace Eigen { namespace internal { template<typename MatrixType> struct traits<Transpose<MatrixType> > : public traits<MatrixType> { typedef typename ref_selector<MatrixType>::type MatrixTypeNested; typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedPlain; enum { RowsAtCompileTime = MatrixType::ColsAtCompileTime, ColsAtCompileTime = MatrixType::RowsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxColsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxRowsAtCompileTime, FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0, Flags0 = traits<MatrixTypeNestedPlain>::Flags & ~(LvalueBit | NestByRefBit), Flags1 = Flags0 | FlagsLvalueBit, Flags = Flags1 ^ RowMajorBit, InnerStrideAtCompileTime = inner_stride_at_compile_time<MatrixType>::ret, OuterStrideAtCompileTime = outer_stride_at_compile_time<MatrixType>::ret }; }; } template<typename MatrixType, typename StorageKind> class TransposeImpl; /** \class Transpose * \ingroup Core_Module * * \brief Expression of the transpose of a matrix * * \tparam MatrixType the type of the object of which we are taking the transpose * * This class represents an expression of the transpose of a matrix. * It is the return type of MatrixBase::transpose() and MatrixBase::adjoint() * and most of the time this is the only way it is used. 
* * \sa MatrixBase::transpose(), MatrixBase::adjoint() */ template<typename MatrixType> class Transpose : public TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind> { public: typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested; typedef typename TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose) typedef typename internal::remove_all<MatrixType>::type NestedExpression; EIGEN_DEVICE_FUNC explicit inline Transpose(MatrixType& matrix) : m_matrix(matrix) {} EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose) EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.cols(); } EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.rows(); } /** \returns the nested expression */ EIGEN_DEVICE_FUNC const typename internal::remove_all<MatrixTypeNested>::type& nestedExpression() const { return m_matrix; } /** \returns the nested expression */ EIGEN_DEVICE_FUNC typename internal::remove_reference<MatrixTypeNested>::type& nestedExpression() { return m_matrix; } /** \internal */ void resize(Index nrows, Index ncols) { m_matrix.resize(ncols,nrows); } protected: typename internal::ref_selector<MatrixType>::non_const_type m_matrix; }; namespace internal { template<typename MatrixType, bool HasDirectAccess = has_direct_access<MatrixType>::ret> struct TransposeImpl_base { typedef typename dense_xpr_base<Transpose<MatrixType> >::type type; }; template<typename MatrixType> struct TransposeImpl_base<MatrixType, false> { typedef typename dense_xpr_base<Transpose<MatrixType> >::type type; }; } // end namespace internal // Generic API dispatcher template<typename XprType, typename StorageKind> class TransposeImpl : public internal::generic_xpr_base<Transpose<XprType> >::type { public: typedef typename internal::generic_xpr_base<Transpose<XprType> >::type Base; }; template<typename MatrixType> class TransposeImpl<MatrixType,Dense> : public internal::TransposeImpl_base<MatrixType>::type { public: typedef typename internal::TransposeImpl_base<MatrixType>::type Base; using Base::coeffRef; EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TransposeImpl) EIGEN_DEVICE_FUNC inline Index innerStride() const { return derived().nestedExpression().innerStride(); } EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().nestedExpression().outerStride(); } typedef typename internal::conditional< internal::is_lvalue<MatrixType>::value, Scalar, const Scalar >::type ScalarWithConstIfNotLvalue; EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); } EIGEN_DEVICE_FUNC inline const Scalar* data() const { return derived().nestedExpression().data(); } // FIXME: shall we keep the const version of coeffRef? EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { return derived().nestedExpression().coeffRef(colId, rowId); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return derived().nestedExpression().coeffRef(index); } protected: EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(TransposeImpl) }; /** \returns an expression of the transpose of *this. * * Example: \include MatrixBase_transpose.cpp * Output: \verbinclude MatrixBase_transpose.out * * \warning If you want to replace a matrix by its own transpose, do \b NOT do this: * \code * m = m.transpose(); // bug!!! 
caused by aliasing effect * \endcode * Instead, use the transposeInPlace() method: * \code * m.transposeInPlace(); * \endcode * which gives Eigen good opportunities for optimization, or alternatively you can also do: * \code * m = m.transpose().eval(); * \endcode * * \sa transposeInPlace(), adjoint() */ template<typename Derived> inline Transpose<Derived> DenseBase<Derived>::transpose() { return TransposeReturnType(derived()); } /** This is the const version of transpose(). * * Make sure you read the warning for transpose() ! * * \sa transposeInPlace(), adjoint() */ template<typename Derived> inline typename DenseBase<Derived>::ConstTransposeReturnType DenseBase<Derived>::transpose() const { return ConstTransposeReturnType(derived()); } /** \returns an expression of the adjoint (i.e. conjugate transpose) of *this. * * Example: \include MatrixBase_adjoint.cpp * Output: \verbinclude MatrixBase_adjoint.out * * \warning If you want to replace a matrix by its own adjoint, do \b NOT do this: * \code * m = m.adjoint(); // bug!!! caused by aliasing effect * \endcode * Instead, use the adjointInPlace() method: * \code * m.adjointInPlace(); * \endcode * which gives Eigen good opportunities for optimization, or alternatively you can also do: * \code * m = m.adjoint().eval(); * \endcode * * \sa adjointInPlace(), transpose(), conjugate(), class Transpose, class internal::scalar_conjugate_op */ template<typename Derived> inline const typename MatrixBase<Derived>::AdjointReturnType MatrixBase<Derived>::adjoint() const { return AdjointReturnType(this->transpose()); } /*************************************************************************** * "in place" transpose implementation ***************************************************************************/ namespace internal { template<typename MatrixType, bool IsSquare = (MatrixType::RowsAtCompileTime == MatrixType::ColsAtCompileTime) && MatrixType::RowsAtCompileTime!=Dynamic, bool MatchPacketSize = (int(MatrixType::RowsAtCompileTime) == int(internal::packet_traits<typename MatrixType::Scalar>::size)) && (internal::evaluator<MatrixType>::Flags&PacketAccessBit) > struct inplace_transpose_selector; template<typename MatrixType> struct inplace_transpose_selector<MatrixType,true,false> { // square matrix static void run(MatrixType& m) { m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose()); } }; // TODO: vectorized path is currently limited to LargestPacketSize x LargestPacketSize cases only. 
template<typename MatrixType> struct inplace_transpose_selector<MatrixType,true,true> { // PacketSize x PacketSize static void run(MatrixType& m) { typedef typename MatrixType::Scalar Scalar; typedef typename internal::packet_traits<typename MatrixType::Scalar>::type Packet; const Index PacketSize = internal::packet_traits<Scalar>::size; const Index Alignment = internal::evaluator<MatrixType>::Alignment; PacketBlock<Packet> A; for (Index i=0; i<PacketSize; ++i) A.packet[i] = m.template packetByOuterInner<Alignment>(i,0); internal::ptranspose(A); for (Index i=0; i<PacketSize; ++i) m.template writePacket<Alignment>(m.rowIndexByOuterInner(i,0), m.colIndexByOuterInner(i,0), A.packet[i]); } }; template<typename MatrixType,bool MatchPacketSize> struct inplace_transpose_selector<MatrixType,false,MatchPacketSize> { // non square matrix static void run(MatrixType& m) { if (m.rows()==m.cols()) m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose()); else m = m.transpose().eval(); } }; } // end namespace internal /** This is the "in place" version of transpose(): it replaces \c *this by its own transpose. * Thus, doing * \code * m.transposeInPlace(); * \endcode * has the same effect on m as doing * \code * m = m.transpose().eval(); * \endcode * and is faster and also safer because in the latter line of code, forgetting the eval() results * in a bug caused by \ref TopicAliasing "aliasing". * * Notice however that this method is only useful if you want to replace a matrix by its own transpose. * If you just need the transpose of a matrix, use transpose(). * * \note if the matrix is not square, then \c *this must be a resizable matrix. * This excludes (non-square) fixed-size matrices, block-expressions and maps. * * \sa transpose(), adjoint(), adjointInPlace() */ template<typename Derived> inline void DenseBase<Derived>::transposeInPlace() { eigen_assert((rows() == cols() || (RowsAtCompileTime == Dynamic && ColsAtCompileTime == Dynamic)) && "transposeInPlace() called on a non-square non-resizable matrix"); internal::inplace_transpose_selector<Derived>::run(derived()); } /*************************************************************************** * "in place" adjoint implementation ***************************************************************************/ /** This is the "in place" version of adjoint(): it replaces \c *this by its own transpose. * Thus, doing * \code * m.adjointInPlace(); * \endcode * has the same effect on m as doing * \code * m = m.adjoint().eval(); * \endcode * and is faster and also safer because in the latter line of code, forgetting the eval() results * in a bug caused by aliasing. * * Notice however that this method is only useful if you want to replace a matrix by its own adjoint. * If you just need the adjoint of a matrix, use adjoint(). * * \note if the matrix is not square, then \c *this must be a resizable matrix. * This excludes (non-square) fixed-size matrices, block-expressions and maps. * * \sa transpose(), adjoint(), transposeInPlace() */ template<typename Derived> inline void MatrixBase<Derived>::adjointInPlace() { derived() = adjoint().eval(); } #ifndef EIGEN_NO_DEBUG // The following is to detect aliasing problems in most common cases. 
namespace internal { template<bool DestIsTransposed, typename OtherDerived> struct check_transpose_aliasing_compile_time_selector { enum { ret = bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed }; }; template<bool DestIsTransposed, typename BinOp, typename DerivedA, typename DerivedB> struct check_transpose_aliasing_compile_time_selector<DestIsTransposed,CwiseBinaryOp<BinOp,DerivedA,DerivedB> > { enum { ret = bool(blas_traits<DerivedA>::IsTransposed) != DestIsTransposed || bool(blas_traits<DerivedB>::IsTransposed) != DestIsTransposed }; }; template<typename Scalar, bool DestIsTransposed, typename OtherDerived> struct check_transpose_aliasing_run_time_selector { static bool run(const Scalar* dest, const OtherDerived& src) { return (bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src)); } }; template<typename Scalar, bool DestIsTransposed, typename BinOp, typename DerivedA, typename DerivedB> struct check_transpose_aliasing_run_time_selector<Scalar,DestIsTransposed,CwiseBinaryOp<BinOp,DerivedA,DerivedB> > { static bool run(const Scalar* dest, const CwiseBinaryOp<BinOp,DerivedA,DerivedB>& src) { return ((blas_traits<DerivedA>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src.lhs()))) || ((blas_traits<DerivedB>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src.rhs()))); } }; // the following selector, checkTransposeAliasing_impl, based on MightHaveTransposeAliasing, // is because when the condition controlling the assert is known at compile time, ICC emits a warning. // This is actually a good warning: in expressions that don't have any transposing, the condition is // known at compile time to be false, and using that, we can avoid generating the code of the assert again // and again for all these expressions that don't need it. template<typename Derived, typename OtherDerived, bool MightHaveTransposeAliasing = check_transpose_aliasing_compile_time_selector <blas_traits<Derived>::IsTransposed,OtherDerived>::ret > struct checkTransposeAliasing_impl { static void run(const Derived& dst, const OtherDerived& other) { eigen_assert((!check_transpose_aliasing_run_time_selector <typename Derived::Scalar,blas_traits<Derived>::IsTransposed,OtherDerived> ::run(extract_data(dst), other)) && "aliasing detected during transposition, use transposeInPlace() " "or evaluate the rhs into a temporary using .eval()"); } }; template<typename Derived, typename OtherDerived> struct checkTransposeAliasing_impl<Derived, OtherDerived, false> { static void run(const Derived&, const OtherDerived&) { } }; template<typename Dst, typename Src> void check_for_aliasing(const Dst &dst, const Src &src) { internal::checkTransposeAliasing_impl<Dst, Src>::run(dst, src); } } // end namespace internal #endif // EIGEN_NO_DEBUG } // end namespace Eigen #endif // EIGEN_TRANSPOSE_H
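The warnings above about writing m = m.transpose() are easiest to see in a short example. A minimal sketch, using only the public transpose()/adjoint() API documented in this header; the concrete sizes and values are arbitrary.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d m;
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;

  Eigen::Matrix3d t = m.transpose();    // fine: the result goes into a different matrix

  // m = m.transpose();                 // would alias, because transpose() is only an expression view
  m.transposeInPlace();                 // safe in-place alternative
  Eigen::Matrix3d m2 = m;
  m2 = m2.transpose().eval();           // also safe, at the cost of a temporary

  Eigen::MatrixXcd c = Eigen::MatrixXcd::Random(2, 3);
  Eigen::MatrixXcd a = c.adjoint();     // conjugate transpose; adjointInPlace() follows the same aliasing rules

  std::cout << t << "\n\n" << m << "\n\n" << a << "\n";
  return 0;
}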
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/MathFunctions.h
.h
41,000
1,422
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATHFUNCTIONS_H #define EIGEN_MATHFUNCTIONS_H // source: http://www.geom.uiuc.edu/~huberty/math5337/groupe/digits.html // TODO this should better be moved to NumTraits #define EIGEN_PI 3.141592653589793238462643383279502884197169399375105820974944592307816406L namespace Eigen { // On WINCE, std::abs is defined for int only, so let's defined our own overloads: // This issue has been confirmed with MSVC 2008 only, but the issue might exist for more recent versions too. #if EIGEN_OS_WINCE && EIGEN_COMP_MSVC && EIGEN_COMP_MSVC<=1500 long abs(long x) { return (labs(x)); } double abs(double x) { return (fabs(x)); } float abs(float x) { return (fabsf(x)); } long double abs(long double x) { return (fabsl(x)); } #endif namespace internal { /** \internal \class global_math_functions_filtering_base * * What it does: * Defines a typedef 'type' as follows: * - if type T has a member typedef Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl, then * global_math_functions_filtering_base<T>::type is a typedef for it. * - otherwise, global_math_functions_filtering_base<T>::type is a typedef for T. * * How it's used: * To allow to defined the global math functions (like sin...) in certain cases, like the Array expressions. * When you do sin(array1+array2), the object array1+array2 has a complicated expression type, all what you want to know * is that it inherits ArrayBase. So we implement a partial specialization of sin_impl for ArrayBase<Derived>. * So we must make sure to use sin_impl<ArrayBase<Derived> > and not sin_impl<Derived>, otherwise our partial specialization * won't be used. How does sin know that? That's exactly what global_math_functions_filtering_base tells it. * * How it's implemented: * SFINAE in the style of enable_if. Highly susceptible of breaking compilers. With GCC, it sure does work, but if you replace * the typename dummy by an integer template parameter, it doesn't work anymore! 
*/ template<typename T, typename dummy = void> struct global_math_functions_filtering_base { typedef T type; }; template<typename T> struct always_void { typedef void type; }; template<typename T> struct global_math_functions_filtering_base <T, typename always_void<typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl>::type > { typedef typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type; }; #define EIGEN_MATHFUNC_IMPL(func, scalar) Eigen::internal::func##_impl<typename Eigen::internal::global_math_functions_filtering_base<scalar>::type> #define EIGEN_MATHFUNC_RETVAL(func, scalar) typename Eigen::internal::func##_retval<typename Eigen::internal::global_math_functions_filtering_base<scalar>::type>::type /**************************************************************************** * Implementation of real * ****************************************************************************/ template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex> struct real_default_impl { typedef typename NumTraits<Scalar>::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { return x; } }; template<typename Scalar> struct real_default_impl<Scalar,true> { typedef typename NumTraits<Scalar>::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { using std::real; return real(x); } }; template<typename Scalar> struct real_impl : real_default_impl<Scalar> {}; #ifdef __CUDA_ARCH__ template<typename T> struct real_impl<std::complex<T> > { typedef T RealScalar; EIGEN_DEVICE_FUNC static inline T run(const std::complex<T>& x) { return x.real(); } }; #endif template<typename Scalar> struct real_retval { typedef typename NumTraits<Scalar>::Real type; }; /**************************************************************************** * Implementation of imag * ****************************************************************************/ template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex> struct imag_default_impl { typedef typename NumTraits<Scalar>::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar&) { return RealScalar(0); } }; template<typename Scalar> struct imag_default_impl<Scalar,true> { typedef typename NumTraits<Scalar>::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { using std::imag; return imag(x); } }; template<typename Scalar> struct imag_impl : imag_default_impl<Scalar> {}; #ifdef __CUDA_ARCH__ template<typename T> struct imag_impl<std::complex<T> > { typedef T RealScalar; EIGEN_DEVICE_FUNC static inline T run(const std::complex<T>& x) { return x.imag(); } }; #endif template<typename Scalar> struct imag_retval { typedef typename NumTraits<Scalar>::Real type; }; /**************************************************************************** * Implementation of real_ref * ****************************************************************************/ template<typename Scalar> struct real_ref_impl { typedef typename NumTraits<Scalar>::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar& run(Scalar& x) { return reinterpret_cast<RealScalar*>(&x)[0]; } EIGEN_DEVICE_FUNC static inline const RealScalar& run(const Scalar& x) { return reinterpret_cast<const RealScalar*>(&x)[0]; } }; template<typename Scalar> struct real_ref_retval { typedef typename NumTraits<Scalar>::Real & type; }; /**************************************************************************** * Implementation of imag_ref * 
****************************************************************************/ template<typename Scalar, bool IsComplex> struct imag_ref_default_impl { typedef typename NumTraits<Scalar>::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar& run(Scalar& x) { return reinterpret_cast<RealScalar*>(&x)[1]; } EIGEN_DEVICE_FUNC static inline const RealScalar& run(const Scalar& x) { return reinterpret_cast<RealScalar*>(&x)[1]; } }; template<typename Scalar> struct imag_ref_default_impl<Scalar, false> { EIGEN_DEVICE_FUNC static inline Scalar run(Scalar&) { return Scalar(0); } EIGEN_DEVICE_FUNC static inline const Scalar run(const Scalar&) { return Scalar(0); } }; template<typename Scalar> struct imag_ref_impl : imag_ref_default_impl<Scalar, NumTraits<Scalar>::IsComplex> {}; template<typename Scalar> struct imag_ref_retval { typedef typename NumTraits<Scalar>::Real & type; }; /**************************************************************************** * Implementation of conj * ****************************************************************************/ template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex> struct conj_impl { EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { return x; } }; template<typename Scalar> struct conj_impl<Scalar,true> { EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { using std::conj; return conj(x); } }; template<typename Scalar> struct conj_retval { typedef Scalar type; }; /**************************************************************************** * Implementation of abs2 * ****************************************************************************/ template<typename Scalar,bool IsComplex> struct abs2_impl_default { typedef typename NumTraits<Scalar>::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { return x*x; } }; template<typename Scalar> struct abs2_impl_default<Scalar, true> // IsComplex { typedef typename NumTraits<Scalar>::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { return x.real()*x.real() + x.imag()*x.imag(); } }; template<typename Scalar> struct abs2_impl { typedef typename NumTraits<Scalar>::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { return abs2_impl_default<Scalar,NumTraits<Scalar>::IsComplex>::run(x); } }; template<typename Scalar> struct abs2_retval { typedef typename NumTraits<Scalar>::Real type; }; /**************************************************************************** * Implementation of norm1 * ****************************************************************************/ template<typename Scalar, bool IsComplex> struct norm1_default_impl; template<typename Scalar> struct norm1_default_impl<Scalar,true> { typedef typename NumTraits<Scalar>::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { EIGEN_USING_STD_MATH(abs); return abs(x.real()) + abs(x.imag()); } }; template<typename Scalar> struct norm1_default_impl<Scalar, false> { EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x) { EIGEN_USING_STD_MATH(abs); return abs(x); } }; template<typename Scalar> struct norm1_impl : norm1_default_impl<Scalar, NumTraits<Scalar>::IsComplex> {}; template<typename Scalar> struct norm1_retval { typedef typename NumTraits<Scalar>::Real type; }; /**************************************************************************** * Implementation of hypot * ****************************************************************************/ template<typename Scalar> struct 
hypot_impl; template<typename Scalar> struct hypot_retval { typedef typename NumTraits<Scalar>::Real type; }; /**************************************************************************** * Implementation of cast * ****************************************************************************/ template<typename OldType, typename NewType> struct cast_impl { EIGEN_DEVICE_FUNC static inline NewType run(const OldType& x) { return static_cast<NewType>(x); } }; // here, for once, we're plainly returning NewType: we don't want cast to do weird things. template<typename OldType, typename NewType> EIGEN_DEVICE_FUNC inline NewType cast(const OldType& x) { return cast_impl<OldType, NewType>::run(x); } /**************************************************************************** * Implementation of round * ****************************************************************************/ #if EIGEN_HAS_CXX11_MATH template<typename Scalar> struct round_impl { static inline Scalar run(const Scalar& x) { EIGEN_STATIC_ASSERT((!NumTraits<Scalar>::IsComplex), NUMERIC_TYPE_MUST_BE_REAL) using std::round; return round(x); } }; #else template<typename Scalar> struct round_impl { static inline Scalar run(const Scalar& x) { EIGEN_STATIC_ASSERT((!NumTraits<Scalar>::IsComplex), NUMERIC_TYPE_MUST_BE_REAL) EIGEN_USING_STD_MATH(floor); EIGEN_USING_STD_MATH(ceil); return (x > Scalar(0)) ? floor(x + Scalar(0.5)) : ceil(x - Scalar(0.5)); } }; #endif template<typename Scalar> struct round_retval { typedef Scalar type; }; /**************************************************************************** * Implementation of arg * ****************************************************************************/ #if EIGEN_HAS_CXX11_MATH template<typename Scalar> struct arg_impl { static inline Scalar run(const Scalar& x) { EIGEN_USING_STD_MATH(arg); return arg(x); } }; #else template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex> struct arg_default_impl { typedef typename NumTraits<Scalar>::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { return (x < Scalar(0)) ? Scalar(EIGEN_PI) : Scalar(0); } }; template<typename Scalar> struct arg_default_impl<Scalar,true> { typedef typename NumTraits<Scalar>::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const Scalar& x) { EIGEN_USING_STD_MATH(arg); return arg(x); } }; template<typename Scalar> struct arg_impl : arg_default_impl<Scalar> {}; #endif template<typename Scalar> struct arg_retval { typedef typename NumTraits<Scalar>::Real type; }; /**************************************************************************** * Implementation of log1p * ****************************************************************************/ namespace std_fallback { // fallback log1p implementation in case there is no log1p(Scalar) function in namespace of Scalar, // or that there is no suitable std::log1p function available template<typename Scalar> EIGEN_DEVICE_FUNC inline Scalar log1p(const Scalar& x) { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) typedef typename NumTraits<Scalar>::Real RealScalar; EIGEN_USING_STD_MATH(log); Scalar x1p = RealScalar(1) + x; return numext::equal_strict(x1p, Scalar(1)) ? 
x : x * ( log(x1p) / (x1p - RealScalar(1)) ); } } template<typename Scalar> struct log1p_impl { static inline Scalar run(const Scalar& x) { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) #if EIGEN_HAS_CXX11_MATH using std::log1p; #endif using std_fallback::log1p; return log1p(x); } }; template<typename Scalar> struct log1p_retval { typedef Scalar type; }; /**************************************************************************** * Implementation of pow * ****************************************************************************/ template<typename ScalarX,typename ScalarY, bool IsInteger = NumTraits<ScalarX>::IsInteger&&NumTraits<ScalarY>::IsInteger> struct pow_impl { //typedef Scalar retval; typedef typename ScalarBinaryOpTraits<ScalarX,ScalarY,internal::scalar_pow_op<ScalarX,ScalarY> >::ReturnType result_type; static EIGEN_DEVICE_FUNC inline result_type run(const ScalarX& x, const ScalarY& y) { EIGEN_USING_STD_MATH(pow); return pow(x, y); } }; template<typename ScalarX,typename ScalarY> struct pow_impl<ScalarX,ScalarY, true> { typedef ScalarX result_type; static EIGEN_DEVICE_FUNC inline ScalarX run(ScalarX x, ScalarY y) { ScalarX res(1); eigen_assert(!NumTraits<ScalarY>::IsSigned || y >= 0); if(y & 1) res *= x; y >>= 1; while(y) { x *= x; if(y&1) res *= x; y >>= 1; } return res; } }; /**************************************************************************** * Implementation of random * ****************************************************************************/ template<typename Scalar, bool IsComplex, bool IsInteger> struct random_default_impl {}; template<typename Scalar> struct random_impl : random_default_impl<Scalar, NumTraits<Scalar>::IsComplex, NumTraits<Scalar>::IsInteger> {}; template<typename Scalar> struct random_retval { typedef Scalar type; }; template<typename Scalar> inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y); template<typename Scalar> inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(); template<typename Scalar> struct random_default_impl<Scalar, false, false> { static inline Scalar run(const Scalar& x, const Scalar& y) { return x + (y-x) * Scalar(std::rand()) / Scalar(RAND_MAX); } static inline Scalar run() { return run(Scalar(NumTraits<Scalar>::IsSigned ? -1 : 0), Scalar(1)); } }; enum { meta_floor_log2_terminate, meta_floor_log2_move_up, meta_floor_log2_move_down, meta_floor_log2_bogus }; template<unsigned int n, int lower, int upper> struct meta_floor_log2_selector { enum { middle = (lower + upper) / 2, value = (upper <= lower + 1) ? int(meta_floor_log2_terminate) : (n < (1 << middle)) ? int(meta_floor_log2_move_down) : (n==0) ? int(meta_floor_log2_bogus) : int(meta_floor_log2_move_up) }; }; template<unsigned int n, int lower = 0, int upper = sizeof(unsigned int) * CHAR_BIT - 1, int selector = meta_floor_log2_selector<n, lower, upper>::value> struct meta_floor_log2 {}; template<unsigned int n, int lower, int upper> struct meta_floor_log2<n, lower, upper, meta_floor_log2_move_down> { enum { value = meta_floor_log2<n, lower, meta_floor_log2_selector<n, lower, upper>::middle>::value }; }; template<unsigned int n, int lower, int upper> struct meta_floor_log2<n, lower, upper, meta_floor_log2_move_up> { enum { value = meta_floor_log2<n, meta_floor_log2_selector<n, lower, upper>::middle, upper>::value }; }; template<unsigned int n, int lower, int upper> struct meta_floor_log2<n, lower, upper, meta_floor_log2_terminate> { enum { value = (n >= ((unsigned int)(1) << (lower+1))) ? 
lower+1 : lower }; }; template<unsigned int n, int lower, int upper> struct meta_floor_log2<n, lower, upper, meta_floor_log2_bogus> { // no value, error at compile time }; template<typename Scalar> struct random_default_impl<Scalar, false, true> { static inline Scalar run(const Scalar& x, const Scalar& y) { if (y <= x) return x; // ScalarU is the unsigned counterpart of Scalar, possibly Scalar itself. typedef typename make_unsigned<Scalar>::type ScalarU; // ScalarX is the widest of ScalarU and unsigned int. // We'll deal only with ScalarX and unsigned int below thus avoiding signed // types and arithmetic and signed overflows (which are undefined behavior). typedef typename conditional<(ScalarU(-1) > unsigned(-1)), ScalarU, unsigned>::type ScalarX; // The following difference doesn't overflow, provided our integer types are two's // complement and have the same number of padding bits in signed and unsigned variants. // This is the case in most modern implementations of C++. ScalarX range = ScalarX(y) - ScalarX(x); ScalarX offset = 0; ScalarX divisor = 1; ScalarX multiplier = 1; const unsigned rand_max = RAND_MAX; if (range <= rand_max) divisor = (rand_max + 1) / (range + 1); else multiplier = 1 + range / (rand_max + 1); // Rejection sampling. do { offset = (unsigned(std::rand()) * multiplier) / divisor; } while (offset > range); return Scalar(ScalarX(x) + offset); } static inline Scalar run() { #ifdef EIGEN_MAKING_DOCS return run(Scalar(NumTraits<Scalar>::IsSigned ? -10 : 0), Scalar(10)); #else enum { rand_bits = meta_floor_log2<(unsigned int)(RAND_MAX)+1>::value, scalar_bits = sizeof(Scalar) * CHAR_BIT, shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits)), offset = NumTraits<Scalar>::IsSigned ? (1 << (EIGEN_PLAIN_ENUM_MIN(rand_bits,scalar_bits)-1)) : 0 }; return Scalar((std::rand() >> shift) - offset); #endif } }; template<typename Scalar> struct random_default_impl<Scalar, true, false> { static inline Scalar run(const Scalar& x, const Scalar& y) { return Scalar(random(x.real(), y.real()), random(x.imag(), y.imag())); } static inline Scalar run() { typedef typename NumTraits<Scalar>::Real RealScalar; return Scalar(random<RealScalar>(), random<RealScalar>()); } }; template<typename Scalar> inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y) { return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y); } template<typename Scalar> inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random() { return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(); } // Implementatin of is* functions // std::is* do not work with fast-math and gcc, std::is* are available on MSVC 2013 and newer, as well as in clang. 
#if (EIGEN_HAS_CXX11_MATH && !(EIGEN_COMP_GNUC_STRICT && __FINITE_MATH_ONLY__)) || (EIGEN_COMP_MSVC>=1800) || (EIGEN_COMP_CLANG) #define EIGEN_USE_STD_FPCLASSIFY 1 #else #define EIGEN_USE_STD_FPCLASSIFY 0 #endif template<typename T> EIGEN_DEVICE_FUNC typename internal::enable_if<internal::is_integral<T>::value,bool>::type isnan_impl(const T&) { return false; } template<typename T> EIGEN_DEVICE_FUNC typename internal::enable_if<internal::is_integral<T>::value,bool>::type isinf_impl(const T&) { return false; } template<typename T> EIGEN_DEVICE_FUNC typename internal::enable_if<internal::is_integral<T>::value,bool>::type isfinite_impl(const T&) { return true; } template<typename T> EIGEN_DEVICE_FUNC typename internal::enable_if<(!internal::is_integral<T>::value)&&(!NumTraits<T>::IsComplex),bool>::type isfinite_impl(const T& x) { #ifdef __CUDA_ARCH__ return (::isfinite)(x); #elif EIGEN_USE_STD_FPCLASSIFY using std::isfinite; return isfinite EIGEN_NOT_A_MACRO (x); #else return x<=NumTraits<T>::highest() && x>=NumTraits<T>::lowest(); #endif } template<typename T> EIGEN_DEVICE_FUNC typename internal::enable_if<(!internal::is_integral<T>::value)&&(!NumTraits<T>::IsComplex),bool>::type isinf_impl(const T& x) { #ifdef __CUDA_ARCH__ return (::isinf)(x); #elif EIGEN_USE_STD_FPCLASSIFY using std::isinf; return isinf EIGEN_NOT_A_MACRO (x); #else return x>NumTraits<T>::highest() || x<NumTraits<T>::lowest(); #endif } template<typename T> EIGEN_DEVICE_FUNC typename internal::enable_if<(!internal::is_integral<T>::value)&&(!NumTraits<T>::IsComplex),bool>::type isnan_impl(const T& x) { #ifdef __CUDA_ARCH__ return (::isnan)(x); #elif EIGEN_USE_STD_FPCLASSIFY using std::isnan; return isnan EIGEN_NOT_A_MACRO (x); #else return x != x; #endif } #if (!EIGEN_USE_STD_FPCLASSIFY) #if EIGEN_COMP_MSVC template<typename T> EIGEN_DEVICE_FUNC bool isinf_msvc_helper(T x) { return _fpclass(x)==_FPCLASS_NINF || _fpclass(x)==_FPCLASS_PINF; } //MSVC defines a _isnan builtin function, but for double only EIGEN_DEVICE_FUNC inline bool isnan_impl(const long double& x) { return _isnan(x)!=0; } EIGEN_DEVICE_FUNC inline bool isnan_impl(const double& x) { return _isnan(x)!=0; } EIGEN_DEVICE_FUNC inline bool isnan_impl(const float& x) { return _isnan(x)!=0; } EIGEN_DEVICE_FUNC inline bool isinf_impl(const long double& x) { return isinf_msvc_helper(x); } EIGEN_DEVICE_FUNC inline bool isinf_impl(const double& x) { return isinf_msvc_helper(x); } EIGEN_DEVICE_FUNC inline bool isinf_impl(const float& x) { return isinf_msvc_helper(x); } #elif (defined __FINITE_MATH_ONLY__ && __FINITE_MATH_ONLY__ && EIGEN_COMP_GNUC) #if EIGEN_GNUC_AT_LEAST(5,0) #define EIGEN_TMP_NOOPT_ATTRIB EIGEN_DEVICE_FUNC inline __attribute__((optimize("no-finite-math-only"))) #else // NOTE the inline qualifier and noinline attribute are both needed: the former is to avoid linking issue (duplicate symbol), // while the second prevent too aggressive optimizations in fast-math mode: #define EIGEN_TMP_NOOPT_ATTRIB EIGEN_DEVICE_FUNC inline __attribute__((noinline,optimize("no-finite-math-only"))) #endif template<> EIGEN_TMP_NOOPT_ATTRIB bool isnan_impl(const long double& x) { return __builtin_isnan(x); } template<> EIGEN_TMP_NOOPT_ATTRIB bool isnan_impl(const double& x) { return __builtin_isnan(x); } template<> EIGEN_TMP_NOOPT_ATTRIB bool isnan_impl(const float& x) { return __builtin_isnan(x); } template<> EIGEN_TMP_NOOPT_ATTRIB bool isinf_impl(const double& x) { return __builtin_isinf(x); } template<> EIGEN_TMP_NOOPT_ATTRIB bool isinf_impl(const float& x) { return 
__builtin_isinf(x); } template<> EIGEN_TMP_NOOPT_ATTRIB bool isinf_impl(const long double& x) { return __builtin_isinf(x); } #undef EIGEN_TMP_NOOPT_ATTRIB #endif #endif // The following overload are defined at the end of this file template<typename T> EIGEN_DEVICE_FUNC bool isfinite_impl(const std::complex<T>& x); template<typename T> EIGEN_DEVICE_FUNC bool isnan_impl(const std::complex<T>& x); template<typename T> EIGEN_DEVICE_FUNC bool isinf_impl(const std::complex<T>& x); template<typename T> T generic_fast_tanh_float(const T& a_x); } // end namespace internal /**************************************************************************** * Generic math functions * ****************************************************************************/ namespace numext { #ifndef __CUDA_ARCH__ template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T mini(const T& x, const T& y) { EIGEN_USING_STD_MATH(min); return min EIGEN_NOT_A_MACRO (x,y); } template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y) { EIGEN_USING_STD_MATH(max); return max EIGEN_NOT_A_MACRO (x,y); } #else template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T mini(const T& x, const T& y) { return y < x ? y : x; } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float mini(const float& x, const float& y) { return fminf(x, y); } template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y) { return x < y ? y : x; } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float maxi(const float& x, const float& y) { return fmaxf(x, y); } #endif template<typename Scalar> EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x); } template<typename Scalar> EIGEN_DEVICE_FUNC inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x) { return internal::real_ref_impl<Scalar>::run(x); } template<typename Scalar> EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x) { return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x); } template<typename Scalar> EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x); } template<typename Scalar> EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(arg, Scalar) arg(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(arg, Scalar)::run(x); } template<typename Scalar> EIGEN_DEVICE_FUNC inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x) { return internal::imag_ref_impl<Scalar>::run(x); } template<typename Scalar> EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x) { return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x); } template<typename Scalar> EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x); } template<typename Scalar> EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x); } EIGEN_DEVICE_FUNC inline bool abs2(bool x) { return x; } template<typename Scalar> EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x); } template<typename Scalar> EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y) { return 
EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y); } template<typename Scalar> EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(log1p, Scalar) log1p(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(log1p, Scalar)::run(x); } #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float log1p(const float &x) { return ::log1pf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double log1p(const double &x) { return ::log1p(x); } #endif template<typename ScalarX,typename ScalarY> EIGEN_DEVICE_FUNC inline typename internal::pow_impl<ScalarX,ScalarY>::result_type pow(const ScalarX& x, const ScalarY& y) { return internal::pow_impl<ScalarX,ScalarY>::run(x, y); } template<typename T> EIGEN_DEVICE_FUNC bool (isnan) (const T &x) { return internal::isnan_impl(x); } template<typename T> EIGEN_DEVICE_FUNC bool (isinf) (const T &x) { return internal::isinf_impl(x); } template<typename T> EIGEN_DEVICE_FUNC bool (isfinite)(const T &x) { return internal::isfinite_impl(x); } template<typename Scalar> EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(round, Scalar) round(const Scalar& x) { return EIGEN_MATHFUNC_IMPL(round, Scalar)::run(x); } template<typename T> EIGEN_DEVICE_FUNC T (floor)(const T& x) { EIGEN_USING_STD_MATH(floor); return floor(x); } #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float floor(const float &x) { return ::floorf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double floor(const double &x) { return ::floor(x); } #endif template<typename T> EIGEN_DEVICE_FUNC T (ceil)(const T& x) { EIGEN_USING_STD_MATH(ceil); return ceil(x); } #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float ceil(const float &x) { return ::ceilf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double ceil(const double &x) { return ::ceil(x); } #endif /** Log base 2 for 32 bits positive integers. * Conveniently returns 0 for x==0. */ inline int log2(int x) { eigen_assert(x>=0); unsigned int v(x); static const int table[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; return table[(v * 0x07C4ACDDU) >> 27]; } /** \returns the square root of \a x. * * It is essentially equivalent to * \code using std::sqrt; return sqrt(x); \endcode * but slightly faster for float/double and some compilers (e.g., gcc), thanks to * specializations when SSE is enabled. * * It's usage is justified in performance critical functions, like norm/normalize. 
*/ template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T sqrt(const T &x) { EIGEN_USING_STD_MATH(sqrt); return sqrt(x); } template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T log(const T &x) { EIGEN_USING_STD_MATH(log); return log(x); } #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float log(const float &x) { return ::logf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double log(const double &x) { return ::log(x); } #endif template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename internal::enable_if<NumTraits<T>::IsSigned || NumTraits<T>::IsComplex,typename NumTraits<T>::Real>::type abs(const T &x) { EIGEN_USING_STD_MATH(abs); return abs(x); } template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename internal::enable_if<!(NumTraits<T>::IsSigned || NumTraits<T>::IsComplex),typename NumTraits<T>::Real>::type abs(const T &x) { return x; } #if defined(__SYCL_DEVICE_ONLY__) EIGEN_ALWAYS_INLINE float abs(float x) { return cl::sycl::fabs(x); } EIGEN_ALWAYS_INLINE double abs(double x) { return cl::sycl::fabs(x); } #endif // defined(__SYCL_DEVICE_ONLY__) #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float abs(const float &x) { return ::fabsf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double abs(const double &x) { return ::fabs(x); } template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float abs(const std::complex<float>& x) { return ::hypotf(x.real(), x.imag()); } template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double abs(const std::complex<double>& x) { return ::hypot(x.real(), x.imag()); } #endif template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T exp(const T &x) { EIGEN_USING_STD_MATH(exp); return exp(x); } #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float exp(const float &x) { return ::expf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double exp(const double &x) { return ::exp(x); } #endif template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T cos(const T &x) { EIGEN_USING_STD_MATH(cos); return cos(x); } #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float cos(const float &x) { return ::cosf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double cos(const double &x) { return ::cos(x); } #endif template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T sin(const T &x) { EIGEN_USING_STD_MATH(sin); return sin(x); } #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float sin(const float &x) { return ::sinf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double sin(const double &x) { return ::sin(x); } #endif template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T tan(const T &x) { EIGEN_USING_STD_MATH(tan); return tan(x); } #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float tan(const float &x) { return ::tanf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double tan(const double &x) { return ::tan(x); } #endif template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T acos(const T &x) { EIGEN_USING_STD_MATH(acos); return acos(x); } #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float acos(const float &x) { return ::acosf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double acos(const double &x) { return ::acos(x); } #endif template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T asin(const T &x) { EIGEN_USING_STD_MATH(asin); return asin(x); } #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float asin(const float &x) 
{ return ::asinf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double asin(const double &x) { return ::asin(x); } #endif template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T atan(const T &x) { EIGEN_USING_STD_MATH(atan); return atan(x); } #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float atan(const float &x) { return ::atanf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double atan(const double &x) { return ::atan(x); } #endif template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T cosh(const T &x) { EIGEN_USING_STD_MATH(cosh); return cosh(x); } #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float cosh(const float &x) { return ::coshf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double cosh(const double &x) { return ::cosh(x); } #endif template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T sinh(const T &x) { EIGEN_USING_STD_MATH(sinh); return sinh(x); } #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float sinh(const float &x) { return ::sinhf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double sinh(const double &x) { return ::sinh(x); } #endif template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T tanh(const T &x) { EIGEN_USING_STD_MATH(tanh); return tanh(x); } #if (!defined(__CUDACC__)) && EIGEN_FAST_MATH EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float tanh(float x) { return internal::generic_fast_tanh_float(x); } #endif #ifdef __CUDACC__ template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float tanh(const float &x) { return ::tanhf(x); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double tanh(const double &x) { return ::tanh(x); } #endif template <typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T fmod(const T& a, const T& b) { EIGEN_USING_STD_MATH(fmod); return fmod(a, b); } #ifdef __CUDACC__ template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float fmod(const float& a, const float& b) { return ::fmodf(a, b); } template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double fmod(const double& a, const double& b) { return ::fmod(a, b); } #endif } // end namespace numext namespace internal { template<typename T> EIGEN_DEVICE_FUNC bool isfinite_impl(const std::complex<T>& x) { return (numext::isfinite)(numext::real(x)) && (numext::isfinite)(numext::imag(x)); } template<typename T> EIGEN_DEVICE_FUNC bool isnan_impl(const std::complex<T>& x) { return (numext::isnan)(numext::real(x)) || (numext::isnan)(numext::imag(x)); } template<typename T> EIGEN_DEVICE_FUNC bool isinf_impl(const std::complex<T>& x) { return ((numext::isinf)(numext::real(x)) || (numext::isinf)(numext::imag(x))) && (!(numext::isnan)(x)); } /**************************************************************************** * Implementation of fuzzy comparisons * ****************************************************************************/ template<typename Scalar, bool IsComplex, bool IsInteger> struct scalar_fuzzy_default_impl {}; template<typename Scalar> struct scalar_fuzzy_default_impl<Scalar, false, false> { typedef typename NumTraits<Scalar>::Real RealScalar; template<typename OtherScalar> EIGEN_DEVICE_FUNC static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec) { return numext::abs(x) <= numext::abs(y) * prec; } EIGEN_DEVICE_FUNC static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) { return numext::abs(x - y) <= numext::mini(numext::abs(x), numext::abs(y)) * prec; } EIGEN_DEVICE_FUNC static inline bool isApproxOrLessThan(const Scalar& 
x, const Scalar& y, const RealScalar& prec) { return x <= y || isApprox(x, y, prec); } }; template<typename Scalar> struct scalar_fuzzy_default_impl<Scalar, false, true> { typedef typename NumTraits<Scalar>::Real RealScalar; template<typename OtherScalar> EIGEN_DEVICE_FUNC static inline bool isMuchSmallerThan(const Scalar& x, const Scalar&, const RealScalar&) { return x == Scalar(0); } EIGEN_DEVICE_FUNC static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar&) { return x == y; } EIGEN_DEVICE_FUNC static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar&) { return x <= y; } }; template<typename Scalar> struct scalar_fuzzy_default_impl<Scalar, true, false> { typedef typename NumTraits<Scalar>::Real RealScalar; template<typename OtherScalar> EIGEN_DEVICE_FUNC static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec) { return numext::abs2(x) <= numext::abs2(y) * prec * prec; } EIGEN_DEVICE_FUNC static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) { return numext::abs2(x - y) <= numext::mini(numext::abs2(x), numext::abs2(y)) * prec * prec; } }; template<typename Scalar> struct scalar_fuzzy_impl : scalar_fuzzy_default_impl<Scalar, NumTraits<Scalar>::IsComplex, NumTraits<Scalar>::IsInteger> {}; template<typename Scalar, typename OtherScalar> EIGEN_DEVICE_FUNC inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const typename NumTraits<Scalar>::Real &precision = NumTraits<Scalar>::dummy_precision()) { return scalar_fuzzy_impl<Scalar>::template isMuchSmallerThan<OtherScalar>(x, y, precision); } template<typename Scalar> EIGEN_DEVICE_FUNC inline bool isApprox(const Scalar& x, const Scalar& y, const typename NumTraits<Scalar>::Real &precision = NumTraits<Scalar>::dummy_precision()) { return scalar_fuzzy_impl<Scalar>::isApprox(x, y, precision); } template<typename Scalar> EIGEN_DEVICE_FUNC inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const typename NumTraits<Scalar>::Real &precision = NumTraits<Scalar>::dummy_precision()) { return scalar_fuzzy_impl<Scalar>::isApproxOrLessThan(x, y, precision); } /****************************************** *** The special case of the bool type *** ******************************************/ template<> struct random_impl<bool> { static inline bool run() { return random<int>(0,1)==0 ? false : true; } }; template<> struct scalar_fuzzy_impl<bool> { typedef bool RealScalar; template<typename OtherScalar> EIGEN_DEVICE_FUNC static inline bool isMuchSmallerThan(const bool& x, const bool&, const bool&) { return !x; } EIGEN_DEVICE_FUNC static inline bool isApprox(bool x, bool y, bool) { return x == y; } EIGEN_DEVICE_FUNC static inline bool isApproxOrLessThan(const bool& x, const bool& y, const bool&) { return (!x) || y; } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_MATHFUNCTIONS_H
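Most of this header is internal plumbing, but the numext wrappers it defines (mini, maxi, abs2, conj, hypot, log1p, the parenthesized isnan/isinf/isfinite, ...) are the device-friendly entry points Eigen itself uses. Below is a minimal sketch of calling a few of them directly; note that numext is primarily meant for Eigen-internal and CUDA-compatible code, so treat this as illustrative rather than as a stable public API.

#include <Eigen/Core>
#include <complex>
#include <iostream>

int main()
{
  double lo = Eigen::numext::mini(3.0, 7.0);          // scalar min, dispatches to std::min on the host
  double hi = Eigen::numext::maxi(3.0, 7.0);          // scalar max
  double h  = Eigen::numext::hypot(3.0, 4.0);         // overflow-safe sqrt(x^2 + y^2), equals 5
  double l  = Eigen::numext::log1p(1e-16);            // accurate log(1 + x) for tiny x

  std::complex<double> z(3.0, -4.0);
  double n2 = Eigen::numext::abs2(z);                 // squared magnitude, equals 25
  std::complex<double> zc = Eigen::numext::conj(z);   // complex conjugate

  bool finite = (Eigen::numext::isfinite)(h);         // parenthesized call, in case isfinite is a macro

  std::cout << lo << ' ' << hi << ' ' << h << ' ' << l << ' '
            << n2 << ' ' << zc << ' ' << finite << "\n";
  return 0;
}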
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/CommaInitializer.h
.h
5,689
161
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COMMAINITIALIZER_H #define EIGEN_COMMAINITIALIZER_H namespace Eigen { /** \class CommaInitializer * \ingroup Core_Module * * \brief Helper class used by the comma initializer operator * * This class is internally used to implement the comma initializer feature. It is * the return type of MatrixBase::operator<<, and most of the time this is the only * way it is used. * * \sa \blank \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished() */ template<typename XprType> struct CommaInitializer { typedef typename XprType::Scalar Scalar; EIGEN_DEVICE_FUNC inline CommaInitializer(XprType& xpr, const Scalar& s) : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1) { m_xpr.coeffRef(0,0) = s; } template<typename OtherDerived> EIGEN_DEVICE_FUNC inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other) : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows()) { m_xpr.block(0, 0, other.rows(), other.cols()) = other; } /* Copy/Move constructor which transfers ownership. This is crucial in * absence of return value optimization to avoid assertions during destruction. */ // FIXME in C++11 mode this could be replaced by a proper RValue constructor EIGEN_DEVICE_FUNC inline CommaInitializer(const CommaInitializer& o) : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) { // Mark original object as finished. 
In absence of R-value references we need to const_cast: const_cast<CommaInitializer&>(o).m_row = m_xpr.rows(); const_cast<CommaInitializer&>(o).m_col = m_xpr.cols(); const_cast<CommaInitializer&>(o).m_currentBlockRows = 0; } /* inserts a scalar value in the target matrix */ EIGEN_DEVICE_FUNC CommaInitializer& operator,(const Scalar& s) { if (m_col==m_xpr.cols()) { m_row+=m_currentBlockRows; m_col = 0; m_currentBlockRows = 1; eigen_assert(m_row<m_xpr.rows() && "Too many rows passed to comma initializer (operator<<)"); } eigen_assert(m_col<m_xpr.cols() && "Too many coefficients passed to comma initializer (operator<<)"); eigen_assert(m_currentBlockRows==1); m_xpr.coeffRef(m_row, m_col++) = s; return *this; } /* inserts a matrix expression in the target matrix */ template<typename OtherDerived> EIGEN_DEVICE_FUNC CommaInitializer& operator,(const DenseBase<OtherDerived>& other) { if (m_col==m_xpr.cols() && (other.cols()!=0 || other.rows()!=m_currentBlockRows)) { m_row+=m_currentBlockRows; m_col = 0; m_currentBlockRows = other.rows(); eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows() && "Too many rows passed to comma initializer (operator<<)"); } eigen_assert((m_col + other.cols() <= m_xpr.cols()) && "Too many coefficients passed to comma initializer (operator<<)"); eigen_assert(m_currentBlockRows==other.rows()); m_xpr.template block<OtherDerived::RowsAtCompileTime, OtherDerived::ColsAtCompileTime> (m_row, m_col, other.rows(), other.cols()) = other; m_col += other.cols(); return *this; } EIGEN_DEVICE_FUNC inline ~CommaInitializer() #if defined VERIFY_RAISES_ASSERT && (!defined EIGEN_NO_ASSERTION_CHECKING) && defined EIGEN_EXCEPTIONS EIGEN_EXCEPTION_SPEC(Eigen::eigen_assert_exception) #endif { finished(); } /** \returns the built matrix once all its coefficients have been set. * Calling finished is 100% optional. Its purpose is to write expressions * like this: * \code * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished()); * \endcode */ EIGEN_DEVICE_FUNC inline XprType& finished() { eigen_assert(((m_row+m_currentBlockRows) == m_xpr.rows() || m_xpr.cols() == 0) && m_col == m_xpr.cols() && "Too few coefficients passed to comma initializer (operator<<)"); return m_xpr; } XprType& m_xpr; // target expression Index m_row; // current row id Index m_col; // current col id Index m_currentBlockRows; // current block height }; /** \anchor MatrixBaseCommaInitRef * Convenient operator to set the coefficients of a matrix. * * The coefficients must be provided in a row major order and exactly match * the size of the matrix. Otherwise an assertion is raised. * * Example: \include MatrixBase_set.cpp * Output: \verbinclude MatrixBase_set.out * * \note According the c++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order. * * \sa CommaInitializer::finished(), class CommaInitializer */ template<typename Derived> inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s) { return CommaInitializer<Derived>(*static_cast<Derived*>(this), s); } /** \sa operator<<(const Scalar&) */ template<typename Derived> template<typename OtherDerived> inline CommaInitializer<Derived> DenseBase<Derived>::operator<<(const DenseBase<OtherDerived>& other) { return CommaInitializer<Derived>(*static_cast<Derived *>(this), other); } } // end namespace Eigen #endif // EIGEN_COMMAINITIALIZER_H
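A minimal sketch of the comma initializer in use, mirroring the finished() example above; the coefficient values and the column vectors are arbitrary.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d m;
  m << 1, 2, 3,          // coefficients are consumed in row-major order
       4, 5, 6,
       7, 8, 9;          // the destructor asserts that exactly rows()*cols() values were supplied

  Eigen::Vector3d a(1, 0, 0), b(0, 1, 0), c(0, 0, 1);
  Eigen::Matrix3d basis = (Eigen::Matrix3d() << a, b, c).finished();  // nested expressions fill whole blocks; finished() returns the built matrix

  std::cout << m << "\n\n" << basis << "\n";
  return 0;
}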
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Matrix.h
.h
19,067
460
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIX_H #define EIGEN_MATRIX_H namespace Eigen { namespace internal { template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols> struct traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > { private: enum { size = internal::size_at_compile_time<_Rows,_Cols>::ret }; typedef typename find_best_packet<_Scalar,size>::type PacketScalar; enum { row_major_bit = _Options&RowMajor ? RowMajorBit : 0, is_dynamic_size_storage = _MaxRows==Dynamic || _MaxCols==Dynamic, max_size = is_dynamic_size_storage ? Dynamic : _MaxRows*_MaxCols, default_alignment = compute_default_alignment<_Scalar,max_size>::value, actual_alignment = ((_Options&DontAlign)==0) ? default_alignment : 0, required_alignment = unpacket_traits<PacketScalar>::alignment, packet_access_bit = (packet_traits<_Scalar>::Vectorizable && (EIGEN_UNALIGNED_VECTORIZE || (actual_alignment>=required_alignment))) ? PacketAccessBit : 0 }; public: typedef _Scalar Scalar; typedef Dense StorageKind; typedef Eigen::Index StorageIndex; typedef MatrixXpr XprKind; enum { RowsAtCompileTime = _Rows, ColsAtCompileTime = _Cols, MaxRowsAtCompileTime = _MaxRows, MaxColsAtCompileTime = _MaxCols, Flags = compute_matrix_flags<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::ret, Options = _Options, InnerStrideAtCompileTime = 1, OuterStrideAtCompileTime = (Options&RowMajor) ? ColsAtCompileTime : RowsAtCompileTime, // FIXME, the following flag in only used to define NeedsToAlign in PlainObjectBase EvaluatorFlags = LinearAccessBit | DirectAccessBit | packet_access_bit | row_major_bit, Alignment = actual_alignment }; }; } /** \class Matrix * \ingroup Core_Module * * \brief The matrix class, also used for vectors and row-vectors * * The %Matrix class is the work-horse for all \em dense (\ref dense "note") matrices and vectors within Eigen. * Vectors are matrices with one column, and row-vectors are matrices with one row. * * The %Matrix class encompasses \em both fixed-size and dynamic-size objects (\ref fixedsize "note"). * * The first three template parameters are required: * \tparam _Scalar Numeric type, e.g. float, double, int or std::complex<float>. * User defined scalar types are supported as well (see \ref user_defined_scalars "here"). * \tparam _Rows Number of rows, or \b Dynamic * \tparam _Cols Number of columns, or \b Dynamic * * The remaining template parameters are optional -- in most cases you don't have to worry about them. * \tparam _Options A combination of either \b #RowMajor or \b #ColMajor, and of either * \b #AutoAlign or \b #DontAlign. * The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required * for vectorization. It defaults to aligning matrices except for fixed sizes that aren't a multiple of the packet size. * \tparam _MaxRows Maximum number of rows. Defaults to \a _Rows (\ref maxrows "note"). * \tparam _MaxCols Maximum number of columns. Defaults to \a _Cols (\ref maxrows "note"). * * Eigen provides a number of typedefs covering the usual cases. 
Here are some examples: * * \li \c Matrix2d is a 2x2 square matrix of doubles (\c Matrix<double, 2, 2>) * \li \c Vector4f is a vector of 4 floats (\c Matrix<float, 4, 1>) * \li \c RowVector3i is a row-vector of 3 ints (\c Matrix<int, 1, 3>) * * \li \c MatrixXf is a dynamic-size matrix of floats (\c Matrix<float, Dynamic, Dynamic>) * \li \c VectorXf is a dynamic-size vector of floats (\c Matrix<float, Dynamic, 1>) * * \li \c Matrix2Xf is a partially fixed-size (dynamic-size) matrix of floats (\c Matrix<float, 2, Dynamic>) * \li \c MatrixX3d is a partially dynamic-size (fixed-size) matrix of doubles (\c Matrix<double, Dynamic, 3>) * * See \link matrixtypedefs this page \endlink for a complete list of predefined \em %Matrix and \em Vector typedefs. * * You can access elements of vectors and matrices using normal subscripting: * * \code * Eigen::VectorXd v(10); * v[0] = 0.1; * v[1] = 0.2; * v(0) = 0.3; * v(1) = 0.4; * * Eigen::MatrixXi m(10, 10); * m(0, 1) = 1; * m(0, 2) = 2; * m(0, 3) = 3; * \endcode * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_MATRIX_PLUGIN. * * <i><b>Some notes:</b></i> * * <dl> * <dt><b>\anchor dense Dense versus sparse:</b></dt> * <dd>This %Matrix class handles dense, not sparse matrices and vectors. For sparse matrices and vectors, see the Sparse module. * * Dense matrices and vectors are plain usual arrays of coefficients. All the coefficients are stored in an ordinary contiguous array. * This is unlike Sparse matrices and vectors where the coefficients are stored as a list of nonzero coefficients.</dd> * * <dt><b>\anchor fixedsize Fixed-size versus dynamic-size:</b></dt> * <dd>Fixed-size means that the numbers of rows and columns are known at compile-time. In this case, Eigen allocates the array * of coefficients as a fixed-size array, as a class member. This makes sense for very small matrices, typically up to 4x4, sometimes up * to 16x16. Larger matrices should be declared as dynamic-size even if one happens to know their size at compile-time. * * Dynamic-size means that the numbers of rows or columns are not necessarily known at compile-time. In this case they are runtime * variables, and the array of coefficients is allocated dynamically on the heap. * * Note that \em dense matrices, be they Fixed-size or Dynamic-size, <em>do not</em> expand dynamically in the sense of a std::map. * If you want this behavior, see the Sparse module.</dd> * * <dt><b>\anchor maxrows _MaxRows and _MaxCols:</b></dt> * <dd>In most cases, one just leaves these parameters to the default values. * These parameters mean the maximum size of rows and columns that the matrix may have. They are useful in cases * when the exact numbers of rows and columns are not known at compile-time, but it is known at compile-time that they cannot * exceed a certain value. This happens when taking dynamic-size blocks inside fixed-size matrices: in this case _MaxRows and _MaxCols * are the dimensions of the original matrix, while _Rows and _Cols are Dynamic.</dd> * </dl> * * <i><b>ABI and storage layout</b></i> * * The table below summarizes the ABI of some possible Matrix instances which is fixed throughout the lifetime of Eigen 3. 
* <table class="manual"> * <tr><th>Matrix type</th><th>Equivalent C structure</th></tr> * <tr><td>\code Matrix<T,Dynamic,Dynamic> \endcode</td><td>\code * struct { * T *data; // with (size_t(data)%EIGEN_MAX_ALIGN_BYTES)==0 * Eigen::Index rows, cols; * }; * \endcode</td></tr> * <tr class="alt"><td>\code * Matrix<T,Dynamic,1> * Matrix<T,1,Dynamic> \endcode</td><td>\code * struct { * T *data; // with (size_t(data)%EIGEN_MAX_ALIGN_BYTES)==0 * Eigen::Index size; * }; * \endcode</td></tr> * <tr><td>\code Matrix<T,Rows,Cols> \endcode</td><td>\code * struct { * T data[Rows*Cols]; // with (size_t(data)%A(Rows*Cols*sizeof(T)))==0 * }; * \endcode</td></tr> * <tr class="alt"><td>\code Matrix<T,Dynamic,Dynamic,0,MaxRows,MaxCols> \endcode</td><td>\code * struct { * T data[MaxRows*MaxCols]; // with (size_t(data)%A(MaxRows*MaxCols*sizeof(T)))==0 * Eigen::Index rows, cols; * }; * \endcode</td></tr> * </table> * Note that in this table Rows, Cols, MaxRows and MaxCols are all positive integers. A(S) is defined to the largest possible power-of-two * smaller to EIGEN_MAX_STATIC_ALIGN_BYTES. * * \see MatrixBase for the majority of the API methods for matrices, \ref TopicClassHierarchy, * \ref TopicStorageOrders */ template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols> class Matrix : public PlainObjectBase<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > { public: /** \brief Base class typedef. * \sa PlainObjectBase */ typedef PlainObjectBase<Matrix> Base; enum { Options = _Options }; EIGEN_DENSE_PUBLIC_INTERFACE(Matrix) typedef typename Base::PlainObject PlainObject; using Base::base; using Base::coeffRef; /** * \brief Assigns matrices to each other. * * \note This is a special case of the templated operator=. Its purpose is * to prevent a default operator= from hiding the templated operator=. * * \callgraph */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const Matrix& other) { return Base::_set(other); } /** \internal * \brief Copies the value of the expression \a other into \c *this with automatic resizing. * * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), * it will be initialized. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const DenseBase<OtherDerived>& other) { return Base::_set(other); } /* Here, doxygen failed to copy the brief information when using \copydoc */ /** * \brief Copies the generic expression \a other into *this. * \copydetails DenseBase::operator=(const EigenBase<OtherDerived> &other) */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const EigenBase<OtherDerived> &other) { return Base::operator=(other); } template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix& operator=(const ReturnByValue<OtherDerived>& func) { return Base::operator=(func); } /** \brief Default constructor. * * For fixed-size matrices, does nothing. * * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix * is called a null matrix. This constructor is the unique way to create null matrices: resizing * a matrix to 0 is not supported. 
* * \sa resize(Index,Index) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix() : Base() { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } // FIXME is it still needed EIGEN_DEVICE_FUNC explicit Matrix(internal::constructor_without_unaligned_array_assert) : Base(internal::constructor_without_unaligned_array_assert()) { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #if EIGEN_HAS_RVALUE_REFERENCES EIGEN_DEVICE_FUNC Matrix(Matrix&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_constructible<Scalar>::value) : Base(std::move(other)) { Base::_check_template_params(); } EIGEN_DEVICE_FUNC Matrix& operator=(Matrix&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable<Scalar>::value) { other.swap(*this); return *this; } #endif #ifndef EIGEN_PARSED_BY_DOXYGEN // This constructor is for both 1x1 matrices and dynamic vectors template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Matrix(const T& x) { Base::_check_template_params(); Base::template _init1<T>(x); } template<typename T0, typename T1> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const T0& x, const T1& y) { Base::_check_template_params(); Base::template _init2<T0,T1>(x, y); } #else /** \brief Constructs a fixed-sized matrix initialized with coefficients starting at \a data */ EIGEN_DEVICE_FUNC explicit Matrix(const Scalar *data); /** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors * * This is useful for dynamic-size vectors. For fixed-size vectors, * it is redundant to pass these parameters, so one should use the default constructor * Matrix() instead. * * \warning This constructor is disabled for fixed-size \c 1x1 matrices. For instance, * calling Matrix<double,1,1>(1) will call the initialization constructor: Matrix(const Scalar&). * For fixed-size \c 1x1 matrices it is therefore recommended to use the default * constructor Matrix() instead, especially when using one of the non standard * \c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\c NAN} macros (see \ref TopicPreprocessorDirectives). */ EIGEN_STRONG_INLINE explicit Matrix(Index dim); /** \brief Constructs an initialized 1x1 matrix with the given coefficient */ Matrix(const Scalar& x); /** \brief Constructs an uninitialized matrix with \a rows rows and \a cols columns. * * This is useful for dynamic-size matrices. For fixed-size matrices, * it is redundant to pass these parameters, so one should use the default constructor * Matrix() instead. * * \warning This constructor is disabled for fixed-size \c 1x2 and \c 2x1 vectors. For instance, * calling Matrix2f(2,1) will call the initialization constructor: Matrix(const Scalar& x, const Scalar& y). * For fixed-size \c 1x2 or \c 2x1 vectors it is therefore recommended to use the default * constructor Matrix() instead, especially when using one of the non standard * \c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\c NAN} macros (see \ref TopicPreprocessorDirectives). 
*/ EIGEN_DEVICE_FUNC Matrix(Index rows, Index cols); /** \brief Constructs an initialized 2D vector with given coefficients */ Matrix(const Scalar& x, const Scalar& y); #endif /** \brief Constructs an initialized 3D vector with given coefficients */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z) { Base::_check_template_params(); EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 3) m_storage.data()[0] = x; m_storage.data()[1] = y; m_storage.data()[2] = z; } /** \brief Constructs an initialized 4D vector with given coefficients */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w) { Base::_check_template_params(); EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 4) m_storage.data()[0] = x; m_storage.data()[1] = y; m_storage.data()[2] = z; m_storage.data()[3] = w; } /** \brief Copy constructor */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const Matrix& other) : Base(other) { } /** \brief Copy constructor for generic expressions. * \sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Matrix(const EigenBase<OtherDerived> &other) : Base(other.derived()) { } EIGEN_DEVICE_FUNC inline Index innerStride() const { return 1; } EIGEN_DEVICE_FUNC inline Index outerStride() const { return this->innerSize(); } /////////// Geometry module /////////// template<typename OtherDerived> EIGEN_DEVICE_FUNC explicit Matrix(const RotationBase<OtherDerived,ColsAtCompileTime>& r); template<typename OtherDerived> EIGEN_DEVICE_FUNC Matrix& operator=(const RotationBase<OtherDerived,ColsAtCompileTime>& r); // allow to extend Matrix outside Eigen #ifdef EIGEN_MATRIX_PLUGIN #include EIGEN_MATRIX_PLUGIN #endif protected: template <typename Derived, typename OtherDerived, bool IsVector> friend struct internal::conservative_resize_like_impl; using Base::m_storage; }; /** \defgroup matrixtypedefs Global matrix typedefs * * \ingroup Core_Module * * Eigen defines several typedef shortcuts for most common matrix and vector types. * * The general patterns are the following: * * \c MatrixSizeType where \c Size can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size, * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd * for complex double. * * For example, \c Matrix3d is a fixed-size 3x3 matrix type of doubles, and \c MatrixXf is a dynamic-size matrix of floats. * * There are also \c VectorSizeType and \c RowVectorSizeType which are self-explanatory. For example, \c Vector4cf is * a fixed-size vector of 4 complex floats. 
* * \sa class Matrix */ #define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ /** \ingroup matrixtypedefs */ \ typedef Matrix<Type, Size, Size> Matrix##SizeSuffix##TypeSuffix; \ /** \ingroup matrixtypedefs */ \ typedef Matrix<Type, Size, 1> Vector##SizeSuffix##TypeSuffix; \ /** \ingroup matrixtypedefs */ \ typedef Matrix<Type, 1, Size> RowVector##SizeSuffix##TypeSuffix; #define EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \ /** \ingroup matrixtypedefs */ \ typedef Matrix<Type, Size, Dynamic> Matrix##Size##X##TypeSuffix; \ /** \ingroup matrixtypedefs */ \ typedef Matrix<Type, Dynamic, Size> Matrix##X##Size##TypeSuffix; #define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \ EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \ EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \ EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \ EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 4) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int, i) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<float>, cf) EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<double>, cd) #undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES #undef EIGEN_MAKE_TYPEDEFS #undef EIGEN_MAKE_FIXED_TYPEDEFS } // end namespace Eigen #endif // EIGEN_MATRIX_H
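As a quick illustration of the class documentation and the typedefs generated above, here is a minimal usage sketch (not part of Matrix.h; the concrete sizes and values are arbitrary) contrasting fixed-size, dynamic-size, and partially fixed-size instantiations.

// Hypothetical usage example for the Matrix class and its typedefs; not part of the Eigen sources above.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  // Fixed-size: dimensions known at compile time, coefficients stored in a plain member array.
  Eigen::Matrix3f a = Eigen::Matrix3f::Identity();    // Matrix<float, 3, 3>
  Eigen::Vector4f v(1.0f, 2.0f, 3.0f, 4.0f);          // Matrix<float, 4, 1>, 4D coefficient constructor

  // Dynamic-size: dimensions are runtime values, coefficients allocated on the heap.
  Eigen::MatrixXd b(2, 5);                            // Matrix<double, Dynamic, Dynamic>, uninitialized
  b.setRandom();

  // Partially fixed: two rows known at compile time, column count chosen at runtime.
  Eigen::Matrix2Xf c(2, 8);                           // Matrix<float, 2, Dynamic>
  c.setZero();

  // Element access uses operator() for matrices, and additionally operator[] for vectors.
  b(0, 0) = 3.14;
  v[2] = -1.0f;

  std::cout << a * a.transpose() << "\n" << b.rows() << "x" << b.cols() << "\n";
  return 0;
}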
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Solve.h
.h
6,795
189
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SOLVE_H #define EIGEN_SOLVE_H namespace Eigen { template<typename Decomposition, typename RhsType, typename StorageKind> class SolveImpl; /** \class Solve * \ingroup Core_Module * * \brief Pseudo expression representing a solving operation * * \tparam Decomposition the type of the matrix or decomposion object * \tparam Rhstype the type of the right-hand side * * This class represents an expression of A.solve(B) * and most of the time this is the only way it is used. * */ namespace internal { // this solve_traits class permits to determine the evaluation type with respect to storage kind (Dense vs Sparse) template<typename Decomposition, typename RhsType,typename StorageKind> struct solve_traits; template<typename Decomposition, typename RhsType> struct solve_traits<Decomposition,RhsType,Dense> { typedef typename make_proper_matrix_type<typename RhsType::Scalar, Decomposition::ColsAtCompileTime, RhsType::ColsAtCompileTime, RhsType::PlainObject::Options, Decomposition::MaxColsAtCompileTime, RhsType::MaxColsAtCompileTime>::type PlainObject; }; template<typename Decomposition, typename RhsType> struct traits<Solve<Decomposition, RhsType> > : traits<typename solve_traits<Decomposition,RhsType,typename internal::traits<RhsType>::StorageKind>::PlainObject> { typedef typename solve_traits<Decomposition,RhsType,typename internal::traits<RhsType>::StorageKind>::PlainObject PlainObject; typedef typename promote_index_type<typename Decomposition::StorageIndex, typename RhsType::StorageIndex>::type StorageIndex; typedef traits<PlainObject> BaseTraits; enum { Flags = BaseTraits::Flags & RowMajorBit, CoeffReadCost = HugeCost }; }; } template<typename Decomposition, typename RhsType> class Solve : public SolveImpl<Decomposition,RhsType,typename internal::traits<RhsType>::StorageKind> { public: typedef typename internal::traits<Solve>::PlainObject PlainObject; typedef typename internal::traits<Solve>::StorageIndex StorageIndex; Solve(const Decomposition &dec, const RhsType &rhs) : m_dec(dec), m_rhs(rhs) {} EIGEN_DEVICE_FUNC Index rows() const { return m_dec.cols(); } EIGEN_DEVICE_FUNC Index cols() const { return m_rhs.cols(); } EIGEN_DEVICE_FUNC const Decomposition& dec() const { return m_dec; } EIGEN_DEVICE_FUNC const RhsType& rhs() const { return m_rhs; } protected: const Decomposition &m_dec; const RhsType &m_rhs; }; // Specialization of the Solve expression for dense results template<typename Decomposition, typename RhsType> class SolveImpl<Decomposition,RhsType,Dense> : public MatrixBase<Solve<Decomposition,RhsType> > { typedef Solve<Decomposition,RhsType> Derived; public: typedef MatrixBase<Solve<Decomposition,RhsType> > Base; EIGEN_DENSE_PUBLIC_INTERFACE(Derived) private: Scalar coeff(Index row, Index col) const; Scalar coeff(Index i) const; }; // Generic API dispatcher template<typename Decomposition, typename RhsType, typename StorageKind> class SolveImpl : public internal::generic_xpr_base<Solve<Decomposition,RhsType>, MatrixXpr, StorageKind>::type { public: typedef typename internal::generic_xpr_base<Solve<Decomposition,RhsType>, MatrixXpr, StorageKind>::type Base; }; namespace internal { // Evaluator of Solve -> eval into a 
temporary template<typename Decomposition, typename RhsType> struct evaluator<Solve<Decomposition,RhsType> > : public evaluator<typename Solve<Decomposition,RhsType>::PlainObject> { typedef Solve<Decomposition,RhsType> SolveType; typedef typename SolveType::PlainObject PlainObject; typedef evaluator<PlainObject> Base; enum { Flags = Base::Flags | EvalBeforeNestingBit }; EIGEN_DEVICE_FUNC explicit evaluator(const SolveType& solve) : m_result(solve.rows(), solve.cols()) { ::new (static_cast<Base*>(this)) Base(m_result); solve.dec()._solve_impl(solve.rhs(), m_result); } protected: PlainObject m_result; }; // Specialization for "dst = dec.solve(rhs)" // NOTE we need to specialize it for Dense2Dense to avoid ambiguous specialization error and a Sparse2Sparse specialization must exist somewhere template<typename DstXprType, typename DecType, typename RhsType, typename Scalar> struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Dense2Dense> { typedef Solve<DecType,RhsType> SrcXprType; static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &) { Index dstRows = src.rows(); Index dstCols = src.cols(); if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) dst.resize(dstRows, dstCols); src.dec()._solve_impl(src.rhs(), dst); } }; // Specialization for "dst = dec.transpose().solve(rhs)" template<typename DstXprType, typename DecType, typename RhsType, typename Scalar> struct Assignment<DstXprType, Solve<Transpose<const DecType>,RhsType>, internal::assign_op<Scalar,Scalar>, Dense2Dense> { typedef Solve<Transpose<const DecType>,RhsType> SrcXprType; static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &) { Index dstRows = src.rows(); Index dstCols = src.cols(); if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) dst.resize(dstRows, dstCols); src.dec().nestedExpression().template _solve_impl_transposed<false>(src.rhs(), dst); } }; // Specialization for "dst = dec.adjoint().solve(rhs)" template<typename DstXprType, typename DecType, typename RhsType, typename Scalar> struct Assignment<DstXprType, Solve<CwiseUnaryOp<internal::scalar_conjugate_op<typename DecType::Scalar>, const Transpose<const DecType> >,RhsType>, internal::assign_op<Scalar,Scalar>, Dense2Dense> { typedef Solve<CwiseUnaryOp<internal::scalar_conjugate_op<typename DecType::Scalar>, const Transpose<const DecType> >,RhsType> SrcXprType; static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &) { Index dstRows = src.rows(); Index dstCols = src.cols(); if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) dst.resize(dstRows, dstCols); src.dec().nestedExpression().nestedExpression().template _solve_impl_transposed<true>(src.rhs(), dst); } }; } // end namepsace internal } // end namespace Eigen #endif // EIGEN_SOLVE_H
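For context, a short usage sketch of the Solve pseudo expression follows (not part of Solve.h). It assumes a decomposition that exposes the transposed/adjoint solve path handled by the Assignment specializations above; PartialPivLU is used here on that assumption, and the matrix sizes are arbitrary.

// Hypothetical usage example for the Solve expression; not part of the Eigen sources above.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
  Eigen::VectorXd b = Eigen::VectorXd::Random(4);

  Eigen::PartialPivLU<Eigen::MatrixXd> lu(A);

  // lu.solve(b) only builds a Solve<> pseudo expression; the actual triangular solves run
  // when it is assigned, via the Dense2Dense Assignment specialization calling _solve_impl().
  Eigen::VectorXd x = lu.solve(b);

  // The transpose()/adjoint() variants are dispatched to _solve_impl_transposed<>()
  // without ever forming A^T or A^* explicitly (assuming the decomposition supports it).
  Eigen::VectorXd xt = lu.transpose().solve(b);   // solves A^T * xt = b
  Eigen::VectorXd xa = lu.adjoint().solve(b);     // solves A^* * xa = b

  std::cout << (A * x - b).norm() << " "
            << (A.transpose() * xt - b).norm() << "\n";
  return 0;
}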
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/StableNorm.h
.h
7,692
222
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STABLENORM_H #define EIGEN_STABLENORM_H namespace Eigen { namespace internal { template<typename ExpressionType, typename Scalar> inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale) { Scalar maxCoeff = bl.cwiseAbs().maxCoeff(); if(maxCoeff>scale) { ssq = ssq * numext::abs2(scale/maxCoeff); Scalar tmp = Scalar(1)/maxCoeff; if(tmp > NumTraits<Scalar>::highest()) { invScale = NumTraits<Scalar>::highest(); scale = Scalar(1)/invScale; } else if(maxCoeff>NumTraits<Scalar>::highest()) // we got a INF { invScale = Scalar(1); scale = maxCoeff; } else { scale = maxCoeff; invScale = tmp; } } else if(maxCoeff!=maxCoeff) // we got a NaN { scale = maxCoeff; } // TODO if the maxCoeff is much much smaller than the current scale, // then we can neglect this sub vector if(scale>Scalar(0)) // if scale==0, then bl is 0 ssq += (bl*invScale).squaredNorm(); } template<typename Derived> inline typename NumTraits<typename traits<Derived>::Scalar>::Real blueNorm_impl(const EigenBase<Derived>& _vec) { typedef typename Derived::RealScalar RealScalar; using std::pow; using std::sqrt; using std::abs; const Derived& vec(_vec.derived()); static bool initialized = false; static RealScalar b1, b2, s1m, s2m, rbig, relerr; if(!initialized) { int ibeta, it, iemin, iemax, iexp; RealScalar eps; // This program calculates the machine-dependent constants // bl, b2, slm, s2m, relerr overfl // from the "basic" machine-dependent numbers // nbig, ibeta, it, iemin, iemax, rbig. // The following define the basic machine-dependent constants. // For portability, the PORT subprograms "ilmaeh" and "rlmach" // are used. 
For any specific computer, each of the assignment // statements can be replaced ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent iemax = std::numeric_limits<RealScalar>::max_exponent; // maximum exponent rbig = (std::numeric_limits<RealScalar>::max)(); // largest floating-point number iexp = -((1-iemin)/2); b1 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // lower boundary of midrange iexp = (iemax + 1 - it)/2; b2 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // upper boundary of midrange iexp = (2-iemin)/2; s1m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for lower range iexp = - ((iemax+it)/2); s2m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for upper range eps = RealScalar(pow(double(ibeta), 1-it)); relerr = sqrt(eps); // tolerance for neglecting asml initialized = true; } Index n = vec.size(); RealScalar ab2 = b2 / RealScalar(n); RealScalar asml = RealScalar(0); RealScalar amed = RealScalar(0); RealScalar abig = RealScalar(0); for(typename Derived::InnerIterator it(vec, 0); it; ++it) { RealScalar ax = abs(it.value()); if(ax > ab2) abig += numext::abs2(ax*s2m); else if(ax < b1) asml += numext::abs2(ax*s1m); else amed += numext::abs2(ax); } if(amed!=amed) return amed; // we got a NaN if(abig > RealScalar(0)) { abig = sqrt(abig); if(abig > rbig) // overflow, or *this contains INF values return abig; // return INF if(amed > RealScalar(0)) { abig = abig/s2m; amed = sqrt(amed); } else return abig/s2m; } else if(asml > RealScalar(0)) { if (amed > RealScalar(0)) { abig = sqrt(amed); amed = sqrt(asml) / s1m; } else return sqrt(asml)/s1m; } else return sqrt(amed); asml = numext::mini(abig, amed); abig = numext::maxi(abig, amed); if(asml <= abig*relerr) return abig; else return abig * sqrt(RealScalar(1) + numext::abs2(asml/abig)); } } // end namespace internal /** \returns the \em l2 norm of \c *this avoiding underflow and overflow. * This version use a blockwise two passes algorithm: * 1 - find the absolute largest coefficient \c s * 2 - compute \f$ s \Vert \frac{*this}{s} \Vert \f$ in a standard way * * For architecture/scalar types supporting vectorization, this version * is faster than blueNorm(). Otherwise the blueNorm() is much faster. 
* * \sa norm(), blueNorm(), hypotNorm() */ template<typename Derived> inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::stableNorm() const { using std::sqrt; using std::abs; const Index blockSize = 4096; RealScalar scale(0); RealScalar invScale(1); RealScalar ssq(0); // sum of squares typedef typename internal::nested_eval<Derived,2>::type DerivedCopy; typedef typename internal::remove_all<DerivedCopy>::type DerivedCopyClean; const DerivedCopy copy(derived()); enum { CanAlign = ( (int(DerivedCopyClean::Flags)&DirectAccessBit) || (int(internal::evaluator<DerivedCopyClean>::Alignment)>0) // FIXME Alignment)>0 might not be enough ) && (blockSize*sizeof(Scalar)*2<EIGEN_STACK_ALLOCATION_LIMIT) && (EIGEN_MAX_STATIC_ALIGN_BYTES>0) // if we cannot allocate on the stack, then let's not bother about this optimization }; typedef typename internal::conditional<CanAlign, Ref<const Matrix<Scalar,Dynamic,1,0,blockSize,1>, internal::evaluator<DerivedCopyClean>::Alignment>, typename DerivedCopyClean::ConstSegmentReturnType>::type SegmentWrapper; Index n = size(); if(n==1) return abs(this->coeff(0)); Index bi = internal::first_default_aligned(copy); if (bi>0) internal::stable_norm_kernel(copy.head(bi), ssq, scale, invScale); for (; bi<n; bi+=blockSize) internal::stable_norm_kernel(SegmentWrapper(copy.segment(bi,numext::mini(blockSize, n - bi))), ssq, scale, invScale); return scale * sqrt(ssq); } /** \returns the \em l2 norm of \c *this using Blue's algorithm. * See "A Portable Fortran Program to Find the Euclidean Norm of a Vector", * ACM TOMS, Vol 4, Issue 1, 1978. * * For architecture/scalar types without vectorization, this version * is much faster than stableNorm(). Otherwise the stableNorm() is faster. * * \sa norm(), stableNorm(), hypotNorm() */ template<typename Derived> inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::blueNorm() const { return internal::blueNorm_impl(*this); } /** \returns the \em l2 norm of \c *this avoiding underflow and overflow. * This version uses a concatenation of hypot() calls, and it is very slow. * * \sa norm(), stableNorm() */ template<typename Derived> inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::hypotNorm() const { return this->cwiseAbs().redux(internal::scalar_hypot_op<RealScalar>()); } } // end namespace Eigen #endif // EIGEN_STABLENORM_H
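To make the difference between the three robust norms concrete, here is a small usage sketch (not part of StableNorm.h; the magnitudes are chosen only to force overflow in the naive sum of squares).

// Hypothetical usage example comparing the norm variants; not part of the Eigen sources above.
#include <Eigen/Dense>
#include <iostream>
#include <limits>

int main()
{
  Eigen::VectorXd v(3);
  const double big = std::numeric_limits<double>::max() / 4.0;
  v << big, big, big;

  // norm() squares the coefficients before summing and overflows to +inf here, whereas the
  // rescaling/hypot based variants return a finite value close to big*sqrt(3).
  std::cout << "norm():       " << v.norm()       << "\n";
  std::cout << "stableNorm(): " << v.stableNorm() << "\n";
  std::cout << "blueNorm():   " << v.blueNorm()   << "\n";
  std::cout << "hypotNorm():  " << v.hypotNorm()  << "\n";
  return 0;
}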
Unknown
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/PlainObjectBase.h
.h
45,354
1,038
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DENSESTORAGEBASE_H #define EIGEN_DENSESTORAGEBASE_H #if defined(EIGEN_INITIALIZE_MATRICES_BY_ZERO) # define EIGEN_INITIALIZE_COEFFS # define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=Scalar(0); #elif defined(EIGEN_INITIALIZE_MATRICES_BY_NAN) # define EIGEN_INITIALIZE_COEFFS # define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=std::numeric_limits<Scalar>::quiet_NaN(); #else # undef EIGEN_INITIALIZE_COEFFS # define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED #endif namespace Eigen { namespace internal { template<int MaxSizeAtCompileTime> struct check_rows_cols_for_overflow { template<typename Index> EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE void run(Index, Index) { } }; template<> struct check_rows_cols_for_overflow<Dynamic> { template<typename Index> EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE void run(Index rows, Index cols) { // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242 // we assume Index is signed Index max_index = (std::size_t(1) << (8 * sizeof(Index) - 1)) - 1; // assume Index is signed bool error = (rows == 0 || cols == 0) ? false : (rows > max_index / cols); if (error) throw_std_bad_alloc(); } }; template <typename Derived, typename OtherDerived = Derived, bool IsVector = bool(Derived::IsVectorAtCompileTime) && bool(OtherDerived::IsVectorAtCompileTime)> struct conservative_resize_like_impl; template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers> struct matrix_swap_impl; } // end namespace internal #ifdef EIGEN_PARSED_BY_DOXYGEN namespace doxygen { // This is a workaround to doxygen not being able to understand the inheritance logic // when it is hidden by the dense_xpr_base helper struct. // Moreover, doxygen fails to include members that are not documented in the declaration body of // MatrixBase if we inherits MatrixBase<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >, // this is why we simply inherits MatrixBase, though this does not make sense. /** This class is just a workaround for Doxygen and it does not not actually exist. */ template<typename Derived> struct dense_xpr_base_dispatcher; /** This class is just a workaround for Doxygen and it does not not actually exist. */ template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols> struct dense_xpr_base_dispatcher<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > : public MatrixBase {}; /** This class is just a workaround for Doxygen and it does not not actually exist. */ template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols> struct dense_xpr_base_dispatcher<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > : public ArrayBase {}; } // namespace doxygen /** \class PlainObjectBase * \ingroup Core_Module * \brief %Dense storage base class for matrices and arrays. 
* * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_PLAINOBJECTBASE_PLUGIN. * * \tparam Derived is the derived type, e.g., a Matrix or Array * * \sa \ref TopicClassHierarchy */ template<typename Derived> class PlainObjectBase : public doxygen::dense_xpr_base_dispatcher<Derived> #else template<typename Derived> class PlainObjectBase : public internal::dense_xpr_base<Derived>::type #endif { public: enum { Options = internal::traits<Derived>::Options }; typedef typename internal::dense_xpr_base<Derived>::type Base; typedef typename internal::traits<Derived>::StorageKind StorageKind; typedef typename internal::traits<Derived>::Scalar Scalar; typedef typename internal::packet_traits<Scalar>::type PacketScalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef Derived DenseType; using Base::RowsAtCompileTime; using Base::ColsAtCompileTime; using Base::SizeAtCompileTime; using Base::MaxRowsAtCompileTime; using Base::MaxColsAtCompileTime; using Base::MaxSizeAtCompileTime; using Base::IsVectorAtCompileTime; using Base::Flags; template<typename PlainObjectType, int MapOptions, typename StrideType> friend class Eigen::Map; friend class Eigen::Map<Derived, Unaligned>; typedef Eigen::Map<Derived, Unaligned> MapType; friend class Eigen::Map<const Derived, Unaligned>; typedef const Eigen::Map<const Derived, Unaligned> ConstMapType; #if EIGEN_MAX_ALIGN_BYTES>0 // for EIGEN_MAX_ALIGN_BYTES==0, AlignedMax==Unaligned, and many compilers generate warnings for friend-ing a class twice. friend class Eigen::Map<Derived, AlignedMax>; friend class Eigen::Map<const Derived, AlignedMax>; #endif typedef Eigen::Map<Derived, AlignedMax> AlignedMapType; typedef const Eigen::Map<const Derived, AlignedMax> ConstAlignedMapType; template<typename StrideType> struct StridedMapType { typedef Eigen::Map<Derived, Unaligned, StrideType> type; }; template<typename StrideType> struct StridedConstMapType { typedef Eigen::Map<const Derived, Unaligned, StrideType> type; }; template<typename StrideType> struct StridedAlignedMapType { typedef Eigen::Map<Derived, AlignedMax, StrideType> type; }; template<typename StrideType> struct StridedConstAlignedMapType { typedef Eigen::Map<const Derived, AlignedMax, StrideType> type; }; protected: DenseStorage<Scalar, Base::MaxSizeAtCompileTime, Base::RowsAtCompileTime, Base::ColsAtCompileTime, Options> m_storage; public: enum { NeedsToAlign = (SizeAtCompileTime != Dynamic) && (internal::traits<Derived>::Alignment>0) }; EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) EIGEN_DEVICE_FUNC Base& base() { return *static_cast<Base*>(this); } EIGEN_DEVICE_FUNC const Base& base() const { return *static_cast<const Base*>(this); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const { return m_storage.rows(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const { return m_storage.cols(); } /** This is an overloaded version of DenseCoeffsBase<Derived,ReadOnlyAccessors>::coeff(Index,Index) const * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts. * * See DenseCoeffsBase<Derived,ReadOnlyAccessors>::coeff(Index) const for details. 
*/ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index rowId, Index colId) const { if(Flags & RowMajorBit) return m_storage.data()[colId + rowId * m_storage.cols()]; else // column-major return m_storage.data()[rowId + colId * m_storage.rows()]; } /** This is an overloaded version of DenseCoeffsBase<Derived,ReadOnlyAccessors>::coeff(Index) const * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts. * * See DenseCoeffsBase<Derived,ReadOnlyAccessors>::coeff(Index) const for details. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const { return m_storage.data()[index]; } /** This is an overloaded version of DenseCoeffsBase<Derived,WriteAccessors>::coeffRef(Index,Index) const * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts. * * See DenseCoeffsBase<Derived,WriteAccessors>::coeffRef(Index,Index) const for details. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index rowId, Index colId) { if(Flags & RowMajorBit) return m_storage.data()[colId + rowId * m_storage.cols()]; else // column-major return m_storage.data()[rowId + colId * m_storage.rows()]; } /** This is an overloaded version of DenseCoeffsBase<Derived,WriteAccessors>::coeffRef(Index) const * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts. * * See DenseCoeffsBase<Derived,WriteAccessors>::coeffRef(Index) const for details. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_storage.data()[index]; } /** This is the const version of coeffRef(Index,Index) which is thus synonym of coeff(Index,Index). * It is provided for convenience. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeffRef(Index rowId, Index colId) const { if(Flags & RowMajorBit) return m_storage.data()[colId + rowId * m_storage.cols()]; else // column-major return m_storage.data()[rowId + colId * m_storage.rows()]; } /** This is the const version of coeffRef(Index) which is thus synonym of coeff(Index). * It is provided for convenience. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const { return m_storage.data()[index]; } /** \internal */ template<int LoadMode> EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const { return internal::ploadt<PacketScalar, LoadMode> (m_storage.data() + (Flags & RowMajorBit ? colId + rowId * m_storage.cols() : rowId + colId * m_storage.rows())); } /** \internal */ template<int LoadMode> EIGEN_STRONG_INLINE PacketScalar packet(Index index) const { return internal::ploadt<PacketScalar, LoadMode>(m_storage.data() + index); } /** \internal */ template<int StoreMode> EIGEN_STRONG_INLINE void writePacket(Index rowId, Index colId, const PacketScalar& val) { internal::pstoret<Scalar, PacketScalar, StoreMode> (m_storage.data() + (Flags & RowMajorBit ? 
colId + rowId * m_storage.cols() : rowId + colId * m_storage.rows()), val); } /** \internal */ template<int StoreMode> EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& val) { internal::pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, val); } /** \returns a const pointer to the data array of this matrix */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); } /** \returns a pointer to the data array of this matrix */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); } /** Resizes \c *this to a \a rows x \a cols matrix. * * This method is intended for dynamic-size matrices, although it is legal to call it on any * matrix as long as fixed dimensions are left unchanged. If you only want to change the number * of rows and/or of columns, you can use resize(NoChange_t, Index), resize(Index, NoChange_t). * * If the current number of coefficients of \c *this exactly matches the * product \a rows * \a cols, then no memory allocation is performed and * the current values are left unchanged. In all other cases, including * shrinking, the data is reallocated and all previous values are lost. * * Example: \include Matrix_resize_int_int.cpp * Output: \verbinclude Matrix_resize_int_int.out * * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(Index rows, Index cols) { eigen_assert( EIGEN_IMPLIES(RowsAtCompileTime!=Dynamic,rows==RowsAtCompileTime) && EIGEN_IMPLIES(ColsAtCompileTime!=Dynamic,cols==ColsAtCompileTime) && EIGEN_IMPLIES(RowsAtCompileTime==Dynamic && MaxRowsAtCompileTime!=Dynamic,rows<=MaxRowsAtCompileTime) && EIGEN_IMPLIES(ColsAtCompileTime==Dynamic && MaxColsAtCompileTime!=Dynamic,cols<=MaxColsAtCompileTime) && rows>=0 && cols>=0 && "Invalid sizes when resizing a matrix or array."); internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(rows, cols); #ifdef EIGEN_INITIALIZE_COEFFS Index size = rows*cols; bool size_changed = size != this->size(); m_storage.resize(size, rows, cols); if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED #else m_storage.resize(rows*cols, rows, cols); #endif } /** Resizes \c *this to a vector of length \a size * * \only_for_vectors. This method does not work for * partially dynamic matrices when the static dimension is anything other * than 1. For example it will not work with Matrix<double, 2, Dynamic>. * * Example: \include Matrix_resize_int.cpp * Output: \verbinclude Matrix_resize_int.out * * \sa resize(Index,Index), resize(NoChange_t, Index), resize(Index, NoChange_t) */ EIGEN_DEVICE_FUNC inline void resize(Index size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase) eigen_assert(((SizeAtCompileTime == Dynamic && (MaxSizeAtCompileTime==Dynamic || size<=MaxSizeAtCompileTime)) || SizeAtCompileTime == size) && size>=0); #ifdef EIGEN_INITIALIZE_COEFFS bool size_changed = size != this->size(); #endif if(RowsAtCompileTime == 1) m_storage.resize(size, 1, size); else m_storage.resize(size, size, 1); #ifdef EIGEN_INITIALIZE_COEFFS if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED #endif } /** Resizes the matrix, changing only the number of columns. For the parameter of type NoChange_t, just pass the special value \c NoChange * as in the example below. 
* * Example: \include Matrix_resize_NoChange_int.cpp * Output: \verbinclude Matrix_resize_NoChange_int.out * * \sa resize(Index,Index) */ EIGEN_DEVICE_FUNC inline void resize(NoChange_t, Index cols) { resize(rows(), cols); } /** Resizes the matrix, changing only the number of rows. For the parameter of type NoChange_t, just pass the special value \c NoChange * as in the example below. * * Example: \include Matrix_resize_int_NoChange.cpp * Output: \verbinclude Matrix_resize_int_NoChange.out * * \sa resize(Index,Index) */ EIGEN_DEVICE_FUNC inline void resize(Index rows, NoChange_t) { resize(rows, cols()); } /** Resizes \c *this to have the same dimensions as \a other. * Takes care of doing all the checking that's needed. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resizeLike(const EigenBase<OtherDerived>& _other) { const OtherDerived& other = _other.derived(); internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(other.rows(), other.cols()); const Index othersize = other.rows()*other.cols(); if(RowsAtCompileTime == 1) { eigen_assert(other.rows() == 1 || other.cols() == 1); resize(1, othersize); } else if(ColsAtCompileTime == 1) { eigen_assert(other.rows() == 1 || other.cols() == 1); resize(othersize, 1); } else resize(other.rows(), other.cols()); } /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. * * The method is intended for matrices of dynamic size. If you only want to change the number * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or * conservativeResize(Index, NoChange_t). * * Matrices are resized relative to the top-left element. In case values need to be * appended to the matrix they will be uninitialized. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols) { internal::conservative_resize_like_impl<Derived>::run(*this, rows, cols); } /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. * * As opposed to conservativeResize(Index rows, Index cols), this version leaves * the number of columns unchanged. * * In case the matrix is growing, new rows will be uninitialized. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(Index rows, NoChange_t) { // Note: see the comment in conservativeResize(Index,Index) conservativeResize(rows, cols()); } /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. * * As opposed to conservativeResize(Index rows, Index cols), this version leaves * the number of rows unchanged. * * In case the matrix is growing, new columns will be uninitialized. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index cols) { // Note: see the comment in conservativeResize(Index,Index) conservativeResize(rows(), cols); } /** Resizes the vector to \a size while retaining old values. * * \only_for_vectors. This method does not work for * partially dynamic matrices when the static dimension is anything other * than 1. For example it will not work with Matrix<double, 2, Dynamic>. * * When values are appended, they will be uninitialized. 
*/ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResize(Index size) { internal::conservative_resize_like_impl<Derived>::run(*this, size); } /** Resizes the matrix to \a rows x \a cols of \c other, while leaving old values untouched. * * The method is intended for matrices of dynamic size. If you only want to change the number * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or * conservativeResize(Index, NoChange_t). * * Matrices are resized relative to the top-left element. In case values need to be * appended to the matrix they will copied from \c other. */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void conservativeResizeLike(const DenseBase<OtherDerived>& other) { internal::conservative_resize_like_impl<Derived,OtherDerived>::run(*this, other); } /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const PlainObjectBase& other) { return _set(other); } /** \sa MatrixBase::lazyAssign() */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& lazyAssign(const DenseBase<OtherDerived>& other) { _resize_to_match(other); return Base::lazyAssign(other.derived()); } template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const ReturnByValue<OtherDerived>& func) { resize(func.rows(), func.cols()); return Base::operator=(func); } // Prevent user from trying to instantiate PlainObjectBase objects // by making all its constructor protected. See bug 1074. protected: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase() : m_storage() { // _check_template_params(); // EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #ifndef EIGEN_PARSED_BY_DOXYGEN // FIXME is it still needed ? 
/** \internal */ EIGEN_DEVICE_FUNC explicit PlainObjectBase(internal::constructor_without_unaligned_array_assert) : m_storage(internal::constructor_without_unaligned_array_assert()) { // _check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #endif #if EIGEN_HAS_RVALUE_REFERENCES EIGEN_DEVICE_FUNC PlainObjectBase(PlainObjectBase&& other) EIGEN_NOEXCEPT : m_storage( std::move(other.m_storage) ) { } EIGEN_DEVICE_FUNC PlainObjectBase& operator=(PlainObjectBase&& other) EIGEN_NOEXCEPT { using std::swap; swap(m_storage, other.m_storage); return *this; } #endif /** Copy constructor */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const PlainObjectBase& other) : Base(), m_storage(other.m_storage) { } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(Index size, Index rows, Index cols) : m_storage(size, rows, cols) { // _check_template_params(); // EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } /** \sa PlainObjectBase::operator=(const EigenBase<OtherDerived>&) */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const DenseBase<OtherDerived> &other) : m_storage() { _check_template_params(); resizeLike(other); _set_noalias(other); } /** \sa PlainObjectBase::operator=(const EigenBase<OtherDerived>&) */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const EigenBase<OtherDerived> &other) : m_storage() { _check_template_params(); resizeLike(other); *this = other.derived(); } /** \brief Copy constructor with in-place evaluation */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PlainObjectBase(const ReturnByValue<OtherDerived>& other) { _check_template_params(); // FIXME this does not automatically transpose vectors if necessary resize(other.rows(), other.cols()); other.evalTo(this->derived()); } public: /** \brief Copies the generic expression \a other into *this. * \copydetails DenseBase::operator=(const EigenBase<OtherDerived> &other) */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const EigenBase<OtherDerived> &other) { _resize_to_match(other); Base::operator=(other.derived()); return this->derived(); } /** \name Map * These are convenience functions returning Map objects. The Map() static functions return unaligned Map objects, * while the AlignedMap() functions return aligned Map objects and thus should be called only with 16-byte-aligned * \a data pointers. 
* * Here is an example using strides: * \include Matrix_Map_stride.cpp * Output: \verbinclude Matrix_Map_stride.out * * \see class Map */ //@{ static inline ConstMapType Map(const Scalar* data) { return ConstMapType(data); } static inline MapType Map(Scalar* data) { return MapType(data); } static inline ConstMapType Map(const Scalar* data, Index size) { return ConstMapType(data, size); } static inline MapType Map(Scalar* data, Index size) { return MapType(data, size); } static inline ConstMapType Map(const Scalar* data, Index rows, Index cols) { return ConstMapType(data, rows, cols); } static inline MapType Map(Scalar* data, Index rows, Index cols) { return MapType(data, rows, cols); } static inline ConstAlignedMapType MapAligned(const Scalar* data) { return ConstAlignedMapType(data); } static inline AlignedMapType MapAligned(Scalar* data) { return AlignedMapType(data); } static inline ConstAlignedMapType MapAligned(const Scalar* data, Index size) { return ConstAlignedMapType(data, size); } static inline AlignedMapType MapAligned(Scalar* data, Index size) { return AlignedMapType(data, size); } static inline ConstAlignedMapType MapAligned(const Scalar* data, Index rows, Index cols) { return ConstAlignedMapType(data, rows, cols); } static inline AlignedMapType MapAligned(Scalar* data, Index rows, Index cols) { return AlignedMapType(data, rows, cols); } template<int Outer, int Inner> static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, const Stride<Outer, Inner>& stride) { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, stride); } template<int Outer, int Inner> static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, const Stride<Outer, Inner>& stride) { return typename StridedMapType<Stride<Outer, Inner> >::type(data, stride); } template<int Outer, int Inner> static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, Index size, const Stride<Outer, Inner>& stride) { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, size, stride); } template<int Outer, int Inner> static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, Index size, const Stride<Outer, Inner>& stride) { return typename StridedMapType<Stride<Outer, Inner> >::type(data, size, stride); } template<int Outer, int Inner> static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride) { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); } template<int Outer, int Inner> static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride) { return typename StridedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); } template<int Outer, int Inner> static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, const Stride<Outer, Inner>& stride) { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, stride); } template<int Outer, int Inner> static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, const Stride<Outer, Inner>& stride) { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, stride); } template<int Outer, int Inner> static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type 
MapAligned(const Scalar* data, Index size, const Stride<Outer, Inner>& stride) { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, size, stride); } template<int Outer, int Inner> static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, Index size, const Stride<Outer, Inner>& stride) { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, size, stride); } template<int Outer, int Inner> static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride) { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); } template<int Outer, int Inner> static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride) { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); } //@} using Base::setConstant; EIGEN_DEVICE_FUNC Derived& setConstant(Index size, const Scalar& val); EIGEN_DEVICE_FUNC Derived& setConstant(Index rows, Index cols, const Scalar& val); using Base::setZero; EIGEN_DEVICE_FUNC Derived& setZero(Index size); EIGEN_DEVICE_FUNC Derived& setZero(Index rows, Index cols); using Base::setOnes; EIGEN_DEVICE_FUNC Derived& setOnes(Index size); EIGEN_DEVICE_FUNC Derived& setOnes(Index rows, Index cols); using Base::setRandom; Derived& setRandom(Index size); Derived& setRandom(Index rows, Index cols); #ifdef EIGEN_PLAINOBJECTBASE_PLUGIN #include EIGEN_PLAINOBJECTBASE_PLUGIN #endif protected: /** \internal Resizes *this in preparation for assigning \a other to it. * Takes care of doing all the checking that's needed. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _resize_to_match(const EigenBase<OtherDerived>& other) { #ifdef EIGEN_NO_AUTOMATIC_RESIZING eigen_assert((this->size()==0 || (IsVectorAtCompileTime ? (this->size() == other.size()) : (rows() == other.rows() && cols() == other.cols()))) && "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined"); EIGEN_ONLY_USED_FOR_DEBUG(other); #else resizeLike(other); #endif } /** * \brief Copies the value of the expression \a other into \c *this with automatic resizing. * * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), * it will be initialized. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. * * \sa operator=(const MatrixBase<OtherDerived>&), _set_noalias() * * \internal */ // aliasing is dealt once in internall::call_assignment // so at this stage we have to assume aliasing... and resising has to be done later. template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& _set(const DenseBase<OtherDerived>& other) { internal::call_assignment(this->derived(), other.derived()); return this->derived(); } /** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which * is the case when creating a new matrix) so one can enforce lazy evaluation. 
* * \sa operator=(const MatrixBase<OtherDerived>&), _set() */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& _set_noalias(const DenseBase<OtherDerived>& other) { // I don't think we need this resize call since the lazyAssign will anyways resize // and lazyAssign will be called by the assign selector. //_resize_to_match(other); // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because // it wouldn't allow to copy a row-vector into a column-vector. internal::call_assignment_no_alias(this->derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>()); return this->derived(); } template<typename T0, typename T1> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename internal::enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0) { const bool t0_is_integer_alike = internal::is_valid_index_type<T0>::value; const bool t1_is_integer_alike = internal::is_valid_index_type<T1>::value; EIGEN_STATIC_ASSERT(t0_is_integer_alike && t1_is_integer_alike, FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED) resize(rows,cols); } template<typename T0, typename T1> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init2(const T0& val0, const T1& val1, typename internal::enable_if<Base::SizeAtCompileTime==2,T0>::type* = 0) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2) m_storage.data()[0] = Scalar(val0); m_storage.data()[1] = Scalar(val1); } template<typename T0, typename T1> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init2(const Index& val0, const Index& val1, typename internal::enable_if< (!internal::is_same<Index,Scalar>::value) && (internal::is_same<T0,Index>::value) && (internal::is_same<T1,Index>::value) && Base::SizeAtCompileTime==2,T1>::type* = 0) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2) m_storage.data()[0] = Scalar(val0); m_storage.data()[1] = Scalar(val1); } // The argument is convertible to the Index type and we either have a non 1x1 Matrix, or a dynamic-sized Array, // then the argument is meant to be the size of the object. template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(Index size, typename internal::enable_if< (Base::SizeAtCompileTime!=1 || !internal::is_convertible<T, Scalar>::value) && ((!internal::is_same<typename internal::traits<Derived>::XprKind,ArrayXpr>::value || Base::SizeAtCompileTime==Dynamic)),T>::type* = 0) { // NOTE MSVC 2008 complains if we directly put bool(NumTraits<T>::IsInteger) as the EIGEN_STATIC_ASSERT argument. 
const bool is_integer_alike = internal::is_valid_index_type<T>::value; EIGEN_UNUSED_VARIABLE(is_integer_alike); EIGEN_STATIC_ASSERT(is_integer_alike, FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED) resize(size); } // We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar type can be implicitely converted) template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const Scalar& val0, typename internal::enable_if<Base::SizeAtCompileTime==1 && internal::is_convertible<T, Scalar>::value,T>::type* = 0) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 1) m_storage.data()[0] = val0; } // We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar type match the index type) template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const Index& val0, typename internal::enable_if< (!internal::is_same<Index,Scalar>::value) && (internal::is_same<Index,T>::value) && Base::SizeAtCompileTime==1 && internal::is_convertible<T, Scalar>::value,T*>::type* = 0) { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 1) m_storage.data()[0] = Scalar(val0); } // Initialize a fixed size matrix from a pointer to raw data template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const Scalar* data){ this->_set_noalias(ConstMapType(data)); } // Initialize an arbitrary matrix from a dense expression template<typename T, typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const DenseBase<OtherDerived>& other){ this->_set_noalias(other); } // Initialize an arbitrary matrix from an object convertible to the Derived type. template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const Derived& other){ this->_set_noalias(other); } // Initialize an arbitrary matrix from a generic Eigen expression template<typename T, typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const EigenBase<OtherDerived>& other){ this->derived() = other; } template<typename T, typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const ReturnByValue<OtherDerived>& other) { resize(other.rows(), other.cols()); other.evalTo(this->derived()); } template<typename T, typename OtherDerived, int ColsAtCompileTime> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const RotationBase<OtherDerived,ColsAtCompileTime>& r) { this->derived() = r; } // For fixed-size Array<Scalar,...> template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const Scalar& val0, typename internal::enable_if< Base::SizeAtCompileTime!=Dynamic && Base::SizeAtCompileTime!=1 && internal::is_convertible<T, Scalar>::value && internal::is_same<typename internal::traits<Derived>::XprKind,ArrayXpr>::value,T>::type* = 0) { Base::setConstant(val0); } // For fixed-size Array<Index,...> template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _init1(const Index& val0, typename internal::enable_if< (!internal::is_same<Index,Scalar>::value) && (internal::is_same<Index,T>::value) && Base::SizeAtCompileTime!=Dynamic && Base::SizeAtCompileTime!=1 && internal::is_convertible<T, Scalar>::value && internal::is_same<typename internal::traits<Derived>::XprKind,ArrayXpr>::value,T*>::type* = 0) { Base::setConstant(val0); } template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers> friend struct internal::matrix_swap_impl; public: #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal * \brief Override DenseBase::swap() since for 
dynamic-sized matrices * of same type it is enough to swap the data pointers. */ template<typename OtherDerived> EIGEN_DEVICE_FUNC void swap(DenseBase<OtherDerived> & other) { enum { SwapPointers = internal::is_same<Derived, OtherDerived>::value && Base::SizeAtCompileTime==Dynamic }; internal::matrix_swap_impl<Derived, OtherDerived, bool(SwapPointers)>::run(this->derived(), other.derived()); } /** \internal * \brief const version forwarded to DenseBase::swap */ template<typename OtherDerived> EIGEN_DEVICE_FUNC void swap(DenseBase<OtherDerived> const & other) { Base::swap(other.derived()); } EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void _check_template_params() { EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, (Options&RowMajor)==RowMajor) && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, (Options&RowMajor)==0) && ((RowsAtCompileTime == Dynamic) || (RowsAtCompileTime >= 0)) && ((ColsAtCompileTime == Dynamic) || (ColsAtCompileTime >= 0)) && ((MaxRowsAtCompileTime == Dynamic) || (MaxRowsAtCompileTime >= 0)) && ((MaxColsAtCompileTime == Dynamic) || (MaxColsAtCompileTime >= 0)) && (MaxRowsAtCompileTime == RowsAtCompileTime || RowsAtCompileTime==Dynamic) && (MaxColsAtCompileTime == ColsAtCompileTime || ColsAtCompileTime==Dynamic) && (Options & (DontAlign|RowMajor)) == Options), INVALID_MATRIX_TEMPLATE_PARAMETERS) } enum { IsPlainObjectBase = 1 }; #endif }; namespace internal { template <typename Derived, typename OtherDerived, bool IsVector> struct conservative_resize_like_impl { static void run(DenseBase<Derived>& _this, Index rows, Index cols) { if (_this.rows() == rows && _this.cols() == cols) return; EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived) if ( ( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows (!Derived::IsRowMajor && _this.rows() == rows) ) // column-major and we change only the number of columns { internal::check_rows_cols_for_overflow<Derived::MaxSizeAtCompileTime>::run(rows, cols); _this.derived().m_storage.conservativeResize(rows*cols,rows,cols); } else { // The storage order does not allow us to use reallocation. typename Derived::PlainObject tmp(rows,cols); const Index common_rows = numext::mini(rows, _this.rows()); const Index common_cols = numext::mini(cols, _this.cols()); tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols); _this.derived().swap(tmp); } } static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other) { if (_this.rows() == other.rows() && _this.cols() == other.cols()) return; // Note: Here is space for improvement. Basically, for conservativeResize(Index,Index), // neither RowsAtCompileTime or ColsAtCompileTime must be Dynamic. If only one of the // dimensions is dynamic, one could use either conservativeResize(Index rows, NoChange_t) or // conservativeResize(NoChange_t, Index cols). For these methods new static asserts like // EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be good. 
EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived) EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived) if ( ( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows (!Derived::IsRowMajor && _this.rows() == other.rows()) ) // column-major and we change only the number of columns { const Index new_rows = other.rows() - _this.rows(); const Index new_cols = other.cols() - _this.cols(); _this.derived().m_storage.conservativeResize(other.size(),other.rows(),other.cols()); if (new_rows>0) _this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows); else if (new_cols>0) _this.bottomRightCorner(other.rows(), new_cols) = other.rightCols(new_cols); } else { // The storage order does not allow us to use reallocation. typename Derived::PlainObject tmp(other); const Index common_rows = numext::mini(tmp.rows(), _this.rows()); const Index common_cols = numext::mini(tmp.cols(), _this.cols()); tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols); _this.derived().swap(tmp); } } }; // Here, the specialization for vectors inherits from the general matrix case // to allow calling .conservativeResize(rows,cols) on vectors. template <typename Derived, typename OtherDerived> struct conservative_resize_like_impl<Derived,OtherDerived,true> : conservative_resize_like_impl<Derived,OtherDerived,false> { using conservative_resize_like_impl<Derived,OtherDerived,false>::run; static void run(DenseBase<Derived>& _this, Index size) { const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size; const Index new_cols = Derived::RowsAtCompileTime==1 ? size : 1; _this.derived().m_storage.conservativeResize(size,new_rows,new_cols); } static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other) { if (_this.rows() == other.rows() && _this.cols() == other.cols()) return; const Index num_new_elements = other.size() - _this.size(); const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows(); const Index new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1; _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols); if (num_new_elements > 0) _this.tail(num_new_elements) = other.tail(num_new_elements); } }; template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers> struct matrix_swap_impl { EIGEN_DEVICE_FUNC static inline void run(MatrixTypeA& a, MatrixTypeB& b) { a.base().swap(b); } }; template<typename MatrixTypeA, typename MatrixTypeB> struct matrix_swap_impl<MatrixTypeA, MatrixTypeB, true> { EIGEN_DEVICE_FUNC static inline void run(MatrixTypeA& a, MatrixTypeB& b) { static_cast<typename MatrixTypeA::Base&>(a).m_storage.swap(static_cast<typename MatrixTypeB::Base&>(b).m_storage); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_DENSESTORAGEBASE_H
Unknown
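The conservative_resize_like_impl machinery in the header above keeps whatever coefficients fit in the new shape and leaves newly created ones uninitialized. A minimal user-level sketch of that behaviour, assuming Eigen 3.3 and a dynamic-size matrix:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd m(2, 2);
  m << 1, 2,
       3, 4;

  // The overlapping top-left 2x2 block is preserved; the coefficients of
  // the new third row and column are uninitialized until set explicitly.
  m.conservativeResize(3, 3);
  m.row(2).setZero();
  m.col(2).setZero();

  std::cout << m << std::endl;
  return 0;
}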
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/MathFunctionsImpl.h
.h
3,369
102
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com) // Copyright (C) 2016 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATHFUNCTIONSIMPL_H #define EIGEN_MATHFUNCTIONSIMPL_H namespace Eigen { namespace internal { /** \internal \returns the hyperbolic tan of \a a (coeff-wise) Doesn't do anything fancy, just a 13/6-degree rational interpolant which is accurate up to a couple of ulp in the range [-9, 9], outside of which the tanh(x) = +/-1. This implementation works on both scalars and packets. */ template<typename T> T generic_fast_tanh_float(const T& a_x) { // Clamp the inputs to the range [-9, 9] since anything outside // this range is +/-1.0f in single-precision. const T plus_9 = pset1<T>(9.f); const T minus_9 = pset1<T>(-9.f); // NOTE GCC prior to 6.3 might improperly optimize this max/min // step such that if a_x is nan, x will be either 9 or -9, // and tanh will return 1 or -1 instead of nan. // This is supposed to be fixed in gcc6.3, // see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867 const T x = pmax(minus_9,pmin(plus_9,a_x)); // The monomial coefficients of the numerator polynomial (odd). const T alpha_1 = pset1<T>(4.89352455891786e-03f); const T alpha_3 = pset1<T>(6.37261928875436e-04f); const T alpha_5 = pset1<T>(1.48572235717979e-05f); const T alpha_7 = pset1<T>(5.12229709037114e-08f); const T alpha_9 = pset1<T>(-8.60467152213735e-11f); const T alpha_11 = pset1<T>(2.00018790482477e-13f); const T alpha_13 = pset1<T>(-2.76076847742355e-16f); // The monomial coefficients of the denominator polynomial (even). const T beta_0 = pset1<T>(4.89352518554385e-03f); const T beta_2 = pset1<T>(2.26843463243900e-03f); const T beta_4 = pset1<T>(1.18534705686654e-04f); const T beta_6 = pset1<T>(1.19825839466702e-06f); // Since the polynomials are odd/even, we need x^2. const T x2 = pmul(x, x); // Evaluate the numerator polynomial p. T p = pmadd(x2, alpha_13, alpha_11); p = pmadd(x2, p, alpha_9); p = pmadd(x2, p, alpha_7); p = pmadd(x2, p, alpha_5); p = pmadd(x2, p, alpha_3); p = pmadd(x2, p, alpha_1); p = pmul(x, p); // Evaluate the denominator polynomial p. T q = pmadd(x2, beta_6, beta_4); q = pmadd(x2, q, beta_2); q = pmadd(x2, q, beta_0); // Divide the numerator by the denominator. return pdiv(p, q); } template<typename RealScalar> EIGEN_STRONG_INLINE RealScalar positive_real_hypot(const RealScalar& x, const RealScalar& y) { EIGEN_USING_STD_MATH(sqrt); RealScalar p, qp; p = numext::maxi(x,y); if(p==RealScalar(0)) return RealScalar(0); qp = numext::mini(y,x) / p; return p * sqrt(RealScalar(1) + qp*qp); } template<typename Scalar> struct hypot_impl { typedef typename NumTraits<Scalar>::Real RealScalar; static inline RealScalar run(const Scalar& x, const Scalar& y) { EIGEN_USING_STD_MATH(abs); return positive_real_hypot<RealScalar>(abs(x), abs(y)); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_MATHFUNCTIONSIMPL_H
Unknown
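The positive_real_hypot() helper above avoids overflow by factoring the larger magnitude out of the square root. A standalone sketch of the same scaling trick (the function name scaled_hypot is illustrative, not part of Eigen):

#include <algorithm>
#include <cassert>
#include <cmath>

// hypot(x, y) = p * sqrt(1 + q^2) with p = max(x, y) and q = min(x, y) / p,
// so the squared intermediate q*q never exceeds 1 and cannot overflow.
double scaled_hypot(double x, double y) {
  const double p = std::max(x, y);
  if (p == 0.0) return 0.0;
  const double q = std::min(x, y) / p;
  return p * std::sqrt(1.0 + q * q);
}

int main() {
  const double big = 1e200;
  assert(std::isinf(big * big));                   // the naive intermediate x*x overflows
  assert(std::isfinite(scaled_hypot(big, big)));   // ~1.414e200, still finite
  return 0;
}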
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Map.h
.h
7,239
172
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MAP_H #define EIGEN_MAP_H namespace Eigen { namespace internal { template<typename PlainObjectType, int MapOptions, typename StrideType> struct traits<Map<PlainObjectType, MapOptions, StrideType> > : public traits<PlainObjectType> { typedef traits<PlainObjectType> TraitsBase; enum { PlainObjectTypeInnerSize = ((traits<PlainObjectType>::Flags&RowMajorBit)==RowMajorBit) ? PlainObjectType::ColsAtCompileTime : PlainObjectType::RowsAtCompileTime, InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0 ? int(PlainObjectType::InnerStrideAtCompileTime) : int(StrideType::InnerStrideAtCompileTime), OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0 ? (InnerStrideAtCompileTime==Dynamic || PlainObjectTypeInnerSize==Dynamic ? Dynamic : int(InnerStrideAtCompileTime) * int(PlainObjectTypeInnerSize)) : int(StrideType::OuterStrideAtCompileTime), Alignment = int(MapOptions)&int(AlignedMask), Flags0 = TraitsBase::Flags & (~NestByRefBit), Flags = is_lvalue<PlainObjectType>::value ? int(Flags0) : (int(Flags0) & ~LvalueBit) }; private: enum { Options }; // Expressions don't have Options }; } /** \class Map * \ingroup Core_Module * * \brief A matrix or vector expression mapping an existing array of data. * * \tparam PlainObjectType the equivalent matrix type of the mapped data * \tparam MapOptions specifies the pointer alignment in bytes. It can be: \c #Aligned128, , \c #Aligned64, \c #Aligned32, \c #Aligned16, \c #Aligned8 or \c #Unaligned. * The default is \c #Unaligned. * \tparam StrideType optionally specifies strides. By default, Map assumes the memory layout * of an ordinary, contiguous array. This can be overridden by specifying strides. * The type passed here must be a specialization of the Stride template, see examples below. * * This class represents a matrix or vector expression mapping an existing array of data. * It can be used to let Eigen interface without any overhead with non-Eigen data structures, * such as plain C arrays or structures from other libraries. By default, it assumes that the * data is laid out contiguously in memory. You can however override this by explicitly specifying * inner and outer strides. * * Here's an example of simply mapping a contiguous array as a \ref TopicStorageOrders "column-major" matrix: * \include Map_simple.cpp * Output: \verbinclude Map_simple.out * * If you need to map non-contiguous arrays, you can do so by specifying strides: * * Here's an example of mapping an array as a vector, specifying an inner stride, that is, the pointer * increment between two consecutive coefficients. Here, we're specifying the inner stride as a compile-time * fixed value. * \include Map_inner_stride.cpp * Output: \verbinclude Map_inner_stride.out * * Here's an example of mapping an array while specifying an outer stride. Here, since we're mapping * as a column-major matrix, 'outer stride' means the pointer increment between two consecutive columns. * Here, we're specifying the outer stride as a runtime parameter. 
Note that here \c OuterStride<> is * a short version of \c OuterStride<Dynamic> because the default template parameter of OuterStride * is \c Dynamic * \include Map_outer_stride.cpp * Output: \verbinclude Map_outer_stride.out * * For more details and for an example of specifying both an inner and an outer stride, see class Stride. * * \b Tip: to change the array of data mapped by a Map object, you can use the C++ * placement new syntax: * * Example: \include Map_placement_new.cpp * Output: \verbinclude Map_placement_new.out * * This class is the return type of PlainObjectBase::Map() but can also be used directly. * * \sa PlainObjectBase::Map(), \ref TopicStorageOrders */ template<typename PlainObjectType, int MapOptions, typename StrideType> class Map : public MapBase<Map<PlainObjectType, MapOptions, StrideType> > { public: typedef MapBase<Map> Base; EIGEN_DENSE_PUBLIC_INTERFACE(Map) typedef typename Base::PointerType PointerType; typedef PointerType PointerArgType; EIGEN_DEVICE_FUNC inline PointerType cast_to_pointer_type(PointerArgType ptr) { return ptr; } EIGEN_DEVICE_FUNC inline Index innerStride() const { return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1; } EIGEN_DEVICE_FUNC inline Index outerStride() const { return int(StrideType::OuterStrideAtCompileTime) != 0 ? m_stride.outer() : int(internal::traits<Map>::OuterStrideAtCompileTime) != Dynamic ? Index(internal::traits<Map>::OuterStrideAtCompileTime) : IsVectorAtCompileTime ? (this->size() * innerStride()) : (int(Flags)&RowMajorBit) ? (this->cols() * innerStride()) : (this->rows() * innerStride()); } /** Constructor in the fixed-size case. * * \param dataPtr pointer to the array to map * \param stride optional Stride object, passing the strides. */ EIGEN_DEVICE_FUNC explicit inline Map(PointerArgType dataPtr, const StrideType& stride = StrideType()) : Base(cast_to_pointer_type(dataPtr)), m_stride(stride) { PlainObjectType::Base::_check_template_params(); } /** Constructor in the dynamic-size vector case. * * \param dataPtr pointer to the array to map * \param size the size of the vector expression * \param stride optional Stride object, passing the strides. */ EIGEN_DEVICE_FUNC inline Map(PointerArgType dataPtr, Index size, const StrideType& stride = StrideType()) : Base(cast_to_pointer_type(dataPtr), size), m_stride(stride) { PlainObjectType::Base::_check_template_params(); } /** Constructor in the dynamic-size matrix case. * * \param dataPtr pointer to the array to map * \param rows the number of rows of the matrix expression * \param cols the number of columns of the matrix expression * \param stride optional Stride object, passing the strides. */ EIGEN_DEVICE_FUNC inline Map(PointerArgType dataPtr, Index rows, Index cols, const StrideType& stride = StrideType()) : Base(cast_to_pointer_type(dataPtr), rows, cols), m_stride(stride) { PlainObjectType::Base::_check_template_params(); } EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map) protected: StrideType m_stride; }; } // end namespace Eigen #endif // EIGEN_MAP_H
Unknown
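Since most of the Map documentation above is about strides, here is a small usage sketch (assuming Eigen 3.3): mapping every other element of a raw buffer as a vector, and mapping the same buffer as a 3x3 matrix whose columns start four elements apart:

#include <Eigen/Dense>
#include <iostream>

int main() {
  double data[12];
  for (int i = 0; i < 12; ++i) data[i] = i;

  // Elements 0, 2, 4, ..., 10 viewed as a 6-vector (inner stride of 2).
  Eigen::Map<Eigen::VectorXd, Eigen::Unaligned, Eigen::InnerStride<2> > v(data, 6);

  // Column-major 3x3 view whose columns are 4 doubles apart (outer stride of 4),
  // so one element is skipped between consecutive columns.
  Eigen::Map<Eigen::MatrixXd, Eigen::Unaligned, Eigen::OuterStride<> >
      m(data, 3, 3, Eigen::OuterStride<>(4));

  std::cout << v.transpose() << "\n\n" << m << std::endl;
  return 0;
}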
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Dot.h
.h
11,507
319
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008, 2010 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DOT_H #define EIGEN_DOT_H namespace Eigen { namespace internal { // helper function for dot(). The problem is that if we put that in the body of dot(), then upon calling dot // with mismatched types, the compiler emits errors about failing to instantiate cwiseProduct BEFORE // looking at the static assertions. Thus this is a trick to get better compile errors. template<typename T, typename U, // the NeedToTranspose condition here is taken straight from Assign.h bool NeedToTranspose = T::IsVectorAtCompileTime && U::IsVectorAtCompileTime && ((int(T::RowsAtCompileTime) == 1 && int(U::ColsAtCompileTime) == 1) | // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&". // revert to || as soon as not needed anymore. (int(T::ColsAtCompileTime) == 1 && int(U::RowsAtCompileTime) == 1)) > struct dot_nocheck { typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod; typedef typename conj_prod::result_type ResScalar; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b) { return a.template binaryExpr<conj_prod>(b).sum(); } }; template<typename T, typename U> struct dot_nocheck<T, U, true> { typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod; typedef typename conj_prod::result_type ResScalar; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b) { return a.transpose().template binaryExpr<conj_prod>(b).sum(); } }; } // end namespace internal /** \fn MatrixBase::dot * \returns the dot product of *this with other. * * \only_for_vectors * * \note If the scalar type is complex numbers, then this function returns the hermitian * (sesquilinear) dot product, conjugate-linear in the first variable and linear in the * second variable. * * \sa squaredNorm(), norm() */ template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) #if !(defined(EIGEN_NO_STATIC_ASSERT) && defined(EIGEN_NO_DEBUG)) typedef internal::scalar_conj_product_op<Scalar,typename OtherDerived::Scalar> func; EIGEN_CHECK_BINARY_COMPATIBILIY(func,Scalar,typename OtherDerived::Scalar); #endif eigen_assert(size() == other.size()); return internal::dot_nocheck<Derived,OtherDerived>::run(*this, other); } //---------- implementation of L2 norm and related functions ---------- /** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the Frobenius norm. * In both cases, it consists in the sum of the square of all the matrix entries. * For vectors, this is also equals to the dot product of \c *this with itself. 
* * \sa dot(), norm(), lpNorm() */ template<typename Derived> EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const { return numext::real((*this).cwiseAbs2().sum()); } /** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm. * In both cases, it consists in the square root of the sum of the square of all the matrix entries. * For vectors, this is also equals to the square root of the dot product of \c *this with itself. * * \sa lpNorm(), dot(), squaredNorm() */ template<typename Derived> EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const { return numext::sqrt(squaredNorm()); } /** \returns an expression of the quotient of \c *this by its own norm. * * \warning If the input vector is too small (i.e., this->norm()==0), * then this function returns a copy of the input. * * \only_for_vectors * * \sa norm(), normalize() */ template<typename Derived> EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject MatrixBase<Derived>::normalized() const { typedef typename internal::nested_eval<Derived,2>::type _Nested; _Nested n(derived()); RealScalar z = n.squaredNorm(); // NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU if(z>RealScalar(0)) return n / numext::sqrt(z); else return n; } /** Normalizes the vector, i.e. divides it by its own norm. * * \only_for_vectors * * \warning If the input vector is too small (i.e., this->norm()==0), then \c *this is left unchanged. * * \sa norm(), normalized() */ template<typename Derived> EIGEN_STRONG_INLINE void MatrixBase<Derived>::normalize() { RealScalar z = squaredNorm(); // NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU if(z>RealScalar(0)) derived() /= numext::sqrt(z); } /** \returns an expression of the quotient of \c *this by its own norm while avoiding underflow and overflow. * * \only_for_vectors * * This method is analogue to the normalized() method, but it reduces the risk of * underflow and overflow when computing the norm. * * \warning If the input vector is too small (i.e., this->norm()==0), * then this function returns a copy of the input. * * \sa stableNorm(), stableNormalize(), normalized() */ template<typename Derived> EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::PlainObject MatrixBase<Derived>::stableNormalized() const { typedef typename internal::nested_eval<Derived,3>::type _Nested; _Nested n(derived()); RealScalar w = n.cwiseAbs().maxCoeff(); RealScalar z = (n/w).squaredNorm(); if(z>RealScalar(0)) return n / (numext::sqrt(z)*w); else return n; } /** Normalizes the vector while avoid underflow and overflow * * \only_for_vectors * * This method is analogue to the normalize() method, but it reduces the risk of * underflow and overflow when computing the norm. * * \warning If the input vector is too small (i.e., this->norm()==0), then \c *this is left unchanged. 
* * \sa stableNorm(), stableNormalized(), normalize() */ template<typename Derived> EIGEN_STRONG_INLINE void MatrixBase<Derived>::stableNormalize() { RealScalar w = cwiseAbs().maxCoeff(); RealScalar z = (derived()/w).squaredNorm(); if(z>RealScalar(0)) derived() /= numext::sqrt(z)*w; } //---------- implementation of other norms ---------- namespace internal { template<typename Derived, int p> struct lpNorm_selector { typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const MatrixBase<Derived>& m) { EIGEN_USING_STD_MATH(pow) return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p); } }; template<typename Derived> struct lpNorm_selector<Derived, 1> { EIGEN_DEVICE_FUNC static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m) { return m.cwiseAbs().sum(); } }; template<typename Derived> struct lpNorm_selector<Derived, 2> { EIGEN_DEVICE_FUNC static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m) { return m.norm(); } }; template<typename Derived> struct lpNorm_selector<Derived, Infinity> { typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar; EIGEN_DEVICE_FUNC static inline RealScalar run(const MatrixBase<Derived>& m) { if(Derived::SizeAtCompileTime==0 || (Derived::SizeAtCompileTime==Dynamic && m.size()==0)) return RealScalar(0); return m.cwiseAbs().maxCoeff(); } }; } // end namespace internal /** \returns the \b coefficient-wise \f$ \ell^p \f$ norm of \c *this, that is, returns the p-th root of the sum of the p-th powers of the absolute values * of the coefficients of \c *this. If \a p is the special value \a Eigen::Infinity, this function returns the \f$ \ell^\infty \f$ * norm, that is the maximum of the absolute values of the coefficients of \c *this. * * In all cases, if \c *this is empty, then the value 0 is returned. * * \note For matrices, this function does not compute the <a href="https://en.wikipedia.org/wiki/Operator_norm">operator-norm</a>. That is, if \c *this is a matrix, then its coefficients are interpreted as a 1D vector. Nonetheless, you can easily compute the 1-norm and \f$\infty\f$-norm matrix operator norms using \link TutorialReductionsVisitorsBroadcastingReductionsNorm partial reductions \endlink. * * \sa norm() */ template<typename Derived> template<int p> #ifndef EIGEN_PARSED_BY_DOXYGEN inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real #else MatrixBase<Derived>::RealScalar #endif MatrixBase<Derived>::lpNorm() const { return internal::lpNorm_selector<Derived, p>::run(*this); } //---------- implementation of isOrthogonal / isUnitary ---------- /** \returns true if *this is approximately orthogonal to \a other, * within the precision given by \a prec. * * Example: \include MatrixBase_isOrthogonal.cpp * Output: \verbinclude MatrixBase_isOrthogonal.out */ template<typename Derived> template<typename OtherDerived> bool MatrixBase<Derived>::isOrthogonal (const MatrixBase<OtherDerived>& other, const RealScalar& prec) const { typename internal::nested_eval<Derived,2>::type nested(derived()); typename internal::nested_eval<OtherDerived,2>::type otherNested(other.derived()); return numext::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm(); } /** \returns true if *this is approximately an unitary matrix, * within the precision given by \a prec. 
In the case where the \a Scalar * type is real numbers, a unitary matrix is an orthogonal matrix, whence the name. * * \note This can be used to check whether a family of vectors forms an orthonormal basis. * Indeed, \c m.isUnitary() returns true if and only if the columns (equivalently, the rows) of m form an * orthonormal basis. * * Example: \include MatrixBase_isUnitary.cpp * Output: \verbinclude MatrixBase_isUnitary.out */ template<typename Derived> bool MatrixBase<Derived>::isUnitary(const RealScalar& prec) const { typename internal::nested_eval<Derived,1>::type self(derived()); for(Index i = 0; i < cols(); ++i) { if(!internal::isApprox(self.col(i).squaredNorm(), static_cast<RealScalar>(1), prec)) return false; for(Index j = 0; j < i; ++j) if(!internal::isMuchSmallerThan(self.col(i).dot(self.col(j)), static_cast<Scalar>(1), prec)) return false; } return true; } } // end namespace Eigen #endif // EIGEN_DOT_H
Unknown
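A usage sketch of the dot/norm family implemented above (assuming Eigen 3.3); note in particular that, as documented, dot() is conjugate-linear in its first argument for complex vectors:

#include <Eigen/Dense>
#include <complex>
#include <iostream>

int main() {
  Eigen::Vector3d a(1.0, 2.0, 2.0);
  std::cout << a.squaredNorm() << "\n";             // 9
  std::cout << a.norm() << "\n";                    // 3
  std::cout << a.normalized().transpose() << "\n";  // a / 3; a itself is unchanged
  a.normalize();                                    // in-place: a now has unit norm

  // For complex vectors, x.dot(y) = sum_i conj(x_i) * y_i.
  Eigen::Vector2cd x(std::complex<double>(0, 1), std::complex<double>(1, 0));
  Eigen::Vector2cd y(std::complex<double>(1, 0), std::complex<double>(0, 1));
  std::cout << x.dot(y) << "\n";   // conj(i)*1 + conj(1)*i = 0
  return 0;
}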
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/DiagonalProduct.h
.h
970
29
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DIAGONALPRODUCT_H #define EIGEN_DIAGONALPRODUCT_H namespace Eigen { /** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal. */ template<typename Derived> template<typename DiagonalDerived> inline const Product<Derived, DiagonalDerived, LazyProduct> MatrixBase<Derived>::operator*(const DiagonalBase<DiagonalDerived> &a_diagonal) const { return Product<Derived, DiagonalDerived, LazyProduct>(derived(),a_diagonal.derived()); } } // end namespace Eigen #endif // EIGEN_DIAGONALPRODUCT_H
Unknown
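The operator defined above is what allows writing m * v.asDiagonal(): the product is a lazy expression that scales the columns of m without ever forming a dense diagonal matrix. A short sketch:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d m = Eigen::Matrix3d::Ones();
  Eigen::Vector3d d(1.0, 2.0, 3.0);

  // Column j of the result is column j of m scaled by d(j);
  // d.asDiagonal() * m would scale the rows instead.
  Eigen::Matrix3d scaled = m * d.asDiagonal();
  std::cout << scaled << std::endl;
  return 0;
}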
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/CwiseUnaryOp.h
.h
3,877
104
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CWISE_UNARY_OP_H #define EIGEN_CWISE_UNARY_OP_H namespace Eigen { namespace internal { template<typename UnaryOp, typename XprType> struct traits<CwiseUnaryOp<UnaryOp, XprType> > : traits<XprType> { typedef typename result_of< UnaryOp(const typename XprType::Scalar&) >::type Scalar; typedef typename XprType::Nested XprTypeNested; typedef typename remove_reference<XprTypeNested>::type _XprTypeNested; enum { Flags = _XprTypeNested::Flags & RowMajorBit }; }; } template<typename UnaryOp, typename XprType, typename StorageKind> class CwiseUnaryOpImpl; /** \class CwiseUnaryOp * \ingroup Core_Module * * \brief Generic expression where a coefficient-wise unary operator is applied to an expression * * \tparam UnaryOp template functor implementing the operator * \tparam XprType the type of the expression to which we are applying the unary operator * * This class represents an expression where a unary operator is applied to an expression. * It is the return type of all operations taking exactly 1 input expression, regardless of the * presence of other inputs such as scalars. For example, the operator* in the expression 3*matrix * is considered unary, because only the right-hand side is an expression, and its * return type is a specialization of CwiseUnaryOp. * * Most of the time, this is the only way that it is used, so you typically don't have to name * CwiseUnaryOp types explicitly. 
* * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp */ template<typename UnaryOp, typename XprType> class CwiseUnaryOp : public CwiseUnaryOpImpl<UnaryOp, XprType, typename internal::traits<XprType>::StorageKind>, internal::no_assignment_operator { public: typedef typename CwiseUnaryOpImpl<UnaryOp, XprType,typename internal::traits<XprType>::StorageKind>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp) typedef typename internal::ref_selector<XprType>::type XprTypeNested; typedef typename internal::remove_all<XprType>::type NestedExpression; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp()) : m_xpr(xpr), m_functor(func) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const { return m_xpr.rows(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const { return m_xpr.cols(); } /** \returns the functor representing the unary operation */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const UnaryOp& functor() const { return m_functor; } /** \returns the nested expression */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename internal::remove_all<XprTypeNested>::type& nestedExpression() const { return m_xpr; } /** \returns the nested expression */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::remove_all<XprTypeNested>::type& nestedExpression() { return m_xpr; } protected: XprTypeNested m_xpr; const UnaryOp m_functor; }; // Generic API dispatcher template<typename UnaryOp, typename XprType, typename StorageKind> class CwiseUnaryOpImpl : public internal::generic_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type { public: typedef typename internal::generic_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type Base; }; } // end namespace Eigen #endif // EIGEN_CWISE_UNARY_OP_H
Unknown
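A usage sketch for the expression class above: unaryExpr() and the scalar-times-expression operators return CwiseUnaryOp objects that are only evaluated on assignment. The ClampToUnit functor below is purely illustrative:

#include <Eigen/Dense>
#include <iostream>

// Hypothetical functor applied coefficient-wise through unaryExpr().
struct ClampToUnit {
  double operator()(double x) const { return x < -1.0 ? -1.0 : (x > 1.0 ? 1.0 : x); }
};

int main() {
  Eigen::Vector4d v(-3.0, -0.5, 0.25, 7.0);

  // Both right-hand sides are CwiseUnaryOp expressions; the work happens
  // only when they are assigned to a plain vector.
  Eigen::Vector4d clamped = v.unaryExpr(ClampToUnit());
  Eigen::Vector4d scaled  = 3.0 * v;

  std::cout << clamped.transpose() << "\n" << scaled.transpose() << std::endl;
  return 0;
}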
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/SelfCwiseBinaryOp.h
.h
1,697
48
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SELFCWISEBINARYOP_H #define EIGEN_SELFCWISEBINARYOP_H namespace Eigen { // TODO generalize the scalar type of 'other' template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator*=(const Scalar& other) { internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::mul_assign_op<Scalar,Scalar>()); return derived(); } template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator+=(const Scalar& other) { internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::add_assign_op<Scalar,Scalar>()); return derived(); } template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator-=(const Scalar& other) { internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::sub_assign_op<Scalar,Scalar>()); return derived(); } template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator/=(const Scalar& other) { internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::div_assign_op<Scalar,Scalar>()); return derived(); } } // end namespace Eigen #endif // EIGEN_SELFCWISEBINARYOP_H
Unknown
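As the operators above show, in-place *= and /= with a scalar live in DenseBase (matrices and arrays alike), while scalar += and -= exist only in ArrayBase. A short sketch, assuming Eigen 3.3:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d m = Eigen::Matrix2d::Ones();
  m *= 3.0;            // DenseBase::operator*=(const Scalar&)
  m /= 2.0;            // DenseBase::operator/=(const Scalar&)
  // m += 1.0;         // does not compile: no scalar += for matrices
  m.array() += 1.0;    // the array view provides the scalar increment

  Eigen::Array22d a = Eigen::Array22d::Zero();
  a += 1.0;            // ArrayBase::operator+=(const Scalar&)
  a -= 0.25;           // ArrayBase::operator-=(const Scalar&)

  std::cout << m << "\n\n" << a << std::endl;
  return 0;
}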
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/ReturnByValue.h
.h
4,200
118
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_RETURNBYVALUE_H #define EIGEN_RETURNBYVALUE_H namespace Eigen { namespace internal { template<typename Derived> struct traits<ReturnByValue<Derived> > : public traits<typename traits<Derived>::ReturnType> { enum { // We're disabling the DirectAccess because e.g. the constructor of // the Block-with-DirectAccess expression requires to have a coeffRef method. // Also, we don't want to have to implement the stride stuff. Flags = (traits<typename traits<Derived>::ReturnType>::Flags | EvalBeforeNestingBit) & ~DirectAccessBit }; }; /* The ReturnByValue object doesn't even have a coeff() method. * So the only way that nesting it in an expression can work, is by evaluating it into a plain matrix. * So internal::nested always gives the plain return matrix type. * * FIXME: I don't understand why we need this specialization: isn't this taken care of by the EvalBeforeNestingBit ?? * Answer: EvalBeforeNestingBit should be deprecated since we have the evaluators */ template<typename Derived,int n,typename PlainObject> struct nested_eval<ReturnByValue<Derived>, n, PlainObject> { typedef typename traits<Derived>::ReturnType type; }; } // end namespace internal /** \class ReturnByValue * \ingroup Core_Module * */ template<typename Derived> class ReturnByValue : public internal::dense_xpr_base< ReturnByValue<Derived> >::type, internal::no_assignment_operator { public: typedef typename internal::traits<Derived>::ReturnType ReturnType; typedef typename internal::dense_xpr_base<ReturnByValue>::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue) template<typename Dest> EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const { static_cast<const Derived*>(this)->evalTo(dst); } EIGEN_DEVICE_FUNC inline Index rows() const { return static_cast<const Derived*>(this)->rows(); } EIGEN_DEVICE_FUNC inline Index cols() const { return static_cast<const Derived*>(this)->cols(); } #ifndef EIGEN_PARSED_BY_DOXYGEN #define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT class Unusable{ Unusable(const Unusable&) {} Unusable& operator=(const Unusable&) {return *this;} }; const Unusable& coeff(Index) const { return *reinterpret_cast<const Unusable*>(this); } const Unusable& coeff(Index,Index) const { return *reinterpret_cast<const Unusable*>(this); } Unusable& coeffRef(Index) { return *reinterpret_cast<Unusable*>(this); } Unusable& coeffRef(Index,Index) { return *reinterpret_cast<Unusable*>(this); } #undef Unusable #endif }; template<typename Derived> template<typename OtherDerived> Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other) { other.evalTo(derived()); return derived(); } namespace internal { // Expression is evaluated in a temporary; default implementation of Assignment is bypassed so that // when a ReturnByValue expression is assigned, the evaluator is not constructed. 
// TODO: Finalize port to new regime; ReturnByValue should not exist in the expression world template<typename Derived> struct evaluator<ReturnByValue<Derived> > : public evaluator<typename internal::traits<Derived>::ReturnType> { typedef ReturnByValue<Derived> XprType; typedef typename internal::traits<Derived>::ReturnType PlainObject; typedef evaluator<PlainObject> Base; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : m_result(xpr.rows(), xpr.cols()) { ::new (static_cast<Base*>(this)) Base(m_result); xpr.evalTo(m_result); } protected: PlainObject m_result; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_RETURNBYVALUE_H
Unknown
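The usual way ReturnByValue is used inside Eigen is: derive from ReturnByValue<Derived>, specialize internal::traits<Derived> to declare the plain ReturnType, and implement evalTo(), rows() and cols(). The sketch below follows that pattern with purely illustrative names (OnesReturnValue, makeOnes); it is an assumption-laden example, not part of Eigen itself:

#include <Eigen/Dense>
#include <iostream>

class OnesReturnValue;

namespace Eigen {
namespace internal {
// Declare the plain type this expression evaluates to.
template<> struct traits<OnesReturnValue> {
  typedef Eigen::VectorXd ReturnType;
};
}
}

// Lightweight expression standing for "a vector of n ones".
class OnesReturnValue : public Eigen::ReturnByValue<OnesReturnValue> {
public:
  explicit OnesReturnValue(Eigen::Index n) : m_n(n) {}
  template<typename Dest> void evalTo(Dest& dst) const { dst.setOnes(m_n); }
  Eigen::Index rows() const { return m_n; }
  Eigen::Index cols() const { return 1; }
private:
  Eigen::Index m_n;
};

OnesReturnValue makeOnes(Eigen::Index n) { return OnesReturnValue(n); }

int main() {
  Eigen::VectorXd v;
  v = makeOnes(4);   // DenseBase::operator=(const ReturnByValue&) calls evalTo(v)
  std::cout << v.transpose() << std::endl;
  return 0;
}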
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/GenericPacketMath.h
.h
22,069
591
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GENERIC_PACKET_MATH_H #define EIGEN_GENERIC_PACKET_MATH_H namespace Eigen { namespace internal { /** \internal * \file GenericPacketMath.h * * Default implementation for types not supported by the vectorization. * In practice these functions are provided to make easier the writing * of generic vectorized code. */ #ifndef EIGEN_DEBUG_ALIGNED_LOAD #define EIGEN_DEBUG_ALIGNED_LOAD #endif #ifndef EIGEN_DEBUG_UNALIGNED_LOAD #define EIGEN_DEBUG_UNALIGNED_LOAD #endif #ifndef EIGEN_DEBUG_ALIGNED_STORE #define EIGEN_DEBUG_ALIGNED_STORE #endif #ifndef EIGEN_DEBUG_UNALIGNED_STORE #define EIGEN_DEBUG_UNALIGNED_STORE #endif struct default_packet_traits { enum { HasHalfPacket = 0, HasAdd = 1, HasSub = 1, HasMul = 1, HasNegate = 1, HasAbs = 1, HasArg = 0, HasAbs2 = 1, HasMin = 1, HasMax = 1, HasConj = 1, HasSetLinear = 1, HasBlend = 0, HasDiv = 0, HasSqrt = 0, HasRsqrt = 0, HasExp = 0, HasLog = 0, HasLog1p = 0, HasLog10 = 0, HasPow = 0, HasSin = 0, HasCos = 0, HasTan = 0, HasASin = 0, HasACos = 0, HasATan = 0, HasSinh = 0, HasCosh = 0, HasTanh = 0, HasLGamma = 0, HasDiGamma = 0, HasZeta = 0, HasPolygamma = 0, HasErf = 0, HasErfc = 0, HasIGamma = 0, HasIGammac = 0, HasBetaInc = 0, HasRound = 0, HasFloor = 0, HasCeil = 0, HasSign = 0 }; }; template<typename T> struct packet_traits : default_packet_traits { typedef T type; typedef T half; enum { Vectorizable = 0, size = 1, AlignedOnScalar = 0, HasHalfPacket = 0 }; enum { HasAdd = 0, HasSub = 0, HasMul = 0, HasNegate = 0, HasAbs = 0, HasAbs2 = 0, HasMin = 0, HasMax = 0, HasConj = 0, HasSetLinear = 0 }; }; template<typename T> struct packet_traits<const T> : packet_traits<T> { }; template <typename Src, typename Tgt> struct type_casting_traits { enum { VectorizedCast = 0, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; }; /** \internal \returns static_cast<TgtType>(a) (coeff-wise) */ template <typename SrcPacket, typename TgtPacket> EIGEN_DEVICE_FUNC inline TgtPacket pcast(const SrcPacket& a) { return static_cast<TgtPacket>(a); } template <typename SrcPacket, typename TgtPacket> EIGEN_DEVICE_FUNC inline TgtPacket pcast(const SrcPacket& a, const SrcPacket& /*b*/) { return static_cast<TgtPacket>(a); } template <typename SrcPacket, typename TgtPacket> EIGEN_DEVICE_FUNC inline TgtPacket pcast(const SrcPacket& a, const SrcPacket& /*b*/, const SrcPacket& /*c*/, const SrcPacket& /*d*/) { return static_cast<TgtPacket>(a); } /** \internal \returns a + b (coeff-wise) */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet padd(const Packet& a, const Packet& b) { return a+b; } /** \internal \returns a - b (coeff-wise) */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet psub(const Packet& a, const Packet& b) { return a-b; } /** \internal \returns -a (coeff-wise) */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pnegate(const Packet& a) { return -a; } /** \internal \returns conj(a) (coeff-wise) */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pconj(const Packet& a) { return numext::conj(a); } /** \internal \returns a * b (coeff-wise) */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pmul(const 
Packet& a, const Packet& b) { return a*b; } /** \internal \returns a / b (coeff-wise) */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pdiv(const Packet& a, const Packet& b) { return a/b; } /** \internal \returns the min of \a a and \a b (coeff-wise) */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pmin(const Packet& a, const Packet& b) { return numext::mini(a, b); } /** \internal \returns the max of \a a and \a b (coeff-wise) */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pmax(const Packet& a, const Packet& b) { return numext::maxi(a, b); } /** \internal \returns the absolute value of \a a */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pabs(const Packet& a) { using std::abs; return abs(a); } /** \internal \returns the phase angle of \a a */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet parg(const Packet& a) { using numext::arg; return arg(a); } /** \internal \returns the bitwise and of \a a and \a b */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pand(const Packet& a, const Packet& b) { return a & b; } /** \internal \returns the bitwise or of \a a and \a b */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet por(const Packet& a, const Packet& b) { return a | b; } /** \internal \returns the bitwise xor of \a a and \a b */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pxor(const Packet& a, const Packet& b) { return a ^ b; } /** \internal \returns the bitwise andnot of \a a and \a b */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pandnot(const Packet& a, const Packet& b) { return a & (!b); } /** \internal \returns a packet version of \a *from, from must be 16 bytes aligned */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pload(const typename unpacket_traits<Packet>::type* from) { return *from; } /** \internal \returns a packet version of \a *from, (un-aligned load) */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet ploadu(const typename unpacket_traits<Packet>::type* from) { return *from; } /** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pset1(const typename unpacket_traits<Packet>::type& a) { return a; } /** \internal \returns a packet with constant coefficients \a a[0], e.g.: (a[0],a[0],a[0],a[0]) */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pload1(const typename unpacket_traits<Packet>::type *a) { return pset1<Packet>(*a); } /** \internal \returns a packet with elements of \a *from duplicated. * For instance, for a packet of 8 elements, 4 scalars will be read from \a *from and * duplicated to form: {from[0],from[0],from[1],from[1],from[2],from[2],from[3],from[3]} * Currently, this function is only used for scalar * complex products. */ template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet ploaddup(const typename unpacket_traits<Packet>::type* from) { return *from; } /** \internal \returns a packet with elements of \a *from quadrupled. * For instance, for a packet of 8 elements, 2 scalars will be read from \a *from and * replicated to form: {from[0],from[0],from[0],from[0],from[1],from[1],from[1],from[1]} * Currently, this function is only used in matrix products. 
* For packet-size smaller or equal to 4, this function is equivalent to pload1 */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet ploadquad(const typename unpacket_traits<Packet>::type* from) { return pload1<Packet>(from); } /** \internal equivalent to * \code * a0 = pload1(a+0); * a1 = pload1(a+1); * a2 = pload1(a+2); * a3 = pload1(a+3); * \endcode * \sa pset1, pload1, ploaddup, pbroadcast2 */ template<typename Packet> EIGEN_DEVICE_FUNC inline void pbroadcast4(const typename unpacket_traits<Packet>::type *a, Packet& a0, Packet& a1, Packet& a2, Packet& a3) { a0 = pload1<Packet>(a+0); a1 = pload1<Packet>(a+1); a2 = pload1<Packet>(a+2); a3 = pload1<Packet>(a+3); } /** \internal equivalent to * \code * a0 = pload1(a+0); * a1 = pload1(a+1); * \endcode * \sa pset1, pload1, ploaddup, pbroadcast4 */ template<typename Packet> EIGEN_DEVICE_FUNC inline void pbroadcast2(const typename unpacket_traits<Packet>::type *a, Packet& a0, Packet& a1) { a0 = pload1<Packet>(a+0); a1 = pload1<Packet>(a+1); } /** \internal \brief Returns a packet with coefficients (a,a+1,...,a+packet_size-1). */ template<typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet plset(const typename unpacket_traits<Packet>::type& a) { return a; } /** \internal copy the packet \a from to \a *to, \a to must be 16 bytes aligned */ template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstore(Scalar* to, const Packet& from) { (*to) = from; } /** \internal copy the packet \a from to \a *to, (un-aligned store) */ template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstoreu(Scalar* to, const Packet& from) { (*to) = from; } template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline Packet pgather(const Scalar* from, Index /*stride*/) { return ploadu<Packet>(from); } template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pscatter(Scalar* to, const Packet& from, Index /*stride*/) { pstore(to, from); } /** \internal tries to do cache prefetching of \a addr */ template<typename Scalar> EIGEN_DEVICE_FUNC inline void prefetch(const Scalar* addr) { #ifdef __CUDA_ARCH__ #if defined(__LP64__) // 64-bit pointer operand constraint for inlined asm asm(" prefetch.L1 [ %1 ];" : "=l"(addr) : "l"(addr)); #else // 32-bit pointer operand constraint for inlined asm asm(" prefetch.L1 [ %1 ];" : "=r"(addr) : "r"(addr)); #endif #elif (!EIGEN_COMP_MSVC) && (EIGEN_COMP_GNUC || EIGEN_COMP_CLANG || EIGEN_COMP_ICC) __builtin_prefetch(addr); #endif } /** \internal \returns the first element of a packet */ template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type pfirst(const Packet& a) { return a; } /** \internal \returns a packet where the element i contains the sum of the packet of \a vec[i] */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet preduxp(const Packet* vecs) { return vecs[0]; } /** \internal \returns the sum of the elements of \a a*/ template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux(const Packet& a) { return a; } /** \internal \returns the sum of the elements of \a a by block of 4 elements. * For a packet {a0, a1, a2, a3, a4, a5, a6, a7}, it returns a half packet {a0+a4, a1+a5, a2+a6, a3+a7} * For packet-size smaller or equal to 4, this boils down to a noop. 
*/ template<typename Packet> EIGEN_DEVICE_FUNC inline typename conditional<(unpacket_traits<Packet>::size%8)==0,typename unpacket_traits<Packet>::half,Packet>::type predux_downto4(const Packet& a) { return a; } /** \internal \returns the product of the elements of \a a*/ template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_mul(const Packet& a) { return a; } /** \internal \returns the min of the elements of \a a*/ template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(const Packet& a) { return a; } /** \internal \returns the max of the elements of \a a*/ template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_max(const Packet& a) { return a; } /** \internal \returns the reversed elements of \a a*/ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet preverse(const Packet& a) { return a; } /** \internal \returns \a a with real and imaginary part flipped (for complex type only) */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pcplxflip(const Packet& a) { return Packet(a.imag(),a.real()); } /************************** * Special math functions ***************************/ /** \internal \returns the sine of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psin(const Packet& a) { using std::sin; return sin(a); } /** \internal \returns the cosine of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pcos(const Packet& a) { using std::cos; return cos(a); } /** \internal \returns the tan of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet ptan(const Packet& a) { using std::tan; return tan(a); } /** \internal \returns the arc sine of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pasin(const Packet& a) { using std::asin; return asin(a); } /** \internal \returns the arc cosine of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pacos(const Packet& a) { using std::acos; return acos(a); } /** \internal \returns the arc tangent of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet patan(const Packet& a) { using std::atan; return atan(a); } /** \internal \returns the hyperbolic sine of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psinh(const Packet& a) { using std::sinh; return sinh(a); } /** \internal \returns the hyperbolic cosine of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pcosh(const Packet& a) { using std::cosh; return cosh(a); } /** \internal \returns the hyperbolic tan of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet ptanh(const Packet& a) { using std::tanh; return tanh(a); } /** \internal \returns the exp of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pexp(const Packet& a) { using std::exp; return exp(a); } /** \internal \returns the log of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog(const Packet& a) { using std::log; return log(a); } /** \internal \returns the log1p of \a a (coeff-wise) */ 
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog1p(const Packet& a) { return numext::log1p(a); } /** \internal \returns the log10 of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog10(const Packet& a) { using std::log10; return log10(a); } /** \internal \returns the square-root of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psqrt(const Packet& a) { using std::sqrt; return sqrt(a); } /** \internal \returns the reciprocal square-root of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet prsqrt(const Packet& a) { return pdiv(pset1<Packet>(1), psqrt(a)); } /** \internal \returns the rounded value of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pround(const Packet& a) { using numext::round; return round(a); } /** \internal \returns the floor of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pfloor(const Packet& a) { using numext::floor; return floor(a); } /** \internal \returns the ceil of \a a (coeff-wise) */ template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pceil(const Packet& a) { using numext::ceil; return ceil(a); } /*************************************************************************** * The following functions might not have to be overwritten for vectorized types ***************************************************************************/ /** \internal copy a packet with constant coeficient \a a (e.g., [a,a,a,a]) to \a *to. \a to must be 16 bytes aligned */ // NOTE: this function must really be templated on the packet type (think about different packet types for the same scalar type) template<typename Packet> inline void pstore1(typename unpacket_traits<Packet>::type* to, const typename unpacket_traits<Packet>::type& a) { pstore(to, pset1<Packet>(a)); } /** \internal \returns a * b + c (coeff-wise) */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pmadd(const Packet& a, const Packet& b, const Packet& c) { return padd(pmul(a, b),c); } /** \internal \returns a packet version of \a *from. * The pointer \a from must be aligned on a \a Alignment bytes boundary. */ template<typename Packet, int Alignment> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt(const typename unpacket_traits<Packet>::type* from) { if(Alignment >= unpacket_traits<Packet>::alignment) return pload<Packet>(from); else return ploadu<Packet>(from); } /** \internal copy the packet \a from to \a *to. * The pointer \a from must be aligned on a \a Alignment bytes boundary. */ template<typename Scalar, typename Packet, int Alignment> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret(Scalar* to, const Packet& from) { if(Alignment >= unpacket_traits<Packet>::alignment) pstore(to, from); else pstoreu(to, from); } /** \internal \returns a packet version of \a *from. * Unlike ploadt, ploadt_ro takes advantage of the read-only memory path on the * hardware if available to speedup the loading of data that won't be modified * by the current computation. 
*/ template<typename Packet, int LoadMode> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt_ro(const typename unpacket_traits<Packet>::type* from) { return ploadt<Packet, LoadMode>(from); } /** \internal default implementation of palign() allowing partial specialization */ template<int Offset,typename PacketType> struct palign_impl { // by default data are aligned, so there is nothing to be done :) static inline void run(PacketType&, const PacketType&) {} }; /** \internal update \a first using the concatenation of the packet_size minus \a Offset last elements * of \a first and \a Offset first elements of \a second. * * This function is currently only used to optimize matrix-vector products on unaligned matrices. * It takes 2 packets that represent a contiguous memory array, and returns a packet starting * at the position \a Offset. For instance, for packets of 4 elements, we have: * Input: * - first = {f0,f1,f2,f3} * - second = {s0,s1,s2,s3} * Output: * - if Offset==0 then {f0,f1,f2,f3} * - if Offset==1 then {f1,f2,f3,s0} * - if Offset==2 then {f2,f3,s0,s1} * - if Offset==3 then {f3,s0,s1,s2} */ template<int Offset,typename PacketType> inline void palign(PacketType& first, const PacketType& second) { palign_impl<Offset,PacketType>::run(first,second); } /*************************************************************************** * Fast complex products (GCC generates a function call which is very slow) ***************************************************************************/ // Eigen+CUDA does not support complexes. #ifndef __CUDACC__ template<> inline std::complex<float> pmul(const std::complex<float>& a, const std::complex<float>& b) { return std::complex<float>(a.real()*b.real() - a.imag()*b.imag(), a.imag()*b.real() + a.real()*b.imag()); } template<> inline std::complex<double> pmul(const std::complex<double>& a, const std::complex<double>& b) { return std::complex<double>(a.real()*b.real() - a.imag()*b.imag(), a.imag()*b.real() + a.real()*b.imag()); } #endif /*************************************************************************** * PacketBlock, that is a collection of N packets where the number of words * in the packet is a multiple of N. ***************************************************************************/ template <typename Packet,int N=unpacket_traits<Packet>::size> struct PacketBlock { Packet packet[N]; }; template<typename Packet> EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet,1>& /*kernel*/) { // Nothing to do in the scalar case, i.e. a 1x1 matrix. } /*************************************************************************** * Selector, i.e. vector of N boolean values used to select (i.e. blend) * words from 2 packets. ***************************************************************************/ template <size_t N> struct Selector { bool select[N]; }; template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pblend(const Selector<unpacket_traits<Packet>::size>& ifPacket, const Packet& thenPacket, const Packet& elsePacket) { return ifPacket.select[0] ? thenPacket : elsePacket; } /** \internal \returns \a a with the first coefficient replaced by the scalar b */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pinsertfirst(const Packet& a, typename unpacket_traits<Packet>::type b) { // Default implementation based on pblend. // It must be specialized for higher performance. Selector<unpacket_traits<Packet>::size> mask; mask.select[0] = true; // This for loop should be optimized away by the compiler.
for(Index i=1; i<unpacket_traits<Packet>::size; ++i) mask.select[i] = false; return pblend(mask, pset1<Packet>(b), a); } /** \internal \returns \a a with the last coefficient replaced by the scalar b */ template<typename Packet> EIGEN_DEVICE_FUNC inline Packet pinsertlast(const Packet& a, typename unpacket_traits<Packet>::type b) { // Default implementation based on pblend. // It must be specialized for higher performance. Selector<unpacket_traits<Packet>::size> mask; // This for loop should be optimized away by the compiler. for(Index i=0; i<unpacket_traits<Packet>::size-1; ++i) mask.select[i] = false; mask.select[unpacket_traits<Packet>::size-1] = true; return pblend(mask, pset1<Packet>(b), a); } } // end namespace internal } // end namespace Eigen #endif // EIGEN_GENERIC_PACKET_MATH_H
Unknown
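Because every primitive in GenericPacketMath.h has a scalar fallback, the generic templates above can be exercised with Packet simply being a scalar type. A minimal sketch, using the Eigen::internal namespace (not public API) purely to illustrate how the fallbacks compose; values in the comments follow directly from the definitions above:

#include <Eigen/Core>
#include <iostream>

int main()
{
  using namespace Eigen::internal;

  // With Packet == float there is no SIMD specialization, so every call below
  // resolves to the generic fallbacks defined in this header.
  float a = pset1<float>(3.0f);   // broadcast: trivially a copy for a scalar "packet"
  float b = pset1<float>(2.0f);

  float s = padd(a, b);           // 3 + 2
  float p = pmul(a, b);           // 3 * 2
  float f = pmadd(a, b, s);       // a*b + s, i.e. padd(pmul(a,b), s)

  // Reductions and element access are identity operations on a one-element packet.
  std::cout << pfirst(p) << " " << predux(f) << " " << predux_max(s) << "\n";
  return 0;
}

With a real SIMD packet type (e.g. an SSE or AVX packet), the same calls resolve to the architecture-specific specializations instead of these fallbacks.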
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/SelfAdjointView.h
.h
14,245
353
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SELFADJOINTMATRIX_H #define EIGEN_SELFADJOINTMATRIX_H namespace Eigen { /** \class SelfAdjointView * \ingroup Core_Module * * * \brief Expression of a selfadjoint matrix from a triangular part of a dense matrix * * \param MatrixType the type of the dense matrix storing the coefficients * \param TriangularPart can be either \c #Lower or \c #Upper * * This class is an expression of a selfadjoint matrix from a triangular part of a matrix * with given dense storage of the coefficients. It is the return type of MatrixBase::selfadjointView() * and most of the time this is the only way that it is used. * * \sa class TriangularBase, MatrixBase::selfadjointView() */ namespace internal { template<typename MatrixType, unsigned int UpLo> struct traits<SelfAdjointView<MatrixType, UpLo> > : traits<MatrixType> { typedef typename ref_selector<MatrixType>::non_const_type MatrixTypeNested; typedef typename remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned; typedef MatrixType ExpressionType; typedef typename MatrixType::PlainObject FullMatrixType; enum { Mode = UpLo | SelfAdjoint, FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0, Flags = MatrixTypeNestedCleaned::Flags & (HereditaryBits|FlagsLvalueBit) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit)) // FIXME these flags should be preserved }; }; } template<typename _MatrixType, unsigned int UpLo> class SelfAdjointView : public TriangularBase<SelfAdjointView<_MatrixType, UpLo> > { public: typedef _MatrixType MatrixType; typedef TriangularBase<SelfAdjointView> Base; typedef typename internal::traits<SelfAdjointView>::MatrixTypeNested MatrixTypeNested; typedef typename internal::traits<SelfAdjointView>::MatrixTypeNestedCleaned MatrixTypeNestedCleaned; typedef MatrixTypeNestedCleaned NestedExpression; /** \brief The type of coefficients in this matrix */ typedef typename internal::traits<SelfAdjointView>::Scalar Scalar; typedef typename MatrixType::StorageIndex StorageIndex; typedef typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type MatrixConjugateReturnType; enum { Mode = internal::traits<SelfAdjointView>::Mode, Flags = internal::traits<SelfAdjointView>::Flags, TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ? 
Upper : 0) }; typedef typename MatrixType::PlainObject PlainObject; EIGEN_DEVICE_FUNC explicit inline SelfAdjointView(MatrixType& matrix) : m_matrix(matrix) { EIGEN_STATIC_ASSERT(UpLo==Lower || UpLo==Upper,SELFADJOINTVIEW_ACCEPTS_UPPER_AND_LOWER_MODE_ONLY); } EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.rows(); } EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.cols(); } EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_matrix.outerStride(); } EIGEN_DEVICE_FUNC inline Index innerStride() const { return m_matrix.innerStride(); } /** \sa MatrixBase::coeff() * \warning the coordinates must fit into the referenced triangular part */ EIGEN_DEVICE_FUNC inline Scalar coeff(Index row, Index col) const { Base::check_coordinates_internal(row, col); return m_matrix.coeff(row, col); } /** \sa MatrixBase::coeffRef() * \warning the coordinates must fit into the referenced triangular part */ EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col) { EIGEN_STATIC_ASSERT_LVALUE(SelfAdjointView); Base::check_coordinates_internal(row, col); return m_matrix.coeffRef(row, col); } /** \internal */ EIGEN_DEVICE_FUNC const MatrixTypeNestedCleaned& _expression() const { return m_matrix; } EIGEN_DEVICE_FUNC const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; } EIGEN_DEVICE_FUNC MatrixTypeNestedCleaned& nestedExpression() { return m_matrix; } /** Efficient triangular matrix times vector/matrix product */ template<typename OtherDerived> EIGEN_DEVICE_FUNC const Product<SelfAdjointView,OtherDerived> operator*(const MatrixBase<OtherDerived>& rhs) const { return Product<SelfAdjointView,OtherDerived>(*this, rhs.derived()); } /** Efficient vector/matrix times triangular matrix product */ template<typename OtherDerived> friend EIGEN_DEVICE_FUNC const Product<OtherDerived,SelfAdjointView> operator*(const MatrixBase<OtherDerived>& lhs, const SelfAdjointView& rhs) { return Product<OtherDerived,SelfAdjointView>(lhs.derived(),rhs); } friend EIGEN_DEVICE_FUNC const SelfAdjointView<const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar,MatrixType,product),UpLo> operator*(const Scalar& s, const SelfAdjointView& mat) { return (s*mat.nestedExpression()).template selfadjointView<UpLo>(); } /** Perform a symmetric rank 2 update of the selfadjoint matrix \c *this: * \f$ this = this + \alpha u v^* + conj(\alpha) v u^* \f$ * \returns a reference to \c *this * * The vectors \a u and \c v \b must be column vectors, however they can be * a adjoint expression without any overhead. Only the meaningful triangular * part of the matrix is updated, the rest is left unchanged. * * \sa rankUpdate(const MatrixBase<DerivedU>&, Scalar) */ template<typename DerivedU, typename DerivedV> EIGEN_DEVICE_FUNC SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, const Scalar& alpha = Scalar(1)); /** Perform a symmetric rank K update of the selfadjoint matrix \c *this: * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix. * * \returns a reference to \c *this * * Note that to perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply * call this function with u.adjoint(). 
* * \sa rankUpdate(const MatrixBase<DerivedU>&, const MatrixBase<DerivedV>&, Scalar) */ template<typename DerivedU> EIGEN_DEVICE_FUNC SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1)); /** \returns an expression of a triangular view extracted from the current selfadjoint view of a given triangular part * * The parameter \a TriMode can have the following values: \c #Upper, \c #StrictlyUpper, \c #UnitUpper, * \c #Lower, \c #StrictlyLower, \c #UnitLower. * * If \c TriMode references the same triangular part than \c *this, then this method simply return a \c TriangularView of the nested expression, * otherwise, the nested expression is first transposed, thus returning a \c TriangularView<Transpose<MatrixType>> object. * * \sa MatrixBase::triangularView(), class TriangularView */ template<unsigned int TriMode> EIGEN_DEVICE_FUNC typename internal::conditional<(TriMode&(Upper|Lower))==(UpLo&(Upper|Lower)), TriangularView<MatrixType,TriMode>, TriangularView<typename MatrixType::AdjointReturnType,TriMode> >::type triangularView() const { typename internal::conditional<(TriMode&(Upper|Lower))==(UpLo&(Upper|Lower)), MatrixType&, typename MatrixType::ConstTransposeReturnType>::type tmp1(m_matrix); typename internal::conditional<(TriMode&(Upper|Lower))==(UpLo&(Upper|Lower)), MatrixType&, typename MatrixType::AdjointReturnType>::type tmp2(tmp1); return typename internal::conditional<(TriMode&(Upper|Lower))==(UpLo&(Upper|Lower)), TriangularView<MatrixType,TriMode>, TriangularView<typename MatrixType::AdjointReturnType,TriMode> >::type(tmp2); } typedef SelfAdjointView<const MatrixConjugateReturnType,UpLo> ConjugateReturnType; /** \sa MatrixBase::conjugate() const */ EIGEN_DEVICE_FUNC inline const ConjugateReturnType conjugate() const { return ConjugateReturnType(m_matrix.conjugate()); } typedef SelfAdjointView<const typename MatrixType::AdjointReturnType,TransposeMode> AdjointReturnType; /** \sa MatrixBase::adjoint() const */ EIGEN_DEVICE_FUNC inline const AdjointReturnType adjoint() const { return AdjointReturnType(m_matrix.adjoint()); } typedef SelfAdjointView<typename MatrixType::TransposeReturnType,TransposeMode> TransposeReturnType; /** \sa MatrixBase::transpose() */ EIGEN_DEVICE_FUNC inline TransposeReturnType transpose() { EIGEN_STATIC_ASSERT_LVALUE(MatrixType) typename MatrixType::TransposeReturnType tmp(m_matrix); return TransposeReturnType(tmp); } typedef SelfAdjointView<const typename MatrixType::ConstTransposeReturnType,TransposeMode> ConstTransposeReturnType; /** \sa MatrixBase::transpose() const */ EIGEN_DEVICE_FUNC inline const ConstTransposeReturnType transpose() const { return ConstTransposeReturnType(m_matrix.transpose()); } /** \returns a const expression of the main diagonal of the matrix \c *this * * This method simply returns the diagonal of the nested expression, thus by-passing the SelfAdjointView decorator. 
* * \sa MatrixBase::diagonal(), class Diagonal */ EIGEN_DEVICE_FUNC typename MatrixType::ConstDiagonalReturnType diagonal() const { return typename MatrixType::ConstDiagonalReturnType(m_matrix); } /////////// Cholesky module /////////// const LLT<PlainObject, UpLo> llt() const; const LDLT<PlainObject, UpLo> ldlt() const; /////////// Eigenvalue module /////////// /** Real part of #Scalar */ typedef typename NumTraits<Scalar>::Real RealScalar; /** Return type of eigenvalues() */ typedef Matrix<RealScalar, internal::traits<MatrixType>::ColsAtCompileTime, 1> EigenvaluesReturnType; EIGEN_DEVICE_FUNC EigenvaluesReturnType eigenvalues() const; EIGEN_DEVICE_FUNC RealScalar operatorNorm() const; protected: MatrixTypeNested m_matrix; }; // template<typename OtherDerived, typename MatrixType, unsigned int UpLo> // internal::selfadjoint_matrix_product_returntype<OtherDerived,SelfAdjointView<MatrixType,UpLo> > // operator*(const MatrixBase<OtherDerived>& lhs, const SelfAdjointView<MatrixType,UpLo>& rhs) // { // return internal::matrix_selfadjoint_product_returntype<OtherDerived,SelfAdjointView<MatrixType,UpLo> >(lhs.derived(),rhs); // } // selfadjoint to dense matrix namespace internal { // TODO currently a selfadjoint expression has the form SelfAdjointView<.,.> // in the future selfadjoint-ness should be defined by the expression traits // such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work) template<typename MatrixType, unsigned int Mode> struct evaluator_traits<SelfAdjointView<MatrixType,Mode> > { typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind; typedef SelfAdjointShape Shape; }; template<int UpLo, int SetOpposite, typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT, typename Functor, int Version> class triangular_dense_assignment_kernel<UpLo,SelfAdjoint,SetOpposite,DstEvaluatorTypeT,SrcEvaluatorTypeT,Functor,Version> : public generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version> { protected: typedef generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version> Base; typedef typename Base::DstXprType DstXprType; typedef typename Base::SrcXprType SrcXprType; using Base::m_dst; using Base::m_src; using Base::m_functor; public: typedef typename Base::DstEvaluatorType DstEvaluatorType; typedef typename Base::SrcEvaluatorType SrcEvaluatorType; typedef typename Base::Scalar Scalar; typedef typename Base::AssignmentTraits AssignmentTraits; EIGEN_DEVICE_FUNC triangular_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr) : Base(dst, src, func, dstExpr) {} EIGEN_DEVICE_FUNC void assignCoeff(Index row, Index col) { eigen_internal_assert(row!=col); Scalar tmp = m_src.coeff(row,col); m_functor.assignCoeff(m_dst.coeffRef(row,col), tmp); m_functor.assignCoeff(m_dst.coeffRef(col,row), numext::conj(tmp)); } EIGEN_DEVICE_FUNC void assignDiagonalCoeff(Index id) { Base::assignCoeff(id,id); } EIGEN_DEVICE_FUNC void assignOppositeCoeff(Index, Index) { eigen_internal_assert(false && "should never be called"); } }; } // end namespace internal /*************************************************************************** * Implementation of MatrixBase methods ***************************************************************************/ /** This is the const version of MatrixBase::selfadjointView() */ template<typename Derived> template<unsigned int UpLo> typename 
MatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type MatrixBase<Derived>::selfadjointView() const { return typename ConstSelfAdjointViewReturnType<UpLo>::Type(derived()); } /** \returns an expression of a symmetric/self-adjoint view extracted from the upper or lower triangular part of the current matrix * * The parameter \a UpLo can be either \c #Upper or \c #Lower * * Example: \include MatrixBase_selfadjointView.cpp * Output: \verbinclude MatrixBase_selfadjointView.out * * \sa class SelfAdjointView */ template<typename Derived> template<unsigned int UpLo> typename MatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type MatrixBase<Derived>::selfadjointView() { return typename SelfAdjointViewReturnType<UpLo>::Type(derived()); } } // end namespace Eigen #endif // EIGEN_SELFADJOINTMATRIX_H
Unknown
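A minimal usage sketch of the public entry points this class backs (MatrixBase::selfadjointView(), the optimized products, rankUpdate() and the Cholesky hooks declared above). The matrix sizes and values are arbitrary, and MatrixXd/VectorXd/LLT are standard Eigen types not defined in this header:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  using namespace Eigen;

  // Symmetric positive-definite matrix; only its lower triangle is ever read through the view.
  MatrixXd A = MatrixXd::Random(4, 4);
  MatrixXd S = A * A.transpose() + 4.0 * MatrixXd::Identity(4, 4);

  VectorXd x = VectorXd::Random(4);

  // Optimized selfadjoint * vector product (only the Lower triangle of S is accessed).
  VectorXd y = S.selfadjointView<Lower>() * x;
  std::cout << "S*x = " << y.transpose() << "\n";

  // Rank-1 update: S = S + 0.5 * u * u^T, again touching only the stored triangle.
  VectorXd u = VectorXd::Random(4);
  S.selfadjointView<Lower>().rankUpdate(u, 0.5);

  // Cholesky factorization through the view (the llt()/ldlt() hooks declared above).
  LLT<MatrixXd, Lower> llt = S.selfadjointView<Lower>().llt();
  VectorXd z = llt.solve(x);
  VectorXd check = S.selfadjointView<Lower>() * z;
  std::cout << "residual: " << (check - x).norm() << "\n";
  return 0;
}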
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Fuzzy.h
.h
5,705
156
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_FUZZY_H #define EIGEN_FUZZY_H namespace Eigen { namespace internal { template<typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger> struct isApprox_selector { EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) { typename internal::nested_eval<Derived,2>::type nested(x); typename internal::nested_eval<OtherDerived,2>::type otherNested(y); return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * numext::mini(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum()); } }; template<typename Derived, typename OtherDerived> struct isApprox_selector<Derived, OtherDerived, true> { EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar&) { return x.matrix() == y.matrix(); } }; template<typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger> struct isMuchSmallerThan_object_selector { EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec) { return x.cwiseAbs2().sum() <= numext::abs2(prec) * y.cwiseAbs2().sum(); } }; template<typename Derived, typename OtherDerived> struct isMuchSmallerThan_object_selector<Derived, OtherDerived, true> { EIGEN_DEVICE_FUNC static bool run(const Derived& x, const OtherDerived&, const typename Derived::RealScalar&) { return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix(); } }; template<typename Derived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger> struct isMuchSmallerThan_scalar_selector { EIGEN_DEVICE_FUNC static bool run(const Derived& x, const typename Derived::RealScalar& y, const typename Derived::RealScalar& prec) { return x.cwiseAbs2().sum() <= numext::abs2(prec * y); } }; template<typename Derived> struct isMuchSmallerThan_scalar_selector<Derived, true> { EIGEN_DEVICE_FUNC static bool run(const Derived& x, const typename Derived::RealScalar&, const typename Derived::RealScalar&) { return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix(); } }; } // end namespace internal /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. * * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$ * are considered to be approximately equal within precision \f$ p \f$ if * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f] * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm * L2 norm). * * \note Because of the multiplicativeness of this comparison, one can't use this function * to check whether \c *this is approximately equal to the zero matrix or vector. * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix * or vector. If you want to test whether \c *this is zero, use internal::isMuchSmallerThan(const * RealScalar&, RealScalar) instead. 
* * \sa internal::isMuchSmallerThan(const RealScalar&, RealScalar) const */ template<typename Derived> template<typename OtherDerived> bool DenseBase<Derived>::isApprox( const DenseBase<OtherDerived>& other, const RealScalar& prec ) const { return internal::isApprox_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec); } /** \returns \c true if the norm of \c *this is much smaller than \a other, * within the precision determined by \a prec. * * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f] * * For matrices, the comparison is done using the Hilbert-Schmidt norm. For this reason, * the value of the reference scalar \a other should come from the Hilbert-Schmidt norm * of a reference matrix of same dimensions. * * \sa isApprox(), isMuchSmallerThan(const DenseBase<OtherDerived>&, RealScalar) const */ template<typename Derived> bool DenseBase<Derived>::isMuchSmallerThan( const typename NumTraits<Scalar>::Real& other, const RealScalar& prec ) const { return internal::isMuchSmallerThan_scalar_selector<Derived>::run(derived(), other, prec); } /** \returns \c true if the norm of \c *this is much smaller than the norm of \a other, * within the precision determined by \a prec. * * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f] * For matrices, the comparison is done using the Hilbert-Schmidt norm. * * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const */ template<typename Derived> template<typename OtherDerived> bool DenseBase<Derived>::isMuchSmallerThan( const DenseBase<OtherDerived>& other, const RealScalar& prec ) const { return internal::isMuchSmallerThan_object_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec); } } // end namespace Eigen #endif // EIGEN_FUZZY_H
Unknown
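A short sketch of the fuzzy comparisons defined in Fuzzy.h, including the caveat documented above that the multiplicative isApprox() cannot be used to test against zero; the tolerances below are arbitrary example values:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  using namespace Eigen;

  Vector3d v(1.0, 2.0, 3.0);
  Vector3d w = v + Vector3d::Constant(1e-12);

  // Multiplicative comparison: ||v - w|| <= prec * min(||v||, ||w||).
  std::cout << std::boolalpha;
  std::cout << "v.isApprox(w):            " << v.isApprox(w) << "\n";         // true (default precision)
  std::cout << "v.isApprox(w, 1e-15):     " << v.isApprox(w, 1e-15) << "\n";  // false (tighter precision)

  // As documented above, isApprox() against zero is not meaningful;
  // use isMuchSmallerThan() with a reference scale instead.
  Vector3d tiny = Vector3d::Constant(1e-14);
  std::cout << "tiny.isApprox(Zero()):       " << tiny.isApprox(Vector3d::Zero()) << "\n";  // false
  std::cout << "tiny.isMuchSmallerThan(1.0): " << tiny.isMuchSmallerThan(1.0) << "\n";      // true
  return 0;
}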
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/CwiseUnaryView.h
.h
5,366
131
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CWISE_UNARY_VIEW_H #define EIGEN_CWISE_UNARY_VIEW_H namespace Eigen { namespace internal { template<typename ViewOp, typename MatrixType> struct traits<CwiseUnaryView<ViewOp, MatrixType> > : traits<MatrixType> { typedef typename result_of< ViewOp(const typename traits<MatrixType>::Scalar&) >::type Scalar; typedef typename MatrixType::Nested MatrixTypeNested; typedef typename remove_all<MatrixTypeNested>::type _MatrixTypeNested; enum { FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0, Flags = traits<_MatrixTypeNested>::Flags & (RowMajorBit | FlagsLvalueBit | DirectAccessBit), // FIXME DirectAccessBit should not be handled by expressions MatrixTypeInnerStride = inner_stride_at_compile_time<MatrixType>::ret, // need to cast the sizeof's from size_t to int explicitly, otherwise: // "error: no integral type can represent all of the enumerator values InnerStrideAtCompileTime = MatrixTypeInnerStride == Dynamic ? int(Dynamic) : int(MatrixTypeInnerStride) * int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar)), OuterStrideAtCompileTime = outer_stride_at_compile_time<MatrixType>::ret == Dynamic ? int(Dynamic) : outer_stride_at_compile_time<MatrixType>::ret * int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar)) }; }; } template<typename ViewOp, typename MatrixType, typename StorageKind> class CwiseUnaryViewImpl; /** \class CwiseUnaryView * \ingroup Core_Module * * \brief Generic lvalue expression of a coefficient-wise unary operator of a matrix or a vector * * \tparam ViewOp template functor implementing the view * \tparam MatrixType the type of the matrix we are applying the unary operator * * This class represents a lvalue expression of a generic unary view operator of a matrix or a vector. * It is the return type of real() and imag(), and most of the time this is the only way it is used. 
* * \sa MatrixBase::unaryViewExpr(const CustomUnaryOp &) const, class CwiseUnaryOp */ template<typename ViewOp, typename MatrixType> class CwiseUnaryView : public CwiseUnaryViewImpl<ViewOp, MatrixType, typename internal::traits<MatrixType>::StorageKind> { public: typedef typename CwiseUnaryViewImpl<ViewOp, MatrixType,typename internal::traits<MatrixType>::StorageKind>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryView) typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested; typedef typename internal::remove_all<MatrixType>::type NestedExpression; explicit inline CwiseUnaryView(MatrixType& mat, const ViewOp& func = ViewOp()) : m_matrix(mat), m_functor(func) {} EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView) EIGEN_STRONG_INLINE Index rows() const { return m_matrix.rows(); } EIGEN_STRONG_INLINE Index cols() const { return m_matrix.cols(); } /** \returns the functor representing unary operation */ const ViewOp& functor() const { return m_functor; } /** \returns the nested expression */ const typename internal::remove_all<MatrixTypeNested>::type& nestedExpression() const { return m_matrix; } /** \returns the nested expression */ typename internal::remove_reference<MatrixTypeNested>::type& nestedExpression() { return m_matrix.const_cast_derived(); } protected: MatrixTypeNested m_matrix; ViewOp m_functor; }; // Generic API dispatcher template<typename ViewOp, typename XprType, typename StorageKind> class CwiseUnaryViewImpl : public internal::generic_xpr_base<CwiseUnaryView<ViewOp, XprType> >::type { public: typedef typename internal::generic_xpr_base<CwiseUnaryView<ViewOp, XprType> >::type Base; }; template<typename ViewOp, typename MatrixType> class CwiseUnaryViewImpl<ViewOp,MatrixType,Dense> : public internal::dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type { public: typedef CwiseUnaryView<ViewOp, MatrixType> Derived; typedef typename internal::dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Derived) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl) EIGEN_DEVICE_FUNC inline Scalar* data() { return &(this->coeffRef(0)); } EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(this->coeff(0)); } EIGEN_DEVICE_FUNC inline Index innerStride() const { return derived().nestedExpression().innerStride() * sizeof(typename internal::traits<MatrixType>::Scalar) / sizeof(Scalar); } EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().nestedExpression().outerStride() * sizeof(typename internal::traits<MatrixType>::Scalar) / sizeof(Scalar); } protected: EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(CwiseUnaryViewImpl) }; } // end namespace Eigen #endif // EIGEN_CWISE_UNARY_VIEW_H
Unknown
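A small sketch of the most common instances of this view, MatrixBase::real() and MatrixBase::imag() on a complex matrix, which are writable precisely because CwiseUnaryView is an lvalue expression; Matrix2cd/Matrix2d are standard Eigen typedefs not defined in this header:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  using namespace Eigen;

  Matrix2cd m = Matrix2cd::Zero();

  // real() and imag() return CwiseUnaryView expressions: lvalue views into the
  // real/imaginary parts of the complex coefficients, so they can be assigned to.
  m.real() = Matrix2d::Identity();
  m.imag() << 0, 1,
             -1, 0;

  m.imag() *= 2.0;   // in-place update through the view

  std::cout << m << "\n";

  // The strides of the view are expressed in units of the view's Scalar (double),
  // hence the sizeof() scaling performed in innerStride()/outerStride() above.
  std::cout << "real() view strides: inner=" << m.real().innerStride()
            << ", outer=" << m.real().outerStride() << "\n";
  return 0;
}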
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/VectorwiseOp.h
.h
29,441
696
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PARTIAL_REDUX_H #define EIGEN_PARTIAL_REDUX_H namespace Eigen { /** \class PartialReduxExpr * \ingroup Core_Module * * \brief Generic expression of a partially reduxed matrix * * \tparam MatrixType the type of the matrix we are applying the redux operation * \tparam MemberOp type of the member functor * \tparam Direction indicates the direction of the redux (#Vertical or #Horizontal) * * This class represents an expression of a partial redux operator of a matrix. * It is the return type of some VectorwiseOp functions, * and most of the time this is the only way it is used. * * \sa class VectorwiseOp */ template< typename MatrixType, typename MemberOp, int Direction> class PartialReduxExpr; namespace internal { template<typename MatrixType, typename MemberOp, int Direction> struct traits<PartialReduxExpr<MatrixType, MemberOp, Direction> > : traits<MatrixType> { typedef typename MemberOp::result_type Scalar; typedef typename traits<MatrixType>::StorageKind StorageKind; typedef typename traits<MatrixType>::XprKind XprKind; typedef typename MatrixType::Scalar InputScalar; enum { RowsAtCompileTime = Direction==Vertical ? 1 : MatrixType::RowsAtCompileTime, ColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = Direction==Vertical ? 1 : MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::MaxColsAtCompileTime, Flags = RowsAtCompileTime == 1 ? RowMajorBit : 0, TraversalSize = Direction==Vertical ? MatrixType::RowsAtCompileTime : MatrixType::ColsAtCompileTime }; }; } template< typename MatrixType, typename MemberOp, int Direction> class PartialReduxExpr : public internal::dense_xpr_base< PartialReduxExpr<MatrixType, MemberOp, Direction> >::type, internal::no_assignment_operator { public: typedef typename internal::dense_xpr_base<PartialReduxExpr>::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(PartialReduxExpr) EIGEN_DEVICE_FUNC explicit PartialReduxExpr(const MatrixType& mat, const MemberOp& func = MemberOp()) : m_matrix(mat), m_functor(func) {} EIGEN_DEVICE_FUNC Index rows() const { return (Direction==Vertical ? 1 : m_matrix.rows()); } EIGEN_DEVICE_FUNC Index cols() const { return (Direction==Horizontal ? 
1 : m_matrix.cols()); } EIGEN_DEVICE_FUNC typename MatrixType::Nested nestedExpression() const { return m_matrix; } EIGEN_DEVICE_FUNC const MemberOp& functor() const { return m_functor; } protected: typename MatrixType::Nested m_matrix; const MemberOp m_functor; }; #define EIGEN_MEMBER_FUNCTOR(MEMBER,COST) \ template <typename ResultType> \ struct member_##MEMBER { \ EIGEN_EMPTY_STRUCT_CTOR(member_##MEMBER) \ typedef ResultType result_type; \ template<typename Scalar, int Size> struct Cost \ { enum { value = COST }; }; \ template<typename XprType> \ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ ResultType operator()(const XprType& mat) const \ { return mat.MEMBER(); } \ } namespace internal { EIGEN_MEMBER_FUNCTOR(squaredNorm, Size * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost); EIGEN_MEMBER_FUNCTOR(norm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost); EIGEN_MEMBER_FUNCTOR(stableNorm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost); EIGEN_MEMBER_FUNCTOR(blueNorm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost); EIGEN_MEMBER_FUNCTOR(hypotNorm, (Size-1) * functor_traits<scalar_hypot_op<Scalar> >::Cost ); EIGEN_MEMBER_FUNCTOR(sum, (Size-1)*NumTraits<Scalar>::AddCost); EIGEN_MEMBER_FUNCTOR(mean, (Size-1)*NumTraits<Scalar>::AddCost + NumTraits<Scalar>::MulCost); EIGEN_MEMBER_FUNCTOR(minCoeff, (Size-1)*NumTraits<Scalar>::AddCost); EIGEN_MEMBER_FUNCTOR(maxCoeff, (Size-1)*NumTraits<Scalar>::AddCost); EIGEN_MEMBER_FUNCTOR(all, (Size-1)*NumTraits<Scalar>::AddCost); EIGEN_MEMBER_FUNCTOR(any, (Size-1)*NumTraits<Scalar>::AddCost); EIGEN_MEMBER_FUNCTOR(count, (Size-1)*NumTraits<Scalar>::AddCost); EIGEN_MEMBER_FUNCTOR(prod, (Size-1)*NumTraits<Scalar>::MulCost); template <int p, typename ResultType> struct member_lpnorm { typedef ResultType result_type; template<typename Scalar, int Size> struct Cost { enum { value = (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost }; }; EIGEN_DEVICE_FUNC member_lpnorm() {} template<typename XprType> EIGEN_DEVICE_FUNC inline ResultType operator()(const XprType& mat) const { return mat.template lpNorm<p>(); } }; template <typename BinaryOp, typename Scalar> struct member_redux { typedef typename result_of< BinaryOp(const Scalar&,const Scalar&) >::type result_type; template<typename _Scalar, int Size> struct Cost { enum { value = (Size-1) * functor_traits<BinaryOp>::Cost }; }; EIGEN_DEVICE_FUNC explicit member_redux(const BinaryOp func) : m_functor(func) {} template<typename Derived> EIGEN_DEVICE_FUNC inline result_type operator()(const DenseBase<Derived>& mat) const { return mat.redux(m_functor); } const BinaryOp m_functor; }; } /** \class VectorwiseOp * \ingroup Core_Module * * \brief Pseudo expression providing partial reduction operations * * \tparam ExpressionType the type of the object on which to do partial reductions * \tparam Direction indicates the direction of the redux (#Vertical or #Horizontal) * * This class represents a pseudo expression with partial reduction features. * It is the return type of DenseBase::colwise() and DenseBase::rowwise() * and most of the time this is the only way it is used. 
* * Example: \include MatrixBase_colwise.cpp * Output: \verbinclude MatrixBase_colwise.out * * \sa DenseBase::colwise(), DenseBase::rowwise(), class PartialReduxExpr */ template<typename ExpressionType, int Direction> class VectorwiseOp { public: typedef typename ExpressionType::Scalar Scalar; typedef typename ExpressionType::RealScalar RealScalar; typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef typename internal::ref_selector<ExpressionType>::non_const_type ExpressionTypeNested; typedef typename internal::remove_all<ExpressionTypeNested>::type ExpressionTypeNestedCleaned; template<template<typename _Scalar> class Functor, typename Scalar_=Scalar> struct ReturnType { typedef PartialReduxExpr<ExpressionType, Functor<Scalar_>, Direction > Type; }; template<typename BinaryOp> struct ReduxReturnType { typedef PartialReduxExpr<ExpressionType, internal::member_redux<BinaryOp,Scalar>, Direction > Type; }; enum { isVertical = (Direction==Vertical) ? 1 : 0, isHorizontal = (Direction==Horizontal) ? 1 : 0 }; protected: typedef typename internal::conditional<isVertical, typename ExpressionType::ColXpr, typename ExpressionType::RowXpr>::type SubVector; /** \internal * \returns the i-th subvector according to the \c Direction */ EIGEN_DEVICE_FUNC SubVector subVector(Index i) { return SubVector(m_matrix.derived(),i); } /** \internal * \returns the number of subvectors in the direction \c Direction */ EIGEN_DEVICE_FUNC Index subVectors() const { return isVertical?m_matrix.cols():m_matrix.rows(); } template<typename OtherDerived> struct ExtendedType { typedef Replicate<OtherDerived, isVertical ? 1 : ExpressionType::RowsAtCompileTime, isHorizontal ? 1 : ExpressionType::ColsAtCompileTime> Type; }; /** \internal * Replicates a vector to match the size of \c *this */ template<typename OtherDerived> EIGEN_DEVICE_FUNC typename ExtendedType<OtherDerived>::Type extendedTo(const DenseBase<OtherDerived>& other) const { EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isVertical, OtherDerived::MaxColsAtCompileTime==1), YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED) EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isHorizontal, OtherDerived::MaxRowsAtCompileTime==1), YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED) return typename ExtendedType<OtherDerived>::Type (other.derived(), isVertical ? 1 : m_matrix.rows(), isHorizontal ? 1 : m_matrix.cols()); } template<typename OtherDerived> struct OppositeExtendedType { typedef Replicate<OtherDerived, isHorizontal ? 1 : ExpressionType::RowsAtCompileTime, isVertical ? 1 : ExpressionType::ColsAtCompileTime> Type; }; /** \internal * Replicates a vector in the opposite direction to match the size of \c *this */ template<typename OtherDerived> EIGEN_DEVICE_FUNC typename OppositeExtendedType<OtherDerived>::Type extendedToOpposite(const DenseBase<OtherDerived>& other) const { EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isHorizontal, OtherDerived::MaxColsAtCompileTime==1), YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED) EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isVertical, OtherDerived::MaxRowsAtCompileTime==1), YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED) return typename OppositeExtendedType<OtherDerived>::Type (other.derived(), isHorizontal ? 1 : m_matrix.rows(), isVertical ? 
1 : m_matrix.cols()); } public: EIGEN_DEVICE_FUNC explicit inline VectorwiseOp(ExpressionType& matrix) : m_matrix(matrix) {} /** \internal */ EIGEN_DEVICE_FUNC inline const ExpressionType& _expression() const { return m_matrix; } /** \returns a row or column vector expression of \c *this reduxed by \a func * * The template parameter \a BinaryOp is the type of the functor * of the custom redux operator. Note that func must be an associative operator. * * \sa class VectorwiseOp, DenseBase::colwise(), DenseBase::rowwise() */ template<typename BinaryOp> EIGEN_DEVICE_FUNC const typename ReduxReturnType<BinaryOp>::Type redux(const BinaryOp& func = BinaryOp()) const { return typename ReduxReturnType<BinaryOp>::Type(_expression(), internal::member_redux<BinaryOp,Scalar>(func)); } typedef typename ReturnType<internal::member_minCoeff>::Type MinCoeffReturnType; typedef typename ReturnType<internal::member_maxCoeff>::Type MaxCoeffReturnType; typedef typename ReturnType<internal::member_squaredNorm,RealScalar>::Type SquaredNormReturnType; typedef typename ReturnType<internal::member_norm,RealScalar>::Type NormReturnType; typedef typename ReturnType<internal::member_blueNorm,RealScalar>::Type BlueNormReturnType; typedef typename ReturnType<internal::member_stableNorm,RealScalar>::Type StableNormReturnType; typedef typename ReturnType<internal::member_hypotNorm,RealScalar>::Type HypotNormReturnType; typedef typename ReturnType<internal::member_sum>::Type SumReturnType; typedef typename ReturnType<internal::member_mean>::Type MeanReturnType; typedef typename ReturnType<internal::member_all>::Type AllReturnType; typedef typename ReturnType<internal::member_any>::Type AnyReturnType; typedef PartialReduxExpr<ExpressionType, internal::member_count<Index>, Direction> CountReturnType; typedef typename ReturnType<internal::member_prod>::Type ProdReturnType; typedef Reverse<const ExpressionType, Direction> ConstReverseReturnType; typedef Reverse<ExpressionType, Direction> ReverseReturnType; template<int p> struct LpNormReturnType { typedef PartialReduxExpr<ExpressionType, internal::member_lpnorm<p,RealScalar>,Direction> Type; }; /** \returns a row (or column) vector expression of the smallest coefficient * of each column (or row) of the referenced expression. * * \warning the result is undefined if \c *this contains NaN. * * Example: \include PartialRedux_minCoeff.cpp * Output: \verbinclude PartialRedux_minCoeff.out * * \sa DenseBase::minCoeff() */ EIGEN_DEVICE_FUNC const MinCoeffReturnType minCoeff() const { return MinCoeffReturnType(_expression()); } /** \returns a row (or column) vector expression of the largest coefficient * of each column (or row) of the referenced expression. * * \warning the result is undefined if \c *this contains NaN. * * Example: \include PartialRedux_maxCoeff.cpp * Output: \verbinclude PartialRedux_maxCoeff.out * * \sa DenseBase::maxCoeff() */ EIGEN_DEVICE_FUNC const MaxCoeffReturnType maxCoeff() const { return MaxCoeffReturnType(_expression()); } /** \returns a row (or column) vector expression of the squared norm * of each column (or row) of the referenced expression. * This is a vector with real entries, even if the original matrix has complex entries. 
* * Example: \include PartialRedux_squaredNorm.cpp * Output: \verbinclude PartialRedux_squaredNorm.out * * \sa DenseBase::squaredNorm() */ EIGEN_DEVICE_FUNC const SquaredNormReturnType squaredNorm() const { return SquaredNormReturnType(_expression()); } /** \returns a row (or column) vector expression of the norm * of each column (or row) of the referenced expression. * This is a vector with real entries, even if the original matrix has complex entries. * * Example: \include PartialRedux_norm.cpp * Output: \verbinclude PartialRedux_norm.out * * \sa DenseBase::norm() */ EIGEN_DEVICE_FUNC const NormReturnType norm() const { return NormReturnType(_expression()); } /** \returns a row (or column) vector expression of the norm * of each column (or row) of the referenced expression. * This is a vector with real entries, even if the original matrix has complex entries. * * Example: \include PartialRedux_norm.cpp * Output: \verbinclude PartialRedux_norm.out * * \sa DenseBase::norm() */ template<int p> EIGEN_DEVICE_FUNC const typename LpNormReturnType<p>::Type lpNorm() const { return typename LpNormReturnType<p>::Type(_expression()); } /** \returns a row (or column) vector expression of the norm * of each column (or row) of the referenced expression, using * Blue's algorithm. * This is a vector with real entries, even if the original matrix has complex entries. * * \sa DenseBase::blueNorm() */ EIGEN_DEVICE_FUNC const BlueNormReturnType blueNorm() const { return BlueNormReturnType(_expression()); } /** \returns a row (or column) vector expression of the norm * of each column (or row) of the referenced expression, avoiding * underflow and overflow. * This is a vector with real entries, even if the original matrix has complex entries. * * \sa DenseBase::stableNorm() */ EIGEN_DEVICE_FUNC const StableNormReturnType stableNorm() const { return StableNormReturnType(_expression()); } /** \returns a row (or column) vector expression of the norm * of each column (or row) of the referenced expression, avoiding * underflow and overflow using a concatenation of hypot() calls. * This is a vector with real entries, even if the original matrix has complex entries. * * \sa DenseBase::hypotNorm() */ EIGEN_DEVICE_FUNC const HypotNormReturnType hypotNorm() const { return HypotNormReturnType(_expression()); } /** \returns a row (or column) vector expression of the sum * of each column (or row) of the referenced expression. * * Example: \include PartialRedux_sum.cpp * Output: \verbinclude PartialRedux_sum.out * * \sa DenseBase::sum() */ EIGEN_DEVICE_FUNC const SumReturnType sum() const { return SumReturnType(_expression()); } /** \returns a row (or column) vector expression of the mean * of each column (or row) of the referenced expression. * * \sa DenseBase::mean() */ EIGEN_DEVICE_FUNC const MeanReturnType mean() const { return MeanReturnType(_expression()); } /** \returns a row (or column) vector expression representing * whether \b all coefficients of each respective column (or row) are \c true. * This expression can be assigned to a vector with entries of type \c bool. * * \sa DenseBase::all() */ EIGEN_DEVICE_FUNC const AllReturnType all() const { return AllReturnType(_expression()); } /** \returns a row (or column) vector expression representing * whether \b at \b least one coefficient of each respective column (or row) is \c true. * This expression can be assigned to a vector with entries of type \c bool. 
* * \sa DenseBase::any() */ EIGEN_DEVICE_FUNC const AnyReturnType any() const { return AnyReturnType(_expression()); } /** \returns a row (or column) vector expression representing * the number of \c true coefficients of each respective column (or row). * This expression can be assigned to a vector whose entries have the same type as is used to * index entries of the original matrix; for dense matrices, this is \c std::ptrdiff_t . * * Example: \include PartialRedux_count.cpp * Output: \verbinclude PartialRedux_count.out * * \sa DenseBase::count() */ EIGEN_DEVICE_FUNC const CountReturnType count() const { return CountReturnType(_expression()); } /** \returns a row (or column) vector expression of the product * of each column (or row) of the referenced expression. * * Example: \include PartialRedux_prod.cpp * Output: \verbinclude PartialRedux_prod.out * * \sa DenseBase::prod() */ EIGEN_DEVICE_FUNC const ProdReturnType prod() const { return ProdReturnType(_expression()); } /** \returns a matrix expression * where each column (or row) is reversed. * * Example: \include Vectorwise_reverse.cpp * Output: \verbinclude Vectorwise_reverse.out * * \sa DenseBase::reverse() */ EIGEN_DEVICE_FUNC const ConstReverseReturnType reverse() const { return ConstReverseReturnType( _expression() ); } /** \returns a writable matrix expression * where each column (or row) is reversed. * * \sa reverse() const */ EIGEN_DEVICE_FUNC ReverseReturnType reverse() { return ReverseReturnType( _expression() ); } typedef Replicate<ExpressionType,(isVertical?Dynamic:1),(isHorizontal?Dynamic:1)> ReplicateReturnType; EIGEN_DEVICE_FUNC const ReplicateReturnType replicate(Index factor) const; /** * \return an expression of the replication of each column (or row) of \c *this * * Example: \include DirectionWise_replicate.cpp * Output: \verbinclude DirectionWise_replicate.out * * \sa VectorwiseOp::replicate(Index), DenseBase::replicate(), class Replicate */ // NOTE implemented here because of sunstudio's compilation errors // isVertical*Factor+isHorizontal instead of (isVertical?Factor:1) to handle CUDA bug with ternary operator template<int Factor> const Replicate<ExpressionType,isVertical*Factor+isHorizontal,isHorizontal*Factor+isVertical> EIGEN_DEVICE_FUNC replicate(Index factor = Factor) const { return Replicate<ExpressionType,(isVertical?Factor:1),(isHorizontal?Factor:1)> (_expression(),isVertical?factor:1,isHorizontal?factor:1); } /////////// Arithmetic operators /////////// /** Copies the vector \a other to each subvector of \c *this */ template<typename OtherDerived> EIGEN_DEVICE_FUNC ExpressionType& operator=(const DenseBase<OtherDerived>& other) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) //eigen_assert((m_matrix.isNull()) == (other.isNull())); FIXME return const_cast<ExpressionType&>(m_matrix = extendedTo(other.derived())); } /** Adds the vector \a other to each subvector of \c *this */ template<typename OtherDerived> EIGEN_DEVICE_FUNC ExpressionType& operator+=(const DenseBase<OtherDerived>& other) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) return const_cast<ExpressionType&>(m_matrix += extendedTo(other.derived())); } /** Subtracts the vector \a other from each subvector of \c *this */ template<typename OtherDerived> EIGEN_DEVICE_FUNC ExpressionType& operator-=(const DenseBase<OtherDerived>& other) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) 
EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) return const_cast<ExpressionType&>(m_matrix -= extendedTo(other.derived())); } /** Multiples each subvector of \c *this by the vector \a other */ template<typename OtherDerived> EIGEN_DEVICE_FUNC ExpressionType& operator*=(const DenseBase<OtherDerived>& other) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType) EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) m_matrix *= extendedTo(other.derived()); return const_cast<ExpressionType&>(m_matrix); } /** Divides each subvector of \c *this by the vector \a other */ template<typename OtherDerived> EIGEN_DEVICE_FUNC ExpressionType& operator/=(const DenseBase<OtherDerived>& other) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType) EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) m_matrix /= extendedTo(other.derived()); return const_cast<ExpressionType&>(m_matrix); } /** Returns the expression of the sum of the vector \a other to each subvector of \c *this */ template<typename OtherDerived> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC CwiseBinaryOp<internal::scalar_sum_op<Scalar,typename OtherDerived::Scalar>, const ExpressionTypeNestedCleaned, const typename ExtendedType<OtherDerived>::Type> operator+(const DenseBase<OtherDerived>& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) return m_matrix + extendedTo(other.derived()); } /** Returns the expression of the difference between each subvector of \c *this and the vector \a other */ template<typename OtherDerived> EIGEN_DEVICE_FUNC CwiseBinaryOp<internal::scalar_difference_op<Scalar,typename OtherDerived::Scalar>, const ExpressionTypeNestedCleaned, const typename ExtendedType<OtherDerived>::Type> operator-(const DenseBase<OtherDerived>& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) return m_matrix - extendedTo(other.derived()); } /** Returns the expression where each subvector is the product of the vector \a other * by the corresponding subvector of \c *this */ template<typename OtherDerived> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC CwiseBinaryOp<internal::scalar_product_op<Scalar>, const ExpressionTypeNestedCleaned, const typename ExtendedType<OtherDerived>::Type> EIGEN_DEVICE_FUNC operator*(const DenseBase<OtherDerived>& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType) EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) return m_matrix * extendedTo(other.derived()); } /** Returns the expression where each subvector is the quotient of the corresponding * subvector of \c *this by the vector \a other */ template<typename OtherDerived> EIGEN_DEVICE_FUNC CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const ExpressionTypeNestedCleaned, const typename ExtendedType<OtherDerived>::Type> operator/(const DenseBase<OtherDerived>& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType) EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived) return m_matrix / extendedTo(other.derived()); } /** \returns an expression where each column (or row) of the referenced matrix are normalized. * The referenced matrix is \b not modified. 
* \sa MatrixBase::normalized(), normalize() */ EIGEN_DEVICE_FUNC CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const ExpressionTypeNestedCleaned, const typename OppositeExtendedType<typename ReturnType<internal::member_norm,RealScalar>::Type>::Type> normalized() const { return m_matrix.cwiseQuotient(extendedToOpposite(this->norm())); } /** Normalize in-place each row or columns of the referenced matrix. * \sa MatrixBase::normalize(), normalized() */ EIGEN_DEVICE_FUNC void normalize() { m_matrix = this->normalized(); } EIGEN_DEVICE_FUNC inline void reverseInPlace(); /////////// Geometry module /////////// typedef Homogeneous<ExpressionType,Direction> HomogeneousReturnType; EIGEN_DEVICE_FUNC HomogeneousReturnType homogeneous() const; typedef typename ExpressionType::PlainObject CrossReturnType; template<typename OtherDerived> EIGEN_DEVICE_FUNC const CrossReturnType cross(const MatrixBase<OtherDerived>& other) const; enum { HNormalized_Size = Direction==Vertical ? internal::traits<ExpressionType>::RowsAtCompileTime : internal::traits<ExpressionType>::ColsAtCompileTime, HNormalized_SizeMinusOne = HNormalized_Size==Dynamic ? Dynamic : HNormalized_Size-1 }; typedef Block<const ExpressionType, Direction==Vertical ? int(HNormalized_SizeMinusOne) : int(internal::traits<ExpressionType>::RowsAtCompileTime), Direction==Horizontal ? int(HNormalized_SizeMinusOne) : int(internal::traits<ExpressionType>::ColsAtCompileTime)> HNormalized_Block; typedef Block<const ExpressionType, Direction==Vertical ? 1 : int(internal::traits<ExpressionType>::RowsAtCompileTime), Direction==Horizontal ? 1 : int(internal::traits<ExpressionType>::ColsAtCompileTime)> HNormalized_Factors; typedef CwiseBinaryOp<internal::scalar_quotient_op<typename internal::traits<ExpressionType>::Scalar>, const HNormalized_Block, const Replicate<HNormalized_Factors, Direction==Vertical ? HNormalized_SizeMinusOne : 1, Direction==Horizontal ? HNormalized_SizeMinusOne : 1> > HNormalizedReturnType; EIGEN_DEVICE_FUNC const HNormalizedReturnType hnormalized() const; protected: ExpressionTypeNested m_matrix; }; //const colwise moved to DenseBase.h due to CUDA compiler bug /** \returns a writable VectorwiseOp wrapper of *this providing additional partial reduction operations * * \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting */ template<typename Derived> inline typename DenseBase<Derived>::ColwiseReturnType DenseBase<Derived>::colwise() { return ColwiseReturnType(derived()); } //const rowwise moved to DenseBase.h due to CUDA compiler bug /** \returns a writable VectorwiseOp wrapper of *this providing additional partial reduction operations * * \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting */ template<typename Derived> inline typename DenseBase<Derived>::RowwiseReturnType DenseBase<Derived>::rowwise() { return RowwiseReturnType(derived()); } } // end namespace Eigen #endif // EIGEN_PARTIAL_REDUX_H
Unknown
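A minimal usage sketch (not part of the stored record above) of the partial-redux interface this header declares: the colwise()/rowwise() broadcasting operators and the per-subvector normalized()/normalize() members. The matrix sizes and values below are illustrative assumptions only.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd m(3, 2);
  m << 1, 2,
       3, 4,
       5, 6;
  Eigen::Vector3d v(1, 0, -1);

  // Broadcasting operator+: add v to every column of m.
  Eigen::MatrixXd shifted = m.colwise() + v;

  // Row-wise broadcasting with the compound operator+= declared above.
  Eigen::RowVector2d r(10, 20);
  Eigen::MatrixXd bumped = m;
  bumped.rowwise() += r;

  // normalize() rescales each column of m in place to unit norm.
  m.colwise().normalize();

  std::cout << shifted << "\n\n" << bumped << "\n\n" << m << "\n";
  return 0;
}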
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Diagonal.h
.h
9,597
261
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DIAGONAL_H #define EIGEN_DIAGONAL_H namespace Eigen { /** \class Diagonal * \ingroup Core_Module * * \brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix * * \param MatrixType the type of the object in which we are taking a sub/main/super diagonal * \param DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal. * A positive value means a superdiagonal, a negative value means a subdiagonal. * You can also use DynamicIndex so the index can be set at runtime. * * The matrix is not required to be square. * * This class represents an expression of the main diagonal, or any sub/super diagonal * of a square matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(Index) and most of the * time this is the only way it is used. * * \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index) */ namespace internal { template<typename MatrixType, int DiagIndex> struct traits<Diagonal<MatrixType,DiagIndex> > : traits<MatrixType> { typedef typename ref_selector<MatrixType>::type MatrixTypeNested; typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested; typedef typename MatrixType::StorageKind StorageKind; enum { RowsAtCompileTime = (int(DiagIndex) == DynamicIndex || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic : (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0), MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))), ColsAtCompileTime = 1, MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic : DiagIndex == DynamicIndex ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime) : (EIGEN_PLAIN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0), MatrixType::MaxColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))), MaxColsAtCompileTime = 1, MaskLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0, Flags = (unsigned int)_MatrixTypeNested::Flags & (RowMajorBit | MaskLvalueBit | DirectAccessBit) & ~RowMajorBit, // FIXME DirectAccessBit should not be handled by expressions MatrixTypeOuterStride = outer_stride_at_compile_time<MatrixType>::ret, InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? Dynamic : MatrixTypeOuterStride+1, OuterStrideAtCompileTime = 0 }; }; } template<typename MatrixType, int _DiagIndex> class Diagonal : public internal::dense_xpr_base< Diagonal<MatrixType,_DiagIndex> >::type { public: enum { DiagIndex = _DiagIndex }; typedef typename internal::dense_xpr_base<Diagonal>::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal) EIGEN_DEVICE_FUNC explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index) { eigen_assert( a_index <= m_matrix.cols() && -a_index <= m_matrix.rows() ); } EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal) EIGEN_DEVICE_FUNC inline Index rows() const { return m_index.value()<0 ? 
numext::mini<Index>(m_matrix.cols(),m_matrix.rows()+m_index.value()) : numext::mini<Index>(m_matrix.rows(),m_matrix.cols()-m_index.value()); } EIGEN_DEVICE_FUNC inline Index cols() const { return 1; } EIGEN_DEVICE_FUNC inline Index innerStride() const { return m_matrix.outerStride() + 1; } EIGEN_DEVICE_FUNC inline Index outerStride() const { return 0; } typedef typename internal::conditional< internal::is_lvalue<MatrixType>::value, Scalar, const Scalar >::type ScalarWithConstIfNotLvalue; EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return &(m_matrix.coeffRef(rowOffset(), colOffset())); } EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(m_matrix.coeffRef(rowOffset(), colOffset())); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index) { EIGEN_STATIC_ASSERT_LVALUE(MatrixType) return m_matrix.coeffRef(row+rowOffset(), row+colOffset()); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index row, Index) const { return m_matrix.coeffRef(row+rowOffset(), row+colOffset()); } EIGEN_DEVICE_FUNC inline CoeffReturnType coeff(Index row, Index) const { return m_matrix.coeff(row+rowOffset(), row+colOffset()); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index idx) { EIGEN_STATIC_ASSERT_LVALUE(MatrixType) return m_matrix.coeffRef(idx+rowOffset(), idx+colOffset()); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index idx) const { return m_matrix.coeffRef(idx+rowOffset(), idx+colOffset()); } EIGEN_DEVICE_FUNC inline CoeffReturnType coeff(Index idx) const { return m_matrix.coeff(idx+rowOffset(), idx+colOffset()); } EIGEN_DEVICE_FUNC inline const typename internal::remove_all<typename MatrixType::Nested>::type& nestedExpression() const { return m_matrix; } EIGEN_DEVICE_FUNC inline Index index() const { return m_index.value(); } protected: typename internal::ref_selector<MatrixType>::non_const_type m_matrix; const internal::variable_if_dynamicindex<Index, DiagIndex> m_index; private: // some compilers may fail to optimize std::max etc in case of compile-time constants... EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? m_index.value() : 0; } // trigger a compile-time error if someone try to call packet template<int LoadMode> typename MatrixType::PacketReturnType packet(Index) const; template<int LoadMode> typename MatrixType::PacketReturnType packet(Index,Index) const; }; /** \returns an expression of the main diagonal of the matrix \c *this * * \c *this is not required to be square. * * Example: \include MatrixBase_diagonal.cpp * Output: \verbinclude MatrixBase_diagonal.out * * \sa class Diagonal */ template<typename Derived> inline typename MatrixBase<Derived>::DiagonalReturnType MatrixBase<Derived>::diagonal() { return DiagonalReturnType(derived()); } /** This is the const version of diagonal(). */ template<typename Derived> inline typename MatrixBase<Derived>::ConstDiagonalReturnType MatrixBase<Derived>::diagonal() const { return ConstDiagonalReturnType(derived()); } /** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this * * \c *this is not required to be square. * * The template parameter \a DiagIndex represent a super diagonal if \a DiagIndex > 0 * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal. 
* * Example: \include MatrixBase_diagonal_int.cpp * Output: \verbinclude MatrixBase_diagonal_int.out * * \sa MatrixBase::diagonal(), class Diagonal */ template<typename Derived> inline typename MatrixBase<Derived>::DiagonalDynamicIndexReturnType MatrixBase<Derived>::diagonal(Index index) { return DiagonalDynamicIndexReturnType(derived(), index); } /** This is the const version of diagonal(Index). */ template<typename Derived> inline typename MatrixBase<Derived>::ConstDiagonalDynamicIndexReturnType MatrixBase<Derived>::diagonal(Index index) const { return ConstDiagonalDynamicIndexReturnType(derived(), index); } /** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this * * \c *this is not required to be square. * * The template parameter \a DiagIndex represent a super diagonal if \a DiagIndex > 0 * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal. * * Example: \include MatrixBase_diagonal_template_int.cpp * Output: \verbinclude MatrixBase_diagonal_template_int.out * * \sa MatrixBase::diagonal(), class Diagonal */ template<typename Derived> template<int Index_> inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Index_>::Type MatrixBase<Derived>::diagonal() { return typename DiagonalIndexReturnType<Index_>::Type(derived()); } /** This is the const version of diagonal<int>(). */ template<typename Derived> template<int Index_> inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Index_>::Type MatrixBase<Derived>::diagonal() const { return typename ConstDiagonalIndexReturnType<Index_>::Type(derived()); } } // end namespace Eigen #endif // EIGEN_DIAGONAL_H
Unknown
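A short, self-contained sketch (not part of the stored record above) of the public entry points implemented in Diagonal.h: MatrixBase::diagonal(), the compile-time diagonal<Index>() variants, and the runtime diagonal(Index) overload. Sizes and values are illustrative assumptions.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix4d m = Eigen::Matrix4d::Random();

  // The main diagonal is a writable expression when the matrix is an lvalue.
  m.diagonal().setOnes();

  // Compile-time offsets: +1 selects the first super-diagonal, -1 the first sub-diagonal.
  Eigen::Vector3d super = m.diagonal<1>();
  Eigen::Vector3d sub   = m.diagonal<-1>();

  // Runtime offset: the same diagonal as diagonal<1>(), chosen at run time.
  Eigen::VectorXd alsoSuper = m.diagonal(1);

  std::cout << "super == alsoSuper: " << super.isApprox(alsoSuper) << "\n";
  std::cout << "sub:\n" << sub << "\n";
  return 0;
}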
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/CoreEvaluators.h
.h
62,197
1,689
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COREEVALUATORS_H #define EIGEN_COREEVALUATORS_H namespace Eigen { namespace internal { // This class returns the evaluator kind from the expression storage kind. // Default assumes index based accessors template<typename StorageKind> struct storage_kind_to_evaluator_kind { typedef IndexBased Kind; }; // This class returns the evaluator shape from the expression storage kind. // It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc. template<typename StorageKind> struct storage_kind_to_shape; template<> struct storage_kind_to_shape<Dense> { typedef DenseShape Shape; }; template<> struct storage_kind_to_shape<SolverStorage> { typedef SolverShape Shape; }; template<> struct storage_kind_to_shape<PermutationStorage> { typedef PermutationShape Shape; }; template<> struct storage_kind_to_shape<TranspositionsStorage> { typedef TranspositionsShape Shape; }; // Evaluators have to be specialized with respect to various criteria such as: // - storage/structure/shape // - scalar type // - etc. // Therefore, we need specialization of evaluator providing additional template arguments for each kind of evaluators. // We currently distinguish the following kind of evaluators: // - unary_evaluator for expressions taking only one arguments (CwiseUnaryOp, CwiseUnaryView, Transpose, MatrixWrapper, ArrayWrapper, Reverse, Replicate) // - binary_evaluator for expression taking two arguments (CwiseBinaryOp) // - ternary_evaluator for expression taking three arguments (CwiseTernaryOp) // - product_evaluator for linear algebra products (Product); special case of binary_evaluator because it requires additional tags for dispatching. 
// - mapbase_evaluator for Map, Block, Ref // - block_evaluator for Block (special dispatching to a mapbase_evaluator or unary_evaluator) template< typename T, typename Arg1Kind = typename evaluator_traits<typename T::Arg1>::Kind, typename Arg2Kind = typename evaluator_traits<typename T::Arg2>::Kind, typename Arg3Kind = typename evaluator_traits<typename T::Arg3>::Kind, typename Arg1Scalar = typename traits<typename T::Arg1>::Scalar, typename Arg2Scalar = typename traits<typename T::Arg2>::Scalar, typename Arg3Scalar = typename traits<typename T::Arg3>::Scalar> struct ternary_evaluator; template< typename T, typename LhsKind = typename evaluator_traits<typename T::Lhs>::Kind, typename RhsKind = typename evaluator_traits<typename T::Rhs>::Kind, typename LhsScalar = typename traits<typename T::Lhs>::Scalar, typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct binary_evaluator; template< typename T, typename Kind = typename evaluator_traits<typename T::NestedExpression>::Kind, typename Scalar = typename T::Scalar> struct unary_evaluator; // evaluator_traits<T> contains traits for evaluator<T> template<typename T> struct evaluator_traits_base { // by default, get evaluator kind and shape from storage typedef typename storage_kind_to_evaluator_kind<typename traits<T>::StorageKind>::Kind Kind; typedef typename storage_kind_to_shape<typename traits<T>::StorageKind>::Shape Shape; }; // Default evaluator traits template<typename T> struct evaluator_traits : public evaluator_traits_base<T> { }; template<typename T, typename Shape = typename evaluator_traits<T>::Shape > struct evaluator_assume_aliasing { static const bool value = false; }; // By default, we assume a unary expression: template<typename T> struct evaluator : public unary_evaluator<T> { typedef unary_evaluator<T> Base; EIGEN_DEVICE_FUNC explicit evaluator(const T& xpr) : Base(xpr) {} }; // TODO: Think about const-correctness template<typename T> struct evaluator<const T> : evaluator<T> { EIGEN_DEVICE_FUNC explicit evaluator(const T& xpr) : evaluator<T>(xpr) {} }; // ---------- base class for all evaluators ---------- template<typename ExpressionType> struct evaluator_base : public noncopyable { // TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer,inner indices. typedef traits<ExpressionType> ExpressionTraits; enum { Alignment = 0 }; }; // -------------------- Matrix and Array -------------------- // // evaluator<PlainObjectBase> is a common base class for the // Matrix and Array evaluators. // Here we directly specialize evaluator. This is not really a unary expression, and it is, by definition, dense, // so no need for more sophisticated dispatching. template<typename Derived> struct evaluator<PlainObjectBase<Derived> > : evaluator_base<Derived> { typedef PlainObjectBase<Derived> PlainObjectType; typedef typename PlainObjectType::Scalar Scalar; typedef typename PlainObjectType::CoeffReturnType CoeffReturnType; enum { IsRowMajor = PlainObjectType::IsRowMajor, IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime, RowsAtCompileTime = PlainObjectType::RowsAtCompileTime, ColsAtCompileTime = PlainObjectType::ColsAtCompileTime, CoeffReadCost = NumTraits<Scalar>::ReadCost, Flags = traits<Derived>::EvaluatorFlags, Alignment = traits<Derived>::Alignment }; EIGEN_DEVICE_FUNC evaluator() : m_data(0), m_outerStride(IsVectorAtCompileTime ? 0 : int(IsRowMajor) ? 
ColsAtCompileTime : RowsAtCompileTime) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } EIGEN_DEVICE_FUNC explicit evaluator(const PlainObjectType& m) : m_data(m.data()), m_outerStride(IsVectorAtCompileTime ? 0 : m.outerStride()) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { if (IsRowMajor) return m_data[row * m_outerStride.value() + col]; else return m_data[row + col * m_outerStride.value()]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_data[index]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { if (IsRowMajor) return const_cast<Scalar*>(m_data)[row * m_outerStride.value() + col]; else return const_cast<Scalar*>(m_data)[row + col * m_outerStride.value()]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return const_cast<Scalar*>(m_data)[index]; } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { if (IsRowMajor) return ploadt<PacketType, LoadMode>(m_data + row * m_outerStride.value() + col); else return ploadt<PacketType, LoadMode>(m_data + row + col * m_outerStride.value()); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index index) const { return ploadt<PacketType, LoadMode>(m_data + index); } template<int StoreMode,typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { if (IsRowMajor) return pstoret<Scalar, PacketType, StoreMode> (const_cast<Scalar*>(m_data) + row * m_outerStride.value() + col, x); else return pstoret<Scalar, PacketType, StoreMode> (const_cast<Scalar*>(m_data) + row + col * m_outerStride.value(), x); } template<int StoreMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { return pstoret<Scalar, PacketType, StoreMode>(const_cast<Scalar*>(m_data) + index, x); } protected: const Scalar *m_data; // We do not need to know the outer stride for vectors variable_if_dynamic<Index, IsVectorAtCompileTime ? 0 : int(IsRowMajor) ? 
ColsAtCompileTime : RowsAtCompileTime> m_outerStride; }; template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols> struct evaluator<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > : evaluator<PlainObjectBase<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > > { typedef Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType; EIGEN_DEVICE_FUNC evaluator() {} EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m) : evaluator<PlainObjectBase<XprType> >(m) { } }; template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols> struct evaluator<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > : evaluator<PlainObjectBase<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > > { typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType; EIGEN_DEVICE_FUNC evaluator() {} EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m) : evaluator<PlainObjectBase<XprType> >(m) { } }; // -------------------- Transpose -------------------- template<typename ArgType> struct unary_evaluator<Transpose<ArgType>, IndexBased> : evaluator_base<Transpose<ArgType> > { typedef Transpose<ArgType> XprType; enum { CoeffReadCost = evaluator<ArgType>::CoeffReadCost, Flags = evaluator<ArgType>::Flags ^ RowMajorBit, Alignment = evaluator<ArgType>::Alignment }; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {} typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_argImpl.coeff(col, row); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(index); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(col, row); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename XprType::Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(index); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_argImpl.template packet<LoadMode,PacketType>(col, row); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_argImpl.template packet<LoadMode,PacketType>(index); } template<int StoreMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { m_argImpl.template writePacket<StoreMode,PacketType>(col, row, x); } template<int StoreMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { m_argImpl.template writePacket<StoreMode,PacketType>(index, x); } protected: evaluator<ArgType> m_argImpl; }; // -------------------- CwiseNullaryOp -------------------- // Like Matrix and Array, this is not really a unary expression, so we directly specialize evaluator. // Likewise, there is not need to more sophisticated dispatching here. 
template<typename Scalar,typename NullaryOp, bool has_nullary = has_nullary_operator<NullaryOp>::value, bool has_unary = has_unary_operator<NullaryOp>::value, bool has_binary = has_binary_operator<NullaryOp>::value> struct nullary_wrapper { template <typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return op(i,j); } template <typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); } template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return op.template packetOp<T>(i,j); } template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); } }; template<typename Scalar,typename NullaryOp> struct nullary_wrapper<Scalar,NullaryOp,true,false,false> { template <typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType=0, IndexType=0) const { return op(); } template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType=0, IndexType=0) const { return op.template packetOp<T>(); } }; template<typename Scalar,typename NullaryOp> struct nullary_wrapper<Scalar,NullaryOp,false,false,true> { template <typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j=0) const { return op(i,j); } template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j=0) const { return op.template packetOp<T>(i,j); } }; // We need the following specialization for vector-only functors assigned to a runtime vector, // for instance, using linspace and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd. // In this case, i==0 and j is used for the actual iteration. template<typename Scalar,typename NullaryOp> struct nullary_wrapper<Scalar,NullaryOp,false,true,false> { template <typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { eigen_assert(i==0 || j==0); return op(i+j); } template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { eigen_assert(i==0 || j==0); return op.template packetOp<T>(i+j); } template <typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); } template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); } }; template<typename Scalar,typename NullaryOp> struct nullary_wrapper<Scalar,NullaryOp,false,false,false> {}; #if 0 && EIGEN_COMP_MSVC>0 // Disable this ugly workaround. This is now handled in traits<Ref>::match, // but this piece of code might still become handly if some other weird compilation // erros pop up again. // MSVC exhibits a weird compilation error when // compiling: // Eigen::MatrixXf A = MatrixXf::Random(3,3); // Ref<const MatrixXf> R = 2.f*A; // and that has_*ary_operator<scalar_constant_op<float>> have not been instantiated yet. 
// The "problem" is that evaluator<2.f*A> is instantiated by traits<Ref>::match<2.f*A> // and at that time has_*ary_operator<T> returns true regardless of T. // Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>. // The trick is thus to defer the proper instantiation of nullary_wrapper when coeff(), // and packet() are really instantiated as implemented below: // This is a simple wrapper around Index to enforce the re-instantiation of // has_*ary_operator when needed. template<typename T> struct nullary_wrapper_workaround_msvc { nullary_wrapper_workaround_msvc(const T&); operator T()const; }; template<typename Scalar,typename NullaryOp> struct nullary_wrapper<Scalar,NullaryOp,true,true,true> { template <typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return nullary_wrapper<Scalar,NullaryOp, has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value, has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value, has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i,j); } template <typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return nullary_wrapper<Scalar,NullaryOp, has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value, has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value, has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i); } template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return nullary_wrapper<Scalar,NullaryOp, has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value, has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value, has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i,j); } template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return nullary_wrapper<Scalar,NullaryOp, has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value, has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value, has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i); } }; #endif // MSVC workaround template<typename NullaryOp, typename PlainObjectType> struct evaluator<CwiseNullaryOp<NullaryOp,PlainObjectType> > : evaluator_base<CwiseNullaryOp<NullaryOp,PlainObjectType> > { typedef CwiseNullaryOp<NullaryOp,PlainObjectType> XprType; typedef typename internal::remove_all<PlainObjectType>::type PlainObjectTypeCleaned; enum { CoeffReadCost = internal::functor_traits<NullaryOp>::Cost, Flags = (evaluator<PlainObjectTypeCleaned>::Flags & ( HereditaryBits | (functor_has_linear_access<NullaryOp>::ret ? LinearAccessBit : 0) | (functor_traits<NullaryOp>::PacketAccess ? PacketAccessBit : 0))) | (functor_traits<NullaryOp>::IsRepeatable ? 
0 : EvalBeforeNestingBit), Alignment = AlignedMax }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n) : m_functor(n.functor()), m_wrapper() { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; template <typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(IndexType row, IndexType col) const { return m_wrapper(m_functor, row, col); } template <typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(IndexType index) const { return m_wrapper(m_functor,index); } template<int LoadMode, typename PacketType, typename IndexType> EIGEN_STRONG_INLINE PacketType packet(IndexType row, IndexType col) const { return m_wrapper.template packetOp<PacketType>(m_functor, row, col); } template<int LoadMode, typename PacketType, typename IndexType> EIGEN_STRONG_INLINE PacketType packet(IndexType index) const { return m_wrapper.template packetOp<PacketType>(m_functor, index); } protected: const NullaryOp m_functor; const internal::nullary_wrapper<CoeffReturnType,NullaryOp> m_wrapper; }; // -------------------- CwiseUnaryOp -------------------- template<typename UnaryOp, typename ArgType> struct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased > : evaluator_base<CwiseUnaryOp<UnaryOp, ArgType> > { typedef CwiseUnaryOp<UnaryOp, ArgType> XprType; enum { CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost, Flags = evaluator<ArgType>::Flags & (HereditaryBits | LinearAccessBit | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)), Alignment = evaluator<ArgType>::Alignment }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression()) { EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_functor(m_argImpl.coeff(row, col)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_functor(m_argImpl.coeff(index)); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_functor.packetOp(m_argImpl.template packet<LoadMode, PacketType>(row, col)); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_functor.packetOp(m_argImpl.template packet<LoadMode, PacketType>(index)); } protected: const UnaryOp m_functor; evaluator<ArgType> m_argImpl; }; // -------------------- CwiseTernaryOp -------------------- // this is a ternary expression template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3> struct evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > : public ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > { typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType; typedef ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > Base; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {} }; template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3> struct ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>, IndexBased, IndexBased> : evaluator_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > { typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType; enum { CoeffReadCost = evaluator<Arg1>::CoeffReadCost + 
evaluator<Arg2>::CoeffReadCost + evaluator<Arg3>::CoeffReadCost + functor_traits<TernaryOp>::Cost, Arg1Flags = evaluator<Arg1>::Flags, Arg2Flags = evaluator<Arg2>::Flags, Arg3Flags = evaluator<Arg3>::Flags, SameType = is_same<typename Arg1::Scalar,typename Arg2::Scalar>::value && is_same<typename Arg1::Scalar,typename Arg3::Scalar>::value, StorageOrdersAgree = (int(Arg1Flags)&RowMajorBit)==(int(Arg2Flags)&RowMajorBit) && (int(Arg1Flags)&RowMajorBit)==(int(Arg3Flags)&RowMajorBit), Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) & ( HereditaryBits | (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) & ( (StorageOrdersAgree ? LinearAccessBit : 0) | (functor_traits<TernaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0) ) ) ), Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit), Alignment = EIGEN_PLAIN_ENUM_MIN( EIGEN_PLAIN_ENUM_MIN(evaluator<Arg1>::Alignment, evaluator<Arg2>::Alignment), evaluator<Arg3>::Alignment) }; EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr) : m_functor(xpr.functor()), m_arg1Impl(xpr.arg1()), m_arg2Impl(xpr.arg2()), m_arg3Impl(xpr.arg3()) { EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<TernaryOp>::Cost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_functor(m_arg1Impl.coeff(row, col), m_arg2Impl.coeff(row, col), m_arg3Impl.coeff(row, col)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_functor(m_arg1Impl.coeff(index), m_arg2Impl.coeff(index), m_arg3Impl.coeff(index)); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_functor.packetOp(m_arg1Impl.template packet<LoadMode,PacketType>(row, col), m_arg2Impl.template packet<LoadMode,PacketType>(row, col), m_arg3Impl.template packet<LoadMode,PacketType>(row, col)); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_functor.packetOp(m_arg1Impl.template packet<LoadMode,PacketType>(index), m_arg2Impl.template packet<LoadMode,PacketType>(index), m_arg3Impl.template packet<LoadMode,PacketType>(index)); } protected: const TernaryOp m_functor; evaluator<Arg1> m_arg1Impl; evaluator<Arg2> m_arg2Impl; evaluator<Arg3> m_arg3Impl; }; // -------------------- CwiseBinaryOp -------------------- // this is a binary expression template<typename BinaryOp, typename Lhs, typename Rhs> struct evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > : public binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > { typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType; typedef binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > Base; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {} }; template<typename BinaryOp, typename Lhs, typename Rhs> struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBased> : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > { typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType; enum { CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost, LhsFlags = evaluator<Lhs>::Flags, RhsFlags = evaluator<Rhs>::Flags, SameType = is_same<typename Lhs::Scalar,typename Rhs::Scalar>::value, StorageOrdersAgree = (int(LhsFlags)&RowMajorBit)==(int(RhsFlags)&RowMajorBit), Flags0 = (int(LhsFlags) | int(RhsFlags)) & ( HereditaryBits | (int(LhsFlags) & 
int(RhsFlags) & ( (StorageOrdersAgree ? LinearAccessBit : 0) | (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0) ) ) ), Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit), Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<Lhs>::Alignment,evaluator<Rhs>::Alignment) }; EIGEN_DEVICE_FUNC explicit binary_evaluator(const XprType& xpr) : m_functor(xpr.functor()), m_lhsImpl(xpr.lhs()), m_rhsImpl(xpr.rhs()) { EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_functor(m_lhsImpl.coeff(row, col), m_rhsImpl.coeff(row, col)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_functor(m_lhsImpl.coeff(index), m_rhsImpl.coeff(index)); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_functor.packetOp(m_lhsImpl.template packet<LoadMode,PacketType>(row, col), m_rhsImpl.template packet<LoadMode,PacketType>(row, col)); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_functor.packetOp(m_lhsImpl.template packet<LoadMode,PacketType>(index), m_rhsImpl.template packet<LoadMode,PacketType>(index)); } protected: const BinaryOp m_functor; evaluator<Lhs> m_lhsImpl; evaluator<Rhs> m_rhsImpl; }; // -------------------- CwiseUnaryView -------------------- template<typename UnaryOp, typename ArgType> struct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType>, IndexBased> : evaluator_base<CwiseUnaryView<UnaryOp, ArgType> > { typedef CwiseUnaryView<UnaryOp, ArgType> XprType; enum { CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost, Flags = (evaluator<ArgType>::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)), Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost... }; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op) : m_unaryOp(op.functor()), m_argImpl(op.nestedExpression()) { EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_unaryOp(m_argImpl.coeff(row, col)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_unaryOp(m_argImpl.coeff(index)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_unaryOp(m_argImpl.coeffRef(row, col)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_unaryOp(m_argImpl.coeffRef(index)); } protected: const UnaryOp m_unaryOp; evaluator<ArgType> m_argImpl; }; // -------------------- Map -------------------- // FIXME perhaps the PlainObjectType could be provided by Derived::PlainObject ? 
// but that might complicate template specialization template<typename Derived, typename PlainObjectType> struct mapbase_evaluator; template<typename Derived, typename PlainObjectType> struct mapbase_evaluator : evaluator_base<Derived> { typedef Derived XprType; typedef typename XprType::PointerType PointerType; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; enum { IsRowMajor = XprType::RowsAtCompileTime, ColsAtCompileTime = XprType::ColsAtCompileTime, CoeffReadCost = NumTraits<Scalar>::ReadCost }; EIGEN_DEVICE_FUNC explicit mapbase_evaluator(const XprType& map) : m_data(const_cast<PointerType>(map.data())), m_innerStride(map.innerStride()), m_outerStride(map.outerStride()) { EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(evaluator<Derived>::Flags&PacketAccessBit, internal::inner_stride_at_compile_time<Derived>::ret==1), PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_data[col * colStride() + row * rowStride()]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_data[index * m_innerStride.value()]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_data[col * colStride() + row * rowStride()]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_data[index * m_innerStride.value()]; } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { PointerType ptr = m_data + row * rowStride() + col * colStride(); return internal::ploadt<PacketType, LoadMode>(ptr); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index index) const { return internal::ploadt<PacketType, LoadMode>(m_data + index * m_innerStride.value()); } template<int StoreMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { PointerType ptr = m_data + row * rowStride() + col * colStride(); return internal::pstoret<Scalar, PacketType, StoreMode>(ptr, x); } template<int StoreMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { internal::pstoret<Scalar, PacketType, StoreMode>(m_data + index * m_innerStride.value(), x); } protected: EIGEN_DEVICE_FUNC inline Index rowStride() const { return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value(); } EIGEN_DEVICE_FUNC inline Index colStride() const { return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value(); } PointerType m_data; const internal::variable_if_dynamic<Index, XprType::InnerStrideAtCompileTime> m_innerStride; const internal::variable_if_dynamic<Index, XprType::OuterStrideAtCompileTime> m_outerStride; }; template<typename PlainObjectType, int MapOptions, typename StrideType> struct evaluator<Map<PlainObjectType, MapOptions, StrideType> > : public mapbase_evaluator<Map<PlainObjectType, MapOptions, StrideType>, PlainObjectType> { typedef Map<PlainObjectType, MapOptions, StrideType> XprType; typedef typename XprType::Scalar Scalar; // TODO: should check for smaller packet types once we can handle multi-sized packet types typedef typename packet_traits<Scalar>::type PacketScalar; enum { InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0 ? 
int(PlainObjectType::InnerStrideAtCompileTime) : int(StrideType::InnerStrideAtCompileTime), OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0 ? int(PlainObjectType::OuterStrideAtCompileTime) : int(StrideType::OuterStrideAtCompileTime), HasNoInnerStride = InnerStrideAtCompileTime == 1, HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0, HasNoStride = HasNoInnerStride && HasNoOuterStride, IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic, PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit), LinearAccessMask = bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? ~int(0) : ~int(LinearAccessBit), Flags = int( evaluator<PlainObjectType>::Flags) & (LinearAccessMask&PacketAccessMask), Alignment = int(MapOptions)&int(AlignedMask) }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map) : mapbase_evaluator<XprType, PlainObjectType>(map) { } }; // -------------------- Ref -------------------- template<typename PlainObjectType, int RefOptions, typename StrideType> struct evaluator<Ref<PlainObjectType, RefOptions, StrideType> > : public mapbase_evaluator<Ref<PlainObjectType, RefOptions, StrideType>, PlainObjectType> { typedef Ref<PlainObjectType, RefOptions, StrideType> XprType; enum { Flags = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Flags, Alignment = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Alignment }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& ref) : mapbase_evaluator<XprType, PlainObjectType>(ref) { } }; // -------------------- Block -------------------- template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess = internal::has_direct_access<ArgType>::ret> struct block_evaluator; template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel> struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> > : block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> { typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType; typedef typename XprType::Scalar Scalar; // TODO: should check for smaller packet types once we can handle multi-sized packet types typedef typename packet_traits<Scalar>::type PacketScalar; enum { CoeffReadCost = evaluator<ArgType>::CoeffReadCost, RowsAtCompileTime = traits<XprType>::RowsAtCompileTime, ColsAtCompileTime = traits<XprType>::ColsAtCompileTime, MaxRowsAtCompileTime = traits<XprType>::MaxRowsAtCompileTime, MaxColsAtCompileTime = traits<XprType>::MaxColsAtCompileTime, ArgTypeIsRowMajor = (int(evaluator<ArgType>::Flags)&RowMajorBit) != 0, IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1 : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0 : ArgTypeIsRowMajor, HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor), InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime), InnerStrideAtCompileTime = HasSameStorageOrderAsArgType ? int(inner_stride_at_compile_time<ArgType>::ret) : int(outer_stride_at_compile_time<ArgType>::ret), OuterStrideAtCompileTime = HasSameStorageOrderAsArgType ? int(outer_stride_at_compile_time<ArgType>::ret) : int(inner_stride_at_compile_time<ArgType>::ret), MaskPacketAccessBit = (InnerStrideAtCompileTime == 1 || HasSameStorageOrderAsArgType) ? PacketAccessBit : 0, FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator<ArgType>::Flags&LinearAccessBit))) ? 
LinearAccessBit : 0, FlagsRowMajorBit = XprType::Flags&RowMajorBit, Flags0 = evaluator<ArgType>::Flags & ( (HereditaryBits & ~RowMajorBit) | DirectAccessBit | MaskPacketAccessBit), Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit, PacketAlignment = unpacket_traits<PacketScalar>::alignment, Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (OuterStrideAtCompileTime!=0) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0, Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ArgType>::Alignment, Alignment0) }; typedef block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> block_evaluator_type; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& block) : block_evaluator_type(block) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } }; // no direct-access => dispatch to a unary evaluator template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel> struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /*HasDirectAccess*/ false> : unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> > { typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType; EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block) : unary_evaluator<XprType>(block) {} }; template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel> struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBased> : evaluator_base<Block<ArgType, BlockRows, BlockCols, InnerPanel> > { typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& block) : m_argImpl(block.nestedExpression()), m_startRow(block.startRow()), m_startCol(block.startCol()), m_linear_offset(InnerPanel?(XprType::IsRowMajor ? block.startRow()*block.cols() : block.startCol()*block.rows()):0) { } typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; enum { RowsAtCompileTime = XprType::RowsAtCompileTime, ForwardLinearAccess = InnerPanel && bool(evaluator<ArgType>::Flags&LinearAccessBit) }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { if (ForwardLinearAccess) return m_argImpl.coeff(m_linear_offset.value() + index); else return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { if (ForwardLinearAccess) return m_argImpl.coeffRef(m_linear_offset.value() + index); else return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_argImpl.template packet<LoadMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index index) const { if (ForwardLinearAccess) return m_argImpl.template packet<LoadMode,PacketType>(m_linear_offset.value() + index); else return packet<LoadMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? 
index : 0); } template<int StoreMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { return m_argImpl.template writePacket<StoreMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col, x); } template<int StoreMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { if (ForwardLinearAccess) return m_argImpl.template writePacket<StoreMode,PacketType>(m_linear_offset.value() + index, x); else return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0, x); } protected: evaluator<ArgType> m_argImpl; const variable_if_dynamic<Index, (ArgType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow; const variable_if_dynamic<Index, (ArgType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol; const variable_if_dynamic<Index, InnerPanel ? Dynamic : 0> m_linear_offset; }; // TODO: This evaluator does not actually use the child evaluator; // all action is via the data() as returned by the Block expression. template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel> struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAccess */ true> : mapbase_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, typename Block<ArgType, BlockRows, BlockCols, InnerPanel>::PlainObject> { typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType; typedef typename XprType::Scalar Scalar; EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block) : mapbase_evaluator<XprType, typename XprType::PlainObject>(block) { // TODO: for the 3.3 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime eigen_assert(((internal::UIntPtr(block.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator<XprType>::Alignment)) == 0) && "data is not aligned"); } }; // -------------------- Select -------------------- // NOTE shall we introduce a ternary_evaluator? 
// TODO enable vectorization for Select template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType> struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> > : evaluator_base<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> > { typedef Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> XprType; enum { CoeffReadCost = evaluator<ConditionMatrixType>::CoeffReadCost + EIGEN_PLAIN_ENUM_MAX(evaluator<ThenMatrixType>::CoeffReadCost, evaluator<ElseMatrixType>::CoeffReadCost), Flags = (unsigned int)evaluator<ThenMatrixType>::Flags & evaluator<ElseMatrixType>::Flags & HereditaryBits, Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ThenMatrixType>::Alignment, evaluator<ElseMatrixType>::Alignment) }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& select) : m_conditionImpl(select.conditionMatrix()), m_thenImpl(select.thenMatrix()), m_elseImpl(select.elseMatrix()) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { if (m_conditionImpl.coeff(row, col)) return m_thenImpl.coeff(row, col); else return m_elseImpl.coeff(row, col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { if (m_conditionImpl.coeff(index)) return m_thenImpl.coeff(index); else return m_elseImpl.coeff(index); } protected: evaluator<ConditionMatrixType> m_conditionImpl; evaluator<ThenMatrixType> m_thenImpl; evaluator<ElseMatrixType> m_elseImpl; }; // -------------------- Replicate -------------------- template<typename ArgType, int RowFactor, int ColFactor> struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> > : evaluator_base<Replicate<ArgType, RowFactor, ColFactor> > { typedef Replicate<ArgType, RowFactor, ColFactor> XprType; typedef typename XprType::CoeffReturnType CoeffReturnType; enum { Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor }; typedef typename internal::nested_eval<ArgType,Factor>::type ArgTypeNested; typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned; enum { CoeffReadCost = evaluator<ArgTypeNestedCleaned>::CoeffReadCost, LinearAccessMask = XprType::IsVectorAtCompileTime ? LinearAccessBit : 0, Flags = (evaluator<ArgTypeNestedCleaned>::Flags & (HereditaryBits|LinearAccessMask) & ~RowMajorBit) | (traits<XprType>::Flags & RowMajorBit), Alignment = evaluator<ArgTypeNestedCleaned>::Alignment }; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& replicate) : m_arg(replicate.nestedExpression()), m_argImpl(m_arg), m_rows(replicate.nestedExpression().rows()), m_cols(replicate.nestedExpression().cols()) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { // try to avoid using modulo; this is a pure optimization strategy const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0 : RowFactor==1 ? row : row % m_rows.value(); const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0 : ColFactor==1 ? col : col % m_cols.value(); return m_argImpl.coeff(actual_row, actual_col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { // try to avoid using modulo; this is a pure optimization strategy const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1 ? (ColFactor==1 ? index : index%m_cols.value()) : (RowFactor==1 ? 
index : index%m_rows.value()); return m_argImpl.coeff(actual_index); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0 : RowFactor==1 ? row : row % m_rows.value(); const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0 : ColFactor==1 ? col : col % m_cols.value(); return m_argImpl.template packet<LoadMode,PacketType>(actual_row, actual_col); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index index) const { const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1 ? (ColFactor==1 ? index : index%m_cols.value()) : (RowFactor==1 ? index : index%m_rows.value()); return m_argImpl.template packet<LoadMode,PacketType>(actual_index); } protected: const ArgTypeNested m_arg; evaluator<ArgTypeNestedCleaned> m_argImpl; const variable_if_dynamic<Index, ArgType::RowsAtCompileTime> m_rows; const variable_if_dynamic<Index, ArgType::ColsAtCompileTime> m_cols; }; // -------------------- PartialReduxExpr -------------------- template< typename ArgType, typename MemberOp, int Direction> struct evaluator<PartialReduxExpr<ArgType, MemberOp, Direction> > : evaluator_base<PartialReduxExpr<ArgType, MemberOp, Direction> > { typedef PartialReduxExpr<ArgType, MemberOp, Direction> XprType; typedef typename internal::nested_eval<ArgType,1>::type ArgTypeNested; typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned; typedef typename ArgType::Scalar InputScalar; typedef typename XprType::Scalar Scalar; enum { TraversalSize = Direction==int(Vertical) ? int(ArgType::RowsAtCompileTime) : int(ArgType::ColsAtCompileTime) }; typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType; enum { CoeffReadCost = TraversalSize==Dynamic ? HugeCost : TraversalSize * evaluator<ArgType>::CoeffReadCost + int(CostOpType::value), Flags = (traits<XprType>::Flags&RowMajorBit) | (evaluator<ArgType>::Flags&(HereditaryBits&(~RowMajorBit))) | LinearAccessBit, Alignment = 0 // FIXME this will need to be improved once PartialReduxExpr is vectorized }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType xpr) : m_arg(xpr.nestedExpression()), m_functor(xpr.functor()) { EIGEN_INTERNAL_CHECK_COST_VALUE(TraversalSize==Dynamic ? HugeCost : int(CostOpType::value)); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index i, Index j) const { if (Direction==Vertical) return m_functor(m_arg.col(j)); else return m_functor(m_arg.row(i)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index index) const { if (Direction==Vertical) return m_functor(m_arg.col(index)); else return m_functor(m_arg.row(index)); } protected: typename internal::add_const_on_value_type<ArgTypeNested>::type m_arg; const MemberOp m_functor; }; // -------------------- MatrixWrapper and ArrayWrapper -------------------- // // evaluator_wrapper_base<T> is a common base class for the // MatrixWrapper and ArrayWrapper evaluators. 
template<typename XprType> struct evaluator_wrapper_base : evaluator_base<XprType> { typedef typename remove_all<typename XprType::NestedExpressionType>::type ArgType; enum { CoeffReadCost = evaluator<ArgType>::CoeffReadCost, Flags = evaluator<ArgType>::Flags, Alignment = evaluator<ArgType>::Alignment }; EIGEN_DEVICE_FUNC explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {} typedef typename ArgType::Scalar Scalar; typedef typename ArgType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_argImpl.coeff(row, col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(index); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(row, col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(index); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_argImpl.template packet<LoadMode,PacketType>(row, col); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_argImpl.template packet<LoadMode,PacketType>(index); } template<int StoreMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { m_argImpl.template writePacket<StoreMode>(row, col, x); } template<int StoreMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { m_argImpl.template writePacket<StoreMode>(index, x); } protected: evaluator<ArgType> m_argImpl; }; template<typename TArgType> struct unary_evaluator<MatrixWrapper<TArgType> > : evaluator_wrapper_base<MatrixWrapper<TArgType> > { typedef MatrixWrapper<TArgType> XprType; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper) : evaluator_wrapper_base<MatrixWrapper<TArgType> >(wrapper.nestedExpression()) { } }; template<typename TArgType> struct unary_evaluator<ArrayWrapper<TArgType> > : evaluator_wrapper_base<ArrayWrapper<TArgType> > { typedef ArrayWrapper<TArgType> XprType; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper) : evaluator_wrapper_base<ArrayWrapper<TArgType> >(wrapper.nestedExpression()) { } }; // -------------------- Reverse -------------------- // defined in Reverse.h: template<typename PacketType, bool ReversePacket> struct reverse_packet_cond; template<typename ArgType, int Direction> struct unary_evaluator<Reverse<ArgType, Direction> > : evaluator_base<Reverse<ArgType, Direction> > { typedef Reverse<ArgType, Direction> XprType; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; enum { IsRowMajor = XprType::IsRowMajor, IsColMajor = !IsRowMajor, ReverseRow = (Direction == Vertical) || (Direction == BothDirections), ReverseCol = (Direction == Horizontal) || (Direction == BothDirections), ReversePacket = (Direction == BothDirections) || ((Direction == Vertical) && IsColMajor) || ((Direction == Horizontal) && IsRowMajor), CoeffReadCost = evaluator<ArgType>::CoeffReadCost, // let's enable LinearAccess only with vectorization because of the product overhead // FIXME enable DirectAccess with negative strides? Flags0 = evaluator<ArgType>::Flags, LinearAccess = ( (Direction==BothDirections) && (int(Flags0)&PacketAccessBit) ) || ((ReverseRow && XprType::ColsAtCompileTime==1) || (ReverseCol && XprType::RowsAtCompileTime==1)) ? 
LinearAccessBit : 0, Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess), Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f. }; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& reverse) : m_argImpl(reverse.nestedExpression()), m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1), m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1) { } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row, ReverseCol ? m_cols.value() - col - 1 : col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row, ReverseCol ? m_cols.value() - col - 1 : col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { enum { PacketSize = unpacket_traits<PacketType>::size, OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1 }; typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet; return reverse_packet::run(m_argImpl.template packet<LoadMode,PacketType>( ReverseRow ? m_rows.value() - row - OffsetRow : row, ReverseCol ? m_cols.value() - col - OffsetCol : col)); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index index) const { enum { PacketSize = unpacket_traits<PacketType>::size }; return preverse(m_argImpl.template packet<LoadMode,PacketType>(m_rows.value() * m_cols.value() - index - PacketSize)); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { // FIXME we could factorize some code with packet(i,j) enum { PacketSize = unpacket_traits<PacketType>::size, OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1 }; typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet; m_argImpl.template writePacket<LoadMode>( ReverseRow ? m_rows.value() - row - OffsetRow : row, ReverseCol ? m_cols.value() - col - OffsetCol : col, reverse_packet::run(x)); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { enum { PacketSize = unpacket_traits<PacketType>::size }; m_argImpl.template writePacket<LoadMode> (m_rows.value() * m_cols.value() - index - PacketSize, preverse(x)); } protected: evaluator<ArgType> m_argImpl; // If we do not reverse rows, then we do not need to know the number of rows; same for columns // Nonetheless, in this case it is important to set to 1 such that the coeff(index) method works fine for vectors. const variable_if_dynamic<Index, ReverseRow ? ArgType::RowsAtCompileTime : 1> m_rows; const variable_if_dynamic<Index, ReverseCol ? 
ArgType::ColsAtCompileTime : 1> m_cols; }; // -------------------- Diagonal -------------------- template<typename ArgType, int DiagIndex> struct evaluator<Diagonal<ArgType, DiagIndex> > : evaluator_base<Diagonal<ArgType, DiagIndex> > { typedef Diagonal<ArgType, DiagIndex> XprType; enum { CoeffReadCost = evaluator<ArgType>::CoeffReadCost, Flags = (unsigned int)(evaluator<ArgType>::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit, Alignment = 0 }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& diagonal) : m_argImpl(diagonal.nestedExpression()), m_index(diagonal.index()) { } typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index) const { return m_argImpl.coeff(row + rowOffset(), row + colOffset()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(index + rowOffset(), index + colOffset()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index) { return m_argImpl.coeffRef(row + rowOffset(), row + colOffset()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(index + rowOffset(), index + colOffset()); } protected: evaluator<ArgType> m_argImpl; const internal::variable_if_dynamicindex<Index, XprType::DiagIndex> m_index; private: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; } }; //---------------------------------------------------------------------- // deprecated code //---------------------------------------------------------------------- // -------------------- EvalToTemp -------------------- // expression class for evaluating nested expression to a temporary template<typename ArgType> class EvalToTemp; template<typename ArgType> struct traits<EvalToTemp<ArgType> > : public traits<ArgType> { }; template<typename ArgType> class EvalToTemp : public dense_xpr_base<EvalToTemp<ArgType> >::type { public: typedef typename dense_xpr_base<EvalToTemp>::type Base; EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp) explicit EvalToTemp(const ArgType& arg) : m_arg(arg) { } const ArgType& arg() const { return m_arg; } Index rows() const { return m_arg.rows(); } Index cols() const { return m_arg.cols(); } private: const ArgType& m_arg; }; template<typename ArgType> struct evaluator<EvalToTemp<ArgType> > : public evaluator<typename ArgType::PlainObject> { typedef EvalToTemp<ArgType> XprType; typedef typename ArgType::PlainObject PlainObject; typedef evaluator<PlainObject> Base; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : m_result(xpr.arg()) { ::new (static_cast<Base*>(this)) Base(m_result); } // This constructor is used when nesting an EvalTo evaluator in another evaluator EIGEN_DEVICE_FUNC evaluator(const ArgType& arg) : m_result(arg) { ::new (static_cast<Base*>(this)) Base(m_result); } protected: PlainObject m_result; }; } // namespace internal } // end namespace Eigen #endif // EIGEN_COREEVALUATORS_H
Unknown
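A minimal usage sketch, assuming the Eigen 3.3 headers bundled in this repository; the program, matrix sizes, and values are illustrative assumptions and not code from the files above. It exercises the user-facing expressions whose evaluators appear in the CoreEvaluators.h excerpt: Replicate, PartialReduxExpr (column-wise reductions), ArrayWrapper, Reverse, and Diagonal.

// Illustrative sketch (assumed example): expressions backed by the evaluators above.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d m;
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;

  Eigen::RowVector3d colSums = m.colwise().sum(); // PartialReduxExpr: one functor call per column
  Eigen::Matrix3d r = m.reverse();                // Reverse: coefficients read with mirrored indices
  Eigen::Vector2d superDiag = m.diagonal<1>();    // Diagonal: row/column offsets derived from the diagonal index
  Eigen::ArrayXXd a = m.array() * 2.0;            // ArrayWrapper: forwards coefficient access to the wrapped matrix
  Eigen::MatrixXd rep = m.replicate(2, 2);        // Replicate: indices wrapped modulo the argument's size

  std::cout << colSums << "\n" << r << "\n" << superDiag.transpose() << "\n"
            << a << "\n" << rep << "\n";
  return 0;
}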
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/ProductEvaluators.h
.h
51,070
1,139
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PRODUCTEVALUATORS_H #define EIGEN_PRODUCTEVALUATORS_H namespace Eigen { namespace internal { /** \internal * Evaluator of a product expression. * Since products require special treatments to handle all possible cases, * we simply deffer the evaluation logic to a product_evaluator class * which offers more partial specialization possibilities. * * \sa class product_evaluator */ template<typename Lhs, typename Rhs, int Options> struct evaluator<Product<Lhs, Rhs, Options> > : public product_evaluator<Product<Lhs, Rhs, Options> > { typedef Product<Lhs, Rhs, Options> XprType; typedef product_evaluator<XprType> Base; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) : Base(xpr) {} }; // Catch "scalar * ( A * B )" and transform it to "(A*scalar) * B" // TODO we should apply that rule only if that's really helpful template<typename Lhs, typename Rhs, typename Scalar1, typename Scalar2, typename Plain1> struct evaluator_assume_aliasing<CwiseBinaryOp<internal::scalar_product_op<Scalar1,Scalar2>, const CwiseNullaryOp<internal::scalar_constant_op<Scalar1>, Plain1>, const Product<Lhs, Rhs, DefaultProduct> > > { static const bool value = true; }; template<typename Lhs, typename Rhs, typename Scalar1, typename Scalar2, typename Plain1> struct evaluator<CwiseBinaryOp<internal::scalar_product_op<Scalar1,Scalar2>, const CwiseNullaryOp<internal::scalar_constant_op<Scalar1>, Plain1>, const Product<Lhs, Rhs, DefaultProduct> > > : public evaluator<Product<EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar1,Lhs,product), Rhs, DefaultProduct> > { typedef CwiseBinaryOp<internal::scalar_product_op<Scalar1,Scalar2>, const CwiseNullaryOp<internal::scalar_constant_op<Scalar1>, Plain1>, const Product<Lhs, Rhs, DefaultProduct> > XprType; typedef evaluator<Product<EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar1,Lhs,product), Rhs, DefaultProduct> > Base; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) : Base(xpr.lhs().functor().m_other * xpr.rhs().lhs() * xpr.rhs().rhs()) {} }; template<typename Lhs, typename Rhs, int DiagIndex> struct evaluator<Diagonal<const Product<Lhs, Rhs, DefaultProduct>, DiagIndex> > : public evaluator<Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex> > { typedef Diagonal<const Product<Lhs, Rhs, DefaultProduct>, DiagIndex> XprType; typedef evaluator<Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex> > Base; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) : Base(Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex>( Product<Lhs, Rhs, LazyProduct>(xpr.nestedExpression().lhs(), xpr.nestedExpression().rhs()), xpr.index() )) {} }; // Helper class to perform a matrix product with the destination at hand. // Depending on the sizes of the factors, there are different evaluation strategies // as controlled by internal::product_type. 
template< typename Lhs, typename Rhs, typename LhsShape = typename evaluator_traits<Lhs>::Shape, typename RhsShape = typename evaluator_traits<Rhs>::Shape, int ProductType = internal::product_type<Lhs,Rhs>::value> struct generic_product_impl; template<typename Lhs, typename Rhs> struct evaluator_assume_aliasing<Product<Lhs, Rhs, DefaultProduct> > { static const bool value = true; }; // This is the default evaluator implementation for products: // It creates a temporary and call generic_product_impl template<typename Lhs, typename Rhs, int Options, int ProductTag, typename LhsShape, typename RhsShape> struct product_evaluator<Product<Lhs, Rhs, Options>, ProductTag, LhsShape, RhsShape> : public evaluator<typename Product<Lhs, Rhs, Options>::PlainObject> { typedef Product<Lhs, Rhs, Options> XprType; typedef typename XprType::PlainObject PlainObject; typedef evaluator<PlainObject> Base; enum { Flags = Base::Flags | EvalBeforeNestingBit }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit product_evaluator(const XprType& xpr) : m_result(xpr.rows(), xpr.cols()) { ::new (static_cast<Base*>(this)) Base(m_result); // FIXME shall we handle nested_eval here?, // if so, then we must take care at removing the call to nested_eval in the specializations (e.g., in permutation_matrix_product, transposition_matrix_product, etc.) // typedef typename internal::nested_eval<Lhs,Rhs::ColsAtCompileTime>::type LhsNested; // typedef typename internal::nested_eval<Rhs,Lhs::RowsAtCompileTime>::type RhsNested; // typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned; // typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned; // // const LhsNested lhs(xpr.lhs()); // const RhsNested rhs(xpr.rhs()); // // generic_product_impl<LhsNestedCleaned, RhsNestedCleaned>::evalTo(m_result, lhs, rhs); generic_product_impl<Lhs, Rhs, LhsShape, RhsShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs()); } protected: PlainObject m_result; }; // The following three shortcuts are enabled only if the scalar types match excatly. // TODO: we could enable them for different scalar types when the product is not vectorized. // Dense = Product template< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar> struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::assign_op<Scalar,Scalar>, Dense2Dense, typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type> { typedef Product<Lhs,Rhs,Options> SrcXprType; static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &) { Index dstRows = src.rows(); Index dstCols = src.cols(); if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) dst.resize(dstRows, dstCols); // FIXME shall we handle nested_eval here? generic_product_impl<Lhs, Rhs>::evalTo(dst, src.lhs(), src.rhs()); } }; // Dense += Product template< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar> struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::add_assign_op<Scalar,Scalar>, Dense2Dense, typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type> { typedef Product<Lhs,Rhs,Options> SrcXprType; static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar,Scalar> &) { eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); // FIXME shall we handle nested_eval here? 
generic_product_impl<Lhs, Rhs>::addTo(dst, src.lhs(), src.rhs()); } }; // Dense -= Product template< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar> struct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::sub_assign_op<Scalar,Scalar>, Dense2Dense, typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type> { typedef Product<Lhs,Rhs,Options> SrcXprType; static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar,Scalar> &) { eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols()); // FIXME shall we handle nested_eval here? generic_product_impl<Lhs, Rhs>::subTo(dst, src.lhs(), src.rhs()); } }; // Dense ?= scalar * Product // TODO we should apply that rule if that's really helpful // for instance, this is not good for inner products template< typename DstXprType, typename Lhs, typename Rhs, typename AssignFunc, typename Scalar, typename ScalarBis, typename Plain> struct Assignment<DstXprType, CwiseBinaryOp<internal::scalar_product_op<ScalarBis,Scalar>, const CwiseNullaryOp<internal::scalar_constant_op<ScalarBis>,Plain>, const Product<Lhs,Rhs,DefaultProduct> >, AssignFunc, Dense2Dense> { typedef CwiseBinaryOp<internal::scalar_product_op<ScalarBis,Scalar>, const CwiseNullaryOp<internal::scalar_constant_op<ScalarBis>,Plain>, const Product<Lhs,Rhs,DefaultProduct> > SrcXprType; static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const AssignFunc& func) { call_assignment_no_alias(dst, (src.lhs().functor().m_other * src.rhs().lhs())*src.rhs().rhs(), func); } }; //---------------------------------------- // Catch "Dense ?= xpr + Product<>" expression to save one temporary // FIXME we could probably enable these rules for any product, i.e., not only Dense and DefaultProduct template<typename OtherXpr, typename Lhs, typename Rhs> struct evaluator_assume_aliasing<CwiseBinaryOp<internal::scalar_sum_op<typename OtherXpr::Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, const OtherXpr, const Product<Lhs,Rhs,DefaultProduct> >, DenseShape > { static const bool value = true; }; template<typename OtherXpr, typename Lhs, typename Rhs> struct evaluator_assume_aliasing<CwiseBinaryOp<internal::scalar_difference_op<typename OtherXpr::Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, const OtherXpr, const Product<Lhs,Rhs,DefaultProduct> >, DenseShape > { static const bool value = true; }; template<typename DstXprType, typename OtherXpr, typename ProductType, typename Func1, typename Func2> struct assignment_from_xpr_op_product { template<typename SrcXprType, typename InitialFunc> static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const InitialFunc& /*func*/) { call_assignment_no_alias(dst, src.lhs(), Func1()); call_assignment_no_alias(dst, src.rhs(), Func2()); } }; #define EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(ASSIGN_OP,BINOP,ASSIGN_OP2) \ template< typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename DstScalar, typename SrcScalar, typename OtherScalar,typename ProdScalar> \ struct Assignment<DstXprType, CwiseBinaryOp<internal::BINOP<OtherScalar,ProdScalar>, const OtherXpr, \ const Product<Lhs,Rhs,DefaultProduct> >, internal::ASSIGN_OP<DstScalar,SrcScalar>, Dense2Dense> \ : assignment_from_xpr_op_product<DstXprType, OtherXpr, Product<Lhs,Rhs,DefaultProduct>, internal::ASSIGN_OP<DstScalar,OtherScalar>, internal::ASSIGN_OP2<DstScalar,ProdScalar> > \ {} EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(assign_op, 
scalar_sum_op,add_assign_op); EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(add_assign_op,scalar_sum_op,add_assign_op); EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(sub_assign_op,scalar_sum_op,sub_assign_op); EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(assign_op, scalar_difference_op,sub_assign_op); EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(add_assign_op,scalar_difference_op,sub_assign_op); EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(sub_assign_op,scalar_difference_op,add_assign_op); //---------------------------------------- template<typename Lhs, typename Rhs> struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,InnerProduct> { template<typename Dst> static EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { dst.coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum(); } template<typename Dst> static EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { dst.coeffRef(0,0) += (lhs.transpose().cwiseProduct(rhs)).sum(); } template<typename Dst> static EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { dst.coeffRef(0,0) -= (lhs.transpose().cwiseProduct(rhs)).sum(); } }; /*********************************************************************** * Implementation of outer dense * dense vector product ***********************************************************************/ // Column major result template<typename Dst, typename Lhs, typename Rhs, typename Func> void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const false_type&) { evaluator<Rhs> rhsEval(rhs); typename nested_eval<Lhs,Rhs::SizeAtCompileTime>::type actual_lhs(lhs); // FIXME if cols is large enough, then it might be useful to make sure that lhs is sequentially stored // FIXME not very good if rhs is real and lhs complex while alpha is real too const Index cols = dst.cols(); for (Index j=0; j<cols; ++j) func(dst.col(j), rhsEval.coeff(Index(0),j) * actual_lhs); } // Row major result template<typename Dst, typename Lhs, typename Rhs, typename Func> void outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const true_type&) { evaluator<Lhs> lhsEval(lhs); typename nested_eval<Rhs,Lhs::SizeAtCompileTime>::type actual_rhs(rhs); // FIXME if rows is large enough, then it might be useful to make sure that rhs is sequentially stored // FIXME not very good if lhs is real and rhs complex while alpha is real too const Index rows = dst.rows(); for (Index i=0; i<rows; ++i) func(dst.row(i), lhsEval.coeff(i,Index(0)) * actual_rhs); } template<typename Lhs, typename Rhs> struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,OuterProduct> { template<typename T> struct is_row_major : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {}; typedef typename Product<Lhs,Rhs>::Scalar Scalar; // TODO it would be nice to be able to exploit our *_assign_op functors for that purpose struct set { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() = src; } }; struct add { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += src; } }; struct sub { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() -= src; } }; struct adds { Scalar m_scale; explicit adds(const Scalar& s) : m_scale(s) {} template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += m_scale * src; 
} }; template<typename Dst> static EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { internal::outer_product_selector_run(dst, lhs, rhs, set(), is_row_major<Dst>()); } template<typename Dst> static EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { internal::outer_product_selector_run(dst, lhs, rhs, add(), is_row_major<Dst>()); } template<typename Dst> static EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { internal::outer_product_selector_run(dst, lhs, rhs, sub(), is_row_major<Dst>()); } template<typename Dst> static EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { internal::outer_product_selector_run(dst, lhs, rhs, adds(alpha), is_row_major<Dst>()); } }; // This base class provides default implementations for evalTo, addTo, subTo, in terms of scaleAndAddTo template<typename Lhs, typename Rhs, typename Derived> struct generic_product_impl_base { typedef typename Product<Lhs,Rhs>::Scalar Scalar; template<typename Dst> static EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { dst.setZero(); scaleAndAddTo(dst, lhs, rhs, Scalar(1)); } template<typename Dst> static EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { scaleAndAddTo(dst,lhs, rhs, Scalar(1)); } template<typename Dst> static EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { scaleAndAddTo(dst, lhs, rhs, Scalar(-1)); } template<typename Dst> static EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { Derived::scaleAndAddTo(dst,lhs,rhs,alpha); } }; template<typename Lhs, typename Rhs> struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemvProduct> : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemvProduct> > { typedef typename nested_eval<Lhs,1>::type LhsNested; typedef typename nested_eval<Rhs,1>::type RhsNested; typedef typename Product<Lhs,Rhs>::Scalar Scalar; enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight }; typedef typename internal::remove_all<typename internal::conditional<int(Side)==OnTheRight,LhsNested,RhsNested>::type>::type MatrixType; template<typename Dest> static EIGEN_STRONG_INLINE void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { LhsNested actual_lhs(lhs); RhsNested actual_rhs(rhs); internal::gemv_dense_selector<Side, (int(MatrixType::Flags)&RowMajorBit) ? 
RowMajor : ColMajor, bool(internal::blas_traits<MatrixType>::HasUsableDirectAccess) >::run(actual_lhs, actual_rhs, dst, alpha); } }; template<typename Lhs, typename Rhs> struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> { typedef typename Product<Lhs,Rhs>::Scalar Scalar; template<typename Dst> static EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { // Same as: dst.noalias() = lhs.lazyProduct(rhs); // but easier on the compiler side call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::assign_op<typename Dst::Scalar,Scalar>()); } template<typename Dst> static EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { // dst.noalias() += lhs.lazyProduct(rhs); call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::add_assign_op<typename Dst::Scalar,Scalar>()); } template<typename Dst> static EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { // dst.noalias() -= lhs.lazyProduct(rhs); call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::sub_assign_op<typename Dst::Scalar,Scalar>()); } // Catch "dst {,+,-}= (s*A)*B" and evaluate it lazily by moving out the scalar factor: // dst {,+,-}= s * (A.lazyProduct(B)) // This is a huge benefit for heap-allocated matrix types as it save one costly allocation. // For them, this strategy is also faster than simply by-passing the heap allocation through // stack allocation. // For fixed sizes matrices, this is less obvious, it is sometimes x2 faster, but sometimes x3 slower, // and the behavior depends also a lot on the compiler... so let's be conservative and enable them for dynamic-size only, // that is when coming from generic_product_impl<...,GemmProduct> in file GeneralMatrixMatrix.h template<typename Dst, typename Scalar1, typename Scalar2, typename Plain1, typename Xpr2, typename Func> static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void eval_dynamic(Dst& dst, const CwiseBinaryOp<internal::scalar_product_op<Scalar1,Scalar2>, const CwiseNullaryOp<internal::scalar_constant_op<Scalar1>, Plain1>, Xpr2>& lhs, const Rhs& rhs, const Func &func) { call_assignment_no_alias(dst, lhs.lhs().functor().m_other * lhs.rhs().lazyProduct(rhs), func); } // Here, we we always have LhsT==Lhs, but we need to make it a template type to make the above // overload more specialized. template<typename Dst, typename LhsT, typename Func> static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void eval_dynamic(Dst& dst, const LhsT& lhs, const Rhs& rhs, const Func &func) { call_assignment_no_alias(dst, lhs.lazyProduct(rhs), func); } // template<typename Dst> // static inline void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) // { dst.noalias() += alpha * lhs.lazyProduct(rhs); } }; // This specialization enforces the use of a coefficient-based evaluation strategy template<typename Lhs, typename Rhs> struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,LazyCoeffBasedProductMode> : generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> {}; // Case 2: Evaluate coeff by coeff // // This is mostly taken from CoeffBasedProduct.h // The main difference is that we add an extra argument to the etor_product_*_impl::run() function // for the inner dimension of the product, because evaluator object do not know their size. 
template<int Traversal, int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar> struct etor_product_coeff_impl; template<int StorageOrder, int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode> struct etor_product_packet_impl; template<typename Lhs, typename Rhs, int ProductTag> struct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape, DenseShape> : evaluator_base<Product<Lhs, Rhs, LazyProduct> > { typedef Product<Lhs, Rhs, LazyProduct> XprType; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit product_evaluator(const XprType& xpr) : m_lhs(xpr.lhs()), m_rhs(xpr.rhs()), m_lhsImpl(m_lhs), // FIXME the creation of the evaluator objects should result in a no-op, but check that! m_rhsImpl(m_rhs), // Moreover, they are only useful for the packet path, so we could completely disable them when not needed, // or perhaps declare them on the fly on the packet method... We have experiment to check what's best. m_innerDim(xpr.lhs().cols()) { EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits<Scalar>::MulCost); EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits<Scalar>::AddCost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); #if 0 std::cerr << "LhsOuterStrideBytes= " << LhsOuterStrideBytes << "\n"; std::cerr << "RhsOuterStrideBytes= " << RhsOuterStrideBytes << "\n"; std::cerr << "LhsAlignment= " << LhsAlignment << "\n"; std::cerr << "RhsAlignment= " << RhsAlignment << "\n"; std::cerr << "CanVectorizeLhs= " << CanVectorizeLhs << "\n"; std::cerr << "CanVectorizeRhs= " << CanVectorizeRhs << "\n"; std::cerr << "CanVectorizeInner= " << CanVectorizeInner << "\n"; std::cerr << "EvalToRowMajor= " << EvalToRowMajor << "\n"; std::cerr << "Alignment= " << Alignment << "\n"; std::cerr << "Flags= " << Flags << "\n"; #endif } // Everything below here is taken from CoeffBasedProduct.h typedef typename internal::nested_eval<Lhs,Rhs::ColsAtCompileTime>::type LhsNested; typedef typename internal::nested_eval<Rhs,Lhs::RowsAtCompileTime>::type RhsNested; typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned; typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned; typedef evaluator<LhsNestedCleaned> LhsEtorType; typedef evaluator<RhsNestedCleaned> RhsEtorType; enum { RowsAtCompileTime = LhsNestedCleaned::RowsAtCompileTime, ColsAtCompileTime = RhsNestedCleaned::ColsAtCompileTime, InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(LhsNestedCleaned::ColsAtCompileTime, RhsNestedCleaned::RowsAtCompileTime), MaxRowsAtCompileTime = LhsNestedCleaned::MaxRowsAtCompileTime, MaxColsAtCompileTime = RhsNestedCleaned::MaxColsAtCompileTime }; typedef typename find_best_packet<Scalar,RowsAtCompileTime>::type LhsVecPacketType; typedef typename find_best_packet<Scalar,ColsAtCompileTime>::type RhsVecPacketType; enum { LhsCoeffReadCost = LhsEtorType::CoeffReadCost, RhsCoeffReadCost = RhsEtorType::CoeffReadCost, CoeffReadCost = InnerSize==0 ? NumTraits<Scalar>::ReadCost : InnerSize == Dynamic ? 
HugeCost : InnerSize * (NumTraits<Scalar>::MulCost + LhsCoeffReadCost + RhsCoeffReadCost) + (InnerSize - 1) * NumTraits<Scalar>::AddCost, Unroll = CoeffReadCost <= EIGEN_UNROLLING_LIMIT, LhsFlags = LhsEtorType::Flags, RhsFlags = RhsEtorType::Flags, LhsRowMajor = LhsFlags & RowMajorBit, RhsRowMajor = RhsFlags & RowMajorBit, LhsVecPacketSize = unpacket_traits<LhsVecPacketType>::size, RhsVecPacketSize = unpacket_traits<RhsVecPacketType>::size, // Here, we don't care about alignment larger than the usable packet size. LhsAlignment = EIGEN_PLAIN_ENUM_MIN(LhsEtorType::Alignment,LhsVecPacketSize*int(sizeof(typename LhsNestedCleaned::Scalar))), RhsAlignment = EIGEN_PLAIN_ENUM_MIN(RhsEtorType::Alignment,RhsVecPacketSize*int(sizeof(typename RhsNestedCleaned::Scalar))), SameType = is_same<typename LhsNestedCleaned::Scalar,typename RhsNestedCleaned::Scalar>::value, CanVectorizeRhs = bool(RhsRowMajor) && (RhsFlags & PacketAccessBit) && (ColsAtCompileTime!=1), CanVectorizeLhs = (!LhsRowMajor) && (LhsFlags & PacketAccessBit) && (RowsAtCompileTime!=1), EvalToRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1 : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0 : (bool(RhsRowMajor) && !CanVectorizeLhs), Flags = ((unsigned int)(LhsFlags | RhsFlags) & HereditaryBits & ~RowMajorBit) | (EvalToRowMajor ? RowMajorBit : 0) // TODO enable vectorization for mixed types | (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0) | (XprType::IsVectorAtCompileTime ? LinearAccessBit : 0), LhsOuterStrideBytes = int(LhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename LhsNestedCleaned::Scalar)), RhsOuterStrideBytes = int(RhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename RhsNestedCleaned::Scalar)), Alignment = bool(CanVectorizeLhs) ? (LhsOuterStrideBytes<=0 || (int(LhsOuterStrideBytes) % EIGEN_PLAIN_ENUM_MAX(1,LhsAlignment))!=0 ? 0 : LhsAlignment) : bool(CanVectorizeRhs) ? (RhsOuterStrideBytes<=0 || (int(RhsOuterStrideBytes) % EIGEN_PLAIN_ENUM_MAX(1,RhsAlignment))!=0 ? 0 : RhsAlignment) : 0, /* CanVectorizeInner deserves special explanation. It does not affect the product flags. It is not used outside * of Product. If the Product itself is not a packet-access expression, there is still a chance that the inner * loop of the product might be vectorized. This is the meaning of CanVectorizeInner. Since it doesn't affect * the Flags, it is safe to make this value depend on ActualPacketAccessBit, that doesn't affect the ABI. */ CanVectorizeInner = SameType && LhsRowMajor && (!RhsRowMajor) && (LhsFlags & RhsFlags & ActualPacketAccessBit) && (InnerSize % packet_traits<Scalar>::size == 0) }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const { return (m_lhs.row(row).transpose().cwiseProduct( m_rhs.col(col) )).sum(); } /* Allow index-based non-packet access. It is impossible though to allow index-based packed access, * which is why we don't set the LinearAccessBit. * TODO: this seems possible when the result is a vector */ EIGEN_DEVICE_FUNC const CoeffReturnType coeff(Index index) const { const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? 0 : index; const Index col = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? index : 0; return (m_lhs.row(row).transpose().cwiseProduct( m_rhs.col(col) )).sum(); } template<int LoadMode, typename PacketType> const PacketType packet(Index row, Index col) const { PacketType res; typedef etor_product_packet_impl<bool(int(Flags)&RowMajorBit) ? 
RowMajor : ColMajor, Unroll ? int(InnerSize) : Dynamic, LhsEtorType, RhsEtorType, PacketType, LoadMode> PacketImpl; PacketImpl::run(row, col, m_lhsImpl, m_rhsImpl, m_innerDim, res); return res; } template<int LoadMode, typename PacketType> const PacketType packet(Index index) const { const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? 0 : index; const Index col = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? index : 0; return packet<LoadMode,PacketType>(row,col); } protected: typename internal::add_const_on_value_type<LhsNested>::type m_lhs; typename internal::add_const_on_value_type<RhsNested>::type m_rhs; LhsEtorType m_lhsImpl; RhsEtorType m_rhsImpl; // TODO: Get rid of m_innerDim if known at compile time Index m_innerDim; }; template<typename Lhs, typename Rhs> struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, LazyCoeffBasedProductMode, DenseShape, DenseShape> : product_evaluator<Product<Lhs, Rhs, LazyProduct>, CoeffBasedProductMode, DenseShape, DenseShape> { typedef Product<Lhs, Rhs, DefaultProduct> XprType; typedef Product<Lhs, Rhs, LazyProduct> BaseProduct; typedef product_evaluator<BaseProduct, CoeffBasedProductMode, DenseShape, DenseShape> Base; enum { Flags = Base::Flags | EvalBeforeNestingBit }; EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr) : Base(BaseProduct(xpr.lhs(),xpr.rhs())) {} }; /**************************************** *** Coeff based product, Packet path *** ****************************************/ template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode> struct etor_product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode> { static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) { etor_product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, innerDim, res); res = pmadd(pset1<Packet>(lhs.coeff(row, Index(UnrollingIndex-1))), rhs.template packet<LoadMode,Packet>(Index(UnrollingIndex-1), col), res); } }; template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode> struct etor_product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode> { static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) { etor_product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, innerDim, res); res = pmadd(lhs.template packet<LoadMode,Packet>(row, Index(UnrollingIndex-1)), pset1<Packet>(rhs.coeff(Index(UnrollingIndex-1), col)), res); } }; template<typename Lhs, typename Rhs, typename Packet, int LoadMode> struct etor_product_packet_impl<RowMajor, 1, Lhs, Rhs, Packet, LoadMode> { static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) { res = pmul(pset1<Packet>(lhs.coeff(row, Index(0))),rhs.template packet<LoadMode,Packet>(Index(0), col)); } }; template<typename Lhs, typename Rhs, typename Packet, int LoadMode> struct etor_product_packet_impl<ColMajor, 1, Lhs, Rhs, Packet, LoadMode> { static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) { res = pmul(lhs.template packet<LoadMode,Packet>(row, Index(0)), pset1<Packet>(rhs.coeff(Index(0), col))); } }; template<typename Lhs, typename Rhs, typename Packet, int LoadMode> struct etor_product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode> { static 
EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Index /*innerDim*/, Packet &res) { res = pset1<Packet>(typename unpacket_traits<Packet>::type(0)); } }; template<typename Lhs, typename Rhs, typename Packet, int LoadMode> struct etor_product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode> { static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Index /*innerDim*/, Packet &res) { res = pset1<Packet>(typename unpacket_traits<Packet>::type(0)); } }; template<typename Lhs, typename Rhs, typename Packet, int LoadMode> struct etor_product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode> { static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) { res = pset1<Packet>(typename unpacket_traits<Packet>::type(0)); for(Index i = 0; i < innerDim; ++i) res = pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode,Packet>(i, col), res); } }; template<typename Lhs, typename Rhs, typename Packet, int LoadMode> struct etor_product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode> { static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) { res = pset1<Packet>(typename unpacket_traits<Packet>::type(0)); for(Index i = 0; i < innerDim; ++i) res = pmadd(lhs.template packet<LoadMode,Packet>(row, i), pset1<Packet>(rhs.coeff(i, col)), res); } }; /*************************************************************************** * Triangular products ***************************************************************************/ template<int Mode, bool LhsIsTriangular, typename Lhs, bool LhsIsVector, typename Rhs, bool RhsIsVector> struct triangular_product_impl; template<typename Lhs, typename Rhs, int ProductTag> struct generic_product_impl<Lhs,Rhs,TriangularShape,DenseShape,ProductTag> : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,TriangularShape,DenseShape,ProductTag> > { typedef typename Product<Lhs,Rhs>::Scalar Scalar; template<typename Dest> static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { triangular_product_impl<Lhs::Mode,true,typename Lhs::MatrixType,false,Rhs, Rhs::ColsAtCompileTime==1> ::run(dst, lhs.nestedExpression(), rhs, alpha); } }; template<typename Lhs, typename Rhs, int ProductTag> struct generic_product_impl<Lhs,Rhs,DenseShape,TriangularShape,ProductTag> : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,TriangularShape,ProductTag> > { typedef typename Product<Lhs,Rhs>::Scalar Scalar; template<typename Dest> static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { triangular_product_impl<Rhs::Mode,false,Lhs,Lhs::RowsAtCompileTime==1, typename Rhs::MatrixType, false>::run(dst, lhs, rhs.nestedExpression(), alpha); } }; /*************************************************************************** * SelfAdjoint products ***************************************************************************/ template <typename Lhs, int LhsMode, bool LhsIsVector, typename Rhs, int RhsMode, bool RhsIsVector> struct selfadjoint_product_impl; template<typename Lhs, typename Rhs, int ProductTag> struct generic_product_impl<Lhs,Rhs,SelfAdjointShape,DenseShape,ProductTag> : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SelfAdjointShape,DenseShape,ProductTag> > { typedef typename Product<Lhs,Rhs>::Scalar Scalar; template<typename Dest> 
static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { selfadjoint_product_impl<typename Lhs::MatrixType,Lhs::Mode,false,Rhs,0,Rhs::IsVectorAtCompileTime>::run(dst, lhs.nestedExpression(), rhs, alpha); } }; template<typename Lhs, typename Rhs, int ProductTag> struct generic_product_impl<Lhs,Rhs,DenseShape,SelfAdjointShape,ProductTag> : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SelfAdjointShape,ProductTag> > { typedef typename Product<Lhs,Rhs>::Scalar Scalar; template<typename Dest> static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { selfadjoint_product_impl<Lhs,0,Lhs::IsVectorAtCompileTime,typename Rhs::MatrixType,Rhs::Mode,false>::run(dst, lhs, rhs.nestedExpression(), alpha); } }; /*************************************************************************** * Diagonal products ***************************************************************************/ template<typename MatrixType, typename DiagonalType, typename Derived, int ProductOrder> struct diagonal_product_evaluator_base : evaluator_base<Derived> { typedef typename ScalarBinaryOpTraits<typename MatrixType::Scalar, typename DiagonalType::Scalar>::ReturnType Scalar; public: enum { CoeffReadCost = NumTraits<Scalar>::MulCost + evaluator<MatrixType>::CoeffReadCost + evaluator<DiagonalType>::CoeffReadCost, MatrixFlags = evaluator<MatrixType>::Flags, DiagFlags = evaluator<DiagonalType>::Flags, _StorageOrder = MatrixFlags & RowMajorBit ? RowMajor : ColMajor, _ScalarAccessOnDiag = !((int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheLeft) ||(int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheRight)), _SameTypes = is_same<typename MatrixType::Scalar, typename DiagonalType::Scalar>::value, // FIXME currently we need same types, but in the future the next rule should be the one //_Vectorizable = bool(int(MatrixFlags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagFlags)&PacketAccessBit))), _Vectorizable = bool(int(MatrixFlags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagFlags)&PacketAccessBit))), _LinearAccessMask = (MatrixType::RowsAtCompileTime==1 || MatrixType::ColsAtCompileTime==1) ? LinearAccessBit : 0, Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixFlags)) | (_Vectorizable ? 
PacketAccessBit : 0), Alignment = evaluator<MatrixType>::Alignment, AsScalarProduct = (DiagonalType::SizeAtCompileTime==1) || (DiagonalType::SizeAtCompileTime==Dynamic && MatrixType::RowsAtCompileTime==1 && ProductOrder==OnTheLeft) || (DiagonalType::SizeAtCompileTime==Dynamic && MatrixType::ColsAtCompileTime==1 && ProductOrder==OnTheRight) }; diagonal_product_evaluator_base(const MatrixType &mat, const DiagonalType &diag) : m_diagImpl(diag), m_matImpl(mat) { EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits<Scalar>::MulCost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index idx) const { if(AsScalarProduct) return m_diagImpl.coeff(0) * m_matImpl.coeff(idx); else return m_diagImpl.coeff(idx) * m_matImpl.coeff(idx); } protected: template<int LoadMode,typename PacketType> EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::true_type) const { return internal::pmul(m_matImpl.template packet<LoadMode,PacketType>(row, col), internal::pset1<PacketType>(m_diagImpl.coeff(id))); } template<int LoadMode,typename PacketType> EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::false_type) const { enum { InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime, DiagonalPacketLoadMode = EIGEN_PLAIN_ENUM_MIN(LoadMode,((InnerSize%16) == 0) ? int(Aligned16) : int(evaluator<DiagonalType>::Alignment)) // FIXME hardcoded 16!! }; return internal::pmul(m_matImpl.template packet<LoadMode,PacketType>(row, col), m_diagImpl.template packet<DiagonalPacketLoadMode,PacketType>(id)); } evaluator<DiagonalType> m_diagImpl; evaluator<MatrixType> m_matImpl; }; // diagonal * dense template<typename Lhs, typename Rhs, int ProductKind, int ProductTag> struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DiagonalShape, DenseShape> : diagonal_product_evaluator_base<Rhs, typename Lhs::DiagonalVectorType, Product<Lhs, Rhs, LazyProduct>, OnTheLeft> { typedef diagonal_product_evaluator_base<Rhs, typename Lhs::DiagonalVectorType, Product<Lhs, Rhs, LazyProduct>, OnTheLeft> Base; using Base::m_diagImpl; using Base::m_matImpl; using Base::coeff; typedef typename Base::Scalar Scalar; typedef Product<Lhs, Rhs, ProductKind> XprType; typedef typename XprType::PlainObject PlainObject; enum { StorageOrder = int(Rhs::Flags) & RowMajorBit ? RowMajor : ColMajor }; EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr) : Base(xpr.rhs(), xpr.lhs().diagonal()) { } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const { return m_diagImpl.coeff(row) * m_matImpl.coeff(row, col); } #ifndef __CUDACC__ template<int LoadMode,typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { // FIXME: NVCC used to complain about the template keyword, but we have to check whether this is still the case. // See also similar calls below. 
return this->template packet_impl<LoadMode,PacketType>(row,col, row, typename internal::conditional<int(StorageOrder)==RowMajor, internal::true_type, internal::false_type>::type()); } template<int LoadMode,typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index idx) const { return packet<LoadMode,PacketType>(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx); } #endif }; // dense * diagonal template<typename Lhs, typename Rhs, int ProductKind, int ProductTag> struct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DenseShape, DiagonalShape> : diagonal_product_evaluator_base<Lhs, typename Rhs::DiagonalVectorType, Product<Lhs, Rhs, LazyProduct>, OnTheRight> { typedef diagonal_product_evaluator_base<Lhs, typename Rhs::DiagonalVectorType, Product<Lhs, Rhs, LazyProduct>, OnTheRight> Base; using Base::m_diagImpl; using Base::m_matImpl; using Base::coeff; typedef typename Base::Scalar Scalar; typedef Product<Lhs, Rhs, ProductKind> XprType; typedef typename XprType::PlainObject PlainObject; enum { StorageOrder = int(Lhs::Flags) & RowMajorBit ? RowMajor : ColMajor }; EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr) : Base(xpr.lhs(), xpr.rhs().diagonal()) { } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const { return m_matImpl.coeff(row, col) * m_diagImpl.coeff(col); } #ifndef __CUDACC__ template<int LoadMode,typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return this->template packet_impl<LoadMode,PacketType>(row,col, col, typename internal::conditional<int(StorageOrder)==ColMajor, internal::true_type, internal::false_type>::type()); } template<int LoadMode,typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index idx) const { return packet<LoadMode,PacketType>(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx); } #endif }; /*************************************************************************** * Products with permutation matrices ***************************************************************************/ /** \internal * \class permutation_matrix_product * Internal helper class implementing the product between a permutation matrix and a matrix. * This class is specialized for DenseShape below and for SparseShape in SparseCore/SparsePermutation.h */ template<typename ExpressionType, int Side, bool Transposed, typename ExpressionShape> struct permutation_matrix_product; template<typename ExpressionType, int Side, bool Transposed> struct permutation_matrix_product<ExpressionType, Side, Transposed, DenseShape> { typedef typename nested_eval<ExpressionType, 1>::type MatrixType; typedef typename remove_all<MatrixType>::type MatrixTypeCleaned; template<typename Dest, typename PermutationType> static inline void run(Dest& dst, const PermutationType& perm, const ExpressionType& xpr) { MatrixType mat(xpr); const Index n = Side==OnTheLeft ? mat.rows() : mat.cols(); // FIXME we need an is_same for expression that is not sensitive to constness. For instance // is_same_xpr<Block<const Matrix>, Block<Matrix> >::value should be true. 
//if(is_same<MatrixTypeCleaned,Dest>::value && extract_data(dst) == extract_data(mat)) if(is_same_dense(dst, mat)) { // apply the permutation inplace Matrix<bool,PermutationType::RowsAtCompileTime,1,0,PermutationType::MaxRowsAtCompileTime> mask(perm.size()); mask.fill(false); Index r = 0; while(r < perm.size()) { // search for the next seed while(r<perm.size() && mask[r]) r++; if(r>=perm.size()) break; // we got one, let's follow it until we are back to the seed Index k0 = r++; Index kPrev = k0; mask.coeffRef(k0) = true; for(Index k=perm.indices().coeff(k0); k!=k0; k=perm.indices().coeff(k)) { Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>(dst, k) .swap(Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime> (dst,((Side==OnTheLeft) ^ Transposed) ? k0 : kPrev)); mask.coeffRef(k) = true; kPrev = k; } } } else { for(Index i = 0; i < n; ++i) { Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime> (dst, ((Side==OnTheLeft) ^ Transposed) ? perm.indices().coeff(i) : i) = Block<const MatrixTypeCleaned,Side==OnTheLeft ? 1 : MatrixTypeCleaned::RowsAtCompileTime,Side==OnTheRight ? 1 : MatrixTypeCleaned::ColsAtCompileTime> (mat, ((Side==OnTheRight) ^ Transposed) ? perm.indices().coeff(i) : i); } } } }; template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape> struct generic_product_impl<Lhs, Rhs, PermutationShape, MatrixShape, ProductTag> { template<typename Dest> static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { permutation_matrix_product<Rhs, OnTheLeft, false, MatrixShape>::run(dst, lhs, rhs); } }; template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape> struct generic_product_impl<Lhs, Rhs, MatrixShape, PermutationShape, ProductTag> { template<typename Dest> static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { permutation_matrix_product<Lhs, OnTheRight, false, MatrixShape>::run(dst, rhs, lhs); } }; template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape> struct generic_product_impl<Inverse<Lhs>, Rhs, PermutationShape, MatrixShape, ProductTag> { template<typename Dest> static void evalTo(Dest& dst, const Inverse<Lhs>& lhs, const Rhs& rhs) { permutation_matrix_product<Rhs, OnTheLeft, true, MatrixShape>::run(dst, lhs.nestedExpression(), rhs); } }; template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape> struct generic_product_impl<Lhs, Inverse<Rhs>, MatrixShape, PermutationShape, ProductTag> { template<typename Dest> static void evalTo(Dest& dst, const Lhs& lhs, const Inverse<Rhs>& rhs) { permutation_matrix_product<Lhs, OnTheRight, true, MatrixShape>::run(dst, rhs.nestedExpression(), lhs); } }; /*************************************************************************** * Products with transpositions matrices ***************************************************************************/ // FIXME could we unify Transpositions and Permutation into a single "shape"?? /** \internal * \class transposition_matrix_product * Internal helper class implementing the product between a permutation matrix and a matrix. 
*/ template<typename ExpressionType, int Side, bool Transposed, typename ExpressionShape> struct transposition_matrix_product { typedef typename nested_eval<ExpressionType, 1>::type MatrixType; typedef typename remove_all<MatrixType>::type MatrixTypeCleaned; template<typename Dest, typename TranspositionType> static inline void run(Dest& dst, const TranspositionType& tr, const ExpressionType& xpr) { MatrixType mat(xpr); typedef typename TranspositionType::StorageIndex StorageIndex; const Index size = tr.size(); StorageIndex j = 0; if(!is_same_dense(dst,mat)) dst = mat; for(Index k=(Transposed?size-1:0) ; Transposed?k>=0:k<size ; Transposed?--k:++k) if(Index(j=tr.coeff(k))!=k) { if(Side==OnTheLeft) dst.row(k).swap(dst.row(j)); else if(Side==OnTheRight) dst.col(k).swap(dst.col(j)); } } }; template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape> struct generic_product_impl<Lhs, Rhs, TranspositionsShape, MatrixShape, ProductTag> { template<typename Dest> static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { transposition_matrix_product<Rhs, OnTheLeft, false, MatrixShape>::run(dst, lhs, rhs); } }; template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape> struct generic_product_impl<Lhs, Rhs, MatrixShape, TranspositionsShape, ProductTag> { template<typename Dest> static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { transposition_matrix_product<Lhs, OnTheRight, false, MatrixShape>::run(dst, rhs, lhs); } }; template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape> struct generic_product_impl<Transpose<Lhs>, Rhs, TranspositionsShape, MatrixShape, ProductTag> { template<typename Dest> static void evalTo(Dest& dst, const Transpose<Lhs>& lhs, const Rhs& rhs) { transposition_matrix_product<Rhs, OnTheLeft, true, MatrixShape>::run(dst, lhs.nestedExpression(), rhs); } }; template<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape> struct generic_product_impl<Lhs, Transpose<Rhs>, MatrixShape, TranspositionsShape, ProductTag> { template<typename Dest> static void evalTo(Dest& dst, const Lhs& lhs, const Transpose<Rhs>& rhs) { transposition_matrix_product<Lhs, OnTheRight, true, MatrixShape>::run(dst, rhs.nestedExpression(), lhs); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_PRODUCT_EVALUATORS_H
Unknown
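A minimal usage sketch, assuming the same bundled Eigen 3.3 headers; the program and operand sizes are illustrative assumptions rather than repository code. The product expressions below are the ones dispatched through the product_evaluator / generic_product_impl machinery in the ProductEvaluators.h excerpt.

// Illustrative sketch (assumed example): products routed through the evaluators above.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d A = Eigen::Matrix3d::Random();
  Eigen::Matrix3d B = Eigen::Matrix3d::Random();
  Eigen::Vector3d v = Eigen::Vector3d::Random();

  // Dense = Product: handled by the Assignment specialization calling generic_product_impl::evalTo.
  Eigen::Matrix3d C = A * B;

  // Dense += Product with noalias(): routed to generic_product_impl::addTo, no extra temporary for the sum.
  C.noalias() += A * B;

  // Matrix * vector: the strategy is chosen by internal::product_type
  // (coefficient-based for small fixed sizes like this, GEMV for larger ones).
  Eigen::Vector3d w = A * v;

  // "scalar * (A*B)" is reassociated to "(scalar*A) * B" by the dedicated evaluator/assignment rules.
  Eigen::Matrix3d D = 2.0 * (A * B);

  // Coefficient-based (lazy) product: no temporary, coefficients computed on demand.
  Eigen::Matrix3d E = A.lazyProduct(B);

  // Diagonal and triangular operands use their own generic_product_impl / evaluator specializations.
  Eigen::Matrix3d F = v.asDiagonal() * A;
  Eigen::Matrix3d G = A.triangularView<Eigen::Upper>() * B;

  std::cout << C.sum() + w.sum() + D.sum() + E.sum() + F.sum() + G.sum() << "\n";
  return 0;
}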
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/GlobalFunctions.h
.h
10,222
188
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010-2016 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GLOBAL_FUNCTIONS_H #define EIGEN_GLOBAL_FUNCTIONS_H #ifdef EIGEN_PARSED_BY_DOXYGEN #define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME,FUNCTOR,DOC_OP,DOC_DETAILS) \ /** \returns an expression of the coefficient-wise DOC_OP of \a x DOC_DETAILS \sa <a href="group__CoeffwiseMathFunctions.html#cwisetable_##NAME">Math functions</a>, class CwiseUnaryOp */ \ template<typename Derived> \ inline const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> \ NAME(const Eigen::ArrayBase<Derived>& x); #else #define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME,FUNCTOR,DOC_OP,DOC_DETAILS) \ template<typename Derived> \ inline const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> \ (NAME)(const Eigen::ArrayBase<Derived>& x) { \ return Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived>(x.derived()); \ } #endif // EIGEN_PARSED_BY_DOXYGEN #define EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(NAME,FUNCTOR) \ \ template<typename Derived> \ struct NAME##_retval<ArrayBase<Derived> > \ { \ typedef const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> type; \ }; \ template<typename Derived> \ struct NAME##_impl<ArrayBase<Derived> > \ { \ static inline typename NAME##_retval<ArrayBase<Derived> >::type run(const Eigen::ArrayBase<Derived>& x) \ { \ return typename NAME##_retval<ArrayBase<Derived> >::type(x.derived()); \ } \ }; namespace Eigen { EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(real,scalar_real_op,real part,\sa ArrayBase::real) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(imag,scalar_imag_op,imaginary part,\sa ArrayBase::imag) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(conj,scalar_conjugate_op,complex conjugate,\sa ArrayBase::conjugate) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(inverse,scalar_inverse_op,inverse,\sa ArrayBase::inverse) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sin,scalar_sin_op,sine,\sa ArrayBase::sin) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cos,scalar_cos_op,cosine,\sa ArrayBase::cos) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tan,scalar_tan_op,tangent,\sa ArrayBase::tan) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(atan,scalar_atan_op,arc-tangent,\sa ArrayBase::atan) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asin,scalar_asin_op,arc-sine,\sa ArrayBase::asin) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acos,scalar_acos_op,arc-consine,\sa ArrayBase::acos) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sinh,scalar_sinh_op,hyperbolic sine,\sa ArrayBase::sinh) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cosh,scalar_cosh_op,hyperbolic cosine,\sa ArrayBase::cosh) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tanh,scalar_tanh_op,hyperbolic tangent,\sa ArrayBase::tanh) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(lgamma,scalar_lgamma_op,natural logarithm of the gamma function,\sa ArrayBase::lgamma) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(digamma,scalar_digamma_op,derivative of lgamma,\sa ArrayBase::digamma) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erf,scalar_erf_op,error function,\sa ArrayBase::erf) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erfc,scalar_erfc_op,complement error function,\sa ArrayBase::erfc) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(exp,scalar_exp_op,exponential,\sa ArrayBase::exp) 
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log,scalar_log_op,natural logarithm,\sa Eigen::log10 DOXCOMMA ArrayBase::log) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log1p,scalar_log1p_op,natural logarithm of 1 plus the value,\sa ArrayBase::log1p) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log10,scalar_log10_op,base 10 logarithm,\sa Eigen::log DOXCOMMA ArrayBase::log) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs,scalar_abs_op,absolute value,\sa ArrayBase::abs DOXCOMMA MatrixBase::cwiseAbs) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs2,scalar_abs2_op,squared absolute value,\sa ArrayBase::abs2 DOXCOMMA MatrixBase::cwiseAbs2) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(arg,scalar_arg_op,complex argument,\sa ArrayBase::arg) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sqrt,scalar_sqrt_op,square root,\sa ArrayBase::sqrt DOXCOMMA MatrixBase::cwiseSqrt) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(rsqrt,scalar_rsqrt_op,reciprocal square root,\sa ArrayBase::rsqrt) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(square,scalar_square_op,square (power 2),\sa Eigen::abs2 DOXCOMMA Eigen::pow DOXCOMMA ArrayBase::square) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cube,scalar_cube_op,cube (power 3),\sa Eigen::pow DOXCOMMA ArrayBase::cube) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(round,scalar_round_op,nearest integer,\sa Eigen::floor DOXCOMMA Eigen::ceil DOXCOMMA ArrayBase::round) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(floor,scalar_floor_op,nearest integer not greater than the giben value,\sa Eigen::ceil DOXCOMMA ArrayBase::floor) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(ceil,scalar_ceil_op,nearest integer not less than the giben value,\sa Eigen::floor DOXCOMMA ArrayBase::ceil) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isnan,scalar_isnan_op,not-a-number test,\sa Eigen::isinf DOXCOMMA Eigen::isfinite DOXCOMMA ArrayBase::isnan) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isinf,scalar_isinf_op,infinite value test,\sa Eigen::isnan DOXCOMMA Eigen::isfinite DOXCOMMA ArrayBase::isinf) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isfinite,scalar_isfinite_op,finite value test,\sa Eigen::isinf DOXCOMMA Eigen::isnan DOXCOMMA ArrayBase::isfinite) EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sign,scalar_sign_op,sign (or 0),\sa ArrayBase::sign) /** \returns an expression of the coefficient-wise power of \a x to the given constant \a exponent. * * \tparam ScalarExponent is the scalar type of \a exponent. It must be compatible with the scalar type of the given expression (\c Derived::Scalar). * * \sa ArrayBase::pow() * * \relates ArrayBase */ #ifdef EIGEN_PARSED_BY_DOXYGEN template<typename Derived,typename ScalarExponent> inline const CwiseBinaryOp<internal::scalar_pow_op<Derived::Scalar,ScalarExponent>,Derived,Constant<ScalarExponent> > pow(const Eigen::ArrayBase<Derived>& x, const ScalarExponent& exponent); #else template<typename Derived,typename ScalarExponent> inline typename internal::enable_if< !(internal::is_same<typename Derived::Scalar,ScalarExponent>::value) && EIGEN_SCALAR_BINARY_SUPPORTED(pow,typename Derived::Scalar,ScalarExponent), const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,ScalarExponent,pow) >::type pow(const Eigen::ArrayBase<Derived>& x, const ScalarExponent& exponent) { return x.derived().pow(exponent); } template<typename Derived> inline const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename Derived::Scalar,pow) pow(const Eigen::ArrayBase<Derived>& x, const typename Derived::Scalar& exponent) { return x.derived().pow(exponent); } #endif /** \returns an expression of the coefficient-wise power of \a x to the given array of \a exponents. * * This function computes the coefficient-wise power. 
* * Example: \include Cwise_array_power_array.cpp * Output: \verbinclude Cwise_array_power_array.out * * \sa ArrayBase::pow() * * \relates ArrayBase */ template<typename Derived,typename ExponentDerived> inline const Eigen::CwiseBinaryOp<Eigen::internal::scalar_pow_op<typename Derived::Scalar, typename ExponentDerived::Scalar>, const Derived, const ExponentDerived> pow(const Eigen::ArrayBase<Derived>& x, const Eigen::ArrayBase<ExponentDerived>& exponents) { return Eigen::CwiseBinaryOp<Eigen::internal::scalar_pow_op<typename Derived::Scalar, typename ExponentDerived::Scalar>, const Derived, const ExponentDerived>( x.derived(), exponents.derived() ); } /** \returns an expression of the coefficient-wise power of the scalar \a x to the given array of \a exponents. * * This function computes the coefficient-wise power between a scalar and an array of exponents. * * \tparam Scalar is the scalar type of \a x. It must be compatible with the scalar type of the given array expression (\c Derived::Scalar). * * Example: \include Cwise_scalar_power_array.cpp * Output: \verbinclude Cwise_scalar_power_array.out * * \sa ArrayBase::pow() * * \relates ArrayBase */ #ifdef EIGEN_PARSED_BY_DOXYGEN template<typename Scalar,typename Derived> inline const CwiseBinaryOp<internal::scalar_pow_op<Scalar,Derived::Scalar>,Constant<Scalar>,Derived> pow(const Scalar& x,const Eigen::ArrayBase<Derived>& x); #else template<typename Scalar, typename Derived> inline typename internal::enable_if< !(internal::is_same<typename Derived::Scalar,Scalar>::value) && EIGEN_SCALAR_BINARY_SUPPORTED(pow,Scalar,typename Derived::Scalar), const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar,Derived,pow) >::type pow(const Scalar& x, const Eigen::ArrayBase<Derived>& exponents) { return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar,Derived,pow)( typename internal::plain_constant_type<Derived,Scalar>::type(exponents.rows(), exponents.cols(), x), exponents.derived() ); } template<typename Derived> inline const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename Derived::Scalar,Derived,pow) pow(const typename Derived::Scalar& x, const Eigen::ArrayBase<Derived>& exponents) { return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename Derived::Scalar,Derived,pow)( typename internal::plain_constant_type<Derived,typename Derived::Scalar>::type(exponents.rows(), exponents.cols(), x), exponents.derived() ); } #endif namespace internal { EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(real,scalar_real_op) EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(imag,scalar_imag_op) EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs2,scalar_abs2_op) } } // TODO: cleanly disable those functions that are not supported on Array (numext::real_ref, internal::random, internal::isApprox...) #endif // EIGEN_GLOBAL_FUNCTIONS_H
Unknown
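For illustration, a minimal self-contained sketch (not part of the header above) of how the global coefficient-wise functions declared in GlobalFunctions.h are typically used on an Array; only the standard public Eigen API is assumed:

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::ArrayXd x = Eigen::ArrayXd::LinSpaced(5, 0.0, 2.0);
  Eigen::ArrayXd s  = Eigen::sin(x);      // coefficient-wise sine
  Eigen::ArrayXd e  = Eigen::exp(x);      // coefficient-wise exponential
  Eigen::ArrayXd p1 = Eigen::pow(x, 2.0); // array raised to a scalar exponent
  Eigen::ArrayXd p2 = Eigen::pow(x, x);   // coefficient-wise x^x (array of exponents)
  std::cout << s.transpose()  << "\n" << e.transpose()  << "\n"
            << p1.transpose() << "\n" << p2.transpose() << "\n";
  return 0;
}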
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/VectorBlock.h
.h
3,462
97
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_VECTORBLOCK_H #define EIGEN_VECTORBLOCK_H namespace Eigen { namespace internal { template<typename VectorType, int Size> struct traits<VectorBlock<VectorType, Size> > : public traits<Block<VectorType, traits<VectorType>::Flags & RowMajorBit ? 1 : Size, traits<VectorType>::Flags & RowMajorBit ? Size : 1> > { }; } /** \class VectorBlock * \ingroup Core_Module * * \brief Expression of a fixed-size or dynamic-size sub-vector * * \tparam VectorType the type of the object in which we are taking a sub-vector * \tparam Size size of the sub-vector we are taking at compile time (optional) * * This class represents an expression of either a fixed-size or dynamic-size sub-vector. * It is the return type of DenseBase::segment(Index,Index) and DenseBase::segment<int>(Index) and * most of the time this is the only way it is used. * * However, if you want to directly manipulate sub-vector expressions, * for instance if you want to write a function returning such an expression, you * will need to use this class. * * Here is an example illustrating the dynamic case: * \include class_VectorBlock.cpp * Output: \verbinclude class_VectorBlock.out * * \note Even though this expression has dynamic size, in the case where \a VectorType * has fixed size, this expression inherits a fixed maximal size which means that evaluating * it does not cause a dynamic memory allocation. * * Here is an example illustrating the fixed-size case: * \include class_FixedVectorBlock.cpp * Output: \verbinclude class_FixedVectorBlock.out * * \sa class Block, DenseBase::segment(Index,Index,Index,Index), DenseBase::segment(Index,Index) */ template<typename VectorType, int Size> class VectorBlock : public Block<VectorType, internal::traits<VectorType>::Flags & RowMajorBit ? 1 : Size, internal::traits<VectorType>::Flags & RowMajorBit ? Size : 1> { typedef Block<VectorType, internal::traits<VectorType>::Flags & RowMajorBit ? 1 : Size, internal::traits<VectorType>::Flags & RowMajorBit ? Size : 1> Base; enum { IsColVector = !(internal::traits<VectorType>::Flags & RowMajorBit) }; public: EIGEN_DENSE_PUBLIC_INTERFACE(VectorBlock) using Base::operator=; /** Dynamic-size constructor */ EIGEN_DEVICE_FUNC inline VectorBlock(VectorType& vector, Index start, Index size) : Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start, IsColVector ? size : 1, IsColVector ? 1 : size) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock); } /** Fixed-size constructor */ EIGEN_DEVICE_FUNC inline VectorBlock(VectorType& vector, Index start) : Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock); } }; } // end namespace Eigen #endif // EIGEN_VECTORBLOCK_H
Unknown
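A short usage sketch, added for reference: segment(), head() and tail() all return VectorBlock expressions as described in the header above; only standard public Eigen API is used.

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(8, 0.0, 7.0);
  v.segment(2, 4).setZero();          // dynamic-size sub-vector: elements 2..5
  v.tail(2).array() += 1.0;           // head()/tail() are VectorBlock expressions too
  std::cout << v.segment<3>(0).transpose() << "\n";  // fixed-size sub-vector
  std::cout << v.transpose() << "\n";
  return 0;
}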
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/NestByValue.h
.h
3,400
111
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_NESTBYVALUE_H #define EIGEN_NESTBYVALUE_H namespace Eigen { namespace internal { template<typename ExpressionType> struct traits<NestByValue<ExpressionType> > : public traits<ExpressionType> {}; } /** \class NestByValue * \ingroup Core_Module * * \brief Expression which must be nested by value * * \tparam ExpressionType the type of the object of which we are requiring nesting-by-value * * This class is the return type of MatrixBase::nestByValue() * and most of the time this is the only way it is used. * * \sa MatrixBase::nestByValue() */ template<typename ExpressionType> class NestByValue : public internal::dense_xpr_base< NestByValue<ExpressionType> >::type { public: typedef typename internal::dense_xpr_base<NestByValue>::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(NestByValue) EIGEN_DEVICE_FUNC explicit inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {} EIGEN_DEVICE_FUNC inline Index rows() const { return m_expression.rows(); } EIGEN_DEVICE_FUNC inline Index cols() const { return m_expression.cols(); } EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_expression.outerStride(); } EIGEN_DEVICE_FUNC inline Index innerStride() const { return m_expression.innerStride(); } EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index row, Index col) const { return m_expression.coeff(row, col); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col) { return m_expression.const_cast_derived().coeffRef(row, col); } EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const { return m_expression.coeff(index); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index) { return m_expression.const_cast_derived().coeffRef(index); } template<int LoadMode> inline const PacketScalar packet(Index row, Index col) const { return m_expression.template packet<LoadMode>(row, col); } template<int LoadMode> inline void writePacket(Index row, Index col, const PacketScalar& x) { m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x); } template<int LoadMode> inline const PacketScalar packet(Index index) const { return m_expression.template packet<LoadMode>(index); } template<int LoadMode> inline void writePacket(Index index, const PacketScalar& x) { m_expression.const_cast_derived().template writePacket<LoadMode>(index, x); } EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; } protected: const ExpressionType m_expression; }; /** \returns an expression of the temporary version of *this. */ template<typename Derived> inline const NestByValue<Derived> DenseBase<Derived>::nestByValue() const { return NestByValue<Derived>(derived()); } } // end namespace Eigen #endif // EIGEN_NESTBYVALUE_H
Unknown
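A minimal, purely illustrative sketch of the wrapper's observable behaviour: nestByValue() returns a NestByValue object that stores its own copy of the operand and converts back to a const reference on demand.

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(3, 3);
  // The wrapper holds a copy of 'a' (m_expression is stored by value).
  const Eigen::NestByValue<Eigen::MatrixXd> wrapped = a.nestByValue();
  const Eigen::MatrixXd& ref = wrapped;  // uses operator const ExpressionType&()
  std::cout << ref.sum() << "\n";
  return 0;
}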
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/NoAlias.h
.h
3,582
109
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_NOALIAS_H #define EIGEN_NOALIAS_H namespace Eigen { /** \class NoAlias * \ingroup Core_Module * * \brief Pseudo expression providing an operator = assuming no aliasing * * \tparam ExpressionType the type of the object on which to do the lazy assignment * * This class represents an expression with special assignment operators * assuming no aliasing between the target expression and the source expression. * More precisely it allows bypassing the EvalBeforeAssignBit flag of the source expression. * It is the return type of MatrixBase::noalias() * and most of the time this is the only way it is used. * * \sa MatrixBase::noalias() */ template<typename ExpressionType, template <typename> class StorageBase> class NoAlias { public: typedef typename ExpressionType::Scalar Scalar; explicit NoAlias(ExpressionType& expression) : m_expression(expression) {} template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase<OtherDerived>& other) { call_assignment_no_alias(m_expression, other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>()); return m_expression; } template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase<OtherDerived>& other) { call_assignment_no_alias(m_expression, other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>()); return m_expression; } template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase<OtherDerived>& other) { call_assignment_no_alias(m_expression, other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>()); return m_expression; } EIGEN_DEVICE_FUNC ExpressionType& expression() const { return m_expression; } protected: ExpressionType& m_expression; }; /** \returns a pseudo expression of \c *this with an operator= assuming * no aliasing between \c *this and the source expression. * * More precisely, noalias() allows bypassing the EvalBeforeAssignBit flag. * Currently, even though several expressions may alias, only product * expressions have this flag. Therefore, noalias() is only useful when * the source expression contains a matrix product. * * Here are some examples where noalias is useful: * \code * D.noalias() = A * B; * D.noalias() += A.transpose() * B; * D.noalias() -= 2 * A * B.adjoint(); * \endcode * * On the other hand the following example will lead to a \b wrong result: * \code * A.noalias() = A * B; * \endcode * because the result matrix A is also an operand of the matrix product. Therefore, * there is no alternative but to evaluate A * B in a temporary, which is the default * behavior when you write: * \code * A = A * B; * \endcode * * \sa class NoAlias */ template<typename Derived> NoAlias<Derived,MatrixBase> MatrixBase<Derived>::noalias() { return NoAlias<Derived, Eigen::MatrixBase >(derived()); } } // end namespace Eigen #endif // EIGEN_NOALIAS_H
Unknown
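The canonical usage pattern, as a small runnable sketch mirroring the examples in the comment above:

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd D(3, 3);
  D.noalias()  = A * B;              // product written directly into D, no temporary
  D.noalias() += A.transpose() * B;
  // A.noalias() = A * B;  would be wrong: A is an operand of the product
  std::cout << D << "\n";
  return 0;
}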
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/NumTraits.h
.h
9,234
249
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_NUMTRAITS_H #define EIGEN_NUMTRAITS_H namespace Eigen { namespace internal { // default implementation of digits10(), based on numeric_limits if specialized, // 0 for integer types, and log10(epsilon()) otherwise. template< typename T, bool use_numeric_limits = std::numeric_limits<T>::is_specialized, bool is_integer = NumTraits<T>::IsInteger> struct default_digits10_impl { static int run() { return std::numeric_limits<T>::digits10; } }; template<typename T> struct default_digits10_impl<T,false,false> // Floating point { static int run() { using std::log10; using std::ceil; typedef typename NumTraits<T>::Real Real; return int(ceil(-log10(NumTraits<Real>::epsilon()))); } }; template<typename T> struct default_digits10_impl<T,false,true> // Integer { static int run() { return 0; } }; } // end namespace internal /** \class NumTraits * \ingroup Core_Module * * \brief Holds information about the various numeric (i.e. scalar) types allowed by Eigen. * * \tparam T the numeric type at hand * * This class stores enums, typedefs and static methods giving information about a numeric type. * * The provided data consists of: * \li A typedef \c Real, giving the "real part" type of \a T. If \a T is already real, * then \c Real is just a typedef to \a T. If \a T is \c std::complex<U> then \c Real * is a typedef to \a U. * \li A typedef \c NonInteger, giving the type that should be used for operations producing non-integral values, * such as quotients, square roots, etc. If \a T is a floating-point type, then this typedef just gives * \a T again. Note however that many Eigen functions such as internal::sqrt simply refuse to * take integers. Outside of a few cases, Eigen doesn't do automatic type promotion. Thus, this typedef is * only intended as a helper for code that needs to explicitly promote types. * \li A typedef \c Literal giving the type to use for numeric literals such as "2" or "0.5". For instance, for \c std::complex<U>, Literal is defined as \c U. * Of course, this type must be fully compatible with \a T. In doubt, just use \a T here. * \li A typedef \a Nested giving the type to use to nest a value inside of the expression tree. If you don't know what * this means, just use \a T here. * \li An enum value \a IsComplex. It is equal to 1 if \a T is a \c std::complex * type, and to 0 otherwise. * \li An enum value \a IsInteger. It is equal to \c 1 if \a T is an integer type such as \c int, * and to \c 0 otherwise. * \li Enum values ReadCost, AddCost and MulCost representing a rough estimate of the number of CPU cycles needed * to by move / add / mul instructions respectively, assuming the data is already stored in CPU registers. * Stay vague here. No need to do architecture-specific stuff. * \li An enum value \a IsSigned. It is equal to \c 1 if \a T is a signed type and to 0 if \a T is unsigned. * \li An enum value \a RequireInitialization. It is equal to \c 1 if the constructor of the numeric type \a T must * be called, and to 0 if it is safe not to call it. Default is 0 if \a T is an arithmetic type, and 1 otherwise. 
* \li An epsilon() function which, unlike <a href="http://en.cppreference.com/w/cpp/types/numeric_limits/epsilon">std::numeric_limits::epsilon()</a>, * it returns a \a Real instead of a \a T. * \li A dummy_precision() function returning a weak epsilon value. It is mainly used as a default * value by the fuzzy comparison operators. * \li highest() and lowest() functions returning the highest and lowest possible values respectively. * \li digits10() function returning the number of decimal digits that can be represented without change. This is * the analogue of <a href="http://en.cppreference.com/w/cpp/types/numeric_limits/digits10">std::numeric_limits<T>::digits10</a> * which is used as the default implementation if specialized. */ template<typename T> struct GenericNumTraits { enum { IsInteger = std::numeric_limits<T>::is_integer, IsSigned = std::numeric_limits<T>::is_signed, IsComplex = 0, RequireInitialization = internal::is_arithmetic<T>::value ? 0 : 1, ReadCost = 1, AddCost = 1, MulCost = 1 }; typedef T Real; typedef typename internal::conditional< IsInteger, typename internal::conditional<sizeof(T)<=2, float, double>::type, T >::type NonInteger; typedef T Nested; typedef T Literal; EIGEN_DEVICE_FUNC static inline Real epsilon() { return numext::numeric_limits<T>::epsilon(); } EIGEN_DEVICE_FUNC static inline int digits10() { return internal::default_digits10_impl<T>::run(); } EIGEN_DEVICE_FUNC static inline Real dummy_precision() { // make sure to override this for floating-point types return Real(0); } EIGEN_DEVICE_FUNC static inline T highest() { return (numext::numeric_limits<T>::max)(); } EIGEN_DEVICE_FUNC static inline T lowest() { return IsInteger ? (numext::numeric_limits<T>::min)() : (-(numext::numeric_limits<T>::max)()); } EIGEN_DEVICE_FUNC static inline T infinity() { return numext::numeric_limits<T>::infinity(); } EIGEN_DEVICE_FUNC static inline T quiet_NaN() { return numext::numeric_limits<T>::quiet_NaN(); } }; template<typename T> struct NumTraits : GenericNumTraits<T> {}; template<> struct NumTraits<float> : GenericNumTraits<float> { EIGEN_DEVICE_FUNC static inline float dummy_precision() { return 1e-5f; } }; template<> struct NumTraits<double> : GenericNumTraits<double> { EIGEN_DEVICE_FUNC static inline double dummy_precision() { return 1e-12; } }; template<> struct NumTraits<long double> : GenericNumTraits<long double> { static inline long double dummy_precision() { return 1e-15l; } }; template<typename _Real> struct NumTraits<std::complex<_Real> > : GenericNumTraits<std::complex<_Real> > { typedef _Real Real; typedef typename NumTraits<_Real>::Literal Literal; enum { IsComplex = 1, RequireInitialization = NumTraits<_Real>::RequireInitialization, ReadCost = 2 * NumTraits<_Real>::ReadCost, AddCost = 2 * NumTraits<Real>::AddCost, MulCost = 4 * NumTraits<Real>::MulCost + 2 * NumTraits<Real>::AddCost }; EIGEN_DEVICE_FUNC static inline Real epsilon() { return NumTraits<Real>::epsilon(); } EIGEN_DEVICE_FUNC static inline Real dummy_precision() { return NumTraits<Real>::dummy_precision(); } EIGEN_DEVICE_FUNC static inline int digits10() { return NumTraits<Real>::digits10(); } }; template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols> struct NumTraits<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > { typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> ArrayType; typedef typename NumTraits<Scalar>::Real RealScalar; typedef Array<RealScalar, Rows, Cols, Options, MaxRows, MaxCols> Real; typedef typename NumTraits<Scalar>::NonInteger 
NonIntegerScalar; typedef Array<NonIntegerScalar, Rows, Cols, Options, MaxRows, MaxCols> NonInteger; typedef ArrayType & Nested; typedef typename NumTraits<Scalar>::Literal Literal; enum { IsComplex = NumTraits<Scalar>::IsComplex, IsInteger = NumTraits<Scalar>::IsInteger, IsSigned = NumTraits<Scalar>::IsSigned, RequireInitialization = 1, ReadCost = ArrayType::SizeAtCompileTime==Dynamic ? HugeCost : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::ReadCost, AddCost = ArrayType::SizeAtCompileTime==Dynamic ? HugeCost : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::AddCost, MulCost = ArrayType::SizeAtCompileTime==Dynamic ? HugeCost : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::MulCost }; EIGEN_DEVICE_FUNC static inline RealScalar epsilon() { return NumTraits<RealScalar>::epsilon(); } EIGEN_DEVICE_FUNC static inline RealScalar dummy_precision() { return NumTraits<RealScalar>::dummy_precision(); } static inline int digits10() { return NumTraits<Scalar>::digits10(); } }; template<> struct NumTraits<std::string> : GenericNumTraits<std::string> { enum { RequireInitialization = 1, ReadCost = HugeCost, AddCost = HugeCost, MulCost = HugeCost }; static inline int digits10() { return 0; } private: static inline std::string epsilon(); static inline std::string dummy_precision(); static inline std::string lowest(); static inline std::string highest(); static inline std::string infinity(); static inline std::string quiet_NaN(); }; // Empty specialization for void to allow template specialization based on NumTraits<T>::Real with T==void and SFINAE. template<> struct NumTraits<void> {}; } // end namespace Eigen #endif // EIGEN_NUMTRAITS_H
Unknown
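A small sketch querying a few of the NumTraits members documented above (standard public API; the printed values are implementation-defined):

#include <Eigen/Core>
#include <complex>
#include <iostream>

int main()
{
  typedef Eigen::NumTraits<double> DT;
  std::cout << "epsilon         = " << DT::epsilon() << "\n"
            << "dummy_precision = " << DT::dummy_precision() << "\n"
            << "digits10        = " << DT::digits10() << "\n"
            << "lowest/highest  = " << DT::lowest() << " / " << DT::highest() << "\n";

  typedef Eigen::NumTraits<std::complex<float> > CT;  // Real is float, IsComplex is 1
  std::cout << "complex<float>: IsComplex = " << int(CT::IsComplex)
            << ", epsilon (a float) = " << CT::epsilon() << "\n";
  return 0;
}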
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Reverse.h
.h
7,073
212
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2009 Ricard Marxer <email@ricardmarxer.com> // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_REVERSE_H #define EIGEN_REVERSE_H namespace Eigen { namespace internal { template<typename MatrixType, int Direction> struct traits<Reverse<MatrixType, Direction> > : traits<MatrixType> { typedef typename MatrixType::Scalar Scalar; typedef typename traits<MatrixType>::StorageKind StorageKind; typedef typename traits<MatrixType>::XprKind XprKind; typedef typename ref_selector<MatrixType>::type MatrixTypeNested; typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, Flags = _MatrixTypeNested::Flags & (RowMajorBit | LvalueBit) }; }; template<typename PacketType, bool ReversePacket> struct reverse_packet_cond { static inline PacketType run(const PacketType& x) { return preverse(x); } }; template<typename PacketType> struct reverse_packet_cond<PacketType,false> { static inline PacketType run(const PacketType& x) { return x; } }; } // end namespace internal /** \class Reverse * \ingroup Core_Module * * \brief Expression of the reverse of a vector or matrix * * \tparam MatrixType the type of the object of which we are taking the reverse * \tparam Direction defines the direction of the reverse operation, can be Vertical, Horizontal, or BothDirections * * This class represents an expression of the reverse of a vector. * It is the return type of MatrixBase::reverse() and VectorwiseOp::reverse() * and most of the time this is the only way it is used. * * \sa MatrixBase::reverse(), VectorwiseOp::reverse() */ template<typename MatrixType, int Direction> class Reverse : public internal::dense_xpr_base< Reverse<MatrixType, Direction> >::type { public: typedef typename internal::dense_xpr_base<Reverse>::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Reverse) typedef typename internal::remove_all<MatrixType>::type NestedExpression; using Base::IsRowMajor; protected: enum { PacketSize = internal::packet_traits<Scalar>::size, IsColMajor = !IsRowMajor, ReverseRow = (Direction == Vertical) || (Direction == BothDirections), ReverseCol = (Direction == Horizontal) || (Direction == BothDirections), OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, OffsetCol = ReverseCol && IsRowMajor ? 
PacketSize : 1, ReversePacket = (Direction == BothDirections) || ((Direction == Vertical) && IsColMajor) || ((Direction == Horizontal) && IsRowMajor) }; typedef internal::reverse_packet_cond<PacketScalar,ReversePacket> reverse_packet; public: EIGEN_DEVICE_FUNC explicit inline Reverse(const MatrixType& matrix) : m_matrix(matrix) { } EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reverse) EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.rows(); } EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.cols(); } EIGEN_DEVICE_FUNC inline Index innerStride() const { return -m_matrix.innerStride(); } EIGEN_DEVICE_FUNC const typename internal::remove_all<typename MatrixType::Nested>::type& nestedExpression() const { return m_matrix; } protected: typename MatrixType::Nested m_matrix; }; /** \returns an expression of the reverse of *this. * * Example: \include MatrixBase_reverse.cpp * Output: \verbinclude MatrixBase_reverse.out * */ template<typename Derived> inline typename DenseBase<Derived>::ReverseReturnType DenseBase<Derived>::reverse() { return ReverseReturnType(derived()); } //reverse const overload moved DenseBase.h due to a CUDA compiler bug /** This is the "in place" version of reverse: it reverses \c *this. * * In most cases it is probably better to simply use the reversed expression * of a matrix. However, when reversing the matrix data itself is really needed, * then this "in-place" version is probably the right choice because it provides * the following additional benefits: * - less error prone: doing the same operation with .reverse() requires special care: * \code m = m.reverse().eval(); \endcode * - this API enables reverse operations without the need for a temporary * - it allows future optimizations (cache friendliness, etc.) * * \sa VectorwiseOp::reverseInPlace(), reverse() */ template<typename Derived> inline void DenseBase<Derived>::reverseInPlace() { if(cols()>rows()) { Index half = cols()/2; leftCols(half).swap(rightCols(half).reverse()); if((cols()%2)==1) { Index half2 = rows()/2; col(half).head(half2).swap(col(half).tail(half2).reverse()); } } else { Index half = rows()/2; topRows(half).swap(bottomRows(half).reverse()); if((rows()%2)==1) { Index half2 = cols()/2; row(half).head(half2).swap(row(half).tail(half2).reverse()); } } } namespace internal { template<int Direction> struct vectorwise_reverse_inplace_impl; template<> struct vectorwise_reverse_inplace_impl<Vertical> { template<typename ExpressionType> static void run(ExpressionType &xpr) { Index half = xpr.rows()/2; xpr.topRows(half).swap(xpr.bottomRows(half).colwise().reverse()); } }; template<> struct vectorwise_reverse_inplace_impl<Horizontal> { template<typename ExpressionType> static void run(ExpressionType &xpr) { Index half = xpr.cols()/2; xpr.leftCols(half).swap(xpr.rightCols(half).rowwise().reverse()); } }; } // end namespace internal /** This is the "in place" version of VectorwiseOp::reverse: it reverses each column or row of \c *this. * * In most cases it is probably better to simply use the reversed expression * of a matrix. 
However, when reversing the matrix data itself is really needed, * then this "in-place" version is probably the right choice because it provides * the following additional benefits: * - less error prone: doing the same operation with .reverse() requires special care: * \code m = m.reverse().eval(); \endcode * - this API enables reverse operations without the need for a temporary * * \sa DenseBase::reverseInPlace(), reverse() */ template<typename ExpressionType, int Direction> void VectorwiseOp<ExpressionType,Direction>::reverseInPlace() { internal::vectorwise_reverse_inplace_impl<Direction>::run(_expression().const_cast_derived()); } } // end namespace Eigen #endif // EIGEN_REVERSE_H
Unknown
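A runnable sketch of reverse() and the vectorwise variants discussed above:

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::MatrixXd m(2, 3);
  m << 1, 2, 3,
       4, 5, 6;
  std::cout << m.reverse() << "\n\n";           // both directions reversed
  std::cout << m.rowwise().reverse() << "\n\n"; // each row reversed
  m.colwise().reverseInPlace();                 // each column reversed in place
  std::cout << m << "\n";
  return 0;
}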
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/DenseBase.h
.h
27,551
613
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DENSEBASE_H #define EIGEN_DENSEBASE_H namespace Eigen { namespace internal { // The index type defined by EIGEN_DEFAULT_DENSE_INDEX_TYPE must be a signed type. // This dummy function simply aims at checking that at compile time. static inline void check_DenseIndex_is_signed() { EIGEN_STATIC_ASSERT(NumTraits<DenseIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); } } // end namespace internal /** \class DenseBase * \ingroup Core_Module * * \brief Base class for all dense matrices, vectors, and arrays * * This class is the base that is inherited by all dense objects (matrix, vector, arrays, * and related expression types). The common Eigen API for dense objects is contained in this class. * * \tparam Derived is the derived type, e.g., a matrix type or an expression. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN. * * \sa \blank \ref TopicClassHierarchy */ template<typename Derived> class DenseBase #ifndef EIGEN_PARSED_BY_DOXYGEN : public DenseCoeffsBase<Derived, internal::accessors_level<Derived>::value> #else : public DenseCoeffsBase<Derived,DirectWriteAccessors> #endif // not EIGEN_PARSED_BY_DOXYGEN { public: /** Inner iterator type to iterate over the coefficients of a row or column. * \sa class InnerIterator */ typedef Eigen::InnerIterator<Derived> InnerIterator; typedef typename internal::traits<Derived>::StorageKind StorageKind; /** * \brief The type used to store indices * \details This typedef is relevant for types that store multiple indices such as * PermutationMatrix or Transpositions, otherwise it defaults to Eigen::Index * \sa \blank \ref TopicPreprocessorDirectives, Eigen::Index, SparseMatrixBase. */ typedef typename internal::traits<Derived>::StorageIndex StorageIndex; /** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex<float>, etc. */ typedef typename internal::traits<Derived>::Scalar Scalar; /** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex<float>, etc. * * It is an alias for the Scalar type */ typedef Scalar value_type; typedef typename NumTraits<Scalar>::Real RealScalar; typedef DenseCoeffsBase<Derived, internal::accessors_level<Derived>::value> Base; using Base::derived; using Base::const_cast_derived; using Base::rows; using Base::cols; using Base::size; using Base::rowIndexByOuterInner; using Base::colIndexByOuterInner; using Base::coeff; using Base::coeffByOuterInner; using Base::operator(); using Base::operator[]; using Base::x; using Base::y; using Base::z; using Base::w; using Base::stride; using Base::innerStride; using Base::outerStride; using Base::rowStride; using Base::colStride; typedef typename Base::CoeffReturnType CoeffReturnType; enum { RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime, /**< The number of rows at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. 
* \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime, /**< The number of columns at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime, internal::traits<Derived>::ColsAtCompileTime>::ret), /**< This is equal to the number of coefficients, i.e. the number of * rows times the number of columns, or to \a Dynamic if this is not * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime, /**< This value is equal to the maximum possible number of rows that this expression * might have. If this expression might have an arbitrarily high number of rows, * this value is set to \a Dynamic. * * This value is useful to know when evaluating an expression, in order to determine * whether it is possible to avoid doing a dynamic memory allocation. * * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime */ MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime, /**< This value is equal to the maximum possible number of columns that this expression * might have. If this expression might have an arbitrarily high number of columns, * this value is set to \a Dynamic. * * This value is useful to know when evaluating an expression, in order to determine * whether it is possible to avoid doing a dynamic memory allocation. * * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime */ MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime, internal::traits<Derived>::MaxColsAtCompileTime>::ret), /**< This value is equal to the maximum possible number of coefficients that this expression * might have. If this expression might have an arbitrarily high number of coefficients, * this value is set to \a Dynamic. * * This value is useful to know when evaluating an expression, in order to determine * whether it is possible to avoid doing a dynamic memory allocation. * * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime */ IsVectorAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime == 1 || internal::traits<Derived>::MaxColsAtCompileTime == 1, /**< This is set to true if either the number of rows or the number of * columns is known at compile-time to be equal to 1. Indeed, in that case, * we are dealing with a column-vector (if there is only one column) or with * a row-vector (if there is only one row). */ Flags = internal::traits<Derived>::Flags, /**< This stores expression \ref flags flags which may or may not be inherited by new expressions * constructed from this one. See the \ref flags "list of flags". */ IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression has row-major storage order. */ InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime) : int(IsRowMajor) ? 
int(ColsAtCompileTime) : int(RowsAtCompileTime), InnerStrideAtCompileTime = internal::inner_stride_at_compile_time<Derived>::ret, OuterStrideAtCompileTime = internal::outer_stride_at_compile_time<Derived>::ret }; typedef typename internal::find_best_packet<Scalar,SizeAtCompileTime>::type PacketScalar; enum { IsPlainObjectBase = 0 }; /** The plain matrix type corresponding to this expression. * \sa PlainObject */ typedef Matrix<typename internal::traits<Derived>::Scalar, internal::traits<Derived>::RowsAtCompileTime, internal::traits<Derived>::ColsAtCompileTime, AutoAlign | (internal::traits<Derived>::Flags&RowMajorBit ? RowMajor : ColMajor), internal::traits<Derived>::MaxRowsAtCompileTime, internal::traits<Derived>::MaxColsAtCompileTime > PlainMatrix; /** The plain array type corresponding to this expression. * \sa PlainObject */ typedef Array<typename internal::traits<Derived>::Scalar, internal::traits<Derived>::RowsAtCompileTime, internal::traits<Derived>::ColsAtCompileTime, AutoAlign | (internal::traits<Derived>::Flags&RowMajorBit ? RowMajor : ColMajor), internal::traits<Derived>::MaxRowsAtCompileTime, internal::traits<Derived>::MaxColsAtCompileTime > PlainArray; /** \brief The plain matrix or array type corresponding to this expression. * * This is not necessarily exactly the return type of eval(). In the case of plain matrices, * the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed * that the return type of eval() is either PlainObject or const PlainObject&. */ typedef typename internal::conditional<internal::is_same<typename internal::traits<Derived>::XprKind,MatrixXpr >::value, PlainMatrix, PlainArray>::type PlainObject; /** \returns the number of nonzero coefficients which is in practice the number * of stored coefficients. */ EIGEN_DEVICE_FUNC inline Index nonZeros() const { return size(); } /** \returns the outer size. * * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a * column-major matrix, and the number of rows for a row-major matrix. */ EIGEN_DEVICE_FUNC Index outerSize() const { return IsVectorAtCompileTime ? 1 : int(IsRowMajor) ? this->rows() : this->cols(); } /** \returns the inner size. * * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a * column-major matrix, and the number of columns for a row-major matrix. */ EIGEN_DEVICE_FUNC Index innerSize() const { return IsVectorAtCompileTime ? this->size() : int(IsRowMajor) ? this->cols() : this->rows(); } /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does * nothing else. */ EIGEN_DEVICE_FUNC void resize(Index newSize) { EIGEN_ONLY_USED_FOR_DEBUG(newSize); eigen_assert(newSize == this->size() && "DenseBase::resize() does not actually allow to resize."); } /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does * nothing else. 
*/ EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { EIGEN_ONLY_USED_FOR_DEBUG(rows); EIGEN_ONLY_USED_FOR_DEBUG(cols); eigen_assert(rows == this->rows() && cols == this->cols() && "DenseBase::resize() does not actually allow to resize."); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal Represents a matrix with all coefficients equal to one another*/ typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,PlainObject> ConstantReturnType; /** \internal \deprecated Represents a vector with linearly spaced coefficients that allows sequential access only. */ typedef CwiseNullaryOp<internal::linspaced_op<Scalar,PacketScalar>,PlainObject> SequentialLinSpacedReturnType; /** \internal Represents a vector with linearly spaced coefficients that allows random access. */ typedef CwiseNullaryOp<internal::linspaced_op<Scalar,PacketScalar>,PlainObject> RandomAccessLinSpacedReturnType; /** \internal the return type of MatrixBase::eigenvalues() */ typedef Matrix<typename NumTraits<typename internal::traits<Derived>::Scalar>::Real, internal::traits<Derived>::ColsAtCompileTime, 1> EigenvaluesReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN /** Copies \a other into *this. \returns a reference to *this. */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other); /** Special case of the template operator=, in order to prevent the compiler * from generating a default operator= (issue hit with g++ 4.1) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase& other); template<typename OtherDerived> EIGEN_DEVICE_FUNC Derived& operator=(const EigenBase<OtherDerived> &other); template<typename OtherDerived> EIGEN_DEVICE_FUNC Derived& operator+=(const EigenBase<OtherDerived> &other); template<typename OtherDerived> EIGEN_DEVICE_FUNC Derived& operator-=(const EigenBase<OtherDerived> &other); template<typename OtherDerived> EIGEN_DEVICE_FUNC Derived& operator=(const ReturnByValue<OtherDerived>& func); /** \internal * Copies \a other into *this without evaluating other. \returns a reference to *this. 
* \deprecated */ template<typename OtherDerived> EIGEN_DEVICE_FUNC Derived& lazyAssign(const DenseBase<OtherDerived>& other); EIGEN_DEVICE_FUNC CommaInitializer<Derived> operator<< (const Scalar& s); /** \deprecated it now returns \c *this */ template<unsigned int Added,unsigned int Removed> EIGEN_DEPRECATED const Derived& flagged() const { return derived(); } template<typename OtherDerived> EIGEN_DEVICE_FUNC CommaInitializer<Derived> operator<< (const DenseBase<OtherDerived>& other); typedef Transpose<Derived> TransposeReturnType; EIGEN_DEVICE_FUNC TransposeReturnType transpose(); typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType; EIGEN_DEVICE_FUNC ConstTransposeReturnType transpose() const; EIGEN_DEVICE_FUNC void transposeInPlace(); EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(Index rows, Index cols, const Scalar& value); EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(Index size, const Scalar& value); EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(const Scalar& value); EIGEN_DEVICE_FUNC static const SequentialLinSpacedReturnType LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high); EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(Index size, const Scalar& low, const Scalar& high); EIGEN_DEVICE_FUNC static const SequentialLinSpacedReturnType LinSpaced(Sequential_t, const Scalar& low, const Scalar& high); EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(const Scalar& low, const Scalar& high); template<typename CustomNullaryOp> EIGEN_DEVICE_FUNC static const CwiseNullaryOp<CustomNullaryOp, PlainObject> NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func); template<typename CustomNullaryOp> EIGEN_DEVICE_FUNC static const CwiseNullaryOp<CustomNullaryOp, PlainObject> NullaryExpr(Index size, const CustomNullaryOp& func); template<typename CustomNullaryOp> EIGEN_DEVICE_FUNC static const CwiseNullaryOp<CustomNullaryOp, PlainObject> NullaryExpr(const CustomNullaryOp& func); EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(Index rows, Index cols); EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(Index size); EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(); EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index rows, Index cols); EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index size); EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(); EIGEN_DEVICE_FUNC void fill(const Scalar& value); EIGEN_DEVICE_FUNC Derived& setConstant(const Scalar& value); EIGEN_DEVICE_FUNC Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high); EIGEN_DEVICE_FUNC Derived& setLinSpaced(const Scalar& low, const Scalar& high); EIGEN_DEVICE_FUNC Derived& setZero(); EIGEN_DEVICE_FUNC Derived& setOnes(); EIGEN_DEVICE_FUNC Derived& setRandom(); template<typename OtherDerived> EIGEN_DEVICE_FUNC bool isApprox(const DenseBase<OtherDerived>& other, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const; EIGEN_DEVICE_FUNC bool isMuchSmallerThan(const RealScalar& other, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const; template<typename OtherDerived> EIGEN_DEVICE_FUNC bool isMuchSmallerThan(const DenseBase<OtherDerived>& other, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const; EIGEN_DEVICE_FUNC bool isApproxToConstant(const Scalar& value, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const; EIGEN_DEVICE_FUNC bool isConstant(const Scalar& value, 
const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const; EIGEN_DEVICE_FUNC bool isZero(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const; EIGEN_DEVICE_FUNC bool isOnes(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const; inline bool hasNaN() const; inline bool allFinite() const; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*=(const Scalar& other); EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator/=(const Scalar& other); typedef typename internal::add_const_on_value_type<typename internal::eval<Derived>::type>::type EvalReturnType; /** \returns the matrix or vector obtained by evaluating this expression. * * Notice that in the case of a plain matrix or vector (not an expression) this function just returns * a const reference, in order to avoid a useless copy. * * \warning Be carefull with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page \endlink. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvalReturnType eval() const { // Even though MSVC does not honor strong inlining when the return type // is a dynamic matrix, we desperately need strong inlining for fixed // size types on MSVC. return typename internal::eval<Derived>::type(derived()); } /** swaps *this with the expression \a other. * */ template<typename OtherDerived> EIGEN_DEVICE_FUNC void swap(const DenseBase<OtherDerived>& other) { EIGEN_STATIC_ASSERT(!OtherDerived::IsPlainObjectBase,THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY); eigen_assert(rows()==other.rows() && cols()==other.cols()); call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op<Scalar>()); } /** swaps *this with the matrix or array \a other. * */ template<typename OtherDerived> EIGEN_DEVICE_FUNC void swap(PlainObjectBase<OtherDerived>& other) { eigen_assert(rows()==other.rows() && cols()==other.cols()); call_assignment(derived(), other.derived(), internal::swap_assign_op<Scalar>()); } EIGEN_DEVICE_FUNC inline const NestByValue<Derived> nestByValue() const; EIGEN_DEVICE_FUNC inline const ForceAlignedAccess<Derived> forceAlignedAccess() const; EIGEN_DEVICE_FUNC inline ForceAlignedAccess<Derived> forceAlignedAccess(); template<bool Enable> EIGEN_DEVICE_FUNC inline const typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf() const; template<bool Enable> EIGEN_DEVICE_FUNC inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf(); EIGEN_DEVICE_FUNC Scalar sum() const; EIGEN_DEVICE_FUNC Scalar mean() const; EIGEN_DEVICE_FUNC Scalar trace() const; EIGEN_DEVICE_FUNC Scalar prod() const; EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar minCoeff() const; EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar maxCoeff() const; template<typename IndexType> EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const; template<typename IndexType> EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const; template<typename IndexType> EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const; template<typename IndexType> EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const; template<typename BinaryOp> EIGEN_DEVICE_FUNC Scalar redux(const BinaryOp& func) const; template<typename Visitor> EIGEN_DEVICE_FUNC void visit(Visitor& func) const; /** \returns a WithFormat 
proxy object allowing to print a matrix the with given * format \a fmt. * * See class IOFormat for some examples. * * \sa class IOFormat, class WithFormat */ inline const WithFormat<Derived> format(const IOFormat& fmt) const { return WithFormat<Derived>(derived(), fmt); } /** \returns the unique coefficient of a 1x1 expression */ EIGEN_DEVICE_FUNC CoeffReturnType value() const { EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) eigen_assert(this->rows() == 1 && this->cols() == 1); return derived().coeff(0,0); } EIGEN_DEVICE_FUNC bool all() const; EIGEN_DEVICE_FUNC bool any() const; EIGEN_DEVICE_FUNC Index count() const; typedef VectorwiseOp<Derived, Horizontal> RowwiseReturnType; typedef const VectorwiseOp<const Derived, Horizontal> ConstRowwiseReturnType; typedef VectorwiseOp<Derived, Vertical> ColwiseReturnType; typedef const VectorwiseOp<const Derived, Vertical> ConstColwiseReturnType; /** \returns a VectorwiseOp wrapper of *this providing additional partial reduction operations * * Example: \include MatrixBase_rowwise.cpp * Output: \verbinclude MatrixBase_rowwise.out * * \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting */ //Code moved here due to a CUDA compiler bug EIGEN_DEVICE_FUNC inline ConstRowwiseReturnType rowwise() const { return ConstRowwiseReturnType(derived()); } EIGEN_DEVICE_FUNC RowwiseReturnType rowwise(); /** \returns a VectorwiseOp wrapper of *this providing additional partial reduction operations * * Example: \include MatrixBase_colwise.cpp * Output: \verbinclude MatrixBase_colwise.out * * \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting */ EIGEN_DEVICE_FUNC inline ConstColwiseReturnType colwise() const { return ConstColwiseReturnType(derived()); } EIGEN_DEVICE_FUNC ColwiseReturnType colwise(); typedef CwiseNullaryOp<internal::scalar_random_op<Scalar>,PlainObject> RandomReturnType; static const RandomReturnType Random(Index rows, Index cols); static const RandomReturnType Random(Index size); static const RandomReturnType Random(); template<typename ThenDerived,typename ElseDerived> const Select<Derived,ThenDerived,ElseDerived> select(const DenseBase<ThenDerived>& thenMatrix, const DenseBase<ElseDerived>& elseMatrix) const; template<typename ThenDerived> inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType> select(const DenseBase<ThenDerived>& thenMatrix, const typename ThenDerived::Scalar& elseScalar) const; template<typename ElseDerived> inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived > select(const typename ElseDerived::Scalar& thenScalar, const DenseBase<ElseDerived>& elseMatrix) const; template<int p> RealScalar lpNorm() const; template<int RowFactor, int ColFactor> EIGEN_DEVICE_FUNC const Replicate<Derived,RowFactor,ColFactor> replicate() const; /** * \return an expression of the replication of \c *this * * Example: \include MatrixBase_replicate_int_int.cpp * Output: \verbinclude MatrixBase_replicate_int_int.out * * \sa VectorwiseOp::replicate(), DenseBase::replicate<int,int>(), class Replicate */ //Code moved here due to a CUDA compiler bug EIGEN_DEVICE_FUNC const Replicate<Derived, Dynamic, Dynamic> replicate(Index rowFactor, Index colFactor) const { return Replicate<Derived, Dynamic, Dynamic>(derived(), rowFactor, colFactor); } typedef Reverse<Derived, BothDirections> ReverseReturnType; typedef const Reverse<const Derived, BothDirections> ConstReverseReturnType; EIGEN_DEVICE_FUNC ReverseReturnType reverse(); /** This is the const version of reverse(). 
*/ //Code moved here due to a CUDA compiler bug EIGEN_DEVICE_FUNC ConstReverseReturnType reverse() const { return ConstReverseReturnType(derived()); } EIGEN_DEVICE_FUNC void reverseInPlace(); #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase #define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL #define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND) # include "../plugins/BlockMethods.h" # ifdef EIGEN_DENSEBASE_PLUGIN # include EIGEN_DENSEBASE_PLUGIN # endif #undef EIGEN_CURRENT_STORAGE_BASE_CLASS #undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL #undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF // disable the use of evalTo for dense objects with a nice compilation error template<typename Dest> EIGEN_DEVICE_FUNC inline void evalTo(Dest& ) const { EIGEN_STATIC_ASSERT((internal::is_same<Dest,void>::value),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS); } protected: EIGEN_DEFAULT_COPY_CONSTRUCTOR(DenseBase) /** Default constructor. Do nothing. */ EIGEN_DEVICE_FUNC DenseBase() { /* Just checks for self-consistency of the flags. * Only do it when debugging Eigen, as this borders on paranoia and could slow compilation down */ #ifdef EIGEN_INTERNAL_DEBUGGING EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor)) && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, int(!IsRowMajor))), INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION) #endif } private: EIGEN_DEVICE_FUNC explicit DenseBase(int); EIGEN_DEVICE_FUNC DenseBase(int,int); template<typename OtherDerived> EIGEN_DEVICE_FUNC explicit DenseBase(const DenseBase<OtherDerived>&); }; } // end namespace Eigen #endif // EIGEN_DENSEBASE_H
Unknown
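A brief sketch exercising a few of the DenseBase facilities declared above (nullary expressions, reductions, the comma initializer, transposeInPlace()); all calls are standard public API:

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::MatrixXd c = Eigen::MatrixXd::Constant(2, 3, 7.5);    // ConstantReturnType
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0); // linearly spaced coefficients
  std::cout << "sum = "  << c.sum()  << ", mean = " << c.mean()
            << ", max = " << c.maxCoeff() << "\n";

  Eigen::MatrixXd m(2, 2);
  m << 1, 2,        // comma initializer (operator<<)
       3, 4;
  m.transposeInPlace();
  std::cout << m << "\n" << v.transpose() << "\n";
  return 0;
}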
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/MapBase.h
.h
11,212
309
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MAPBASE_H #define EIGEN_MAPBASE_H #define EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) \ EIGEN_STATIC_ASSERT((int(internal::evaluator<Derived>::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \ YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT) namespace Eigen { /** \ingroup Core_Module * * \brief Base class for dense Map and Block expression with direct access * * This base class provides the const low-level accessors (e.g. coeff, coeffRef) of dense * Map and Block objects with direct access. * Typical users do not have to directly deal with this class. * * This class can be extended by through the macro plugin \c EIGEN_MAPBASE_PLUGIN. * See \link TopicCustomizing_Plugins customizing Eigen \endlink for details. * * The \c Derived class has to provide the following two methods describing the memory layout: * \code Index innerStride() const; \endcode * \code Index outerStride() const; \endcode * * \sa class Map, class Block */ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors> : public internal::dense_xpr_base<Derived>::type { public: typedef typename internal::dense_xpr_base<Derived>::type Base; enum { RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime, ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime, InnerStrideAtCompileTime = internal::traits<Derived>::InnerStrideAtCompileTime, SizeAtCompileTime = Base::SizeAtCompileTime }; typedef typename internal::traits<Derived>::StorageKind StorageKind; typedef typename internal::traits<Derived>::Scalar Scalar; typedef typename internal::packet_traits<Scalar>::type PacketScalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef typename internal::conditional< bool(internal::is_lvalue<Derived>::value), Scalar *, const Scalar *>::type PointerType; using Base::derived; // using Base::RowsAtCompileTime; // using Base::ColsAtCompileTime; // using Base::SizeAtCompileTime; using Base::MaxRowsAtCompileTime; using Base::MaxColsAtCompileTime; using Base::MaxSizeAtCompileTime; using Base::IsVectorAtCompileTime; using Base::Flags; using Base::IsRowMajor; using Base::rows; using Base::cols; using Base::size; using Base::coeff; using Base::coeffRef; using Base::lazyAssign; using Base::eval; using Base::innerStride; using Base::outerStride; using Base::rowStride; using Base::colStride; // bug 217 - compile error on ICC 11.1 using Base::operator=; typedef typename Base::CoeffReturnType CoeffReturnType; /** \copydoc DenseBase::rows() */ EIGEN_DEVICE_FUNC inline Index rows() const { return m_rows.value(); } /** \copydoc DenseBase::cols() */ EIGEN_DEVICE_FUNC inline Index cols() const { return m_cols.value(); } /** Returns a pointer to the first coefficient of the matrix or vector. * * \note When addressing this data, make sure to honor the strides returned by innerStride() and outerStride(). 
* * \sa innerStride(), outerStride() */ EIGEN_DEVICE_FUNC inline const Scalar* data() const { return m_data; } /** \copydoc PlainObjectBase::coeff(Index,Index) const */ EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index rowId, Index colId) const { return m_data[colId * colStride() + rowId * rowStride()]; } /** \copydoc PlainObjectBase::coeff(Index) const */ EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index index) const { EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) return m_data[index * innerStride()]; } /** \copydoc PlainObjectBase::coeffRef(Index,Index) const */ EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { return this->m_data[colId * colStride() + rowId * rowStride()]; } /** \copydoc PlainObjectBase::coeffRef(Index) const */ EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) return this->m_data[index * innerStride()]; } /** \internal */ template<int LoadMode> inline PacketScalar packet(Index rowId, Index colId) const { return internal::ploadt<PacketScalar, LoadMode> (m_data + (colId * colStride() + rowId * rowStride())); } /** \internal */ template<int LoadMode> inline PacketScalar packet(Index index) const { EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride()); } /** \internal Constructor for fixed size matrices or vectors */ EIGEN_DEVICE_FUNC explicit inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime) { EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) checkSanity<Derived>(); } /** \internal Constructor for dynamically sized vectors */ EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index vecSize) : m_data(dataPtr), m_rows(RowsAtCompileTime == Dynamic ? vecSize : Index(RowsAtCompileTime)), m_cols(ColsAtCompileTime == Dynamic ? vecSize : Index(ColsAtCompileTime)) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) eigen_assert(vecSize >= 0); eigen_assert(dataPtr == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == vecSize); checkSanity<Derived>(); } /** \internal Constructor for dynamically sized matrices */ EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index rows, Index cols) : m_data(dataPtr), m_rows(rows), m_cols(cols) { eigen_assert( (dataPtr == 0) || ( rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols))); checkSanity<Derived>(); } #ifdef EIGEN_MAPBASE_PLUGIN #include EIGEN_MAPBASE_PLUGIN #endif protected: EIGEN_DEFAULT_COPY_CONSTRUCTOR(MapBase) EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MapBase) template<typename T> EIGEN_DEVICE_FUNC void checkSanity(typename internal::enable_if<(internal::traits<T>::Alignment>0),void*>::type = 0) const { #if EIGEN_MAX_ALIGN_BYTES>0 // innerStride() is not set yet when this function is called, so we optimistically assume the lowest plausible value: const Index minInnerStride = InnerStrideAtCompileTime == Dynamic ? 
1 : Index(InnerStrideAtCompileTime); EIGEN_ONLY_USED_FOR_DEBUG(minInnerStride); eigen_assert(( ((internal::UIntPtr(m_data) % internal::traits<Derived>::Alignment) == 0) || (cols() * rows() * minInnerStride * sizeof(Scalar)) < internal::traits<Derived>::Alignment ) && "data is not aligned"); #endif } template<typename T> EIGEN_DEVICE_FUNC void checkSanity(typename internal::enable_if<internal::traits<T>::Alignment==0,void*>::type = 0) const {} PointerType m_data; const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows; const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols; }; /** \ingroup Core_Module * * \brief Base class for non-const dense Map and Block expression with direct access * * This base class provides the non-const low-level accessors (e.g. coeff and coeffRef) of * dense Map and Block objects with direct access. * It inherits MapBase<Derived, ReadOnlyAccessors> which defines the const variant for reading specific entries. * * \sa class Map, class Block */ template<typename Derived> class MapBase<Derived, WriteAccessors> : public MapBase<Derived, ReadOnlyAccessors> { typedef MapBase<Derived, ReadOnlyAccessors> ReadOnlyMapBase; public: typedef MapBase<Derived, ReadOnlyAccessors> Base; typedef typename Base::Scalar Scalar; typedef typename Base::PacketScalar PacketScalar; typedef typename Base::StorageIndex StorageIndex; typedef typename Base::PointerType PointerType; using Base::derived; using Base::rows; using Base::cols; using Base::size; using Base::coeff; using Base::coeffRef; using Base::innerStride; using Base::outerStride; using Base::rowStride; using Base::colStride; typedef typename internal::conditional< internal::is_lvalue<Derived>::value, Scalar, const Scalar >::type ScalarWithConstIfNotLvalue; EIGEN_DEVICE_FUNC inline const Scalar* data() const { return this->m_data; } EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return this->m_data; } // no const-cast here so non-const-correct code will give a compile error EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col) { return this->m_data[col * colStride() + row * rowStride()]; } EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue& coeffRef(Index index) { EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) return this->m_data[index * innerStride()]; } template<int StoreMode> inline void writePacket(Index row, Index col, const PacketScalar& val) { internal::pstoret<Scalar, PacketScalar, StoreMode> (this->m_data + (col * colStride() + row * rowStride()), val); } template<int StoreMode> inline void writePacket(Index index, const PacketScalar& val) { EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) internal::pstoret<Scalar, PacketScalar, StoreMode> (this->m_data + index * innerStride(), val); } EIGEN_DEVICE_FUNC explicit inline MapBase(PointerType dataPtr) : Base(dataPtr) {} EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index vecSize) : Base(dataPtr, vecSize) {} EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index rows, Index cols) : Base(dataPtr, rows, cols) {} EIGEN_DEVICE_FUNC Derived& operator=(const MapBase& other) { ReadOnlyMapBase::Base::operator=(other); return derived(); } // In theory we could simply refer to Base:Base::operator=, but MSVC does not like Base::Base, // see bugs 821 and 920. using ReadOnlyMapBase::Base::operator=; protected: EIGEN_DEFAULT_COPY_CONSTRUCTOR(MapBase) EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MapBase) }; #undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS } // end namespace Eigen #endif // EIGEN_MAPBASE_H
Unknown
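A minimal usage sketch, assuming Eigen 3.3, of the Map machinery built on the MapBase class in the record above; the strided variant shows how innerStride() enters the coeff() addressing documented there. The buffer and variable names are hypothetical, not taken from the repository.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  float data[8] = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f};

  // Column-major 2x4 view of the existing buffer; no copy is made.
  Eigen::Map<Eigen::MatrixXf> m(data, 2, 4);
  m(0, 0) = 42.f;                                  // writes straight through to data[0]

  // Strided view: every other element of the buffer as a length-4 vector,
  // i.e. innerStride() == 2 in the coefficient addressing shown above.
  Eigen::Map<Eigen::VectorXf, 0, Eigen::InnerStride<2> > v(data, 4);
  std::cout << v.transpose() << std::endl;         // prints "42 2 4 6"
  return 0;
}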
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Redux.h
.h
17,852
506
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_REDUX_H #define EIGEN_REDUX_H namespace Eigen { namespace internal { // TODO // * implement other kind of vectorization // * factorize code /*************************************************************************** * Part 1 : the logic deciding a strategy for vectorization and unrolling ***************************************************************************/ template<typename Func, typename Derived> struct redux_traits { public: typedef typename find_best_packet<typename Derived::Scalar,Derived::SizeAtCompileTime>::type PacketType; enum { PacketSize = unpacket_traits<PacketType>::size, InnerMaxSize = int(Derived::IsRowMajor) ? Derived::MaxColsAtCompileTime : Derived::MaxRowsAtCompileTime }; enum { MightVectorize = (int(Derived::Flags)&ActualPacketAccessBit) && (functor_traits<Func>::PacketAccess), MayLinearVectorize = bool(MightVectorize) && (int(Derived::Flags)&LinearAccessBit), MaySliceVectorize = bool(MightVectorize) && int(InnerMaxSize)>=3*PacketSize }; public: enum { Traversal = int(MayLinearVectorize) ? int(LinearVectorizedTraversal) : int(MaySliceVectorize) ? int(SliceVectorizedTraversal) : int(DefaultTraversal) }; public: enum { Cost = Derived::SizeAtCompileTime == Dynamic ? HugeCost : Derived::SizeAtCompileTime * Derived::CoeffReadCost + (Derived::SizeAtCompileTime-1) * functor_traits<Func>::Cost, UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize)) }; public: enum { Unrolling = Cost <= UnrollingLimit ? 
CompleteUnrolling : NoUnrolling }; #ifdef EIGEN_DEBUG_ASSIGN static void debug() { std::cerr << "Xpr: " << typeid(typename Derived::XprType).name() << std::endl; std::cerr.setf(std::ios::hex, std::ios::basefield); EIGEN_DEBUG_VAR(Derived::Flags) std::cerr.unsetf(std::ios::hex); EIGEN_DEBUG_VAR(InnerMaxSize) EIGEN_DEBUG_VAR(PacketSize) EIGEN_DEBUG_VAR(MightVectorize) EIGEN_DEBUG_VAR(MayLinearVectorize) EIGEN_DEBUG_VAR(MaySliceVectorize) EIGEN_DEBUG_VAR(Traversal) EIGEN_DEBUG_VAR(UnrollingLimit) EIGEN_DEBUG_VAR(Unrolling) std::cerr << std::endl; } #endif }; /*************************************************************************** * Part 2 : unrollers ***************************************************************************/ /*** no vectorization ***/ template<typename Func, typename Derived, int Start, int Length> struct redux_novec_unroller { enum { HalfLength = Length/2 }; typedef typename Derived::Scalar Scalar; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func) { return func(redux_novec_unroller<Func, Derived, Start, HalfLength>::run(mat,func), redux_novec_unroller<Func, Derived, Start+HalfLength, Length-HalfLength>::run(mat,func)); } }; template<typename Func, typename Derived, int Start> struct redux_novec_unroller<Func, Derived, Start, 1> { enum { outer = Start / Derived::InnerSizeAtCompileTime, inner = Start % Derived::InnerSizeAtCompileTime }; typedef typename Derived::Scalar Scalar; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func&) { return mat.coeffByOuterInner(outer, inner); } }; // This is actually dead code and will never be called. It is required // to prevent false warnings regarding failed inlining though // for 0 length run() will never be called at all. 
template<typename Func, typename Derived, int Start> struct redux_novec_unroller<Func, Derived, Start, 0> { typedef typename Derived::Scalar Scalar; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Derived&, const Func&) { return Scalar(); } }; /*** vectorization ***/ template<typename Func, typename Derived, int Start, int Length> struct redux_vec_unroller { enum { PacketSize = redux_traits<Func, Derived>::PacketSize, HalfLength = Length/2 }; typedef typename Derived::Scalar Scalar; typedef typename redux_traits<Func, Derived>::PacketType PacketScalar; static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func& func) { return func.packetOp( redux_vec_unroller<Func, Derived, Start, HalfLength>::run(mat,func), redux_vec_unroller<Func, Derived, Start+HalfLength, Length-HalfLength>::run(mat,func) ); } }; template<typename Func, typename Derived, int Start> struct redux_vec_unroller<Func, Derived, Start, 1> { enum { index = Start * redux_traits<Func, Derived>::PacketSize, outer = index / int(Derived::InnerSizeAtCompileTime), inner = index % int(Derived::InnerSizeAtCompileTime), alignment = Derived::Alignment }; typedef typename Derived::Scalar Scalar; typedef typename redux_traits<Func, Derived>::PacketType PacketScalar; static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func&) { return mat.template packetByOuterInner<alignment,PacketScalar>(outer, inner); } }; /*************************************************************************** * Part 3 : implementation of all cases ***************************************************************************/ template<typename Func, typename Derived, int Traversal = redux_traits<Func, Derived>::Traversal, int Unrolling = redux_traits<Func, Derived>::Unrolling > struct redux_impl; template<typename Func, typename Derived> struct redux_impl<Func, Derived, DefaultTraversal, NoUnrolling> { typedef typename Derived::Scalar Scalar; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func) { eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix"); Scalar res; res = mat.coeffByOuterInner(0, 0); for(Index i = 1; i < mat.innerSize(); ++i) res = func(res, mat.coeffByOuterInner(0, i)); for(Index i = 1; i < mat.outerSize(); ++i) for(Index j = 0; j < mat.innerSize(); ++j) res = func(res, mat.coeffByOuterInner(i, j)); return res; } }; template<typename Func, typename Derived> struct redux_impl<Func,Derived, DefaultTraversal, CompleteUnrolling> : public redux_novec_unroller<Func,Derived, 0, Derived::SizeAtCompileTime> {}; template<typename Func, typename Derived> struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling> { typedef typename Derived::Scalar Scalar; typedef typename redux_traits<Func, Derived>::PacketType PacketScalar; static Scalar run(const Derived &mat, const Func& func) { const Index size = mat.size(); const Index packetSize = redux_traits<Func, Derived>::PacketSize; const int packetAlignment = unpacket_traits<PacketScalar>::alignment; enum { alignment0 = (bool(Derived::Flags & DirectAccessBit) && bool(packet_traits<Scalar>::AlignedOnScalar)) ? 
int(packetAlignment) : int(Unaligned), alignment = EIGEN_PLAIN_ENUM_MAX(alignment0, Derived::Alignment) }; const Index alignedStart = internal::first_default_aligned(mat.nestedExpression()); const Index alignedSize2 = ((size-alignedStart)/(2*packetSize))*(2*packetSize); const Index alignedSize = ((size-alignedStart)/(packetSize))*(packetSize); const Index alignedEnd2 = alignedStart + alignedSize2; const Index alignedEnd = alignedStart + alignedSize; Scalar res; if(alignedSize) { PacketScalar packet_res0 = mat.template packet<alignment,PacketScalar>(alignedStart); if(alignedSize>packetSize) // we have at least two packets to partly unroll the loop { PacketScalar packet_res1 = mat.template packet<alignment,PacketScalar>(alignedStart+packetSize); for(Index index = alignedStart + 2*packetSize; index < alignedEnd2; index += 2*packetSize) { packet_res0 = func.packetOp(packet_res0, mat.template packet<alignment,PacketScalar>(index)); packet_res1 = func.packetOp(packet_res1, mat.template packet<alignment,PacketScalar>(index+packetSize)); } packet_res0 = func.packetOp(packet_res0,packet_res1); if(alignedEnd>alignedEnd2) packet_res0 = func.packetOp(packet_res0, mat.template packet<alignment,PacketScalar>(alignedEnd2)); } res = func.predux(packet_res0); for(Index index = 0; index < alignedStart; ++index) res = func(res,mat.coeff(index)); for(Index index = alignedEnd; index < size; ++index) res = func(res,mat.coeff(index)); } else // too small to vectorize anything. // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize. { res = mat.coeff(0); for(Index index = 1; index < size; ++index) res = func(res,mat.coeff(index)); } return res; } }; // NOTE: for SliceVectorizedTraversal we simply bypass unrolling template<typename Func, typename Derived, int Unrolling> struct redux_impl<Func, Derived, SliceVectorizedTraversal, Unrolling> { typedef typename Derived::Scalar Scalar; typedef typename redux_traits<Func, Derived>::PacketType PacketType; EIGEN_DEVICE_FUNC static Scalar run(const Derived &mat, const Func& func) { eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix"); const Index innerSize = mat.innerSize(); const Index outerSize = mat.outerSize(); enum { packetSize = redux_traits<Func, Derived>::PacketSize }; const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize; Scalar res; if(packetedInnerSize) { PacketType packet_res = mat.template packet<Unaligned,PacketType>(0,0); for(Index j=0; j<outerSize; ++j) for(Index i=(j==0?packetSize:0); i<packetedInnerSize; i+=Index(packetSize)) packet_res = func.packetOp(packet_res, mat.template packetByOuterInner<Unaligned,PacketType>(j,i)); res = func.predux(packet_res); for(Index j=0; j<outerSize; ++j) for(Index i=packetedInnerSize; i<innerSize; ++i) res = func(res, mat.coeffByOuterInner(j,i)); } else // too small to vectorize anything. // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize. 
{ res = redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>::run(mat, func); } return res; } }; template<typename Func, typename Derived> struct redux_impl<Func, Derived, LinearVectorizedTraversal, CompleteUnrolling> { typedef typename Derived::Scalar Scalar; typedef typename redux_traits<Func, Derived>::PacketType PacketScalar; enum { PacketSize = redux_traits<Func, Derived>::PacketSize, Size = Derived::SizeAtCompileTime, VectorizedSize = (Size / PacketSize) * PacketSize }; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func) { eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix"); if (VectorizedSize > 0) { Scalar res = func.predux(redux_vec_unroller<Func, Derived, 0, Size / PacketSize>::run(mat,func)); if (VectorizedSize != Size) res = func(res,redux_novec_unroller<Func, Derived, VectorizedSize, Size-VectorizedSize>::run(mat,func)); return res; } else { return redux_novec_unroller<Func, Derived, 0, Size>::run(mat,func); } } }; // evaluator adaptor template<typename _XprType> class redux_evaluator { public: typedef _XprType XprType; EIGEN_DEVICE_FUNC explicit redux_evaluator(const XprType &xpr) : m_evaluator(xpr), m_xpr(xpr) {} typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::PacketScalar PacketScalar; typedef typename XprType::PacketReturnType PacketReturnType; enum { MaxRowsAtCompileTime = XprType::MaxRowsAtCompileTime, MaxColsAtCompileTime = XprType::MaxColsAtCompileTime, // TODO we should not remove DirectAccessBit and rather find an elegant way to query the alignment offset at runtime from the evaluator Flags = evaluator<XprType>::Flags & ~DirectAccessBit, IsRowMajor = XprType::IsRowMajor, SizeAtCompileTime = XprType::SizeAtCompileTime, InnerSizeAtCompileTime = XprType::InnerSizeAtCompileTime, CoeffReadCost = evaluator<XprType>::CoeffReadCost, Alignment = evaluator<XprType>::Alignment }; EIGEN_DEVICE_FUNC Index rows() const { return m_xpr.rows(); } EIGEN_DEVICE_FUNC Index cols() const { return m_xpr.cols(); } EIGEN_DEVICE_FUNC Index size() const { return m_xpr.size(); } EIGEN_DEVICE_FUNC Index innerSize() const { return m_xpr.innerSize(); } EIGEN_DEVICE_FUNC Index outerSize() const { return m_xpr.outerSize(); } EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index row, Index col) const { return m_evaluator.coeff(row, col); } EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const { return m_evaluator.coeff(index); } template<int LoadMode, typename PacketType> PacketType packet(Index row, Index col) const { return m_evaluator.template packet<LoadMode,PacketType>(row, col); } template<int LoadMode, typename PacketType> PacketType packet(Index index) const { return m_evaluator.template packet<LoadMode,PacketType>(index); } EIGEN_DEVICE_FUNC CoeffReturnType coeffByOuterInner(Index outer, Index inner) const { return m_evaluator.coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); } template<int LoadMode, typename PacketType> PacketType packetByOuterInner(Index outer, Index inner) const { return m_evaluator.template packet<LoadMode,PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? 
inner : outer); } const XprType & nestedExpression() const { return m_xpr; } protected: internal::evaluator<XprType> m_evaluator; const XprType &m_xpr; }; } // end namespace internal /*************************************************************************** * Part 4 : public API ***************************************************************************/ /** \returns the result of a full redux operation on the whole matrix or vector using \a func * * The template parameter \a BinaryOp is the type of the functor \a func which must be * an associative operator. Both current C++98 and C++11 functor styles are handled. * * \sa DenseBase::sum(), DenseBase::minCoeff(), DenseBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise() */ template<typename Derived> template<typename Func> EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::redux(const Func& func) const { eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix"); typedef typename internal::redux_evaluator<Derived> ThisEvaluator; ThisEvaluator thisEval(derived()); return internal::redux_impl<Func, ThisEvaluator>::run(thisEval, func); } /** \returns the minimum of all coefficients of \c *this. * \warning the result is undefined if \c *this contains NaN. */ template<typename Derived> EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::minCoeff() const { return derived().redux(Eigen::internal::scalar_min_op<Scalar,Scalar>()); } /** \returns the maximum of all coefficients of \c *this. * \warning the result is undefined if \c *this contains NaN. */ template<typename Derived> EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::maxCoeff() const { return derived().redux(Eigen::internal::scalar_max_op<Scalar,Scalar>()); } /** \returns the sum of all coefficients of \c *this * * If \c *this is empty, then the value 0 is returned. * * \sa trace(), prod(), mean() */ template<typename Derived> EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::sum() const { if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0)) return Scalar(0); return derived().redux(Eigen::internal::scalar_sum_op<Scalar,Scalar>()); } /** \returns the mean of all coefficients of *this * * \sa trace(), prod(), sum() */ template<typename Derived> EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::mean() const { #ifdef __INTEL_COMPILER #pragma warning push #pragma warning ( disable : 2259 ) #endif return Scalar(derived().redux(Eigen::internal::scalar_sum_op<Scalar,Scalar>())) / Scalar(this->size()); #ifdef __INTEL_COMPILER #pragma warning pop #endif } /** \returns the product of all coefficients of *this * * Example: \include MatrixBase_prod.cpp * Output: \verbinclude MatrixBase_prod.out * * \sa sum(), mean(), trace() */ template<typename Derived> EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar DenseBase<Derived>::prod() const { if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0)) return Scalar(1); return derived().redux(Eigen::internal::scalar_product_op<Scalar>()); } /** \returns the trace of \c *this, i.e. the sum of the coefficients on the main diagonal. * * \c *this can be any matrix, not necessarily square. * * \sa diagonal(), sum() */ template<typename Derived> EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar MatrixBase<Derived>::trace() const { return derived().diagonal().sum(); } } // end namespace Eigen #endif // EIGEN_REDUX_H
Unknown
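A minimal usage sketch, assuming Eigen 3.3 and a C++11 compiler, of the public reduction entry points defined in the Redux.h record above (sum, prod, minCoeff, maxCoeff, mean, trace), plus a user-supplied associative functor passed to redux() as the documentation there describes.

#include <Eigen/Dense>
#include <algorithm>
#include <cmath>
#include <iostream>

int main()
{
  Eigen::Matrix2d m;
  m << 1, 2,
       3, 4;

  std::cout << m.sum()      << std::endl;  // 10
  std::cout << m.prod()     << std::endl;  // 24
  std::cout << m.minCoeff() << std::endl;  // 1
  std::cout << m.maxCoeff() << std::endl;  // 4
  std::cout << m.mean()     << std::endl;  // 2.5
  std::cout << m.trace()    << std::endl;  // 5 (sum of the main diagonal)

  // Generic reduction with a custom associative functor (here a C++11 lambda):
  double maxAbs = m.redux([](double a, double b) { return std::max(std::abs(a), std::abs(b)); });
  std::cout << maxAbs << std::endl;        // 4
  return 0;
}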
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Visitor.h
.h
8,074
274
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_VISITOR_H #define EIGEN_VISITOR_H namespace Eigen { namespace internal { template<typename Visitor, typename Derived, int UnrollCount> struct visitor_impl { enum { col = (UnrollCount-1) / Derived::RowsAtCompileTime, row = (UnrollCount-1) % Derived::RowsAtCompileTime }; EIGEN_DEVICE_FUNC static inline void run(const Derived &mat, Visitor& visitor) { visitor_impl<Visitor, Derived, UnrollCount-1>::run(mat, visitor); visitor(mat.coeff(row, col), row, col); } }; template<typename Visitor, typename Derived> struct visitor_impl<Visitor, Derived, 1> { EIGEN_DEVICE_FUNC static inline void run(const Derived &mat, Visitor& visitor) { return visitor.init(mat.coeff(0, 0), 0, 0); } }; template<typename Visitor, typename Derived> struct visitor_impl<Visitor, Derived, Dynamic> { EIGEN_DEVICE_FUNC static inline void run(const Derived& mat, Visitor& visitor) { visitor.init(mat.coeff(0,0), 0, 0); for(Index i = 1; i < mat.rows(); ++i) visitor(mat.coeff(i, 0), i, 0); for(Index j = 1; j < mat.cols(); ++j) for(Index i = 0; i < mat.rows(); ++i) visitor(mat.coeff(i, j), i, j); } }; // evaluator adaptor template<typename XprType> class visitor_evaluator { public: EIGEN_DEVICE_FUNC explicit visitor_evaluator(const XprType &xpr) : m_evaluator(xpr), m_xpr(xpr) {} typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; enum { RowsAtCompileTime = XprType::RowsAtCompileTime, CoeffReadCost = internal::evaluator<XprType>::CoeffReadCost }; EIGEN_DEVICE_FUNC Index rows() const { return m_xpr.rows(); } EIGEN_DEVICE_FUNC Index cols() const { return m_xpr.cols(); } EIGEN_DEVICE_FUNC Index size() const { return m_xpr.size(); } EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index row, Index col) const { return m_evaluator.coeff(row, col); } protected: internal::evaluator<XprType> m_evaluator; const XprType &m_xpr; }; } // end namespace internal /** Applies the visitor \a visitor to the whole coefficients of the matrix or vector. * * The template parameter \a Visitor is the type of the visitor and provides the following interface: * \code * struct MyVisitor { * // called for the first coefficient * void init(const Scalar& value, Index i, Index j); * // called for all other coefficients * void operator() (const Scalar& value, Index i, Index j); * }; * \endcode * * \note compared to one or two \em for \em loops, visitors offer automatic * unrolling for small fixed size matrix. * * \sa minCoeff(Index*,Index*), maxCoeff(Index*,Index*), DenseBase::redux() */ template<typename Derived> template<typename Visitor> EIGEN_DEVICE_FUNC void DenseBase<Derived>::visit(Visitor& visitor) const { typedef typename internal::visitor_evaluator<Derived> ThisEvaluator; ThisEvaluator thisEval(derived()); enum { unroll = SizeAtCompileTime != Dynamic && SizeAtCompileTime * ThisEvaluator::CoeffReadCost + (SizeAtCompileTime-1) * internal::functor_traits<Visitor>::Cost <= EIGEN_UNROLLING_LIMIT }; return internal::visitor_impl<Visitor, ThisEvaluator, unroll ? 
int(SizeAtCompileTime) : Dynamic>::run(thisEval, visitor); } namespace internal { /** \internal * \brief Base class to implement min and max visitors */ template <typename Derived> struct coeff_visitor { typedef typename Derived::Scalar Scalar; Index row, col; Scalar res; EIGEN_DEVICE_FUNC inline void init(const Scalar& value, Index i, Index j) { res = value; row = i; col = j; } }; /** \internal * \brief Visitor computing the min coefficient with its value and coordinates * * \sa DenseBase::minCoeff(Index*, Index*) */ template <typename Derived> struct min_coeff_visitor : coeff_visitor<Derived> { typedef typename Derived::Scalar Scalar; EIGEN_DEVICE_FUNC void operator() (const Scalar& value, Index i, Index j) { if(value < this->res) { this->res = value; this->row = i; this->col = j; } } }; template<typename Scalar> struct functor_traits<min_coeff_visitor<Scalar> > { enum { Cost = NumTraits<Scalar>::AddCost }; }; /** \internal * \brief Visitor computing the max coefficient with its value and coordinates * * \sa DenseBase::maxCoeff(Index*, Index*) */ template <typename Derived> struct max_coeff_visitor : coeff_visitor<Derived> { typedef typename Derived::Scalar Scalar; EIGEN_DEVICE_FUNC void operator() (const Scalar& value, Index i, Index j) { if(value > this->res) { this->res = value; this->row = i; this->col = j; } } }; template<typename Scalar> struct functor_traits<max_coeff_visitor<Scalar> > { enum { Cost = NumTraits<Scalar>::AddCost }; }; } // end namespace internal /** \fn DenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const * \returns the minimum of all coefficients of *this and puts in *row and *col its location. * \warning the result is undefined if \c *this contains NaN. * * \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visit(), DenseBase::minCoeff() */ template<typename Derived> template<typename IndexType> EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar DenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const { internal::min_coeff_visitor<Derived> minVisitor; this->visit(minVisitor); *rowId = minVisitor.row; if (colId) *colId = minVisitor.col; return minVisitor.res; } /** \returns the minimum of all coefficients of *this and puts in *index its location. * \warning the result is undefined if \c *this contains NaN. * * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::minCoeff() */ template<typename Derived> template<typename IndexType> EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar DenseBase<Derived>::minCoeff(IndexType* index) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) internal::min_coeff_visitor<Derived> minVisitor; this->visit(minVisitor); *index = IndexType((RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row); return minVisitor.res; } /** \fn DenseBase<Derived>::maxCoeff(IndexType* rowId, IndexType* colId) const * \returns the maximum of all coefficients of *this and puts in *row and *col its location. * \warning the result is undefined if \c *this contains NaN. 
* * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::maxCoeff() */ template<typename Derived> template<typename IndexType> EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar DenseBase<Derived>::maxCoeff(IndexType* rowPtr, IndexType* colPtr) const { internal::max_coeff_visitor<Derived> maxVisitor; this->visit(maxVisitor); *rowPtr = maxVisitor.row; if (colPtr) *colPtr = maxVisitor.col; return maxVisitor.res; } /** \returns the maximum of all coefficients of *this and puts in *index its location. * \warning the result is undefined if \c *this contains NaN. * * \sa DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::maxCoeff() */ template<typename Derived> template<typename IndexType> EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar DenseBase<Derived>::maxCoeff(IndexType* index) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) internal::max_coeff_visitor<Derived> maxVisitor; this->visit(maxVisitor); *index = (RowsAtCompileTime==1) ? maxVisitor.col : maxVisitor.row; return maxVisitor.res; } } // end namespace Eigen #endif // EIGEN_VISITOR_H
Unknown
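A minimal usage sketch, assuming Eigen 3.3, of DenseBase::visit() from the Visitor.h record above, with a user-defined visitor implementing the documented init()/operator() interface; MaxAbsVisitor is a hypothetical name introduced only for this illustration.

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

// Illustrative visitor: tracks the coefficient of largest absolute value and its position,
// following the interface documented for DenseBase::visit().
struct MaxAbsVisitor
{
  double value;
  Eigen::Index row, col;

  void init(const double& v, Eigen::Index i, Eigen::Index j)
  { value = std::abs(v); row = i; col = j; }

  void operator()(const double& v, Eigen::Index i, Eigen::Index j)
  { if (std::abs(v) > value) { value = std::abs(v); row = i; col = j; } }
};

int main()
{
  Eigen::Matrix2d m;
  m << 1, -5,
       3,  2;

  MaxAbsVisitor vis;
  m.visit(vis);
  std::cout << vis.value << " at (" << vis.row << "," << vis.col << ")" << std::endl;  // 5 at (0,1)

  // The built-in min/max visitors back the coordinate-returning reductions:
  Eigen::Index r, c;
  m.minCoeff(&r, &c);   // r == 0, c == 1 (coefficient -5)
  return 0;
}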
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Swap.h
.h
2,683
68
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SWAP_H #define EIGEN_SWAP_H namespace Eigen { namespace internal { // Overload default assignPacket behavior for swapping them template<typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT> class generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, swap_assign_op<typename DstEvaluatorTypeT::Scalar>, Specialized> : public generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, swap_assign_op<typename DstEvaluatorTypeT::Scalar>, BuiltIn> { protected: typedef generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, swap_assign_op<typename DstEvaluatorTypeT::Scalar>, BuiltIn> Base; using Base::m_dst; using Base::m_src; using Base::m_functor; public: typedef typename Base::Scalar Scalar; typedef typename Base::DstXprType DstXprType; typedef swap_assign_op<Scalar> Functor; EIGEN_DEVICE_FUNC generic_dense_assignment_kernel(DstEvaluatorTypeT &dst, const SrcEvaluatorTypeT &src, const Functor &func, DstXprType& dstExpr) : Base(dst, src, func, dstExpr) {} template<int StoreMode, int LoadMode, typename PacketType> void assignPacket(Index row, Index col) { PacketType tmp = m_src.template packet<LoadMode,PacketType>(row,col); const_cast<SrcEvaluatorTypeT&>(m_src).template writePacket<LoadMode>(row,col, m_dst.template packet<StoreMode,PacketType>(row,col)); m_dst.template writePacket<StoreMode>(row,col,tmp); } template<int StoreMode, int LoadMode, typename PacketType> void assignPacket(Index index) { PacketType tmp = m_src.template packet<LoadMode,PacketType>(index); const_cast<SrcEvaluatorTypeT&>(m_src).template writePacket<LoadMode>(index, m_dst.template packet<StoreMode,PacketType>(index)); m_dst.template writePacket<StoreMode>(index,tmp); } // TODO find a simple way not to have to copy/paste this function from generic_dense_assignment_kernel, by simple I mean no CRTP (Gael) template<int StoreMode, int LoadMode, typename PacketType> void assignPacketByOuterInner(Index outer, Index inner) { Index row = Base::rowIndexByOuterInner(outer, inner); Index col = Base::colIndexByOuterInner(outer, inner); assignPacket<StoreMode,LoadMode,PacketType>(row, col); } }; } // namespace internal } // end namespace Eigen #endif // EIGEN_SWAP_H
Unknown
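A minimal usage sketch, assuming Eigen 3.3, of the dense swap path that the specialized assignment kernel in the Swap.h record above ultimately serves: swapping whole matrices and writable sub-expressions.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix2f a = Eigen::Matrix2f::Zero();
  Eigen::Matrix2f b = Eigen::Matrix2f::Ones();

  a.swap(b);                 // coefficient-wise swap; a is now all ones, b all zeros
  a.row(0).swap(b.row(1));   // writable sub-expressions can be swapped the same way

  std::cout << a << "\n\n" << b << std::endl;
  return 0;
}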
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Ref.h
.h
12,844
285
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_REF_H #define EIGEN_REF_H namespace Eigen { namespace internal { template<typename _PlainObjectType, int _Options, typename _StrideType> struct traits<Ref<_PlainObjectType, _Options, _StrideType> > : public traits<Map<_PlainObjectType, _Options, _StrideType> > { typedef _PlainObjectType PlainObjectType; typedef _StrideType StrideType; enum { Options = _Options, Flags = traits<Map<_PlainObjectType, _Options, _StrideType> >::Flags | NestByRefBit, Alignment = traits<Map<_PlainObjectType, _Options, _StrideType> >::Alignment }; template<typename Derived> struct match { enum { IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime, HasDirectAccess = internal::has_direct_access<Derived>::ret, StorageOrderMatch = IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)), InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic) || int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime) || (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1), OuterStrideMatch = IsVectorAtCompileTime || int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime), // NOTE, this indirection of evaluator<Derived>::Alignment is needed // to workaround a very strange bug in MSVC related to the instantiation // of has_*ary_operator in evaluator<CwiseNullaryOp>. // This line is surprisingly very sensitive. For instance, simply adding parenthesis // as "DerivedAlignment = (int(evaluator<Derived>::Alignment))," will make MSVC fail... DerivedAlignment = int(evaluator<Derived>::Alignment), AlignmentMatch = (int(traits<PlainObjectType>::Alignment)==int(Unaligned)) || (DerivedAlignment >= int(Alignment)), // FIXME the first condition is not very clear, it should be replaced by the required alignment ScalarTypeMatch = internal::is_same<typename PlainObjectType::Scalar, typename Derived::Scalar>::value, MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch && ScalarTypeMatch }; typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type; }; }; template<typename Derived> struct traits<RefBase<Derived> > : public traits<Derived> {}; } template<typename Derived> class RefBase : public MapBase<Derived> { typedef typename internal::traits<Derived>::PlainObjectType PlainObjectType; typedef typename internal::traits<Derived>::StrideType StrideType; public: typedef MapBase<Derived> Base; EIGEN_DENSE_PUBLIC_INTERFACE(RefBase) EIGEN_DEVICE_FUNC inline Index innerStride() const { return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1; } EIGEN_DEVICE_FUNC inline Index outerStride() const { return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer() : IsVectorAtCompileTime ? this->size() : int(Flags)&RowMajorBit ? 
this->cols() : this->rows(); } EIGEN_DEVICE_FUNC RefBase() : Base(0,RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime), // Stride<> does not allow default ctor for Dynamic strides, so let' initialize it with dummy values: m_stride(StrideType::OuterStrideAtCompileTime==Dynamic?0:StrideType::OuterStrideAtCompileTime, StrideType::InnerStrideAtCompileTime==Dynamic?0:StrideType::InnerStrideAtCompileTime) {} EIGEN_INHERIT_ASSIGNMENT_OPERATORS(RefBase) protected: typedef Stride<StrideType::OuterStrideAtCompileTime,StrideType::InnerStrideAtCompileTime> StrideBase; template<typename Expression> EIGEN_DEVICE_FUNC void construct(Expression& expr) { EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(PlainObjectType,Expression); if(PlainObjectType::RowsAtCompileTime==1) { eigen_assert(expr.rows()==1 || expr.cols()==1); ::new (static_cast<Base*>(this)) Base(expr.data(), 1, expr.size()); } else if(PlainObjectType::ColsAtCompileTime==1) { eigen_assert(expr.rows()==1 || expr.cols()==1); ::new (static_cast<Base*>(this)) Base(expr.data(), expr.size(), 1); } else ::new (static_cast<Base*>(this)) Base(expr.data(), expr.rows(), expr.cols()); if(Expression::IsVectorAtCompileTime && (!PlainObjectType::IsVectorAtCompileTime) && ((Expression::Flags&RowMajorBit)!=(PlainObjectType::Flags&RowMajorBit))) ::new (&m_stride) StrideBase(expr.innerStride(), StrideType::InnerStrideAtCompileTime==0?0:1); else ::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(), StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride()); } StrideBase m_stride; }; /** \class Ref * \ingroup Core_Module * * \brief A matrix or vector expression mapping an existing expression * * \tparam PlainObjectType the equivalent matrix type of the mapped data * \tparam Options specifies the pointer alignment in bytes. It can be: \c #Aligned128, , \c #Aligned64, \c #Aligned32, \c #Aligned16, \c #Aligned8 or \c #Unaligned. * The default is \c #Unaligned. * \tparam StrideType optionally specifies strides. By default, Ref implies a contiguous storage along the inner dimension (inner stride==1), * but accepts a variable outer stride (leading dimension). * This can be overridden by specifying strides. * The type passed here must be a specialization of the Stride template, see examples below. * * This class provides a way to write non-template functions taking Eigen objects as parameters while limiting the number of copies. * A Ref<> object can represent either a const expression or a l-value: * \code * // in-out argument: * void foo1(Ref<VectorXf> x); * * // read-only const argument: * void foo2(const Ref<const VectorXf>& x); * \endcode * * In the in-out case, the input argument must satisfy the constraints of the actual Ref<> type, otherwise a compilation issue will be triggered. * By default, a Ref<VectorXf> can reference any dense vector expression of float having a contiguous memory layout. * Likewise, a Ref<MatrixXf> can reference any column-major dense matrix expression of float whose column's elements are contiguously stored with * the possibility to have a constant space in-between each column, i.e. the inner stride must be equal to 1, but the outer stride (or leading dimension) * can be greater than the number of rows. * * In the const case, if the input expression does not match the above requirement, then it is evaluated into a temporary before being passed to the function. 
* Here are some examples: * \code * MatrixXf A; * VectorXf a; * foo1(a.head()); // OK * foo1(A.col()); // OK * foo1(A.row()); // Compilation error because here innerstride!=1 * foo2(A.row()); // Compilation error because A.row() is a 1xN object while foo2 is expecting a Nx1 object * foo2(A.row().transpose()); // The row is copied into a contiguous temporary * foo2(2*a); // The expression is evaluated into a temporary * foo2(A.col().segment(2,4)); // No temporary * \endcode * * The range of inputs that can be referenced without temporary can be enlarged using the last two template parameters. * Here is an example accepting an innerstride!=1: * \code * // in-out argument: * void foo3(Ref<VectorXf,0,InnerStride<> > x); * foo3(A.row()); // OK * \endcode * The downside here is that the function foo3 might be significantly slower than foo1 because it won't be able to exploit vectorization, and will involve more * expensive address computations even if the input is contiguously stored in memory. To overcome this issue, one might propose to overload internally calling a * template function, e.g.: * \code * // in the .h: * void foo(const Ref<MatrixXf>& A); * void foo(const Ref<MatrixXf,0,Stride<> >& A); * * // in the .cpp: * template<typename TypeOfA> void foo_impl(const TypeOfA& A) { * ... // crazy code goes here * } * void foo(const Ref<MatrixXf>& A) { foo_impl(A); } * void foo(const Ref<MatrixXf,0,Stride<> >& A) { foo_impl(A); } * \endcode * * * \sa PlainObjectBase::Map(), \ref TopicStorageOrders */ template<typename PlainObjectType, int Options, typename StrideType> class Ref : public RefBase<Ref<PlainObjectType, Options, StrideType> > { private: typedef internal::traits<Ref> Traits; template<typename Derived> EIGEN_DEVICE_FUNC inline Ref(const PlainObjectBase<Derived>& expr, typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0); public: typedef RefBase<Ref> Base; EIGEN_DENSE_PUBLIC_INTERFACE(Ref) #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename Derived> EIGEN_DEVICE_FUNC inline Ref(PlainObjectBase<Derived>& expr, typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0) { EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH); Base::construct(expr.derived()); } template<typename Derived> EIGEN_DEVICE_FUNC inline Ref(const DenseBase<Derived>& expr, typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0) #else /** Implicit constructor from any dense expression */ template<typename Derived> inline Ref(DenseBase<Derived>& expr) #endif { EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY); EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH); EIGEN_STATIC_ASSERT(!Derived::IsPlainObjectBase,THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY); Base::construct(expr.const_cast_derived()); } EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Ref) }; // this is the const ref version template<typename TPlainObjectType, int Options, typename StrideType> class Ref<const TPlainObjectType, Options, StrideType> : public RefBase<Ref<const TPlainObjectType, Options, StrideType> > { typedef internal::traits<Ref> Traits; public: typedef RefBase<Ref> Base; EIGEN_DENSE_PUBLIC_INTERFACE(Ref) template<typename Derived> EIGEN_DEVICE_FUNC inline Ref(const DenseBase<Derived>& expr, typename 
internal::enable_if<bool(Traits::template match<Derived>::ScalarTypeMatch),Derived>::type* = 0) { // std::cout << match_helper<Derived>::HasDirectAccess << "," << match_helper<Derived>::OuterStrideMatch << "," << match_helper<Derived>::InnerStrideMatch << "\n"; // std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n"; // std::cout << int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n"; construct(expr.derived(), typename Traits::template match<Derived>::type()); } EIGEN_DEVICE_FUNC inline Ref(const Ref& other) : Base(other) { // copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy } template<typename OtherRef> EIGEN_DEVICE_FUNC inline Ref(const RefBase<OtherRef>& other) { construct(other.derived(), typename Traits::template match<OtherRef>::type()); } protected: template<typename Expression> EIGEN_DEVICE_FUNC void construct(const Expression& expr,internal::true_type) { Base::construct(expr); } template<typename Expression> EIGEN_DEVICE_FUNC void construct(const Expression& expr, internal::false_type) { internal::call_assignment_no_alias(m_object,expr,internal::assign_op<Scalar,Scalar>()); Base::construct(m_object); } protected: TPlainObjectType m_object; }; } // end namespace Eigen #endif // EIGEN_REF_H
Unknown
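A minimal usage sketch, assuming Eigen 3.3, of Ref<> as a function-argument type, mirroring the foo1/foo2 examples in the documentation of the Ref.h record above; scaleInPlace and sumOf are hypothetical names introduced for this illustration.

#include <Eigen/Dense>
#include <iostream>

// In-out argument: the caller's data is modified in place.
void scaleInPlace(Eigen::Ref<Eigen::VectorXf> x, float s) { x *= s; }

// Read-only argument: non-matching expressions are evaluated into a temporary.
float sumOf(const Eigen::Ref<const Eigen::VectorXf>& x) { return x.sum(); }

int main()
{
  Eigen::MatrixXf A = Eigen::MatrixXf::Random(4, 4);
  Eigen::VectorXf v = Eigen::VectorXf::Ones(4);

  scaleInPlace(v, 2.f);                           // plain vector: referenced directly, no copy
  scaleInPlace(A.col(1), 0.5f);                   // column of a column-major matrix: inner stride 1, no copy
  std::cout << sumOf(A.col(0) + v) << std::endl;  // general expression: evaluated into a temporary first
  return 0;
}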
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/MatrixBase.h
.h
23,276
531
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIXBASE_H #define EIGEN_MATRIXBASE_H namespace Eigen { /** \class MatrixBase * \ingroup Core_Module * * \brief Base class for all dense matrices, vectors, and expressions * * This class is the base that is inherited by all matrix, vector, and related expression * types. Most of the Eigen API is contained in this class, and its base classes. Other important * classes for the Eigen API are Matrix, and VectorwiseOp. * * Note that some methods are defined in other modules such as the \ref LU_Module LU module * for all functions related to matrix inversions. * * \tparam Derived is the derived type, e.g. a matrix type, or an expression, etc. * * When writing a function taking Eigen objects as argument, if you want your function * to take as argument any matrix, vector, or expression, just let it take a * MatrixBase argument. As an example, here is a function printFirstRow which, given * a matrix, vector, or expression \a x, prints the first row of \a x. * * \code template<typename Derived> void printFirstRow(const Eigen::MatrixBase<Derived>& x) { cout << x.row(0) << endl; } * \endcode * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_MATRIXBASE_PLUGIN. * * \sa \blank \ref TopicClassHierarchy */ template<typename Derived> class MatrixBase : public DenseBase<Derived> { public: #ifndef EIGEN_PARSED_BY_DOXYGEN typedef MatrixBase StorageBaseType; typedef typename internal::traits<Derived>::StorageKind StorageKind; typedef typename internal::traits<Derived>::StorageIndex StorageIndex; typedef typename internal::traits<Derived>::Scalar Scalar; typedef typename internal::packet_traits<Scalar>::type PacketScalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef DenseBase<Derived> Base; using Base::RowsAtCompileTime; using Base::ColsAtCompileTime; using Base::SizeAtCompileTime; using Base::MaxRowsAtCompileTime; using Base::MaxColsAtCompileTime; using Base::MaxSizeAtCompileTime; using Base::IsVectorAtCompileTime; using Base::Flags; using Base::derived; using Base::const_cast_derived; using Base::rows; using Base::cols; using Base::size; using Base::coeff; using Base::coeffRef; using Base::lazyAssign; using Base::eval; using Base::operator+=; using Base::operator-=; using Base::operator*=; using Base::operator/=; typedef typename Base::CoeffReturnType CoeffReturnType; typedef typename Base::ConstTransposeReturnType ConstTransposeReturnType; typedef typename Base::RowXpr RowXpr; typedef typename Base::ColXpr ColXpr; #endif // not EIGEN_PARSED_BY_DOXYGEN #ifndef EIGEN_PARSED_BY_DOXYGEN /** type of the equivalent square matrix */ typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime), EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType; #endif // not EIGEN_PARSED_BY_DOXYGEN /** \returns the size of the main diagonal, which is min(rows(),cols()). * \sa rows(), cols(), SizeAtCompileTime. 
*/ EIGEN_DEVICE_FUNC inline Index diagonalSize() const { return (numext::mini)(rows(),cols()); } typedef typename Base::PlainObject PlainObject; #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal Represents a matrix with all coefficients equal to one another*/ typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,PlainObject> ConstantReturnType; /** \internal the return type of MatrixBase::adjoint() */ typedef typename internal::conditional<NumTraits<Scalar>::IsComplex, CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, ConstTransposeReturnType>, ConstTransposeReturnType >::type AdjointReturnType; /** \internal Return type of eigenvalues() */ typedef Matrix<std::complex<RealScalar>, internal::traits<Derived>::ColsAtCompileTime, 1, ColMajor> EigenvaluesReturnType; /** \internal the return type of identity */ typedef CwiseNullaryOp<internal::scalar_identity_op<Scalar>,PlainObject> IdentityReturnType; /** \internal the return type of unit vectors */ typedef Block<const CwiseNullaryOp<internal::scalar_identity_op<Scalar>, SquareMatrixType>, internal::traits<Derived>::RowsAtCompileTime, internal::traits<Derived>::ColsAtCompileTime> BasisReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::MatrixBase #define EIGEN_DOC_UNARY_ADDONS(X,Y) # include "../plugins/CommonCwiseUnaryOps.h" # include "../plugins/CommonCwiseBinaryOps.h" # include "../plugins/MatrixCwiseUnaryOps.h" # include "../plugins/MatrixCwiseBinaryOps.h" # ifdef EIGEN_MATRIXBASE_PLUGIN # include EIGEN_MATRIXBASE_PLUGIN # endif #undef EIGEN_CURRENT_STORAGE_BASE_CLASS #undef EIGEN_DOC_UNARY_ADDONS /** Special case of the template operator=, in order to prevent the compiler * from generating a default operator= (issue hit with g++ 4.1) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const MatrixBase& other); // We cannot inherit here via Base::operator= since it is causing // trouble with MSVC. 
template <typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other); template <typename OtherDerived> EIGEN_DEVICE_FUNC Derived& operator=(const EigenBase<OtherDerived>& other); template<typename OtherDerived> EIGEN_DEVICE_FUNC Derived& operator=(const ReturnByValue<OtherDerived>& other); template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const MatrixBase<OtherDerived>& other); template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const MatrixBase<OtherDerived>& other); template<typename OtherDerived> EIGEN_DEVICE_FUNC const Product<Derived,OtherDerived> operator*(const MatrixBase<OtherDerived> &other) const; template<typename OtherDerived> EIGEN_DEVICE_FUNC const Product<Derived,OtherDerived,LazyProduct> lazyProduct(const MatrixBase<OtherDerived> &other) const; template<typename OtherDerived> Derived& operator*=(const EigenBase<OtherDerived>& other); template<typename OtherDerived> void applyOnTheLeft(const EigenBase<OtherDerived>& other); template<typename OtherDerived> void applyOnTheRight(const EigenBase<OtherDerived>& other); template<typename DiagonalDerived> EIGEN_DEVICE_FUNC const Product<Derived, DiagonalDerived, LazyProduct> operator*(const DiagonalBase<DiagonalDerived> &diagonal) const; template<typename OtherDerived> EIGEN_DEVICE_FUNC typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType dot(const MatrixBase<OtherDerived>& other) const; EIGEN_DEVICE_FUNC RealScalar squaredNorm() const; EIGEN_DEVICE_FUNC RealScalar norm() const; RealScalar stableNorm() const; RealScalar blueNorm() const; RealScalar hypotNorm() const; EIGEN_DEVICE_FUNC const PlainObject normalized() const; EIGEN_DEVICE_FUNC const PlainObject stableNormalized() const; EIGEN_DEVICE_FUNC void normalize(); EIGEN_DEVICE_FUNC void stableNormalize(); EIGEN_DEVICE_FUNC const AdjointReturnType adjoint() const; EIGEN_DEVICE_FUNC void adjointInPlace(); typedef Diagonal<Derived> DiagonalReturnType; EIGEN_DEVICE_FUNC DiagonalReturnType diagonal(); typedef typename internal::add_const<Diagonal<const Derived> >::type ConstDiagonalReturnType; EIGEN_DEVICE_FUNC ConstDiagonalReturnType diagonal() const; template<int Index> struct DiagonalIndexReturnType { typedef Diagonal<Derived,Index> Type; }; template<int Index> struct ConstDiagonalIndexReturnType { typedef const Diagonal<const Derived,Index> Type; }; template<int Index> EIGEN_DEVICE_FUNC typename DiagonalIndexReturnType<Index>::Type diagonal(); template<int Index> EIGEN_DEVICE_FUNC typename ConstDiagonalIndexReturnType<Index>::Type diagonal() const; typedef Diagonal<Derived,DynamicIndex> DiagonalDynamicIndexReturnType; typedef typename internal::add_const<Diagonal<const Derived,DynamicIndex> >::type ConstDiagonalDynamicIndexReturnType; EIGEN_DEVICE_FUNC DiagonalDynamicIndexReturnType diagonal(Index index); EIGEN_DEVICE_FUNC ConstDiagonalDynamicIndexReturnType diagonal(Index index) const; template<unsigned int Mode> struct TriangularViewReturnType { typedef TriangularView<Derived, Mode> Type; }; template<unsigned int Mode> struct ConstTriangularViewReturnType { typedef const TriangularView<const Derived, Mode> Type; }; template<unsigned int Mode> EIGEN_DEVICE_FUNC typename TriangularViewReturnType<Mode>::Type triangularView(); template<unsigned int Mode> EIGEN_DEVICE_FUNC typename ConstTriangularViewReturnType<Mode>::Type triangularView() const; template<unsigned int 
UpLo> struct SelfAdjointViewReturnType { typedef SelfAdjointView<Derived, UpLo> Type; }; template<unsigned int UpLo> struct ConstSelfAdjointViewReturnType { typedef const SelfAdjointView<const Derived, UpLo> Type; }; template<unsigned int UpLo> EIGEN_DEVICE_FUNC typename SelfAdjointViewReturnType<UpLo>::Type selfadjointView(); template<unsigned int UpLo> EIGEN_DEVICE_FUNC typename ConstSelfAdjointViewReturnType<UpLo>::Type selfadjointView() const; const SparseView<Derived> sparseView(const Scalar& m_reference = Scalar(0), const typename NumTraits<Scalar>::Real& m_epsilon = NumTraits<Scalar>::dummy_precision()) const; EIGEN_DEVICE_FUNC static const IdentityReturnType Identity(); EIGEN_DEVICE_FUNC static const IdentityReturnType Identity(Index rows, Index cols); EIGEN_DEVICE_FUNC static const BasisReturnType Unit(Index size, Index i); EIGEN_DEVICE_FUNC static const BasisReturnType Unit(Index i); EIGEN_DEVICE_FUNC static const BasisReturnType UnitX(); EIGEN_DEVICE_FUNC static const BasisReturnType UnitY(); EIGEN_DEVICE_FUNC static const BasisReturnType UnitZ(); EIGEN_DEVICE_FUNC static const BasisReturnType UnitW(); EIGEN_DEVICE_FUNC const DiagonalWrapper<const Derived> asDiagonal() const; const PermutationWrapper<const Derived> asPermutation() const; EIGEN_DEVICE_FUNC Derived& setIdentity(); EIGEN_DEVICE_FUNC Derived& setIdentity(Index rows, Index cols); bool isIdentity(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const; bool isDiagonal(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const; bool isUpperTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const; bool isLowerTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const; template<typename OtherDerived> bool isOrthogonal(const MatrixBase<OtherDerived>& other, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const; bool isUnitary(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const; /** \returns true if each coefficients of \c *this and \a other are all exactly equal. * \warning When using floating point scalar values you probably should rather use a * fuzzy comparison such as isApprox() * \sa isApprox(), operator!= */ template<typename OtherDerived> EIGEN_DEVICE_FUNC inline bool operator==(const MatrixBase<OtherDerived>& other) const { return cwiseEqual(other).all(); } /** \returns true if at least one pair of coefficients of \c *this and \a other are not exactly equal to each other. * \warning When using floating point scalar values you probably should rather use a * fuzzy comparison such as isApprox() * \sa isApprox(), operator== */ template<typename OtherDerived> EIGEN_DEVICE_FUNC inline bool operator!=(const MatrixBase<OtherDerived>& other) const { return cwiseNotEqual(other).any(); } NoAlias<Derived,Eigen::MatrixBase > noalias(); // TODO forceAlignedAccess is temporarily disabled // Need to find a nicer workaround. 
inline const Derived& forceAlignedAccess() const { return derived(); } inline Derived& forceAlignedAccess() { return derived(); } template<bool Enable> inline const Derived& forceAlignedAccessIf() const { return derived(); } template<bool Enable> inline Derived& forceAlignedAccessIf() { return derived(); } EIGEN_DEVICE_FUNC Scalar trace() const; template<int p> EIGEN_DEVICE_FUNC RealScalar lpNorm() const; EIGEN_DEVICE_FUNC MatrixBase<Derived>& matrix() { return *this; } EIGEN_DEVICE_FUNC const MatrixBase<Derived>& matrix() const { return *this; } /** \returns an \link Eigen::ArrayBase Array \endlink expression of this matrix * \sa ArrayBase::matrix() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ArrayWrapper<Derived> array() { return ArrayWrapper<Derived>(derived()); } /** \returns a const \link Eigen::ArrayBase Array \endlink expression of this matrix * \sa ArrayBase::matrix() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const ArrayWrapper<const Derived> array() const { return ArrayWrapper<const Derived>(derived()); } /////////// LU module /////////// inline const FullPivLU<PlainObject> fullPivLu() const; inline const PartialPivLU<PlainObject> partialPivLu() const; inline const PartialPivLU<PlainObject> lu() const; inline const Inverse<Derived> inverse() const; template<typename ResultType> inline void computeInverseAndDetWithCheck( ResultType& inverse, typename ResultType::Scalar& determinant, bool& invertible, const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision() ) const; template<typename ResultType> inline void computeInverseWithCheck( ResultType& inverse, bool& invertible, const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision() ) const; Scalar determinant() const; /////////// Cholesky module /////////// inline const LLT<PlainObject> llt() const; inline const LDLT<PlainObject> ldlt() const; /////////// QR module /////////// inline const HouseholderQR<PlainObject> householderQr() const; inline const ColPivHouseholderQR<PlainObject> colPivHouseholderQr() const; inline const FullPivHouseholderQR<PlainObject> fullPivHouseholderQr() const; inline const CompleteOrthogonalDecomposition<PlainObject> completeOrthogonalDecomposition() const; /////////// Eigenvalues module /////////// inline EigenvaluesReturnType eigenvalues() const; inline RealScalar operatorNorm() const; /////////// SVD module /////////// inline JacobiSVD<PlainObject> jacobiSvd(unsigned int computationOptions = 0) const; inline BDCSVD<PlainObject> bdcSvd(unsigned int computationOptions = 0) const; /////////// Geometry module /////////// #ifndef EIGEN_PARSED_BY_DOXYGEN /// \internal helper struct to form the return type of the cross product template<typename OtherDerived> struct cross_product_return_type { typedef typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType Scalar; typedef Matrix<Scalar,MatrixBase::RowsAtCompileTime,MatrixBase::ColsAtCompileTime> type; }; #endif // EIGEN_PARSED_BY_DOXYGEN template<typename OtherDerived> EIGEN_DEVICE_FUNC #ifndef EIGEN_PARSED_BY_DOXYGEN inline typename cross_product_return_type<OtherDerived>::type #else inline PlainObject #endif cross(const MatrixBase<OtherDerived>& other) const; template<typename OtherDerived> EIGEN_DEVICE_FUNC inline PlainObject cross3(const MatrixBase<OtherDerived>& other) const; EIGEN_DEVICE_FUNC inline PlainObject unitOrthogonal(void) const; EIGEN_DEVICE_FUNC inline Matrix<Scalar,3,1> eulerAngles(Index a0, Index a1, Index a2) const; // put 
this as separate enum value to work around possible GCC 4.3 bug (?) enum { HomogeneousReturnTypeDirection = ColsAtCompileTime==1&&RowsAtCompileTime==1 ? ((internal::traits<Derived>::Flags&RowMajorBit)==RowMajorBit ? Horizontal : Vertical) : ColsAtCompileTime==1 ? Vertical : Horizontal }; typedef Homogeneous<Derived, HomogeneousReturnTypeDirection> HomogeneousReturnType; EIGEN_DEVICE_FUNC inline HomogeneousReturnType homogeneous() const; enum { SizeMinusOne = SizeAtCompileTime==Dynamic ? Dynamic : SizeAtCompileTime-1 }; typedef Block<const Derived, internal::traits<Derived>::ColsAtCompileTime==1 ? SizeMinusOne : 1, internal::traits<Derived>::ColsAtCompileTime==1 ? 1 : SizeMinusOne> ConstStartMinusOne; typedef EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(ConstStartMinusOne,Scalar,quotient) HNormalizedReturnType; EIGEN_DEVICE_FUNC inline const HNormalizedReturnType hnormalized() const; ////////// Householder module /////////// void makeHouseholderInPlace(Scalar& tau, RealScalar& beta); template<typename EssentialPart> void makeHouseholder(EssentialPart& essential, Scalar& tau, RealScalar& beta) const; template<typename EssentialPart> void applyHouseholderOnTheLeft(const EssentialPart& essential, const Scalar& tau, Scalar* workspace); template<typename EssentialPart> void applyHouseholderOnTheRight(const EssentialPart& essential, const Scalar& tau, Scalar* workspace); ///////// Jacobi module ///////// template<typename OtherScalar> void applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j); template<typename OtherScalar> void applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j); ///////// SparseCore module ///////// template<typename OtherDerived> EIGEN_STRONG_INLINE const typename SparseMatrixBase<OtherDerived>::template CwiseProductDenseReturnType<Derived>::Type cwiseProduct(const SparseMatrixBase<OtherDerived> &other) const { return other.cwiseProduct(derived()); } ///////// MatrixFunctions module ///////// typedef typename internal::stem_function<Scalar>::type StemFunction; #define EIGEN_MATRIX_FUNCTION(ReturnType, Name, Description) \ /** \returns an expression of the matrix Description of \c *this. \brief This function requires the <a href="unsupported/group__MatrixFunctions__Module.html"> unsupported MatrixFunctions module</a>. To compute the coefficient-wise Description use ArrayBase::##Name . */ \ const ReturnType<Derived> Name() const; #define EIGEN_MATRIX_FUNCTION_1(ReturnType, Name, Description, Argument) \ /** \returns an expression of the matrix Description of \c *this. \brief This function requires the <a href="unsupported/group__MatrixFunctions__Module.html"> unsupported MatrixFunctions module</a>. To compute the coefficient-wise Description use ArrayBase::##Name . 
*/ \ const ReturnType<Derived> Name(Argument) const; EIGEN_MATRIX_FUNCTION(MatrixExponentialReturnValue, exp, exponential) /** \brief Helper function for the <a href="unsupported/group__MatrixFunctions__Module.html"> unsupported MatrixFunctions module</a>.*/ const MatrixFunctionReturnValue<Derived> matrixFunction(StemFunction f) const; EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cosh, hyperbolic cosine) EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sinh, hyperbolic sine) EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, cos, cosine) EIGEN_MATRIX_FUNCTION(MatrixFunctionReturnValue, sin, sine) EIGEN_MATRIX_FUNCTION(MatrixSquareRootReturnValue, sqrt, square root) EIGEN_MATRIX_FUNCTION(MatrixLogarithmReturnValue, log, logarithm) EIGEN_MATRIX_FUNCTION_1(MatrixPowerReturnValue, pow, power to \c p, const RealScalar& p) EIGEN_MATRIX_FUNCTION_1(MatrixComplexPowerReturnValue, pow, power to \c p, const std::complex<RealScalar>& p) protected: EIGEN_DEFAULT_COPY_CONSTRUCTOR(MatrixBase) EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(MatrixBase) private: EIGEN_DEVICE_FUNC explicit MatrixBase(int); EIGEN_DEVICE_FUNC MatrixBase(int,int); template<typename OtherDerived> EIGEN_DEVICE_FUNC explicit MatrixBase(const MatrixBase<OtherDerived>&); protected: // mixing arrays and matrices is not legal template<typename OtherDerived> Derived& operator+=(const ArrayBase<OtherDerived>& ) {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} // mixing arrays and matrices is not legal template<typename OtherDerived> Derived& operator-=(const ArrayBase<OtherDerived>& ) {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} }; /*************************************************************************** * Implementation of matrix base methods ***************************************************************************/ /** replaces \c *this by \c *this * \a other. * * \returns a reference to \c *this * * Example: \include MatrixBase_applyOnTheRight.cpp * Output: \verbinclude MatrixBase_applyOnTheRight.out */ template<typename Derived> template<typename OtherDerived> inline Derived& MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other) { other.derived().applyThisOnTheRight(derived()); return derived(); } /** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=(). * * Example: \include MatrixBase_applyOnTheRight.cpp * Output: \verbinclude MatrixBase_applyOnTheRight.out */ template<typename Derived> template<typename OtherDerived> inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other) { other.derived().applyThisOnTheRight(derived()); } /** replaces \c *this by \a other * \c *this. * * Example: \include MatrixBase_applyOnTheLeft.cpp * Output: \verbinclude MatrixBase_applyOnTheLeft.out */ template<typename Derived> template<typename OtherDerived> inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other) { other.derived().applyThisOnTheLeft(derived()); } } // end namespace Eigen #endif // EIGEN_MATRIXBASE_H
Unknown
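The MatrixBase declarations above (norms, dot products, triangular/self-adjoint views, the decomposition entry points, and the Geometry-module helpers) are only declarations; as a reader's aid, here is a minimal, illustrative sketch of how a few of them are typically called. It assumes Eigen 3.3.x is on the include path; the matrix values and variable names are arbitrary and not taken from this repository.

#include <Eigen/Dense>
#include <iostream>

int main() {
  // A small symmetric positive-definite matrix, so llt() below is applicable.
  Eigen::Matrix3d A;
  A << 4, 1, 0,
       1, 3, 1,
       0, 1, 2;
  Eigen::Vector3d v(1.0, 2.0, 3.0);

  // Norms and dot products declared on MatrixBase.
  std::cout << "norm(v)        = " << v.norm() << "\n";
  std::cout << "squaredNorm(v) = " << v.squaredNorm() << "\n";
  std::cout << "v.dot(v)       = " << v.dot(v) << "\n";

  // triangularView() returns a lightweight proxy; assigning it to a dense
  // matrix evaluates it, zeroing the opposite triangle.
  Eigen::Matrix3d L = A.triangularView<Eigen::Lower>();

  // Cholesky entry point declared in the "Cholesky module" section.
  Eigen::Vector3d x = A.llt().solve(v);
  std::cout << "residual norm  = " << (A * x - v).norm() << "\n";

  // Geometry-module helpers: cross product and homogeneous coordinates.
  Eigen::Vector3d c = v.cross(Eigen::Vector3d::UnitX());
  Eigen::Vector4d h = v.homogeneous();             // appends a trailing 1
  std::cout << "hnormalized    = " << h.hnormalized().transpose() << "\n";
  (void)L; (void)c;
  return 0;
}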
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/DenseStorage.h
.h
22,205
571
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2010-2013 Hauke Heibel <hauke.heibel@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIXSTORAGE_H #define EIGEN_MATRIXSTORAGE_H #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X) X; EIGEN_DENSE_STORAGE_CTOR_PLUGIN; #else #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X) #endif namespace Eigen { namespace internal { struct constructor_without_unaligned_array_assert {}; template<typename T, int Size> EIGEN_DEVICE_FUNC void check_static_allocation_size() { // if EIGEN_STACK_ALLOCATION_LIMIT is defined to 0, then no limit #if EIGEN_STACK_ALLOCATION_LIMIT EIGEN_STATIC_ASSERT(Size * sizeof(T) <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG); #endif } /** \internal * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned: * to 16 bytes boundary if the total size is a multiple of 16 bytes. */ template <typename T, int Size, int MatrixOrArrayOptions, int Alignment = (MatrixOrArrayOptions&DontAlign) ? 0 : compute_default_alignment<T,Size>::value > struct plain_array { T array[Size]; EIGEN_DEVICE_FUNC plain_array() { check_static_allocation_size<T,Size>(); } EIGEN_DEVICE_FUNC plain_array(constructor_without_unaligned_array_assert) { check_static_allocation_size<T,Size>(); } }; #if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT) #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) #elif EIGEN_GNUC_AT_LEAST(4,7) // GCC 4.7 is too aggressive in its optimizations and remove the alignement test based on the fact the array is declared to be aligned. // See this bug report: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=53900 // Hiding the origin of the array pointer behind a function argument seems to do the trick even if the function is inlined: template<typename PtrType> EIGEN_ALWAYS_INLINE PtrType eigen_unaligned_array_assert_workaround_gcc47(PtrType array) { return array; } #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \ eigen_assert((internal::UIntPtr(eigen_unaligned_array_assert_workaround_gcc47(array)) & (sizemask)) == 0 \ && "this assertion is explained here: " \ "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \ " **** READ THIS WEB PAGE !!! ****"); #else #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \ eigen_assert((internal::UIntPtr(array) & (sizemask)) == 0 \ && "this assertion is explained here: " \ "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \ " **** READ THIS WEB PAGE !!! 
****"); #endif template <typename T, int Size, int MatrixOrArrayOptions> struct plain_array<T, Size, MatrixOrArrayOptions, 8> { EIGEN_ALIGN_TO_BOUNDARY(8) T array[Size]; EIGEN_DEVICE_FUNC plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(7); check_static_allocation_size<T,Size>(); } EIGEN_DEVICE_FUNC plain_array(constructor_without_unaligned_array_assert) { check_static_allocation_size<T,Size>(); } }; template <typename T, int Size, int MatrixOrArrayOptions> struct plain_array<T, Size, MatrixOrArrayOptions, 16> { EIGEN_ALIGN_TO_BOUNDARY(16) T array[Size]; EIGEN_DEVICE_FUNC plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(15); check_static_allocation_size<T,Size>(); } EIGEN_DEVICE_FUNC plain_array(constructor_without_unaligned_array_assert) { check_static_allocation_size<T,Size>(); } }; template <typename T, int Size, int MatrixOrArrayOptions> struct plain_array<T, Size, MatrixOrArrayOptions, 32> { EIGEN_ALIGN_TO_BOUNDARY(32) T array[Size]; EIGEN_DEVICE_FUNC plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(31); check_static_allocation_size<T,Size>(); } EIGEN_DEVICE_FUNC plain_array(constructor_without_unaligned_array_assert) { check_static_allocation_size<T,Size>(); } }; template <typename T, int Size, int MatrixOrArrayOptions> struct plain_array<T, Size, MatrixOrArrayOptions, 64> { EIGEN_ALIGN_TO_BOUNDARY(64) T array[Size]; EIGEN_DEVICE_FUNC plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(63); check_static_allocation_size<T,Size>(); } EIGEN_DEVICE_FUNC plain_array(constructor_without_unaligned_array_assert) { check_static_allocation_size<T,Size>(); } }; template <typename T, int MatrixOrArrayOptions, int Alignment> struct plain_array<T, 0, MatrixOrArrayOptions, Alignment> { T array[1]; EIGEN_DEVICE_FUNC plain_array() {} EIGEN_DEVICE_FUNC plain_array(constructor_without_unaligned_array_assert) {} }; } // end namespace internal /** \internal * * \class DenseStorage * \ingroup Core_Module * * \brief Stores the data of a matrix * * This class stores the data of fixed-size, dynamic-size or mixed matrices * in a way as compact as possible. 
* * \sa Matrix */ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseStorage; // purely fixed-size matrix template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseStorage { internal::plain_array<T,Size,_Options> m_data; public: EIGEN_DEVICE_FUNC DenseStorage() { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size) } EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(internal::constructor_without_unaligned_array_assert()) {} EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(other.m_data) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size) } EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) { if (this != &other) m_data = other.m_data; return *this; } EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) eigen_internal_assert(size==rows*cols && rows==_Rows && cols==_Cols); EIGEN_UNUSED_VARIABLE(size); EIGEN_UNUSED_VARIABLE(rows); EIGEN_UNUSED_VARIABLE(cols); } EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); } EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;} EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;} EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {} EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {} EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; } EIGEN_DEVICE_FUNC T *data() { return m_data.array; } }; // null matrix template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0, _Rows, _Cols, _Options> { public: EIGEN_DEVICE_FUNC DenseStorage() {} EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) {} EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage&) {} EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage&) { return *this; } EIGEN_DEVICE_FUNC DenseStorage(Index,Index,Index) {} EIGEN_DEVICE_FUNC void swap(DenseStorage& ) {} EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;} EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;} EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {} EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {} EIGEN_DEVICE_FUNC const T *data() const { return 0; } EIGEN_DEVICE_FUNC T *data() { return 0; } }; // more specializations for null matrices; these are necessary to resolve ambiguities template<typename T, int _Options> class DenseStorage<T, 0, Dynamic, Dynamic, _Options> : public DenseStorage<T, 0, 0, 0, _Options> { }; template<typename T, int _Rows, int _Options> class DenseStorage<T, 0, _Rows, Dynamic, _Options> : public DenseStorage<T, 0, 0, 0, _Options> { }; template<typename T, int _Cols, int _Options> class DenseStorage<T, 0, Dynamic, _Cols, _Options> : public DenseStorage<T, 0, 0, 0, _Options> { }; // dynamic-size matrix with fixed-size storage template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic, Dynamic, _Options> { internal::plain_array<T,Size,_Options> m_data; Index m_rows; Index m_cols; public: EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0), m_cols(0) {} EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {} EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows), m_cols(other.m_cols) {} EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) { if (this != 
&other) { m_data = other.m_data; m_rows = other.m_rows; m_cols = other.m_cols; } return *this; } EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index cols) : m_rows(rows), m_cols(cols) {} EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } EIGEN_DEVICE_FUNC Index rows() const {return m_rows;} EIGEN_DEVICE_FUNC Index cols() const {return m_cols;} EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index cols) { m_rows = rows; m_cols = cols; } EIGEN_DEVICE_FUNC void resize(Index, Index rows, Index cols) { m_rows = rows; m_cols = cols; } EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; } EIGEN_DEVICE_FUNC T *data() { return m_data.array; } }; // dynamic-size matrix with fixed-size storage and fixed width template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Size, Dynamic, _Cols, _Options> { internal::plain_array<T,Size,_Options> m_data; Index m_rows; public: EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0) {} EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {} EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows) {} EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) { if (this != &other) { m_data = other.m_data; m_rows = other.m_rows; } return *this; } EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index) : m_rows(rows) {} EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;} EIGEN_DEVICE_FUNC Index cols(void) const {return _Cols;} EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index) { m_rows = rows; } EIGEN_DEVICE_FUNC void resize(Index, Index rows, Index) { m_rows = rows; } EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; } EIGEN_DEVICE_FUNC T *data() { return m_data.array; } }; // dynamic-size matrix with fixed-size storage and fixed height template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Size, _Rows, Dynamic, _Options> { internal::plain_array<T,Size,_Options> m_data; Index m_cols; public: EIGEN_DEVICE_FUNC DenseStorage() : m_cols(0) {} EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {} EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_cols(other.m_cols) {} EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) { if (this != &other) { m_data = other.m_data; m_cols = other.m_cols; } return *this; } EIGEN_DEVICE_FUNC DenseStorage(Index, Index, Index cols) : m_cols(cols) {} EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } EIGEN_DEVICE_FUNC Index rows(void) const {return _Rows;} EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;} void conservativeResize(Index, Index, Index cols) { m_cols = cols; } void resize(Index, Index, Index cols) { m_cols = cols; } EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; } EIGEN_DEVICE_FUNC T *data() { return m_data.array; } }; // purely dynamic matrix. 
template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynamic, _Options> { T *m_data; Index m_rows; Index m_cols; public: EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_rows(0), m_cols(0) {} EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0), m_cols(0) {} EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows), m_cols(cols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) eigen_internal_assert(size==rows*cols && rows>=0 && cols >=0); } EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(other.m_rows*other.m_cols)) , m_rows(other.m_rows) , m_cols(other.m_cols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_rows*m_cols) internal::smart_copy(other.m_data, other.m_data+other.m_rows*other.m_cols, m_data); } EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) { if (this != &other) { DenseStorage tmp(other); this->swap(tmp); } return *this; } #if EIGEN_HAS_RVALUE_REFERENCES EIGEN_DEVICE_FUNC DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT : m_data(std::move(other.m_data)) , m_rows(std::move(other.m_rows)) , m_cols(std::move(other.m_cols)) { other.m_data = nullptr; other.m_rows = 0; other.m_cols = 0; } EIGEN_DEVICE_FUNC DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT { using std::swap; swap(m_data, other.m_data); swap(m_rows, other.m_rows); swap(m_cols, other.m_cols); return *this; } #endif EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); } EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;} EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;} void conservativeResize(Index size, Index rows, Index cols) { m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols); m_rows = rows; m_cols = cols; } EIGEN_DEVICE_FUNC void resize(Index size, Index rows, Index cols) { if(size != m_rows*m_cols) { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); if (size>0) // >0 and not simply !=0 to let the compiler knows that size cannot be negative m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size); else m_data = 0; EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) } m_rows = rows; m_cols = cols; } EIGEN_DEVICE_FUNC const T *data() const { return m_data; } EIGEN_DEVICE_FUNC T *data() { return m_data; } }; // matrix with dynamic width and fixed height (so that matrix has dynamic size). 
template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Rows, Dynamic, _Options> { T *m_data; Index m_cols; public: EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_cols(0) {} explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {} EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(cols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) eigen_internal_assert(size==rows*cols && rows==_Rows && cols >=0); EIGEN_UNUSED_VARIABLE(rows); } EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(_Rows*other.m_cols)) , m_cols(other.m_cols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_cols*_Rows) internal::smart_copy(other.m_data, other.m_data+_Rows*m_cols, m_data); } EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) { if (this != &other) { DenseStorage tmp(other); this->swap(tmp); } return *this; } #if EIGEN_HAS_RVALUE_REFERENCES EIGEN_DEVICE_FUNC DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT : m_data(std::move(other.m_data)) , m_cols(std::move(other.m_cols)) { other.m_data = nullptr; other.m_cols = 0; } EIGEN_DEVICE_FUNC DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT { using std::swap; swap(m_data, other.m_data); swap(m_cols, other.m_cols); return *this; } #endif EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); } EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;} EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;} EIGEN_DEVICE_FUNC void conservativeResize(Index size, Index, Index cols) { m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols); m_cols = cols; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(Index size, Index, Index cols) { if(size != _Rows*m_cols) { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); if (size>0) // >0 and not simply !=0 to let the compiler knows that size cannot be negative m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size); else m_data = 0; EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) } m_cols = cols; } EIGEN_DEVICE_FUNC const T *data() const { return m_data; } EIGEN_DEVICE_FUNC T *data() { return m_data; } }; // matrix with dynamic height and fixed width (so that matrix has dynamic size). 
template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dynamic, _Cols, _Options> { T *m_data; Index m_rows; public: EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_rows(0) {} explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {} EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) eigen_internal_assert(size==rows*cols && rows>=0 && cols == _Cols); EIGEN_UNUSED_VARIABLE(cols); } EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(other.m_rows*_Cols)) , m_rows(other.m_rows) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_rows*_Cols) internal::smart_copy(other.m_data, other.m_data+other.m_rows*_Cols, m_data); } EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) { if (this != &other) { DenseStorage tmp(other); this->swap(tmp); } return *this; } #if EIGEN_HAS_RVALUE_REFERENCES EIGEN_DEVICE_FUNC DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT : m_data(std::move(other.m_data)) , m_rows(std::move(other.m_rows)) { other.m_data = nullptr; other.m_rows = 0; } EIGEN_DEVICE_FUNC DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT { using std::swap; swap(m_data, other.m_data); swap(m_rows, other.m_rows); return *this; } #endif EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); } EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;} EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;} void conservativeResize(Index size, Index rows, Index) { m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols); m_rows = rows; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(Index size, Index rows, Index) { if(size != m_rows*_Cols) { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); if (size>0) // >0 and not simply !=0 to let the compiler knows that size cannot be negative m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size); else m_data = 0; EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) } m_rows = rows; } EIGEN_DEVICE_FUNC const T *data() const { return m_data; } EIGEN_DEVICE_FUNC T *data() { return m_data; } }; } // end namespace Eigen #endif // EIGEN_MATRIX_H
Unknown
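DenseStorage is an internal class, but its specializations determine the user-visible behaviour of fixed-size versus dynamic-size matrices. The following is an illustrative sketch (not part of the sources above, values arbitrary) of that effect, assuming Eigen 3.3.x: fixed-size objects embed a plain_array directly, while Dynamic objects hold a heap pointer plus run-time row/column counts and can therefore be resized.

#include <Eigen/Core>
#include <iostream>

int main() {
  // Purely fixed-size: the storage is a plain_array living inside the object.
  Eigen::Matrix4f fixed = Eigen::Matrix4f::Identity();
  std::cout << "sizeof(Matrix4f) = " << sizeof(fixed) << " bytes\n";

  // Purely dynamic: pointer + m_rows + m_cols, with resize()/conservativeResize().
  Eigen::MatrixXf dyn(2, 3);
  dyn.setZero();
  dyn.resize(4, 5);              // reallocates; old coefficients are discarded
  dyn.conservativeResize(4, 6);  // keeps existing coefficients, new ones uninitialized
  std::cout << "dyn is " << dyn.rows() << "x" << dyn.cols() << "\n";

  // Mixed case: fixed row count, dynamic column count (only m_cols is stored).
  Eigen::Matrix<float, 3, Eigen::Dynamic> partial(3, 7);
  std::cout << "partial is " << partial.rows() << "x" << partial.cols() << "\n";
  return 0;
}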
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Block.h
.h
18,064
453
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BLOCK_H #define EIGEN_BLOCK_H namespace Eigen { namespace internal { template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprType> { typedef typename traits<XprType>::Scalar Scalar; typedef typename traits<XprType>::StorageKind StorageKind; typedef typename traits<XprType>::XprKind XprKind; typedef typename ref_selector<XprType>::type XprTypeNested; typedef typename remove_reference<XprTypeNested>::type _XprTypeNested; enum{ MatrixRows = traits<XprType>::RowsAtCompileTime, MatrixCols = traits<XprType>::ColsAtCompileTime, RowsAtCompileTime = MatrixRows == 0 ? 0 : BlockRows, ColsAtCompileTime = MatrixCols == 0 ? 0 : BlockCols, MaxRowsAtCompileTime = BlockRows==0 ? 0 : RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime) : int(traits<XprType>::MaxRowsAtCompileTime), MaxColsAtCompileTime = BlockCols==0 ? 0 : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime) : int(traits<XprType>::MaxColsAtCompileTime), XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0, IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1 : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0 : XprTypeIsRowMajor, HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor), InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime), InnerStrideAtCompileTime = HasSameStorageOrderAsXprType ? int(inner_stride_at_compile_time<XprType>::ret) : int(outer_stride_at_compile_time<XprType>::ret), OuterStrideAtCompileTime = HasSameStorageOrderAsXprType ? int(outer_stride_at_compile_time<XprType>::ret) : int(inner_stride_at_compile_time<XprType>::ret), // FIXME, this traits is rather specialized for dense object and it needs to be cleaned further FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0, FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0, Flags = (traits<XprType>::Flags & (DirectAccessBit | (InnerPanel?CompressedAccessBit:0))) | FlagsLvalueBit | FlagsRowMajorBit, // FIXME DirectAccessBit should not be handled by expressions // // Alignment is needed by MapBase's assertions // We can sefely set it to false here. 
Internal alignment errors will be detected by an eigen_internal_assert in the respective evaluator Alignment = 0 }; }; template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false, bool HasDirectAccess = internal::has_direct_access<XprType>::ret> class BlockImpl_dense; } // end namespace internal template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, typename StorageKind> class BlockImpl; /** \class Block * \ingroup Core_Module * * \brief Expression of a fixed-size or dynamic-size block * * \tparam XprType the type of the expression in which we are taking a block * \tparam BlockRows the number of rows of the block we are taking at compile time (optional) * \tparam BlockCols the number of columns of the block we are taking at compile time (optional) * \tparam InnerPanel is true, if the block maps to a set of rows of a row major matrix or * to set of columns of a column major matrix (optional). The parameter allows to determine * at compile time whether aligned access is possible on the block expression. * * This class represents an expression of either a fixed-size or dynamic-size block. It is the return * type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block<int,int>(Index,Index) and * most of the time this is the only way it is used. * * However, if you want to directly maniputate block expressions, * for instance if you want to write a function returning such an expression, you * will need to use this class. * * Here is an example illustrating the dynamic case: * \include class_Block.cpp * Output: \verbinclude class_Block.out * * \note Even though this expression has dynamic size, in the case where \a XprType * has fixed size, this expression inherits a fixed maximal size which means that evaluating * it does not cause a dynamic memory allocation. 
* * Here is an example illustrating the fixed-size case: * \include class_FixedBlock.cpp * Output: \verbinclude class_FixedBlock.out * * \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock */ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class Block : public BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind> { typedef BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind> Impl; public: //typedef typename Impl::Base Base; typedef Impl Base; EIGEN_GENERIC_PUBLIC_INTERFACE(Block) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block) typedef typename internal::remove_all<XprType>::type NestedExpression; /** Column or Row constructor */ EIGEN_DEVICE_FUNC inline Block(XprType& xpr, Index i) : Impl(xpr,i) { eigen_assert( (i>=0) && ( ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows()) ||((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && i<xpr.cols()))); } /** Fixed-size constructor */ EIGEN_DEVICE_FUNC inline Block(XprType& xpr, Index startRow, Index startCol) : Impl(xpr, startRow, startCol) { EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE) eigen_assert(startRow >= 0 && BlockRows >= 0 && startRow + BlockRows <= xpr.rows() && startCol >= 0 && BlockCols >= 0 && startCol + BlockCols <= xpr.cols()); } /** Dynamic-size constructor */ EIGEN_DEVICE_FUNC inline Block(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) : Impl(xpr, startRow, startCol, blockRows, blockCols) { eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows) && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols)); eigen_assert(startRow >= 0 && blockRows >= 0 && startRow <= xpr.rows() - blockRows && startCol >= 0 && blockCols >= 0 && startCol <= xpr.cols() - blockCols); } }; // The generic default implementation for dense block simplu forward to the internal::BlockImpl_dense // that must be specialized for direct and non-direct access... template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, Dense> : public internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel> { typedef internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel> Impl; typedef typename XprType::StorageIndex StorageIndex; public: typedef Impl Base; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) EIGEN_DEVICE_FUNC inline BlockImpl(XprType& xpr, Index i) : Impl(xpr,i) {} EIGEN_DEVICE_FUNC inline BlockImpl(XprType& xpr, Index startRow, Index startCol) : Impl(xpr, startRow, startCol) {} EIGEN_DEVICE_FUNC inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) : Impl(xpr, startRow, startCol, blockRows, blockCols) {} }; namespace internal { /** \internal Internal implementation of dense Blocks in the general case. 
*/ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess> class BlockImpl_dense : public internal::dense_xpr_base<Block<XprType, BlockRows, BlockCols, InnerPanel> >::type { typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType; typedef typename internal::ref_selector<XprType>::non_const_type XprTypeNested; public: typedef typename internal::dense_xpr_base<BlockType>::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(BlockType) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense) // class InnerIterator; // FIXME apparently never used /** Column or Row constructor */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index i) : m_xpr(xpr), // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime, // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1, // all other cases are invalid. // The case a 1x1 matrix seems ambiguous, but the result is the same anyway. m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), m_blockRows(BlockRows==1 ? 1 : xpr.rows()), m_blockCols(BlockCols==1 ? 1 : xpr.cols()) {} /** Fixed-size constructor */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol) : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(BlockRows), m_blockCols(BlockCols) {} /** Dynamic-size constructor */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols) {} EIGEN_DEVICE_FUNC inline Index rows() const { return m_blockRows.value(); } EIGEN_DEVICE_FUNC inline Index cols() const { return m_blockCols.value(); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index rowId, Index colId) { EIGEN_STATIC_ASSERT_LVALUE(XprType) return m_xpr.coeffRef(rowId + m_startRow.value(), colId + m_startCol.value()); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { return m_xpr.derived().coeffRef(rowId + m_startRow.value(), colId + m_startCol.value()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const { return m_xpr.coeff(rowId + m_startRow.value(), colId + m_startCol.value()); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index) { EIGEN_STATIC_ASSERT_LVALUE(XprType) return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); } EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const { return m_xpr.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? 
index : 0)); } template<int LoadMode> inline PacketScalar packet(Index rowId, Index colId) const { return m_xpr.template packet<Unaligned>(rowId + m_startRow.value(), colId + m_startCol.value()); } template<int LoadMode> inline void writePacket(Index rowId, Index colId, const PacketScalar& val) { m_xpr.template writePacket<Unaligned>(rowId + m_startRow.value(), colId + m_startCol.value(), val); } template<int LoadMode> inline PacketScalar packet(Index index) const { return m_xpr.template packet<Unaligned> (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); } template<int LoadMode> inline void writePacket(Index index, const PacketScalar& val) { m_xpr.template writePacket<Unaligned> (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), val); } #ifdef EIGEN_PARSED_BY_DOXYGEN /** \sa MapBase::data() */ EIGEN_DEVICE_FUNC inline const Scalar* data() const; EIGEN_DEVICE_FUNC inline Index innerStride() const; EIGEN_DEVICE_FUNC inline Index outerStride() const; #endif EIGEN_DEVICE_FUNC const typename internal::remove_all<XprTypeNested>::type& nestedExpression() const { return m_xpr; } EIGEN_DEVICE_FUNC XprType& nestedExpression() { return m_xpr; } EIGEN_DEVICE_FUNC StorageIndex startRow() const { return m_startRow.value(); } EIGEN_DEVICE_FUNC StorageIndex startCol() const { return m_startCol.value(); } protected: XprTypeNested m_xpr; const internal::variable_if_dynamic<StorageIndex, (XprType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow; const internal::variable_if_dynamic<StorageIndex, (XprType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol; const internal::variable_if_dynamic<StorageIndex, RowsAtCompileTime> m_blockRows; const internal::variable_if_dynamic<StorageIndex, ColsAtCompileTime> m_blockCols; }; /** \internal Internal implementation of dense Blocks in the direct access case.*/ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true> : public MapBase<Block<XprType, BlockRows, BlockCols, InnerPanel> > { typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType; typedef typename internal::ref_selector<XprType>::non_const_type XprTypeNested; enum { XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0 }; public: typedef MapBase<BlockType> Base; EIGEN_DENSE_PUBLIC_INTERFACE(BlockType) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense) /** Column or Row constructor */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index i) : Base(xpr.data() + i * ( ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && (!XprTypeIsRowMajor)) || ((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && ( XprTypeIsRowMajor)) ? xpr.innerStride() : xpr.outerStride()), BlockRows==1 ? 1 : xpr.rows(), BlockCols==1 ? 1 : xpr.cols()), m_xpr(xpr), m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? 
i : 0) { init(); } /** Fixed-size constructor */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol) : Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol)), m_xpr(xpr), m_startRow(startRow), m_startCol(startCol) { init(); } /** Dynamic-size constructor */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) : Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol), blockRows, blockCols), m_xpr(xpr), m_startRow(startRow), m_startCol(startCol) { init(); } EIGEN_DEVICE_FUNC const typename internal::remove_all<XprTypeNested>::type& nestedExpression() const { return m_xpr; } EIGEN_DEVICE_FUNC XprType& nestedExpression() { return m_xpr; } /** \sa MapBase::innerStride() */ EIGEN_DEVICE_FUNC inline Index innerStride() const { return internal::traits<BlockType>::HasSameStorageOrderAsXprType ? m_xpr.innerStride() : m_xpr.outerStride(); } /** \sa MapBase::outerStride() */ EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_outerStride; } EIGEN_DEVICE_FUNC StorageIndex startRow() const { return m_startRow.value(); } EIGEN_DEVICE_FUNC StorageIndex startCol() const { return m_startCol.value(); } #ifndef __SUNPRO_CC // FIXME sunstudio is not friendly with the above friend... // META-FIXME there is no 'friend' keyword around here. Is this obsolete? protected: #endif #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal used by allowAligned() */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols) : Base(data, blockRows, blockCols), m_xpr(xpr) { init(); } #endif protected: EIGEN_DEVICE_FUNC void init() { m_outerStride = internal::traits<BlockType>::HasSameStorageOrderAsXprType ? m_xpr.outerStride() : m_xpr.innerStride(); } XprTypeNested m_xpr; const internal::variable_if_dynamic<StorageIndex, (XprType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow; const internal::variable_if_dynamic<StorageIndex, (XprType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol; Index m_outerStride; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_BLOCK_H
Unknown
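The Block documentation above refers to the class_Block.cpp / class_FixedBlock.cpp snippets shipped with Eigen; as those files are not reproduced here, the following is a small illustrative sketch of the same ideas (Eigen 3.3.x assumed, values arbitrary): DenseBase::block() is the usual way to obtain a Block expression, and because a Block is a view, writing through it writes into the underlying matrix.

#include <Eigen/Core>
#include <iostream>

int main() {
  Eigen::MatrixXd m(4, 4);
  for (int i = 0; i < 4; ++i)
    for (int j = 0; j < 4; ++j)
      m(i, j) = 10 * i + j;

  // Dynamic-size block: Block<MatrixXd> with BlockRows == BlockCols == Dynamic.
  std::cout << "2x2 block at (1,1):\n" << m.block(1, 1, 2, 2) << "\n";

  // Fixed-size block: Block<MatrixXd,2,2>; the size is known at compile time,
  // so evaluating it into a Matrix2d needs no dynamic allocation.
  Eigen::Matrix2d corner = m.block<2, 2>(0, 0);

  // Blocks are lvalues when the nested expression is writable.
  m.block<2, 2>(2, 2).setConstant(-1.0);

  // Rows and columns are just special blocks (see the column-or-row constructor).
  m.row(0) *= 2.0;
  std::cout << "after modifications:\n" << m << "\n";
  (void)corner;
  return 0;
}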
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Stride.h
.h
3,865
112
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STRIDE_H #define EIGEN_STRIDE_H namespace Eigen { /** \class Stride * \ingroup Core_Module * * \brief Holds strides information for Map * * This class holds the strides information for mapping arrays with strides with class Map. * * It holds two values: the inner stride and the outer stride. * * The inner stride is the pointer increment between two consecutive entries within a given row of a * row-major matrix or within a given column of a column-major matrix. * * The outer stride is the pointer increment between two consecutive rows of a row-major matrix or * between two consecutive columns of a column-major matrix. * * These two values can be passed either at compile-time as template parameters, or at runtime as * arguments to the constructor. * * Indeed, this class takes two template parameters: * \tparam _OuterStrideAtCompileTime the outer stride, or Dynamic if you want to specify it at runtime. * \tparam _InnerStrideAtCompileTime the inner stride, or Dynamic if you want to specify it at runtime. * * Here is an example: * \include Map_general_stride.cpp * Output: \verbinclude Map_general_stride.out * * \sa class InnerStride, class OuterStride, \ref TopicStorageOrders */ template<int _OuterStrideAtCompileTime, int _InnerStrideAtCompileTime> class Stride { public: typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 enum { InnerStrideAtCompileTime = _InnerStrideAtCompileTime, OuterStrideAtCompileTime = _OuterStrideAtCompileTime }; /** Default constructor, for use when strides are fixed at compile time */ EIGEN_DEVICE_FUNC Stride() : m_outer(OuterStrideAtCompileTime), m_inner(InnerStrideAtCompileTime) { eigen_assert(InnerStrideAtCompileTime != Dynamic && OuterStrideAtCompileTime != Dynamic); } /** Constructor allowing to pass the strides at runtime */ EIGEN_DEVICE_FUNC Stride(Index outerStride, Index innerStride) : m_outer(outerStride), m_inner(innerStride) { eigen_assert(innerStride>=0 && outerStride>=0); } /** Copy constructor */ EIGEN_DEVICE_FUNC Stride(const Stride& other) : m_outer(other.outer()), m_inner(other.inner()) {} /** \returns the outer stride */ EIGEN_DEVICE_FUNC inline Index outer() const { return m_outer.value(); } /** \returns the inner stride */ EIGEN_DEVICE_FUNC inline Index inner() const { return m_inner.value(); } protected: internal::variable_if_dynamic<Index, OuterStrideAtCompileTime> m_outer; internal::variable_if_dynamic<Index, InnerStrideAtCompileTime> m_inner; }; /** \brief Convenience specialization of Stride to specify only an inner stride * See class Map for some examples */ template<int Value> class InnerStride : public Stride<0, Value> { typedef Stride<0, Value> Base; public: EIGEN_DEVICE_FUNC InnerStride() : Base() {} EIGEN_DEVICE_FUNC InnerStride(Index v) : Base(0, v) {} // FIXME making this explicit could break valid code }; /** \brief Convenience specialization of Stride to specify only an outer stride * See class Map for some examples */ template<int Value> class OuterStride : public Stride<Value, 0> { typedef Stride<Value, 0> Base; public: EIGEN_DEVICE_FUNC OuterStride() : Base() {} EIGEN_DEVICE_FUNC OuterStride(Index v) : Base(v,0) {} // FIXME making this explicit could break 
valid code }; } // end namespace Eigen #endif // EIGEN_STRIDE_H
Unknown
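The Stride documentation above points to the Map_general_stride.cpp example, which is not included in this dump. In the same spirit, here is an illustrative sketch (Eigen 3.3.x assumed, data values arbitrary) of mapping raw memory with a run-time Stride and with the InnerStride convenience specialization.

#include <Eigen/Core>
#include <iostream>

int main() {
  int data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17};

  // Column-major 3x3 view: outer stride 6 (step between columns),
  // inner stride 2 (step between entries within a column).
  Eigen::Map<Eigen::Matrix3i, 0, Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>>
      view(data, Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>(6, 2));
  std::cout << view << "\n";

  // InnerStride alone: every second entry of the buffer, seen as a vector.
  Eigen::Map<Eigen::VectorXi, 0, Eigen::InnerStride<2>> evens(data, 9);
  std::cout << evens.transpose() << "\n";
  return 0;
}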
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Array.h
.h
12,115
330
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ARRAY_H #define EIGEN_ARRAY_H namespace Eigen { namespace internal { template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols> struct traits<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > : traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > { typedef ArrayXpr XprKind; typedef ArrayBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > XprBase; }; } /** \class Array * \ingroup Core_Module * * \brief General-purpose arrays with easy API for coefficient-wise operations * * The %Array class is very similar to the Matrix class. It provides * general-purpose one- and two-dimensional arrays. The difference between the * %Array and the %Matrix class is primarily in the API: the API for the * %Array class provides easy access to coefficient-wise operations, while the * API for the %Matrix class provides easy access to linear-algebra * operations. * * See documentation of class Matrix for detailed information on the template parameters * storage layout. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_ARRAY_PLUGIN. * * \sa \blank \ref TutorialArrayClass, \ref TopicClassHierarchy */ template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols> class Array : public PlainObjectBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > { public: typedef PlainObjectBase<Array> Base; EIGEN_DENSE_PUBLIC_INTERFACE(Array) enum { Options = _Options }; typedef typename Base::PlainObject PlainObject; protected: template <typename Derived, typename OtherDerived, bool IsVector> friend struct internal::conservative_resize_like_impl; using Base::m_storage; public: using Base::base; using Base::coeff; using Base::coeffRef; /** * The usage of * using Base::operator=; * fails on MSVC. Since the code below is working with GCC and MSVC, we skipped * the usage of 'using'. This should be done only for operator=. */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const EigenBase<OtherDerived> &other) { return Base::operator=(other); } /** Set all the entries to \a value. * \sa DenseBase::setConstant(), DenseBase::fill() */ /* This overload is needed because the usage of * using Base::operator=; * fails on MSVC. Since the code below is working with GCC and MSVC, we skipped * the usage of 'using'. This should be done only for operator=. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const Scalar &value) { Base::setConstant(value); return *this; } /** Copies the value of the expression \a other into \c *this with automatic resizing. * * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), * it will be initialized. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. 
*/ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const DenseBase<OtherDerived>& other) { return Base::_set(other); } /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const Array& other) { return Base::_set(other); } /** Default constructor. * * For fixed-size matrices, does nothing. * * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix * is called a null matrix. This constructor is the unique way to create null matrices: resizing * a matrix to 0 is not supported. * * \sa resize(Index,Index) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array() : Base() { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #ifndef EIGEN_PARSED_BY_DOXYGEN // FIXME is it still needed ?? /** \internal */ EIGEN_DEVICE_FUNC Array(internal::constructor_without_unaligned_array_assert) : Base(internal::constructor_without_unaligned_array_assert()) { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #endif #if EIGEN_HAS_RVALUE_REFERENCES EIGEN_DEVICE_FUNC Array(Array&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_constructible<Scalar>::value) : Base(std::move(other)) { Base::_check_template_params(); } EIGEN_DEVICE_FUNC Array& operator=(Array&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable<Scalar>::value) { other.swap(*this); return *this; } #endif #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Array(const T& x) { Base::_check_template_params(); Base::template _init1<T>(x); } template<typename T0, typename T1> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const T0& val0, const T1& val1) { Base::_check_template_params(); this->template _init2<T0,T1>(val0, val1); } #else /** \brief Constructs a fixed-sized array initialized with coefficients starting at \a data */ EIGEN_DEVICE_FUNC explicit Array(const Scalar *data); /** Constructs a vector or row-vector with given dimension. \only_for_vectors * * Note that this is only useful for dynamic-size vectors. For fixed-size vectors, * it is redundant to pass the dimension here, so it makes more sense to use the default * constructor Array() instead. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Array(Index dim); /** constructs an initialized 1x1 Array with the given coefficient */ Array(const Scalar& value); /** constructs an uninitialized array with \a rows rows and \a cols columns. * * This is useful for dynamic-size arrays. For fixed-size arrays, * it is redundant to pass these parameters, so one should use the default constructor * Array() instead. 
*/ Array(Index rows, Index cols); /** constructs an initialized 2D vector with given coefficients */ Array(const Scalar& val0, const Scalar& val1); #endif /** constructs an initialized 3D vector with given coefficients */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2) { Base::_check_template_params(); EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 3) m_storage.data()[0] = val0; m_storage.data()[1] = val1; m_storage.data()[2] = val2; } /** constructs an initialized 4D vector with given coefficients */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2, const Scalar& val3) { Base::_check_template_params(); EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 4) m_storage.data()[0] = val0; m_storage.data()[1] = val1; m_storage.data()[2] = val2; m_storage.data()[3] = val3; } /** Copy constructor */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const Array& other) : Base(other) { } private: struct PrivateType {}; public: /** \sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const EigenBase<OtherDerived> &other, typename internal::enable_if<internal::is_convertible<typename OtherDerived::Scalar,Scalar>::value, PrivateType>::type = PrivateType()) : Base(other.derived()) { } EIGEN_DEVICE_FUNC inline Index innerStride() const { return 1; } EIGEN_DEVICE_FUNC inline Index outerStride() const { return this->innerSize(); } #ifdef EIGEN_ARRAY_PLUGIN #include EIGEN_ARRAY_PLUGIN #endif private: template<typename MatrixType, typename OtherDerived, bool SwapPointers> friend struct internal::matrix_swap_impl; }; /** \defgroup arraytypedefs Global array typedefs * \ingroup Core_Module * * Eigen defines several typedef shortcuts for most common 1D and 2D array types. * * The general patterns are the following: * * \c ArrayRowsColsType where \c Rows and \c Cols can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size, * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd * for complex double. * * For example, \c Array33d is a fixed-size 3x3 array type of doubles, and \c ArrayXXf is a dynamic-size matrix of floats. * * There are also \c ArraySizeType which are self-explanatory. For example, \c Array4cf is * a fixed-size 1D array of 4 complex floats. 
* * \sa class Array */ #define EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ /** \ingroup arraytypedefs */ \ typedef Array<Type, Size, Size> Array##SizeSuffix##SizeSuffix##TypeSuffix; \ /** \ingroup arraytypedefs */ \ typedef Array<Type, Size, 1> Array##SizeSuffix##TypeSuffix; #define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \ /** \ingroup arraytypedefs */ \ typedef Array<Type, Size, Dynamic> Array##Size##X##TypeSuffix; \ /** \ingroup arraytypedefs */ \ typedef Array<Type, Dynamic, Size> Array##X##Size##TypeSuffix; #define EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 2, 2) \ EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 3, 3) \ EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 4, 4) \ EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \ EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \ EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \ EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 4) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(int, i) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(float, f) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(double, d) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<float>, cf) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<double>, cd) #undef EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES #undef EIGEN_MAKE_ARRAY_TYPEDEFS #undef EIGEN_MAKE_ARRAY_TYPEDEFS_LARGE #define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \ using Eigen::Matrix##SizeSuffix##TypeSuffix; \ using Eigen::Vector##SizeSuffix##TypeSuffix; \ using Eigen::RowVector##SizeSuffix##TypeSuffix; #define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(TypeSuffix) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \ #define EIGEN_USING_ARRAY_TYPEDEFS \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(i) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(f) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(d) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cf) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cd) } // end namespace Eigen #endif // EIGEN_ARRAY_H
Unknown
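Below is a short, illustrative usage sketch (not part of the repository file above) exercising the Array constructors and the generated typedefs from Array.h; the concrete values and the printed output are arbitrary demonstration choices.

#include <Eigen/Core>
#include <iostream>

int main()
{
  // Fixed-size coefficient constructors defined above (3 and 4 coefficients).
  Eigen::Array3d a(1.0, 2.0, 3.0);
  Eigen::Array4f b(1.f, 2.f, 3.f, 4.f);

  // Dynamic-size typedef generated by EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES.
  Eigen::ArrayXXd c = Eigen::ArrayXXd::Constant(2, 2, 0.5);

  // All operations on arrays are coefficient-wise.
  std::cout << (a * a).transpose() << "\n"; // 1 4 9
  std::cout << b.sum() << "\n";             // 10
  std::cout << (c + 1.0) << "\n";           // 2x2 array of 1.5
  return 0;
}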
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/BandMatrix.h
.h
13,910
354
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BANDMATRIX_H #define EIGEN_BANDMATRIX_H namespace Eigen { namespace internal { template<typename Derived> class BandMatrixBase : public EigenBase<Derived> { public: enum { Flags = internal::traits<Derived>::Flags, CoeffReadCost = internal::traits<Derived>::CoeffReadCost, RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime, ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime, MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime, MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime, Supers = internal::traits<Derived>::Supers, Subs = internal::traits<Derived>::Subs, Options = internal::traits<Derived>::Options }; typedef typename internal::traits<Derived>::Scalar Scalar; typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> DenseMatrixType; typedef typename DenseMatrixType::StorageIndex StorageIndex; typedef typename internal::traits<Derived>::CoefficientsType CoefficientsType; typedef EigenBase<Derived> Base; protected: enum { DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 1 + Supers + Subs : Dynamic, SizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime) }; public: using Base::derived; using Base::rows; using Base::cols; /** \returns the number of super diagonals */ inline Index supers() const { return derived().supers(); } /** \returns the number of sub diagonals */ inline Index subs() const { return derived().subs(); } /** \returns an expression of the underlying coefficient matrix */ inline const CoefficientsType& coeffs() const { return derived().coeffs(); } /** \returns an expression of the underlying coefficient matrix */ inline CoefficientsType& coeffs() { return derived().coeffs(); } /** \returns a vector expression of the \a i -th column, * only the meaningful part is returned. * \warning the internal storage must be column major. */ inline Block<CoefficientsType,Dynamic,1> col(Index i) { EIGEN_STATIC_ASSERT((Options&RowMajor)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); Index start = 0; Index len = coeffs().rows(); if (i<=supers()) { start = supers()-i; len = (std::min)(rows(),std::max<Index>(0,coeffs().rows() - (supers()-i))); } else if (i>=rows()-subs()) len = std::max<Index>(0,coeffs().rows() - (i + 1 - rows() + subs())); return Block<CoefficientsType,Dynamic,1>(coeffs(), start, i, len, 1); } /** \returns a vector expression of the main diagonal */ inline Block<CoefficientsType,1,SizeAtCompileTime> diagonal() { return Block<CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,(std::min)(rows(),cols())); } /** \returns a vector expression of the main diagonal (const version) */ inline const Block<const CoefficientsType,1,SizeAtCompileTime> diagonal() const { return Block<const CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,(std::min)(rows(),cols())); } template<int Index> struct DiagonalIntReturnType { enum { ReturnOpposite = (Options&SelfAdjoint) && (((Index)>0 && Supers==0) || ((Index)<0 && Subs==0)), Conjugate = ReturnOpposite && NumTraits<Scalar>::IsComplex, ActualIndex = ReturnOpposite ? 
-Index : Index, DiagonalSize = (RowsAtCompileTime==Dynamic || ColsAtCompileTime==Dynamic) ? Dynamic : (ActualIndex<0 ? EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime, RowsAtCompileTime + ActualIndex) : EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime - ActualIndex)) }; typedef Block<CoefficientsType,1, DiagonalSize> BuildType; typedef typename internal::conditional<Conjugate, CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>,BuildType >, BuildType>::type Type; }; /** \returns a vector expression of the \a N -th sub or super diagonal */ template<int N> inline typename DiagonalIntReturnType<N>::Type diagonal() { return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N)); } /** \returns a vector expression of the \a N -th sub or super diagonal */ template<int N> inline const typename DiagonalIntReturnType<N>::Type diagonal() const { return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N)); } /** \returns a vector expression of the \a i -th sub or super diagonal */ inline Block<CoefficientsType,1,Dynamic> diagonal(Index i) { eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers())); return Block<CoefficientsType,1,Dynamic>(coeffs(), supers()-i, std::max<Index>(0,i), 1, diagonalLength(i)); } /** \returns a vector expression of the \a i -th sub or super diagonal */ inline const Block<const CoefficientsType,1,Dynamic> diagonal(Index i) const { eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers())); return Block<const CoefficientsType,1,Dynamic>(coeffs(), supers()-i, std::max<Index>(0,i), 1, diagonalLength(i)); } template<typename Dest> inline void evalTo(Dest& dst) const { dst.resize(rows(),cols()); dst.setZero(); dst.diagonal() = diagonal(); for (Index i=1; i<=supers();++i) dst.diagonal(i) = diagonal(i); for (Index i=1; i<=subs();++i) dst.diagonal(-i) = diagonal(-i); } DenseMatrixType toDenseMatrix() const { DenseMatrixType res(rows(),cols()); evalTo(res); return res; } protected: inline Index diagonalLength(Index i) const { return i<0 ? (std::min)(cols(),rows()+i) : (std::min)(rows(),cols()-i); } }; /** * \class BandMatrix * \ingroup Core_Module * * \brief Represents a rectangular matrix with a banded storage * * \tparam _Scalar Numeric type, i.e. float, double, int * \tparam _Rows Number of rows, or \b Dynamic * \tparam _Cols Number of columns, or \b Dynamic * \tparam _Supers Number of super diagonal * \tparam _Subs Number of sub diagonal * \tparam _Options A combination of either \b #RowMajor or \b #ColMajor, and of \b #SelfAdjoint * The former controls \ref TopicStorageOrders "storage order", and defaults to * column-major. The latter controls whether the matrix represents a selfadjoint * matrix in which case either Supers of Subs have to be null. * * \sa class TridiagonalMatrix */ template<typename _Scalar, int _Rows, int _Cols, int _Supers, int _Subs, int _Options> struct traits<BandMatrix<_Scalar,_Rows,_Cols,_Supers,_Subs,_Options> > { typedef _Scalar Scalar; typedef Dense StorageKind; typedef Eigen::Index StorageIndex; enum { CoeffReadCost = NumTraits<Scalar>::ReadCost, RowsAtCompileTime = _Rows, ColsAtCompileTime = _Cols, MaxRowsAtCompileTime = _Rows, MaxColsAtCompileTime = _Cols, Flags = LvalueBit, Supers = _Supers, Subs = _Subs, Options = _Options, DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 
1 + Supers + Subs : Dynamic }; typedef Matrix<Scalar,DataRowsAtCompileTime,ColsAtCompileTime,Options&RowMajor?RowMajor:ColMajor> CoefficientsType; }; template<typename _Scalar, int Rows, int Cols, int Supers, int Subs, int Options> class BandMatrix : public BandMatrixBase<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Options> > { public: typedef typename internal::traits<BandMatrix>::Scalar Scalar; typedef typename internal::traits<BandMatrix>::StorageIndex StorageIndex; typedef typename internal::traits<BandMatrix>::CoefficientsType CoefficientsType; explicit inline BandMatrix(Index rows=Rows, Index cols=Cols, Index supers=Supers, Index subs=Subs) : m_coeffs(1+supers+subs,cols), m_rows(rows), m_supers(supers), m_subs(subs) { } /** \returns the number of rows */ inline Index rows() const { return m_rows.value(); } /** \returns the number of columns */ inline Index cols() const { return m_coeffs.cols(); } /** \returns the number of super diagonals */ inline Index supers() const { return m_supers.value(); } /** \returns the number of sub diagonals */ inline Index subs() const { return m_subs.value(); } inline const CoefficientsType& coeffs() const { return m_coeffs; } inline CoefficientsType& coeffs() { return m_coeffs; } protected: CoefficientsType m_coeffs; internal::variable_if_dynamic<Index, Rows> m_rows; internal::variable_if_dynamic<Index, Supers> m_supers; internal::variable_if_dynamic<Index, Subs> m_subs; }; template<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options> class BandMatrixWrapper; template<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options> struct traits<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Options> > { typedef typename _CoefficientsType::Scalar Scalar; typedef typename _CoefficientsType::StorageKind StorageKind; typedef typename _CoefficientsType::StorageIndex StorageIndex; enum { CoeffReadCost = internal::traits<_CoefficientsType>::CoeffReadCost, RowsAtCompileTime = _Rows, ColsAtCompileTime = _Cols, MaxRowsAtCompileTime = _Rows, MaxColsAtCompileTime = _Cols, Flags = LvalueBit, Supers = _Supers, Subs = _Subs, Options = _Options, DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ?
1 + Supers + Subs : Dynamic }; typedef _CoefficientsType CoefficientsType; }; template<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options> class BandMatrixWrapper : public BandMatrixBase<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Options> > { public: typedef typename internal::traits<BandMatrixWrapper>::Scalar Scalar; typedef typename internal::traits<BandMatrixWrapper>::CoefficientsType CoefficientsType; typedef typename internal::traits<BandMatrixWrapper>::StorageIndex StorageIndex; explicit inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows=_Rows, Index cols=_Cols, Index supers=_Supers, Index subs=_Subs) : m_coeffs(coeffs), m_rows(rows), m_supers(supers), m_subs(subs) { EIGEN_UNUSED_VARIABLE(cols); //internal::assert(coeffs.cols()==cols() && (supers()+subs()+1)==coeffs.rows()); } /** \returns the number of rows */ inline Index rows() const { return m_rows.value(); } /** \returns the number of columns */ inline Index cols() const { return m_coeffs.cols(); } /** \returns the number of super diagonals */ inline Index supers() const { return m_supers.value(); } /** \returns the number of sub diagonals */ inline Index subs() const { return m_subs.value(); } inline const CoefficientsType& coeffs() const { return m_coeffs; } protected: const CoefficientsType& m_coeffs; internal::variable_if_dynamic<Index, _Rows> m_rows; internal::variable_if_dynamic<Index, _Supers> m_supers; internal::variable_if_dynamic<Index, _Subs> m_subs; }; /** * \class TridiagonalMatrix * \ingroup Core_Module * * \brief Represents a tridiagonal matrix with a compact banded storage * * \tparam Scalar Numeric type, i.e. float, double, int * \tparam Size Number of rows and cols, or \b Dynamic * \tparam Options Can be 0 or \b SelfAdjoint * * \sa class BandMatrix */ template<typename Scalar, int Size, int Options> class TridiagonalMatrix : public BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor> { typedef BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor> Base; typedef typename Base::StorageIndex StorageIndex; public: explicit TridiagonalMatrix(Index size = Size) : Base(size,size,Options&SelfAdjoint?0:1,1) {} inline typename Base::template DiagonalIntReturnType<1>::Type super() { return Base::template diagonal<1>(); } inline const typename Base::template DiagonalIntReturnType<1>::Type super() const { return Base::template diagonal<1>(); } inline typename Base::template DiagonalIntReturnType<-1>::Type sub() { return Base::template diagonal<-1>(); } inline const typename Base::template DiagonalIntReturnType<-1>::Type sub() const { return Base::template diagonal<-1>(); } protected: }; struct BandShape {}; template<typename _Scalar, int _Rows, int _Cols, int _Supers, int _Subs, int _Options> struct evaluator_traits<BandMatrix<_Scalar,_Rows,_Cols,_Supers,_Subs,_Options> > : public evaluator_traits_base<BandMatrix<_Scalar,_Rows,_Cols,_Supers,_Subs,_Options> > { typedef BandShape Shape; }; template<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options> struct evaluator_traits<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Options> > : public evaluator_traits_base<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Options> > { typedef BandShape Shape; }; template<> struct AssignmentKind<DenseShape,BandShape> { typedef EigenBase2EigenBase Kind; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_BANDMATRIX_H
Unknown
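A minimal sketch of how the BandMatrix class above can be used. Note that BandMatrix lives in Eigen::internal and is not part of the public API, so this is for exposition only; the sizes and values are arbitrary, and the template arguments are spelled out explicitly rather than relying on defaults.

#include <Eigen/Core>
#include <iostream>

int main()
{
  // 5x5 band matrix with 1 super-diagonal and 2 sub-diagonals, all sizes dynamic.
  Eigen::internal::BandMatrix<double, Eigen::Dynamic, Eigen::Dynamic,
                              Eigen::Dynamic, Eigen::Dynamic, 0> m(5, 5, 1, 2);
  m.coeffs().setZero();             // compact (1+supers+subs) x cols storage
  m.diagonal().setConstant(2.0);    // main diagonal
  m.diagonal(1).setConstant(-1.0);  // first super-diagonal
  m.diagonal(-1).setConstant(-1.0); // first sub-diagonal
  std::cout << m.toDenseMatrix() << "\n"; // expand to a dense 5x5 matrix
  return 0;
}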
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/TriangularMatrix.h
.h
37,304
986
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_TRIANGULARMATRIX_H #define EIGEN_TRIANGULARMATRIX_H namespace Eigen { namespace internal { template<int Side, typename TriangularType, typename Rhs> struct triangular_solve_retval; } /** \class TriangularBase * \ingroup Core_Module * * \brief Base class for triangular part in a matrix */ template<typename Derived> class TriangularBase : public EigenBase<Derived> { public: enum { Mode = internal::traits<Derived>::Mode, RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime, ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime, MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime, MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime, SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime, internal::traits<Derived>::ColsAtCompileTime>::ret), /**< This is equal to the number of coefficients, i.e. the number of * rows times the number of columns, or to \a Dynamic if this is not * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime, internal::traits<Derived>::MaxColsAtCompileTime>::ret) }; typedef typename internal::traits<Derived>::Scalar Scalar; typedef typename internal::traits<Derived>::StorageKind StorageKind; typedef typename internal::traits<Derived>::StorageIndex StorageIndex; typedef typename internal::traits<Derived>::FullMatrixType DenseMatrixType; typedef DenseMatrixType DenseType; typedef Derived const& Nested; EIGEN_DEVICE_FUNC inline TriangularBase() { eigen_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); } EIGEN_DEVICE_FUNC inline Index rows() const { return derived().rows(); } EIGEN_DEVICE_FUNC inline Index cols() const { return derived().cols(); } EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().outerStride(); } EIGEN_DEVICE_FUNC inline Index innerStride() const { return derived().innerStride(); } // dummy resize function void resize(Index rows, Index cols) { EIGEN_UNUSED_VARIABLE(rows); EIGEN_UNUSED_VARIABLE(cols); eigen_assert(rows==this->rows() && cols==this->cols()); } EIGEN_DEVICE_FUNC inline Scalar coeff(Index row, Index col) const { return derived().coeff(row,col); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col) { return derived().coeffRef(row,col); } /** \see MatrixBase::copyCoeff(row,col) */ template<typename Other> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, Other& other) { derived().coeffRef(row, col) = other.coeff(row, col); } EIGEN_DEVICE_FUNC inline Scalar operator()(Index row, Index col) const { check_coordinates(row, col); return coeff(row,col); } EIGEN_DEVICE_FUNC inline Scalar& operator()(Index row, Index col) { check_coordinates(row, col); return coeffRef(row,col); } #ifndef EIGEN_PARSED_BY_DOXYGEN EIGEN_DEVICE_FUNC inline const Derived& derived() const { return *static_cast<const Derived*>(this); } EIGEN_DEVICE_FUNC inline Derived& derived() { return *static_cast<Derived*>(this); } #endif // not EIGEN_PARSED_BY_DOXYGEN template<typename 
DenseDerived> EIGEN_DEVICE_FUNC void evalTo(MatrixBase<DenseDerived> &other) const; template<typename DenseDerived> EIGEN_DEVICE_FUNC void evalToLazy(MatrixBase<DenseDerived> &other) const; EIGEN_DEVICE_FUNC DenseMatrixType toDenseMatrix() const { DenseMatrixType res(rows(), cols()); evalToLazy(res); return res; } protected: void check_coordinates(Index row, Index col) const { EIGEN_ONLY_USED_FOR_DEBUG(row); EIGEN_ONLY_USED_FOR_DEBUG(col); eigen_assert(col>=0 && col<cols() && row>=0 && row<rows()); const int mode = int(Mode) & ~SelfAdjoint; EIGEN_ONLY_USED_FOR_DEBUG(mode); eigen_assert((mode==Upper && col>=row) || (mode==Lower && col<=row) || ((mode==StrictlyUpper || mode==UnitUpper) && col>row) || ((mode==StrictlyLower || mode==UnitLower) && col<row)); } #ifdef EIGEN_INTERNAL_DEBUGGING void check_coordinates_internal(Index row, Index col) const { check_coordinates(row, col); } #else void check_coordinates_internal(Index , Index ) const {} #endif }; /** \class TriangularView * \ingroup Core_Module * * \brief Expression of a triangular part in a matrix * * \param MatrixType the type of the object in which we are taking the triangular part * \param Mode the kind of triangular matrix expression to construct. Can be #Upper, * #Lower, #UnitUpper, #UnitLower, #StrictlyUpper, or #StrictlyLower. * This is in fact a bit field; it must have either #Upper or #Lower, * and additionally it may have #UnitDiag or #ZeroDiag or neither. * * This class represents a triangular part of a matrix, not necessarily square. Strictly speaking, for rectangular * matrices one should speak of "trapezoid" parts. This class is the return type * of MatrixBase::triangularView() and SparseMatrixBase::triangularView(), and most of the time this is the only way it is used. * * \sa MatrixBase::triangularView() */ namespace internal { template<typename MatrixType, unsigned int _Mode> struct traits<TriangularView<MatrixType, _Mode> > : traits<MatrixType> { typedef typename ref_selector<MatrixType>::non_const_type MatrixTypeNested; typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef; typedef typename remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned; typedef typename MatrixType::PlainObject FullMatrixType; typedef MatrixType ExpressionType; enum { Mode = _Mode, FlagsLvalueBit = is_lvalue<MatrixType>::value ? 
LvalueBit : 0, Flags = (MatrixTypeNestedCleaned::Flags & (HereditaryBits | FlagsLvalueBit) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit))) }; }; } template<typename _MatrixType, unsigned int _Mode, typename StorageKind> class TriangularViewImpl; template<typename _MatrixType, unsigned int _Mode> class TriangularView : public TriangularViewImpl<_MatrixType, _Mode, typename internal::traits<_MatrixType>::StorageKind > { public: typedef TriangularViewImpl<_MatrixType, _Mode, typename internal::traits<_MatrixType>::StorageKind > Base; typedef typename internal::traits<TriangularView>::Scalar Scalar; typedef _MatrixType MatrixType; protected: typedef typename internal::traits<TriangularView>::MatrixTypeNested MatrixTypeNested; typedef typename internal::traits<TriangularView>::MatrixTypeNestedNonRef MatrixTypeNestedNonRef; typedef typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type MatrixConjugateReturnType; public: typedef typename internal::traits<TriangularView>::StorageKind StorageKind; typedef typename internal::traits<TriangularView>::MatrixTypeNestedCleaned NestedExpression; enum { Mode = _Mode, Flags = internal::traits<TriangularView>::Flags, TransposeMode = (Mode & Upper ? Lower : 0) | (Mode & Lower ? Upper : 0) | (Mode & (UnitDiag)) | (Mode & (ZeroDiag)), IsVectorAtCompileTime = false }; EIGEN_DEVICE_FUNC explicit inline TriangularView(MatrixType& matrix) : m_matrix(matrix) {} EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TriangularView) /** \copydoc EigenBase::rows() */ EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.rows(); } /** \copydoc EigenBase::cols() */ EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.cols(); } /** \returns a const reference to the nested expression */ EIGEN_DEVICE_FUNC const NestedExpression& nestedExpression() const { return m_matrix; } /** \returns a reference to the nested expression */ EIGEN_DEVICE_FUNC NestedExpression& nestedExpression() { return m_matrix; } typedef TriangularView<const MatrixConjugateReturnType,Mode> ConjugateReturnType; /** \sa MatrixBase::conjugate() const */ EIGEN_DEVICE_FUNC inline const ConjugateReturnType conjugate() const { return ConjugateReturnType(m_matrix.conjugate()); } typedef TriangularView<const typename MatrixType::AdjointReturnType,TransposeMode> AdjointReturnType; /** \sa MatrixBase::adjoint() const */ EIGEN_DEVICE_FUNC inline const AdjointReturnType adjoint() const { return AdjointReturnType(m_matrix.adjoint()); } typedef TriangularView<typename MatrixType::TransposeReturnType,TransposeMode> TransposeReturnType; /** \sa MatrixBase::transpose() */ EIGEN_DEVICE_FUNC inline TransposeReturnType transpose() { EIGEN_STATIC_ASSERT_LVALUE(MatrixType) typename MatrixType::TransposeReturnType tmp(m_matrix); return TransposeReturnType(tmp); } typedef TriangularView<const typename MatrixType::ConstTransposeReturnType,TransposeMode> ConstTransposeReturnType; /** \sa MatrixBase::transpose() const */ EIGEN_DEVICE_FUNC inline const ConstTransposeReturnType transpose() const { return ConstTransposeReturnType(m_matrix.transpose()); } template<typename Other> EIGEN_DEVICE_FUNC inline const Solve<TriangularView, Other> solve(const MatrixBase<Other>& other) const { return Solve<TriangularView, Other>(*this, other.derived()); } // workaround MSVC ICE #if EIGEN_COMP_MSVC template<int Side, typename Other> EIGEN_DEVICE_FUNC inline const internal::triangular_solve_retval<Side,TriangularView, Other> solve(const MatrixBase<Other>& other) const { return Base::template solve<Side>(other); } 
#else using Base::solve; #endif /** \returns a selfadjoint view of the referenced triangular part which must be either \c #Upper or \c #Lower. * * This is a shortcut for \code this->nestedExpression().selfadjointView<(*this)::Mode>() \endcode * \sa MatrixBase::selfadjointView() */ EIGEN_DEVICE_FUNC SelfAdjointView<MatrixTypeNestedNonRef,Mode> selfadjointView() { EIGEN_STATIC_ASSERT((Mode&(UnitDiag|ZeroDiag))==0,PROGRAMMING_ERROR); return SelfAdjointView<MatrixTypeNestedNonRef,Mode>(m_matrix); } /** This is the const version of selfadjointView() */ EIGEN_DEVICE_FUNC const SelfAdjointView<MatrixTypeNestedNonRef,Mode> selfadjointView() const { EIGEN_STATIC_ASSERT((Mode&(UnitDiag|ZeroDiag))==0,PROGRAMMING_ERROR); return SelfAdjointView<MatrixTypeNestedNonRef,Mode>(m_matrix); } /** \returns the determinant of the triangular matrix * \sa MatrixBase::determinant() */ EIGEN_DEVICE_FUNC Scalar determinant() const { if (Mode & UnitDiag) return 1; else if (Mode & ZeroDiag) return 0; else return m_matrix.diagonal().prod(); } protected: MatrixTypeNested m_matrix; }; /** \ingroup Core_Module * * \brief Base class for a triangular part in a \b dense matrix * * This class is an abstract base class of class TriangularView, and objects of type TriangularViewImpl cannot be instantiated. * It extends class TriangularView with additional methods which available for dense expressions only. * * \sa class TriangularView, MatrixBase::triangularView() */ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_MatrixType,_Mode,Dense> : public TriangularBase<TriangularView<_MatrixType, _Mode> > { public: typedef TriangularView<_MatrixType, _Mode> TriangularViewType; typedef TriangularBase<TriangularViewType> Base; typedef typename internal::traits<TriangularViewType>::Scalar Scalar; typedef _MatrixType MatrixType; typedef typename MatrixType::PlainObject DenseMatrixType; typedef DenseMatrixType PlainObject; public: using Base::evalToLazy; using Base::derived; typedef typename internal::traits<TriangularViewType>::StorageKind StorageKind; enum { Mode = _Mode, Flags = internal::traits<TriangularViewType>::Flags }; /** \returns the outer-stride of the underlying dense matrix * \sa DenseCoeffsBase::outerStride() */ EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().nestedExpression().outerStride(); } /** \returns the inner-stride of the underlying dense matrix * \sa DenseCoeffsBase::innerStride() */ EIGEN_DEVICE_FUNC inline Index innerStride() const { return derived().nestedExpression().innerStride(); } /** \sa MatrixBase::operator+=() */ template<typename Other> EIGEN_DEVICE_FUNC TriangularViewType& operator+=(const DenseBase<Other>& other) { internal::call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar,typename Other::Scalar>()); return derived(); } /** \sa MatrixBase::operator-=() */ template<typename Other> EIGEN_DEVICE_FUNC TriangularViewType& operator-=(const DenseBase<Other>& other) { internal::call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar,typename Other::Scalar>()); return derived(); } /** \sa MatrixBase::operator*=() */ EIGEN_DEVICE_FUNC TriangularViewType& operator*=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = derived().nestedExpression() * other; } /** \sa DenseBase::operator/=() */ EIGEN_DEVICE_FUNC TriangularViewType& operator/=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = derived().nestedExpression() / other; } /** \sa 
MatrixBase::fill() */ EIGEN_DEVICE_FUNC void fill(const Scalar& value) { setConstant(value); } /** \sa MatrixBase::setConstant() */ EIGEN_DEVICE_FUNC TriangularViewType& setConstant(const Scalar& value) { return *this = MatrixType::Constant(derived().rows(), derived().cols(), value); } /** \sa MatrixBase::setZero() */ EIGEN_DEVICE_FUNC TriangularViewType& setZero() { return setConstant(Scalar(0)); } /** \sa MatrixBase::setOnes() */ EIGEN_DEVICE_FUNC TriangularViewType& setOnes() { return setConstant(Scalar(1)); } /** \sa MatrixBase::coeff() * \warning the coordinates must fit into the referenced triangular part */ EIGEN_DEVICE_FUNC inline Scalar coeff(Index row, Index col) const { Base::check_coordinates_internal(row, col); return derived().nestedExpression().coeff(row, col); } /** \sa MatrixBase::coeffRef() * \warning the coordinates must fit into the referenced triangular part */ EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col) { EIGEN_STATIC_ASSERT_LVALUE(TriangularViewType); Base::check_coordinates_internal(row, col); return derived().nestedExpression().coeffRef(row, col); } /** Assigns a triangular matrix to a triangular part of a dense matrix */ template<typename OtherDerived> EIGEN_DEVICE_FUNC TriangularViewType& operator=(const TriangularBase<OtherDerived>& other); /** Shortcut for\code *this = other.other.triangularView<(*this)::Mode>() \endcode */ template<typename OtherDerived> EIGEN_DEVICE_FUNC TriangularViewType& operator=(const MatrixBase<OtherDerived>& other); #ifndef EIGEN_PARSED_BY_DOXYGEN EIGEN_DEVICE_FUNC TriangularViewType& operator=(const TriangularViewImpl& other) { return *this = other.derived().nestedExpression(); } /** \deprecated */ template<typename OtherDerived> EIGEN_DEVICE_FUNC void lazyAssign(const TriangularBase<OtherDerived>& other); /** \deprecated */ template<typename OtherDerived> EIGEN_DEVICE_FUNC void lazyAssign(const MatrixBase<OtherDerived>& other); #endif /** Efficient triangular matrix times vector/matrix product */ template<typename OtherDerived> EIGEN_DEVICE_FUNC const Product<TriangularViewType,OtherDerived> operator*(const MatrixBase<OtherDerived>& rhs) const { return Product<TriangularViewType,OtherDerived>(derived(), rhs.derived()); } /** Efficient vector/matrix times triangular matrix product */ template<typename OtherDerived> friend EIGEN_DEVICE_FUNC const Product<OtherDerived,TriangularViewType> operator*(const MatrixBase<OtherDerived>& lhs, const TriangularViewImpl& rhs) { return Product<OtherDerived,TriangularViewType>(lhs.derived(),rhs.derived()); } /** \returns the product of the inverse of \c *this with \a other, \a *this being triangular. * * This function computes the inverse-matrix matrix product inverse(\c *this) * \a other if * \a Side==OnTheLeft (the default), or the right-inverse-multiply \a other * inverse(\c *this) if * \a Side==OnTheRight. * * Note that the template parameter \c Side can be ommitted, in which case \c Side==OnTheLeft * * The matrix \c *this must be triangular and invertible (i.e., all the coefficients of the * diagonal must be non zero). It works as a forward (resp. backward) substitution if \c *this * is an upper (resp. lower) triangular matrix. * * Example: \include Triangular_solve.cpp * Output: \verbinclude Triangular_solve.out * * This function returns an expression of the inverse-multiply and can works in-place if it is assigned * to the same matrix or vector \a other. 
* * For users coming from BLAS, this function (and more specifically solveInPlace()) offer * all the operations supported by the \c *TRSV and \c *TRSM BLAS routines. * * \sa TriangularView::solveInPlace() */ template<int Side, typename Other> EIGEN_DEVICE_FUNC inline const internal::triangular_solve_retval<Side,TriangularViewType, Other> solve(const MatrixBase<Other>& other) const; /** "in-place" version of TriangularView::solve() where the result is written in \a other * * \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here. * This function will const_cast it, so constness isn't honored here. * * Note that the template parameter \c Side can be ommitted, in which case \c Side==OnTheLeft * * See TriangularView:solve() for the details. */ template<int Side, typename OtherDerived> EIGEN_DEVICE_FUNC void solveInPlace(const MatrixBase<OtherDerived>& other) const; template<typename OtherDerived> EIGEN_DEVICE_FUNC void solveInPlace(const MatrixBase<OtherDerived>& other) const { return solveInPlace<OnTheLeft>(other); } /** Swaps the coefficients of the common triangular parts of two matrices */ template<typename OtherDerived> EIGEN_DEVICE_FUNC #ifdef EIGEN_PARSED_BY_DOXYGEN void swap(TriangularBase<OtherDerived> &other) #else void swap(TriangularBase<OtherDerived> const & other) #endif { EIGEN_STATIC_ASSERT_LVALUE(OtherDerived); call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op<Scalar>()); } /** \deprecated * Shortcut for \code (*this).swap(other.triangularView<(*this)::Mode>()) \endcode */ template<typename OtherDerived> EIGEN_DEVICE_FUNC void swap(MatrixBase<OtherDerived> const & other) { EIGEN_STATIC_ASSERT_LVALUE(OtherDerived); call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op<Scalar>()); } template<typename RhsType, typename DstType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _solve_impl(const RhsType &rhs, DstType &dst) const { if(!internal::is_same_dense(dst,rhs)) dst = rhs; this->solveInPlace(dst); } template<typename ProductType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TriangularViewType& _assignProduct(const ProductType& prod, const Scalar& alpha, bool beta); protected: EIGEN_DEFAULT_COPY_CONSTRUCTOR(TriangularViewImpl) EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(TriangularViewImpl) }; /*************************************************************************** * Implementation of triangular evaluation/assignment ***************************************************************************/ #ifndef EIGEN_PARSED_BY_DOXYGEN // FIXME should we keep that possibility template<typename MatrixType, unsigned int Mode> template<typename OtherDerived> inline TriangularView<MatrixType, Mode>& TriangularViewImpl<MatrixType, Mode, Dense>::operator=(const MatrixBase<OtherDerived>& other) { internal::call_assignment_no_alias(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>()); return derived(); } // FIXME should we keep that possibility template<typename MatrixType, unsigned int Mode> template<typename OtherDerived> void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const MatrixBase<OtherDerived>& other) { internal::call_assignment_no_alias(derived(), other.template triangularView<Mode>()); } template<typename MatrixType, unsigned int Mode> template<typename OtherDerived> inline TriangularView<MatrixType, Mode>& TriangularViewImpl<MatrixType, Mode, Dense>::operator=(const TriangularBase<OtherDerived>& other) { eigen_assert(Mode == 
int(OtherDerived::Mode)); internal::call_assignment(derived(), other.derived()); return derived(); } template<typename MatrixType, unsigned int Mode> template<typename OtherDerived> void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const TriangularBase<OtherDerived>& other) { eigen_assert(Mode == int(OtherDerived::Mode)); internal::call_assignment_no_alias(derived(), other.derived()); } #endif /*************************************************************************** * Implementation of TriangularBase methods ***************************************************************************/ /** Assigns a triangular or selfadjoint matrix to a dense matrix. * If the matrix is triangular, the opposite part is set to zero. */ template<typename Derived> template<typename DenseDerived> void TriangularBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const { evalToLazy(other.derived()); } /*************************************************************************** * Implementation of TriangularView methods ***************************************************************************/ /*************************************************************************** * Implementation of MatrixBase methods ***************************************************************************/ /** * \returns an expression of a triangular view extracted from the current matrix * * The parameter \a Mode can have the following values: \c #Upper, \c #StrictlyUpper, \c #UnitUpper, * \c #Lower, \c #StrictlyLower, \c #UnitLower. * * Example: \include MatrixBase_triangularView.cpp * Output: \verbinclude MatrixBase_triangularView.out * * \sa class TriangularView */ template<typename Derived> template<unsigned int Mode> typename MatrixBase<Derived>::template TriangularViewReturnType<Mode>::Type MatrixBase<Derived>::triangularView() { return typename TriangularViewReturnType<Mode>::Type(derived()); } /** This is the const version of MatrixBase::triangularView() */ template<typename Derived> template<unsigned int Mode> typename MatrixBase<Derived>::template ConstTriangularViewReturnType<Mode>::Type MatrixBase<Derived>::triangularView() const { return typename ConstTriangularViewReturnType<Mode>::Type(derived()); } /** \returns true if *this is approximately equal to an upper triangular matrix, * within the precision given by \a prec. * * \sa isLowerTriangular() */ template<typename Derived> bool MatrixBase<Derived>::isUpperTriangular(const RealScalar& prec) const { RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1); for(Index j = 0; j < cols(); ++j) { Index maxi = numext::mini(j, rows()-1); for(Index i = 0; i <= maxi; ++i) { RealScalar absValue = numext::abs(coeff(i,j)); if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue; } } RealScalar threshold = maxAbsOnUpperPart * prec; for(Index j = 0; j < cols(); ++j) for(Index i = j+1; i < rows(); ++i) if(numext::abs(coeff(i, j)) > threshold) return false; return true; } /** \returns true if *this is approximately equal to a lower triangular matrix, * within the precision given by \a prec. 
* * \sa isUpperTriangular() */ template<typename Derived> bool MatrixBase<Derived>::isLowerTriangular(const RealScalar& prec) const { RealScalar maxAbsOnLowerPart = static_cast<RealScalar>(-1); for(Index j = 0; j < cols(); ++j) for(Index i = j; i < rows(); ++i) { RealScalar absValue = numext::abs(coeff(i,j)); if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue; } RealScalar threshold = maxAbsOnLowerPart * prec; for(Index j = 1; j < cols(); ++j) { Index maxi = numext::mini(j, rows()-1); for(Index i = 0; i < maxi; ++i) if(numext::abs(coeff(i, j)) > threshold) return false; } return true; } /*************************************************************************** **************************************************************************** * Evaluators and Assignment of triangular expressions *************************************************************************** ***************************************************************************/ namespace internal { // TODO currently a triangular expression has the form TriangularView<.,.> // in the future triangular-ness should be defined by the expression traits // such that Transpose<TriangularView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work) template<typename MatrixType, unsigned int Mode> struct evaluator_traits<TriangularView<MatrixType,Mode> > { typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind; typedef typename glue_shapes<typename evaluator_traits<MatrixType>::Shape, TriangularShape>::type Shape; }; template<typename MatrixType, unsigned int Mode> struct unary_evaluator<TriangularView<MatrixType,Mode>, IndexBased> : evaluator<typename internal::remove_all<MatrixType>::type> { typedef TriangularView<MatrixType,Mode> XprType; typedef evaluator<typename internal::remove_all<MatrixType>::type> Base; unary_evaluator(const XprType &xpr) : Base(xpr.nestedExpression()) {} }; // Additional assignment kinds: struct Triangular2Triangular {}; struct Triangular2Dense {}; struct Dense2Triangular {}; template<typename Kernel, unsigned int Mode, int UnrollCount, bool ClearOpposite> struct triangular_assignment_loop; /** \internal Specialization of the dense assignment kernel for triangular matrices. * The main difference is that the triangular, diagonal, and opposite parts are processed through three different functions. 
* \tparam UpLo must be either Lower or Upper * \tparam Mode must be either 0, UnitDiag, ZeroDiag, or SelfAdjoint */ template<int UpLo, int Mode, int SetOpposite, typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT, typename Functor, int Version = Specialized> class triangular_dense_assignment_kernel : public generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version> { protected: typedef generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version> Base; typedef typename Base::DstXprType DstXprType; typedef typename Base::SrcXprType SrcXprType; using Base::m_dst; using Base::m_src; using Base::m_functor; public: typedef typename Base::DstEvaluatorType DstEvaluatorType; typedef typename Base::SrcEvaluatorType SrcEvaluatorType; typedef typename Base::Scalar Scalar; typedef typename Base::AssignmentTraits AssignmentTraits; EIGEN_DEVICE_FUNC triangular_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr) : Base(dst, src, func, dstExpr) {} #ifdef EIGEN_INTERNAL_DEBUGGING EIGEN_DEVICE_FUNC void assignCoeff(Index row, Index col) { eigen_internal_assert(row!=col); Base::assignCoeff(row,col); } #else using Base::assignCoeff; #endif EIGEN_DEVICE_FUNC void assignDiagonalCoeff(Index id) { if(Mode==UnitDiag && SetOpposite) m_functor.assignCoeff(m_dst.coeffRef(id,id), Scalar(1)); else if(Mode==ZeroDiag && SetOpposite) m_functor.assignCoeff(m_dst.coeffRef(id,id), Scalar(0)); else if(Mode==0) Base::assignCoeff(id,id); } EIGEN_DEVICE_FUNC void assignOppositeCoeff(Index row, Index col) { eigen_internal_assert(row!=col); if(SetOpposite) m_functor.assignCoeff(m_dst.coeffRef(row,col), Scalar(0)); } }; template<int Mode, bool SetOpposite, typename DstXprType, typename SrcXprType, typename Functor> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_triangular_assignment_loop(DstXprType& dst, const SrcXprType& src, const Functor &func) { typedef evaluator<DstXprType> DstEvaluatorType; typedef evaluator<SrcXprType> SrcEvaluatorType; SrcEvaluatorType srcEvaluator(src); Index dstRows = src.rows(); Index dstCols = src.cols(); if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) dst.resize(dstRows, dstCols); DstEvaluatorType dstEvaluator(dst); typedef triangular_dense_assignment_kernel< Mode&(Lower|Upper),Mode&(UnitDiag|ZeroDiag|SelfAdjoint),SetOpposite, DstEvaluatorType,SrcEvaluatorType,Functor> Kernel; Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived()); enum { unroll = DstXprType::SizeAtCompileTime != Dynamic && SrcEvaluatorType::CoeffReadCost < HugeCost && DstXprType::SizeAtCompileTime * (DstEvaluatorType::CoeffReadCost+SrcEvaluatorType::CoeffReadCost) / 2 <= EIGEN_UNROLLING_LIMIT }; triangular_assignment_loop<Kernel, Mode, unroll ? 
int(DstXprType::SizeAtCompileTime) : Dynamic, SetOpposite>::run(kernel); } template<int Mode, bool SetOpposite, typename DstXprType, typename SrcXprType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_triangular_assignment_loop(DstXprType& dst, const SrcXprType& src) { call_triangular_assignment_loop<Mode,SetOpposite>(dst, src, internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>()); } template<> struct AssignmentKind<TriangularShape,TriangularShape> { typedef Triangular2Triangular Kind; }; template<> struct AssignmentKind<DenseShape,TriangularShape> { typedef Triangular2Dense Kind; }; template<> struct AssignmentKind<TriangularShape,DenseShape> { typedef Dense2Triangular Kind; }; template< typename DstXprType, typename SrcXprType, typename Functor> struct Assignment<DstXprType, SrcXprType, Functor, Triangular2Triangular> { EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func) { eigen_assert(int(DstXprType::Mode) == int(SrcXprType::Mode)); call_triangular_assignment_loop<DstXprType::Mode, false>(dst, src, func); } }; template< typename DstXprType, typename SrcXprType, typename Functor> struct Assignment<DstXprType, SrcXprType, Functor, Triangular2Dense> { EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func) { call_triangular_assignment_loop<SrcXprType::Mode, (SrcXprType::Mode&SelfAdjoint)==0>(dst, src, func); } }; template< typename DstXprType, typename SrcXprType, typename Functor> struct Assignment<DstXprType, SrcXprType, Functor, Dense2Triangular> { EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func) { call_triangular_assignment_loop<DstXprType::Mode, false>(dst, src, func); } }; template<typename Kernel, unsigned int Mode, int UnrollCount, bool SetOpposite> struct triangular_assignment_loop { // FIXME: this is not very clean, perhaps this information should be provided by the kernel? typedef typename Kernel::DstEvaluatorType DstEvaluatorType; typedef typename DstEvaluatorType::XprType DstXprType; enum { col = (UnrollCount-1) / DstXprType::RowsAtCompileTime, row = (UnrollCount-1) % DstXprType::RowsAtCompileTime }; typedef typename Kernel::Scalar Scalar; EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel) { triangular_assignment_loop<Kernel, Mode, UnrollCount-1, SetOpposite>::run(kernel); if(row==col) kernel.assignDiagonalCoeff(row); else if( ((Mode&Lower) && row>col) || ((Mode&Upper) && row<col) ) kernel.assignCoeff(row,col); else if(SetOpposite) kernel.assignOppositeCoeff(row,col); } }; // prevent buggy user code from causing an infinite recursion template<typename Kernel, unsigned int Mode, bool SetOpposite> struct triangular_assignment_loop<Kernel, Mode, 0, SetOpposite> { EIGEN_DEVICE_FUNC static inline void run(Kernel &) {} }; // TODO: experiment with a recursive assignment procedure splitting the current // triangular part into one rectangular and two triangular parts. 
template<typename Kernel, unsigned int Mode, bool SetOpposite> struct triangular_assignment_loop<Kernel, Mode, Dynamic, SetOpposite> { typedef typename Kernel::Scalar Scalar; EIGEN_DEVICE_FUNC static inline void run(Kernel &kernel) { for(Index j = 0; j < kernel.cols(); ++j) { Index maxi = numext::mini(j, kernel.rows()); Index i = 0; if (((Mode&Lower) && SetOpposite) || (Mode&Upper)) { for(; i < maxi; ++i) if(Mode&Upper) kernel.assignCoeff(i, j); else kernel.assignOppositeCoeff(i, j); } else i = maxi; if(i<kernel.rows()) // then i==j kernel.assignDiagonalCoeff(i++); if (((Mode&Upper) && SetOpposite) || (Mode&Lower)) { for(; i < kernel.rows(); ++i) if(Mode&Lower) kernel.assignCoeff(i, j); else kernel.assignOppositeCoeff(i, j); } } } }; } // end namespace internal /** Assigns a triangular or selfadjoint matrix to a dense matrix. * If the matrix is triangular, the opposite part is set to zero. */ template<typename Derived> template<typename DenseDerived> void TriangularBase<Derived>::evalToLazy(MatrixBase<DenseDerived> &other) const { other.derived().resize(this->rows(), this->cols()); internal::call_triangular_assignment_loop<Derived::Mode,(Derived::Mode&SelfAdjoint)==0 /* SetOpposite */>(other.derived(), derived().nestedExpression()); } namespace internal { // Triangular = Product template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar> struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular> { typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType; static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename SrcXprType::Scalar> &) { Index dstRows = src.rows(); Index dstCols = src.cols(); if((dst.rows()!=dstRows) || (dst.cols()!=dstCols)) dst.resize(dstRows, dstCols); dst._assignProduct(src, 1, 0); } }; // Triangular += Product template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar> struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::add_assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular> { typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType; static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar,typename SrcXprType::Scalar> &) { dst._assignProduct(src, 1, 1); } }; // Triangular -= Product template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar> struct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::sub_assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular> { typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType; static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar,typename SrcXprType::Scalar> &) { dst._assignProduct(src, -1, 1); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_TRIANGULARMATRIX_H
Unknown
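A brief, purely illustrative usage sketch of the public triangularView() API whose machinery is implemented in the file above; the matrix entries are arbitrary.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d A;
  A << 4, 1, 2,
       7, 3, 1,
       9, 8, 2;            // entries below the diagonal are ignored by the Upper view
  Eigen::Vector3d b(1, 2, 3);

  // Solve U x = b using only the upper triangular part (back substitution).
  Eigen::Vector3d x = A.triangularView<Eigen::Upper>().solve(b);
  std::cout << x.transpose() << "\n";

  // The determinant of a triangular view is the product of its diagonal.
  std::cout << A.triangularView<Eigen::Upper>().determinant() << "\n"; // 24
  return 0;
}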
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/BooleanRedux.h
.h
4,249
165
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ALLANDANY_H #define EIGEN_ALLANDANY_H namespace Eigen { namespace internal { template<typename Derived, int UnrollCount> struct all_unroller { typedef typename Derived::ExpressionTraits Traits; enum { col = (UnrollCount-1) / Traits::RowsAtCompileTime, row = (UnrollCount-1) % Traits::RowsAtCompileTime }; static inline bool run(const Derived &mat) { return all_unroller<Derived, UnrollCount-1>::run(mat) && mat.coeff(row, col); } }; template<typename Derived> struct all_unroller<Derived, 0> { static inline bool run(const Derived &/*mat*/) { return true; } }; template<typename Derived> struct all_unroller<Derived, Dynamic> { static inline bool run(const Derived &) { return false; } }; template<typename Derived, int UnrollCount> struct any_unroller { typedef typename Derived::ExpressionTraits Traits; enum { col = (UnrollCount-1) / Traits::RowsAtCompileTime, row = (UnrollCount-1) % Traits::RowsAtCompileTime }; static inline bool run(const Derived &mat) { return any_unroller<Derived, UnrollCount-1>::run(mat) || mat.coeff(row, col); } }; template<typename Derived> struct any_unroller<Derived, 0> { static inline bool run(const Derived & /*mat*/) { return false; } }; template<typename Derived> struct any_unroller<Derived, Dynamic> { static inline bool run(const Derived &) { return false; } }; } // end namespace internal /** \returns true if all coefficients are true * * Example: \include MatrixBase_all.cpp * Output: \verbinclude MatrixBase_all.out * * \sa any(), Cwise::operator<() */ template<typename Derived> inline bool DenseBase<Derived>::all() const { typedef internal::evaluator<Derived> Evaluator; enum { unroll = SizeAtCompileTime != Dynamic && SizeAtCompileTime * (Evaluator::CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT }; Evaluator evaluator(derived()); if(unroll) return internal::all_unroller<Evaluator, unroll ? int(SizeAtCompileTime) : Dynamic>::run(evaluator); else { for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) if (!evaluator.coeff(i, j)) return false; return true; } } /** \returns true if at least one coefficient is true * * \sa all() */ template<typename Derived> inline bool DenseBase<Derived>::any() const { typedef internal::evaluator<Derived> Evaluator; enum { unroll = SizeAtCompileTime != Dynamic && SizeAtCompileTime * (Evaluator::CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT }; Evaluator evaluator(derived()); if(unroll) return internal::any_unroller<Evaluator, unroll ? int(SizeAtCompileTime) : Dynamic>::run(evaluator); else { for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) if (evaluator.coeff(i, j)) return true; return false; } } /** \returns the number of coefficients which evaluate to true * * \sa all(), any() */ template<typename Derived> inline Eigen::Index DenseBase<Derived>::count() const { return derived().template cast<bool>().template cast<Index>().sum(); } /** \returns true is \c *this contains at least one Not A Number (NaN). 
* * \sa allFinite() */ template<typename Derived> inline bool DenseBase<Derived>::hasNaN() const { #if EIGEN_COMP_MSVC || (defined __FAST_MATH__) return derived().array().isNaN().any(); #else return !((derived().array()==derived().array()).all()); #endif } /** \returns true if \c *this contains only finite numbers, i.e., no NaN and no +/-INF values. * * \sa hasNaN() */ template<typename Derived> inline bool DenseBase<Derived>::allFinite() const { #if EIGEN_COMP_MSVC || (defined __FAST_MATH__) return derived().array().isFinite().all(); #else return !((derived()-derived()).hasNaN()); #endif } } // end namespace Eigen #endif // EIGEN_ALLANDANY_H
Unknown
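A small sketch (illustrative only, arbitrary data) exercising the boolean reductions implemented above: all(), any(), count(), hasNaN(), and allFinite().

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::ArrayXd v(4);
  v << 1.0, -2.0, 3.0, 4.0;

  std::cout << (v > 0).all()   << "\n"; // 0: one coefficient is negative
  std::cout << (v > 0).any()   << "\n"; // 1: at least one coefficient is positive
  std::cout << (v > 0).count() << "\n"; // 3 positive coefficients
  std::cout << v.hasNaN() << " " << v.allFinite() << "\n"; // 0 1
  return 0;
}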
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Product.h
.h
7,235
187
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PRODUCT_H #define EIGEN_PRODUCT_H namespace Eigen { template<typename Lhs, typename Rhs, int Option, typename StorageKind> class ProductImpl; namespace internal { template<typename Lhs, typename Rhs, int Option> struct traits<Product<Lhs, Rhs, Option> > { typedef typename remove_all<Lhs>::type LhsCleaned; typedef typename remove_all<Rhs>::type RhsCleaned; typedef traits<LhsCleaned> LhsTraits; typedef traits<RhsCleaned> RhsTraits; typedef MatrixXpr XprKind; typedef typename ScalarBinaryOpTraits<typename traits<LhsCleaned>::Scalar, typename traits<RhsCleaned>::Scalar>::ReturnType Scalar; typedef typename product_promote_storage_type<typename LhsTraits::StorageKind, typename RhsTraits::StorageKind, internal::product_type<Lhs,Rhs>::ret>::ret StorageKind; typedef typename promote_index_type<typename LhsTraits::StorageIndex, typename RhsTraits::StorageIndex>::type StorageIndex; enum { RowsAtCompileTime = LhsTraits::RowsAtCompileTime, ColsAtCompileTime = RhsTraits::ColsAtCompileTime, MaxRowsAtCompileTime = LhsTraits::MaxRowsAtCompileTime, MaxColsAtCompileTime = RhsTraits::MaxColsAtCompileTime, // FIXME: only needed by GeneralMatrixMatrixTriangular InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(LhsTraits::ColsAtCompileTime, RhsTraits::RowsAtCompileTime), // The storage order is somewhat arbitrary here. The correct one will be determined through the evaluator. Flags = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? RowMajorBit : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0 : ( ((LhsTraits::Flags&NoPreferredStorageOrderBit) && (RhsTraits::Flags&RowMajorBit)) || ((RhsTraits::Flags&NoPreferredStorageOrderBit) && (LhsTraits::Flags&RowMajorBit)) ) ? RowMajorBit : NoPreferredStorageOrderBit }; }; } // end namespace internal /** \class Product * \ingroup Core_Module * * \brief Expression of the product of two arbitrary matrices or vectors * * \tparam _Lhs the type of the left-hand side expression * \tparam _Rhs the type of the right-hand side expression * * This class represents an expression of the product of two arbitrary matrices. 
* * The other template parameters are: * \tparam Option can be DefaultProduct, AliasFreeProduct, or LazyProduct * */ template<typename _Lhs, typename _Rhs, int Option> class Product : public ProductImpl<_Lhs,_Rhs,Option, typename internal::product_promote_storage_type<typename internal::traits<_Lhs>::StorageKind, typename internal::traits<_Rhs>::StorageKind, internal::product_type<_Lhs,_Rhs>::ret>::ret> { public: typedef _Lhs Lhs; typedef _Rhs Rhs; typedef typename ProductImpl< Lhs, Rhs, Option, typename internal::product_promote_storage_type<typename internal::traits<Lhs>::StorageKind, typename internal::traits<Rhs>::StorageKind, internal::product_type<Lhs,Rhs>::ret>::ret>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(Product) typedef typename internal::ref_selector<Lhs>::type LhsNested; typedef typename internal::ref_selector<Rhs>::type RhsNested; typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned; typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned; EIGEN_DEVICE_FUNC Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) { eigen_assert(lhs.cols() == rhs.rows() && "invalid matrix product" && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); } EIGEN_DEVICE_FUNC const LhsNestedCleaned& lhs() const { return m_lhs; } EIGEN_DEVICE_FUNC const RhsNestedCleaned& rhs() const { return m_rhs; } protected: LhsNested m_lhs; RhsNested m_rhs; }; namespace internal { template<typename Lhs, typename Rhs, int Option, int ProductTag = internal::product_type<Lhs,Rhs>::ret> class dense_product_base : public internal::dense_xpr_base<Product<Lhs,Rhs,Option> >::type {}; /** Convertion to scalar for inner-products */ template<typename Lhs, typename Rhs, int Option> class dense_product_base<Lhs, Rhs, Option, InnerProduct> : public internal::dense_xpr_base<Product<Lhs,Rhs,Option> >::type { typedef Product<Lhs,Rhs,Option> ProductXpr; typedef typename internal::dense_xpr_base<ProductXpr>::type Base; public: using Base::derived; typedef typename Base::Scalar Scalar; EIGEN_STRONG_INLINE operator const Scalar() const { return internal::evaluator<ProductXpr>(derived()).coeff(0,0); } }; } // namespace internal // Generic API dispatcher template<typename Lhs, typename Rhs, int Option, typename StorageKind> class ProductImpl : public internal::generic_xpr_base<Product<Lhs,Rhs,Option>, MatrixXpr, StorageKind>::type { public: typedef typename internal::generic_xpr_base<Product<Lhs,Rhs,Option>, MatrixXpr, StorageKind>::type Base; }; template<typename Lhs, typename Rhs, int Option> class ProductImpl<Lhs,Rhs,Option,Dense> : public internal::dense_product_base<Lhs,Rhs,Option> { typedef Product<Lhs, Rhs, Option> Derived; public: typedef typename internal::dense_product_base<Lhs, Rhs, Option> Base; EIGEN_DENSE_PUBLIC_INTERFACE(Derived) protected: enum { IsOneByOne = (RowsAtCompileTime == 1 || RowsAtCompileTime == Dynamic) && (ColsAtCompileTime == 1 || ColsAtCompileTime == Dynamic), EnableCoeff = IsOneByOne || Option==LazyProduct }; public: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar coeff(Index row, Index col) const { EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS); eigen_assert( (Option==LazyProduct) || (this->rows() == 1 && this->cols() == 1) ); return internal::evaluator<Derived>(derived()).coeff(row,col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE 
Scalar coeff(Index i) const { EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS); eigen_assert( (Option==LazyProduct) || (this->rows() == 1 && this->cols() == 1) ); return internal::evaluator<Derived>(derived()).coeff(i); } }; } // end namespace Eigen #endif // EIGEN_PRODUCT_H
Unknown
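The record above dumps Eigen/src/Core/Product.h, the expression type returned by matrix multiplication. The snippet below is an illustrative sketch, not part of the dumped file: it shows a Product being built and evaluated, an inner product converting straight to a scalar (the dense_product_base<..., InnerProduct> conversion), and coeff() being used only on a lazy product, mirroring the EnableCoeff guard above.

// Hypothetical usage sketch for Eigen's Product expression (not in the dumped file).
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 4);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(4, 2);

  // A * B builds a Product<...> expression; assigning it triggers evaluation.
  Eigen::MatrixXd C = A * B;

  // A 1x1 product (inner product) converts implicitly to a scalar.
  Eigen::VectorXd u = Eigen::VectorXd::Random(5), v = Eigen::VectorXd::Random(5);
  double d = u.transpose() * v;

  // lazyProduct() keeps the coefficient-wise (LazyProduct) form,
  // for which coeff(row, col) is allowed.
  double c01 = A.lazyProduct(B).coeff(0, 1);

  std::cout << C(0, 0) << " " << d << " " << c01 << "\n";
}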
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/ArrayBase.h
.h
8,237
227
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ARRAYBASE_H #define EIGEN_ARRAYBASE_H namespace Eigen { template<typename ExpressionType> class MatrixWrapper; /** \class ArrayBase * \ingroup Core_Module * * \brief Base class for all 1D and 2D array, and related expressions * * An array is similar to a dense vector or matrix. While matrices are mathematical * objects with well defined linear algebra operators, an array is just a collection * of scalar values arranged in a one or two dimensionnal fashion. As the main consequence, * all operations applied to an array are performed coefficient wise. Furthermore, * arrays support scalar math functions of the c++ standard library (e.g., std::sin(x)), and convenient * constructors allowing to easily write generic code working for both scalar values * and arrays. * * This class is the base that is inherited by all array expression types. * * \tparam Derived is the derived type, e.g., an array or an expression type. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_ARRAYBASE_PLUGIN. * * \sa class MatrixBase, \ref TopicClassHierarchy */ template<typename Derived> class ArrayBase : public DenseBase<Derived> { public: #ifndef EIGEN_PARSED_BY_DOXYGEN /** The base class for a given storage type. */ typedef ArrayBase StorageBaseType; typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl; typedef typename internal::traits<Derived>::StorageKind StorageKind; typedef typename internal::traits<Derived>::Scalar Scalar; typedef typename internal::packet_traits<Scalar>::type PacketScalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef DenseBase<Derived> Base; using Base::RowsAtCompileTime; using Base::ColsAtCompileTime; using Base::SizeAtCompileTime; using Base::MaxRowsAtCompileTime; using Base::MaxColsAtCompileTime; using Base::MaxSizeAtCompileTime; using Base::IsVectorAtCompileTime; using Base::Flags; using Base::derived; using Base::const_cast_derived; using Base::rows; using Base::cols; using Base::size; using Base::coeff; using Base::coeffRef; using Base::lazyAssign; using Base::operator=; using Base::operator+=; using Base::operator-=; using Base::operator*=; using Base::operator/=; typedef typename Base::CoeffReturnType CoeffReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN #ifndef EIGEN_PARSED_BY_DOXYGEN typedef typename Base::PlainObject PlainObject; /** \internal Represents a matrix with all coefficients equal to one another*/ typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,PlainObject> ConstantReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::ArrayBase #define EIGEN_DOC_UNARY_ADDONS(X,Y) # include "../plugins/CommonCwiseUnaryOps.h" # include "../plugins/MatrixCwiseUnaryOps.h" # include "../plugins/ArrayCwiseUnaryOps.h" # include "../plugins/CommonCwiseBinaryOps.h" # include "../plugins/MatrixCwiseBinaryOps.h" # include "../plugins/ArrayCwiseBinaryOps.h" # ifdef EIGEN_ARRAYBASE_PLUGIN # include EIGEN_ARRAYBASE_PLUGIN # endif #undef EIGEN_CURRENT_STORAGE_BASE_CLASS #undef EIGEN_DOC_UNARY_ADDONS /** Special case of the template 
operator=, in order to prevent the compiler * from generating a default operator= (issue hit with g++ 4.1) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const ArrayBase& other) { internal::call_assignment(derived(), other.derived()); return derived(); } /** Set all the entries to \a value. * \sa DenseBase::setConstant(), DenseBase::fill() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Scalar &value) { Base::setConstant(value); return derived(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const Scalar& scalar); EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const Scalar& scalar); template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const ArrayBase<OtherDerived>& other); template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const ArrayBase<OtherDerived>& other); template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*=(const ArrayBase<OtherDerived>& other); template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator/=(const ArrayBase<OtherDerived>& other); public: EIGEN_DEVICE_FUNC ArrayBase<Derived>& array() { return *this; } EIGEN_DEVICE_FUNC const ArrayBase<Derived>& array() const { return *this; } /** \returns an \link Eigen::MatrixBase Matrix \endlink expression of this array * \sa MatrixBase::array() */ EIGEN_DEVICE_FUNC MatrixWrapper<Derived> matrix() { return MatrixWrapper<Derived>(derived()); } EIGEN_DEVICE_FUNC const MatrixWrapper<const Derived> matrix() const { return MatrixWrapper<const Derived>(derived()); } // template<typename Dest> // inline void evalTo(Dest& dst) const { dst = matrix(); } protected: EIGEN_DEFAULT_COPY_CONSTRUCTOR(ArrayBase) EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(ArrayBase) private: explicit ArrayBase(Index); ArrayBase(Index,Index); template<typename OtherDerived> explicit ArrayBase(const ArrayBase<OtherDerived>&); protected: // mixing arrays and matrices is not legal template<typename OtherDerived> Derived& operator+=(const MatrixBase<OtherDerived>& ) {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} // mixing arrays and matrices is not legal template<typename OtherDerived> Derived& operator-=(const MatrixBase<OtherDerived>& ) {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} }; /** replaces \c *this by \c *this - \a other. * * \returns a reference to \c *this */ template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & ArrayBase<Derived>::operator-=(const ArrayBase<OtherDerived> &other) { call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>()); return derived(); } /** replaces \c *this by \c *this + \a other. * * \returns a reference to \c *this */ template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & ArrayBase<Derived>::operator+=(const ArrayBase<OtherDerived>& other) { call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>()); return derived(); } /** replaces \c *this by \c *this * \a other coefficient wise. 
* * \returns a reference to \c *this */ template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & ArrayBase<Derived>::operator*=(const ArrayBase<OtherDerived>& other) { call_assignment(derived(), other.derived(), internal::mul_assign_op<Scalar,typename OtherDerived::Scalar>()); return derived(); } /** replaces \c *this by \c *this / \a other coefficient wise. * * \returns a reference to \c *this */ template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & ArrayBase<Derived>::operator/=(const ArrayBase<OtherDerived>& other) { call_assignment(derived(), other.derived(), internal::div_assign_op<Scalar,typename OtherDerived::Scalar>()); return derived(); } } // end namespace Eigen #endif // EIGEN_ARRAYBASE_H
Unknown
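The ArrayBase.h record above documents the coefficient-wise array API. As a hedged usage sketch (not part of the dumped file), the snippet below shows coefficient-wise operators, scalar broadcasting, std-style math functions, and the .matrix()/.array() bridge described in the class comment.

// Hypothetical usage sketch for ArrayBase (not in the dumped file).
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::ArrayXXd a = Eigen::ArrayXXd::Random(2, 3);
  Eigen::ArrayXXd b = Eigen::ArrayXXd::Random(2, 3);

  // All array operations are coefficient-wise, including * and /.
  Eigen::ArrayXXd prod = a * b;
  Eigen::ArrayXXd shifted = a + 1.0;      // scalars broadcast
  Eigen::ArrayXXd s = a.sin() + b.abs();  // std-like math functions

  // .matrix() reinterprets an array as a matrix (and .array() the reverse),
  // so linear-algebra products remain available when needed.
  Eigen::MatrixXd m = a.matrix() * b.matrix().transpose();

  std::cout << prod.sum() << " " << shifted.mean() << " " << s.maxCoeff()
            << " " << m.rows() << "x" << m.cols() << "\n";
}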
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Inverse.h
.h
3,519
119
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_INVERSE_H #define EIGEN_INVERSE_H namespace Eigen { template<typename XprType,typename StorageKind> class InverseImpl; namespace internal { template<typename XprType> struct traits<Inverse<XprType> > : traits<typename XprType::PlainObject> { typedef typename XprType::PlainObject PlainObject; typedef traits<PlainObject> BaseTraits; enum { Flags = BaseTraits::Flags & RowMajorBit }; }; } // end namespace internal /** \class Inverse * * \brief Expression of the inverse of another expression * * \tparam XprType the type of the expression we are taking the inverse * * This class represents an abstract expression of A.inverse() * and most of the time this is the only way it is used. * */ template<typename XprType> class Inverse : public InverseImpl<XprType,typename internal::traits<XprType>::StorageKind> { public: typedef typename XprType::StorageIndex StorageIndex; typedef typename XprType::PlainObject PlainObject; typedef typename XprType::Scalar Scalar; typedef typename internal::ref_selector<XprType>::type XprTypeNested; typedef typename internal::remove_all<XprTypeNested>::type XprTypeNestedCleaned; typedef typename internal::ref_selector<Inverse>::type Nested; typedef typename internal::remove_all<XprType>::type NestedExpression; explicit EIGEN_DEVICE_FUNC Inverse(const XprType &xpr) : m_xpr(xpr) {} EIGEN_DEVICE_FUNC Index rows() const { return m_xpr.rows(); } EIGEN_DEVICE_FUNC Index cols() const { return m_xpr.cols(); } EIGEN_DEVICE_FUNC const XprTypeNestedCleaned& nestedExpression() const { return m_xpr; } protected: XprTypeNested m_xpr; }; // Generic API dispatcher template<typename XprType, typename StorageKind> class InverseImpl : public internal::generic_xpr_base<Inverse<XprType> >::type { public: typedef typename internal::generic_xpr_base<Inverse<XprType> >::type Base; typedef typename XprType::Scalar Scalar; private: Scalar coeff(Index row, Index col) const; Scalar coeff(Index i) const; }; namespace internal { /** \internal * \brief Default evaluator for Inverse expression. * * This default evaluator for Inverse expression simply evaluate the inverse into a temporary * by a call to internal::call_assignment_no_alias. * Therefore, inverse implementers only have to specialize Assignment<Dst,Inverse<...>, ...> for * there own nested expression. * * \sa class Inverse */ template<typename ArgType> struct unary_evaluator<Inverse<ArgType> > : public evaluator<typename Inverse<ArgType>::PlainObject> { typedef Inverse<ArgType> InverseType; typedef typename InverseType::PlainObject PlainObject; typedef evaluator<PlainObject> Base; enum { Flags = Base::Flags | EvalBeforeNestingBit }; unary_evaluator(const InverseType& inv_xpr) : m_result(inv_xpr.rows(), inv_xpr.cols()) { ::new (static_cast<Base*>(this)) Base(m_result); internal::call_assignment_no_alias(m_result, inv_xpr); } protected: PlainObject m_result; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_INVERSE_H
Unknown
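Inverse.h above defines the A.inverse() expression, whose default evaluator materializes the result into a temporary. A hedged usage sketch (not part of the dumped file):

// Hypothetical usage sketch for the Inverse expression (not in the dumped file).
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d A;
  A << 4, 1, 0,
       1, 3, 1,
       0, 1, 2;

  // A.inverse() builds an Inverse<...> expression; the unary_evaluator shown
  // above evaluates it into a temporary when it is assigned or used.
  Eigen::Matrix3d Ainv = A.inverse();

  // Typical pattern: prefer solving over forming an explicit inverse.
  Eigen::Vector3d b(1, 2, 3);
  Eigen::Vector3d x = A.partialPivLu().solve(b);

  std::cout << (A * Ainv).isIdentity(1e-12) << " " << (A * x - b).norm() << "\n";
}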
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/SolveTriangular.h
.h
9,208
236
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SOLVETRIANGULAR_H #define EIGEN_SOLVETRIANGULAR_H namespace Eigen { namespace internal { // Forward declarations: // The following two routines are implemented in the products/TriangularSolver*.h files template<typename LhsScalar, typename RhsScalar, typename Index, int Side, int Mode, bool Conjugate, int StorageOrder> struct triangular_solve_vector; template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder, int OtherStorageOrder, int OtherInnerStride> struct triangular_solve_matrix; // small helper struct extracting some traits on the underlying solver operation template<typename Lhs, typename Rhs, int Side> class trsolve_traits { private: enum { RhsIsVectorAtCompileTime = (Side==OnTheLeft ? Rhs::ColsAtCompileTime : Rhs::RowsAtCompileTime)==1 }; public: enum { Unrolling = (RhsIsVectorAtCompileTime && Rhs::SizeAtCompileTime != Dynamic && Rhs::SizeAtCompileTime <= 8) ? CompleteUnrolling : NoUnrolling, RhsVectors = RhsIsVectorAtCompileTime ? 1 : Dynamic }; }; template<typename Lhs, typename Rhs, int Side, // can be OnTheLeft/OnTheRight int Mode, // can be Upper/Lower | UnitDiag int Unrolling = trsolve_traits<Lhs,Rhs,Side>::Unrolling, int RhsVectors = trsolve_traits<Lhs,Rhs,Side>::RhsVectors > struct triangular_solver_selector; template<typename Lhs, typename Rhs, int Side, int Mode> struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,1> { typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; typedef blas_traits<Lhs> LhsProductTraits; typedef typename LhsProductTraits::ExtractType ActualLhsType; typedef Map<Matrix<RhsScalar,Dynamic,1>, Aligned> MappedRhs; static void run(const Lhs& lhs, Rhs& rhs) { ActualLhsType actualLhs = LhsProductTraits::extract(lhs); // FIXME find a way to allow an inner stride if packet_traits<Scalar>::size==1 bool useRhsDirectly = Rhs::InnerStrideAtCompileTime==1 || rhs.innerStride()==1; ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhs,rhs.size(), (useRhsDirectly ? rhs.data() : 0)); if(!useRhsDirectly) MappedRhs(actualRhs,rhs.size()) = rhs; triangular_solve_vector<LhsScalar, RhsScalar, Index, Side, Mode, LhsProductTraits::NeedToConjugate, (int(Lhs::Flags) & RowMajorBit) ? RowMajor : ColMajor> ::run(actualLhs.cols(), actualLhs.data(), actualLhs.outerStride(), actualRhs); if(!useRhsDirectly) rhs = MappedRhs(actualRhs, rhs.size()); } }; // the rhs is a matrix template<typename Lhs, typename Rhs, int Side, int Mode> struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,Dynamic> { typedef typename Rhs::Scalar Scalar; typedef blas_traits<Lhs> LhsProductTraits; typedef typename LhsProductTraits::DirectLinearAccessType ActualLhsType; static void run(const Lhs& lhs, Rhs& rhs) { typename internal::add_const_on_value_type<ActualLhsType>::type actualLhs = LhsProductTraits::extract(lhs); const Index size = lhs.rows(); const Index othersize = Side==OnTheLeft? rhs.cols() : rhs.rows(); typedef internal::gemm_blocking_space<(Rhs::Flags&RowMajorBit) ? 
RowMajor : ColMajor,Scalar,Scalar, Rhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxRowsAtCompileTime,4> BlockingType; BlockingType blocking(rhs.rows(), rhs.cols(), size, 1, false); triangular_solve_matrix<Scalar,Index,Side,Mode,LhsProductTraits::NeedToConjugate,(int(Lhs::Flags) & RowMajorBit) ? RowMajor : ColMajor, (Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor, Rhs::InnerStrideAtCompileTime> ::run(size, othersize, &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.innerStride(), rhs.outerStride(), blocking); } }; /*************************************************************************** * meta-unrolling implementation ***************************************************************************/ template<typename Lhs, typename Rhs, int Mode, int LoopIndex, int Size, bool Stop = LoopIndex==Size> struct triangular_solver_unroller; template<typename Lhs, typename Rhs, int Mode, int LoopIndex, int Size> struct triangular_solver_unroller<Lhs,Rhs,Mode,LoopIndex,Size,false> { enum { IsLower = ((Mode&Lower)==Lower), DiagIndex = IsLower ? LoopIndex : Size - LoopIndex - 1, StartIndex = IsLower ? 0 : DiagIndex+1 }; static void run(const Lhs& lhs, Rhs& rhs) { if (LoopIndex>0) rhs.coeffRef(DiagIndex) -= lhs.row(DiagIndex).template segment<LoopIndex>(StartIndex).transpose() .cwiseProduct(rhs.template segment<LoopIndex>(StartIndex)).sum(); if(!(Mode & UnitDiag)) rhs.coeffRef(DiagIndex) /= lhs.coeff(DiagIndex,DiagIndex); triangular_solver_unroller<Lhs,Rhs,Mode,LoopIndex+1,Size>::run(lhs,rhs); } }; template<typename Lhs, typename Rhs, int Mode, int LoopIndex, int Size> struct triangular_solver_unroller<Lhs,Rhs,Mode,LoopIndex,Size,true> { static void run(const Lhs&, Rhs&) {} }; template<typename Lhs, typename Rhs, int Mode> struct triangular_solver_selector<Lhs,Rhs,OnTheLeft,Mode,CompleteUnrolling,1> { static void run(const Lhs& lhs, Rhs& rhs) { triangular_solver_unroller<Lhs,Rhs,Mode,0,Rhs::SizeAtCompileTime>::run(lhs,rhs); } }; template<typename Lhs, typename Rhs, int Mode> struct triangular_solver_selector<Lhs,Rhs,OnTheRight,Mode,CompleteUnrolling,1> { static void run(const Lhs& lhs, Rhs& rhs) { Transpose<const Lhs> trLhs(lhs); Transpose<Rhs> trRhs(rhs); triangular_solver_unroller<Transpose<const Lhs>,Transpose<Rhs>, ((Mode&Upper)==Upper ? Lower : Upper) | (Mode&UnitDiag), 0,Rhs::SizeAtCompileTime>::run(trLhs,trRhs); } }; } // end namespace internal /*************************************************************************** * TriangularView methods ***************************************************************************/ #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename MatrixType, unsigned int Mode> template<int Side, typename OtherDerived> void TriangularViewImpl<MatrixType,Mode,Dense>::solveInPlace(const MatrixBase<OtherDerived>& _other) const { OtherDerived& other = _other.const_cast_derived(); eigen_assert( derived().cols() == derived().rows() && ((Side==OnTheLeft && derived().cols() == other.rows()) || (Side==OnTheRight && derived().cols() == other.cols())) ); eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower))); // If solving for a 0x0 matrix, nothing to do, simply return. 
if (derived().cols() == 0) return; enum { copy = (internal::traits<OtherDerived>::Flags & RowMajorBit) && OtherDerived::IsVectorAtCompileTime && OtherDerived::SizeAtCompileTime!=1}; typedef typename internal::conditional<copy, typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy; OtherCopy otherCopy(other); internal::triangular_solver_selector<MatrixType, typename internal::remove_reference<OtherCopy>::type, Side, Mode>::run(derived().nestedExpression(), otherCopy); if (copy) other = otherCopy; } template<typename Derived, unsigned int Mode> template<int Side, typename Other> const internal::triangular_solve_retval<Side,TriangularView<Derived,Mode>,Other> TriangularViewImpl<Derived,Mode,Dense>::solve(const MatrixBase<Other>& other) const { return internal::triangular_solve_retval<Side,TriangularViewType,Other>(derived(), other.derived()); } #endif namespace internal { template<int Side, typename TriangularType, typename Rhs> struct traits<triangular_solve_retval<Side, TriangularType, Rhs> > { typedef typename internal::plain_matrix_type_column_major<Rhs>::type ReturnType; }; template<int Side, typename TriangularType, typename Rhs> struct triangular_solve_retval : public ReturnByValue<triangular_solve_retval<Side, TriangularType, Rhs> > { typedef typename remove_all<typename Rhs::Nested>::type RhsNestedCleaned; typedef ReturnByValue<triangular_solve_retval> Base; triangular_solve_retval(const TriangularType& tri, const Rhs& rhs) : m_triangularMatrix(tri), m_rhs(rhs) {} inline Index rows() const { return m_rhs.rows(); } inline Index cols() const { return m_rhs.cols(); } template<typename Dest> inline void evalTo(Dest& dst) const { if(!is_same_dense(dst,m_rhs)) dst = m_rhs; m_triangularMatrix.template solveInPlace<Side>(dst); } protected: const TriangularType& m_triangularMatrix; typename Rhs::Nested m_rhs; }; } // namespace internal } // end namespace Eigen #endif // EIGEN_SOLVETRIANGULAR_H
Unknown
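SolveTriangular.h above implements the solve()/solveInPlace() entry points of TriangularView. A hedged usage sketch (not part of the dumped file) exercising both:

// Hypothetical usage sketch for the triangular solvers (not in the dumped file).
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d A;
  A << 2, 0, 0,
       1, 3, 0,
       4, 5, 6;
  Eigen::Vector3d b(2, 5, 32);

  // solve() returns x such that A.triangularView<Lower>() * x = b ...
  Eigen::Vector3d x = A.triangularView<Eigen::Lower>().solve(b);

  // ... while solveInPlace() overwrites the right-hand side; this is the
  // method that dispatches to triangular_solver_selector above.
  Eigen::Vector3d y = b;
  A.triangularView<Eigen::Lower>().solveInPlace(y);

  std::cout << (A.triangularView<Eigen::Lower>() * x - b).norm() << " "
            << (x - y).norm() << "\n";
}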
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/SolverBase.h
.h
4,365
131
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SOLVERBASE_H #define EIGEN_SOLVERBASE_H namespace Eigen { namespace internal { } // end namespace internal /** \class SolverBase * \brief A base class for matrix decomposition and solvers * * \tparam Derived the actual type of the decomposition/solver. * * Any matrix decomposition inheriting this base class provide the following API: * * \code * MatrixType A, b, x; * DecompositionType dec(A); * x = dec.solve(b); // solve A * x = b * x = dec.transpose().solve(b); // solve A^T * x = b * x = dec.adjoint().solve(b); // solve A' * x = b * \endcode * * \warning Currently, any other usage of transpose() and adjoint() are not supported and will produce compilation errors. * * \sa class PartialPivLU, class FullPivLU */ template<typename Derived> class SolverBase : public EigenBase<Derived> { public: typedef EigenBase<Derived> Base; typedef typename internal::traits<Derived>::Scalar Scalar; typedef Scalar CoeffReturnType; enum { RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime, ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime, SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime, internal::traits<Derived>::ColsAtCompileTime>::ret), MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime, MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime, MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime, internal::traits<Derived>::MaxColsAtCompileTime>::ret), IsVectorAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime == 1 || internal::traits<Derived>::MaxColsAtCompileTime == 1 }; /** Default constructor */ SolverBase() {} ~SolverBase() {} using Base::derived; /** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A. */ template<typename Rhs> inline const Solve<Derived, Rhs> solve(const MatrixBase<Rhs>& b) const { eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b"); return Solve<Derived, Rhs>(derived(), b.derived()); } /** \internal the return type of transpose() */ typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType; /** \returns an expression of the transposed of the factored matrix. * * A typical usage is to solve for the transposed problem A^T x = b: * \code x = dec.transpose().solve(b); \endcode * * \sa adjoint(), solve() */ inline ConstTransposeReturnType transpose() const { return ConstTransposeReturnType(derived()); } /** \internal the return type of adjoint() */ typedef typename internal::conditional<NumTraits<Scalar>::IsComplex, CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, ConstTransposeReturnType>, ConstTransposeReturnType >::type AdjointReturnType; /** \returns an expression of the adjoint of the factored matrix * * A typical usage is to solve for the adjoint problem A' x = b: * \code x = dec.adjoint().solve(b); \endcode * * For real scalar types, this function is equivalent to transpose(). 
* * \sa transpose(), solve() */ inline AdjointReturnType adjoint() const { return AdjointReturnType(derived().transpose()); } protected: }; namespace internal { template<typename Derived> struct generic_xpr_base<Derived, MatrixXpr, SolverStorage> { typedef SolverBase<Derived> type; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_SOLVERBASE_H
Unknown
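SolverBase.h above defines the common solve()/transpose()/adjoint() API of matrix decompositions. The sketch below is illustrative only and assumes PartialPivLU as the concrete decomposition, which derives from SolverBase in this Eigen version per the class documentation:

// Hypothetical usage sketch for the SolverBase API (not in the dumped file).
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
  Eigen::VectorXd b = Eigen::VectorXd::Random(4);

  // A decomposition deriving from SolverBase exposes solve(), plus the
  // transpose()/adjoint() solves documented in the class comment.
  Eigen::PartialPivLU<Eigen::MatrixXd> lu(A);
  Eigen::VectorXd x1 = lu.solve(b);              // A   x = b
  Eigen::VectorXd x2 = lu.transpose().solve(b);  // A^T x = b
  Eigen::VectorXd x3 = lu.adjoint().solve(b);    // A'  x = b (== x2 for real scalars)

  std::cout << (A * x1 - b).norm() << " "
            << (A.transpose() * x2 - b).norm() << " "
            << (x2 - x3).norm() << "\n";
}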
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/ForceAlignedAccess.h
.h
4,769
147
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_FORCEALIGNEDACCESS_H #define EIGEN_FORCEALIGNEDACCESS_H namespace Eigen { /** \class ForceAlignedAccess * \ingroup Core_Module * * \brief Enforce aligned packet loads and stores regardless of what is requested * * \param ExpressionType the type of the object of which we are forcing aligned packet access * * This class is the return type of MatrixBase::forceAlignedAccess() * and most of the time this is the only way it is used. * * \sa MatrixBase::forceAlignedAccess() */ namespace internal { template<typename ExpressionType> struct traits<ForceAlignedAccess<ExpressionType> > : public traits<ExpressionType> {}; } template<typename ExpressionType> class ForceAlignedAccess : public internal::dense_xpr_base< ForceAlignedAccess<ExpressionType> >::type { public: typedef typename internal::dense_xpr_base<ForceAlignedAccess>::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(ForceAlignedAccess) EIGEN_DEVICE_FUNC explicit inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {} EIGEN_DEVICE_FUNC inline Index rows() const { return m_expression.rows(); } EIGEN_DEVICE_FUNC inline Index cols() const { return m_expression.cols(); } EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_expression.outerStride(); } EIGEN_DEVICE_FUNC inline Index innerStride() const { return m_expression.innerStride(); } EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index row, Index col) const { return m_expression.coeff(row, col); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col) { return m_expression.const_cast_derived().coeffRef(row, col); } EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const { return m_expression.coeff(index); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index) { return m_expression.const_cast_derived().coeffRef(index); } template<int LoadMode> inline const PacketScalar packet(Index row, Index col) const { return m_expression.template packet<Aligned>(row, col); } template<int LoadMode> inline void writePacket(Index row, Index col, const PacketScalar& x) { m_expression.const_cast_derived().template writePacket<Aligned>(row, col, x); } template<int LoadMode> inline const PacketScalar packet(Index index) const { return m_expression.template packet<Aligned>(index); } template<int LoadMode> inline void writePacket(Index index, const PacketScalar& x) { m_expression.const_cast_derived().template writePacket<Aligned>(index, x); } EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; } protected: const ExpressionType& m_expression; private: ForceAlignedAccess& operator=(const ForceAlignedAccess&); }; /** \returns an expression of *this with forced aligned access * \sa forceAlignedAccessIf(),class ForceAlignedAccess */ template<typename Derived> inline const ForceAlignedAccess<Derived> MatrixBase<Derived>::forceAlignedAccess() const { return ForceAlignedAccess<Derived>(derived()); } /** \returns an expression of *this with forced aligned access * \sa forceAlignedAccessIf(), class ForceAlignedAccess */ template<typename Derived> inline ForceAlignedAccess<Derived> MatrixBase<Derived>::forceAlignedAccess() { return 
ForceAlignedAccess<Derived>(derived()); } /** \returns an expression of *this with forced aligned access if \a Enable is true. * \sa forceAlignedAccess(), class ForceAlignedAccess */ template<typename Derived> template<bool Enable> inline typename internal::add_const_on_value_type<typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type>::type MatrixBase<Derived>::forceAlignedAccessIf() const { return derived(); // FIXME This should not work but apparently is never used } /** \returns an expression of *this with forced aligned access if \a Enable is true. * \sa forceAlignedAccess(), class ForceAlignedAccess */ template<typename Derived> template<bool Enable> inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type MatrixBase<Derived>::forceAlignedAccessIf() { return derived(); // FIXME This should not work but apparently is never used } } // end namespace Eigen #endif // EIGEN_FORCEALIGNEDACCESS_H
Unknown
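ForceAlignedAccess.h above defines a thin wrapper that forces aligned packet loads and stores. The minimal sketch below is illustrative only and merely exercises the accessors declared directly on the wrapper; whether forcing aligned access is valid depends entirely on the actual alignment of the wrapped data.

// Hypothetical, minimal sketch for ForceAlignedAccess (not in the dumped file).
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix4f m = Eigen::Matrix4f::Random();

  // forceAlignedAccess() wraps the expression; packet accesses through the
  // wrapper are issued as Aligned, so the underlying data must really be
  // aligned (fixed-size types such as Matrix4f normally are).
  const Eigen::ForceAlignedAccess<Eigen::Matrix4f> fa = m.forceAlignedAccess();

  // The wrapper simply forwards sizes and coefficients to the nested expression.
  std::cout << fa.rows() << "x" << fa.cols() << " " << fa.coeff(0, 0) << "\n";
}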
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/CoreIterators.h
.h
4,525
128
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COREITERATORS_H #define EIGEN_COREITERATORS_H namespace Eigen { /* This file contains the respective InnerIterator definition of the expressions defined in Eigen/Core */ namespace internal { template<typename XprType, typename EvaluatorKind> class inner_iterator_selector; } /** \class InnerIterator * \brief An InnerIterator allows to loop over the element of any matrix expression. * * \warning To be used with care because an evaluator is constructed every time an InnerIterator iterator is constructed. * * TODO: add a usage example */ template<typename XprType> class InnerIterator { protected: typedef internal::inner_iterator_selector<XprType, typename internal::evaluator_traits<XprType>::Kind> IteratorType; typedef internal::evaluator<XprType> EvaluatorType; typedef typename internal::traits<XprType>::Scalar Scalar; public: /** Construct an iterator over the \a outerId -th row or column of \a xpr */ InnerIterator(const XprType &xpr, const Index &outerId) : m_eval(xpr), m_iter(m_eval, outerId, xpr.innerSize()) {} /// \returns the value of the current coefficient. EIGEN_STRONG_INLINE Scalar value() const { return m_iter.value(); } /** Increment the iterator \c *this to the next non-zero coefficient. * Explicit zeros are not skipped over. To skip explicit zeros, see class SparseView */ EIGEN_STRONG_INLINE InnerIterator& operator++() { m_iter.operator++(); return *this; } /// \returns the column or row index of the current coefficient. EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); } /// \returns the row index of the current coefficient. EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); } /// \returns the column index of the current coefficient. EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); } /// \returns \c true if the iterator \c *this still references a valid coefficient. EIGEN_STRONG_INLINE operator bool() const { return m_iter; } protected: EvaluatorType m_eval; IteratorType m_iter; private: // If you get here, then you're not using the right InnerIterator type, e.g.: // SparseMatrix<double,RowMajor> A; // SparseMatrix<double>::InnerIterator it(A,0); template<typename T> InnerIterator(const EigenBase<T>&,Index outer); }; namespace internal { // Generic inner iterator implementation for dense objects template<typename XprType> class inner_iterator_selector<XprType, IndexBased> { protected: typedef evaluator<XprType> EvaluatorType; typedef typename traits<XprType>::Scalar Scalar; enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit }; public: EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &innerSize) : m_eval(eval), m_inner(0), m_outer(outerId), m_end(innerSize) {} EIGEN_STRONG_INLINE Scalar value() const { return (IsRowMajor) ? m_eval.coeff(m_outer, m_inner) : m_eval.coeff(m_inner, m_outer); } EIGEN_STRONG_INLINE inner_iterator_selector& operator++() { m_inner++; return *this; } EIGEN_STRONG_INLINE Index index() const { return m_inner; } inline Index row() const { return IsRowMajor ? m_outer : index(); } inline Index col() const { return IsRowMajor ? 
index() : m_outer; } EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; } protected: const EvaluatorType& m_eval; Index m_inner; const Index m_outer; const Index m_end; }; // For iterator-based evaluator, inner-iterator is already implemented as // evaluator<>::InnerIterator template<typename XprType> class inner_iterator_selector<XprType, IteratorBased> : public evaluator<XprType>::InnerIterator { protected: typedef typename evaluator<XprType>::InnerIterator Base; typedef evaluator<XprType> EvaluatorType; public: EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &/*innerSize*/) : Base(eval, outerId) {} }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_COREITERATORS_H
Unknown
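CoreIterators.h above provides the generic InnerIterator for any matrix expression. A hedged usage sketch (not part of the dumped file), contrasting it with the sparse-specific iterator mentioned in the private-constructor comment:

// Hypothetical usage sketch for InnerIterator (not in the dumped file).
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>

int main() {
  // Dense: InnerIterator<XprType> visits every coefficient of one column
  // (or row, for row-major expressions); explicit zeros are not skipped.
  Eigen::MatrixXd d(3, 2);
  d << 1, 0,
       0, 2,
       3, 0;
  for (Eigen::InnerIterator<Eigen::MatrixXd> it(d, 0); it; ++it)
    std::cout << "d(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";

  // Sparse matrices use their own nested InnerIterator type instead.
  Eigen::SparseMatrix<double> s = d.sparseView();
  for (Eigen::SparseMatrix<double>::InnerIterator it(s, 0); it; ++it)
    std::cout << "s(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
}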
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Replicate.h
.h
5,595
143
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_REPLICATE_H #define EIGEN_REPLICATE_H namespace Eigen { namespace internal { template<typename MatrixType,int RowFactor,int ColFactor> struct traits<Replicate<MatrixType,RowFactor,ColFactor> > : traits<MatrixType> { typedef typename MatrixType::Scalar Scalar; typedef typename traits<MatrixType>::StorageKind StorageKind; typedef typename traits<MatrixType>::XprKind XprKind; typedef typename ref_selector<MatrixType>::type MatrixTypeNested; typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested; enum { RowsAtCompileTime = RowFactor==Dynamic || int(MatrixType::RowsAtCompileTime)==Dynamic ? Dynamic : RowFactor * MatrixType::RowsAtCompileTime, ColsAtCompileTime = ColFactor==Dynamic || int(MatrixType::ColsAtCompileTime)==Dynamic ? Dynamic : ColFactor * MatrixType::ColsAtCompileTime, //FIXME we don't propagate the max sizes !!! MaxRowsAtCompileTime = RowsAtCompileTime, MaxColsAtCompileTime = ColsAtCompileTime, IsRowMajor = MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1 ? 1 : MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1 ? 0 : (MatrixType::Flags & RowMajorBit) ? 1 : 0, // FIXME enable DirectAccess with negative strides? Flags = IsRowMajor ? RowMajorBit : 0 }; }; } /** * \class Replicate * \ingroup Core_Module * * \brief Expression of the multiple replication of a matrix or vector * * \tparam MatrixType the type of the object we are replicating * \tparam RowFactor number of repetitions at compile time along the vertical direction, can be Dynamic. * \tparam ColFactor number of repetitions at compile time along the horizontal direction, can be Dynamic. * * This class represents an expression of the multiple replication of a matrix or vector. * It is the return type of DenseBase::replicate() and most of the time * this is the only way it is used. 
* * \sa DenseBase::replicate() */ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate : public internal::dense_xpr_base< Replicate<MatrixType,RowFactor,ColFactor> >::type { typedef typename internal::traits<Replicate>::MatrixTypeNested MatrixTypeNested; typedef typename internal::traits<Replicate>::_MatrixTypeNested _MatrixTypeNested; public: typedef typename internal::dense_xpr_base<Replicate>::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Replicate) typedef typename internal::remove_all<MatrixType>::type NestedExpression; template<typename OriginalMatrixType> EIGEN_DEVICE_FUNC inline explicit Replicate(const OriginalMatrixType& matrix) : m_matrix(matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor) { EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value), THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) eigen_assert(RowFactor!=Dynamic && ColFactor!=Dynamic); } template<typename OriginalMatrixType> EIGEN_DEVICE_FUNC inline Replicate(const OriginalMatrixType& matrix, Index rowFactor, Index colFactor) : m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor) { EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value), THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) } EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); } EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); } EIGEN_DEVICE_FUNC const _MatrixTypeNested& nestedExpression() const { return m_matrix; } protected: MatrixTypeNested m_matrix; const internal::variable_if_dynamic<Index, RowFactor> m_rowFactor; const internal::variable_if_dynamic<Index, ColFactor> m_colFactor; }; /** * \return an expression of the replication of \c *this * * Example: \include MatrixBase_replicate.cpp * Output: \verbinclude MatrixBase_replicate.out * * \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate */ template<typename Derived> template<int RowFactor, int ColFactor> const Replicate<Derived,RowFactor,ColFactor> DenseBase<Derived>::replicate() const { return Replicate<Derived,RowFactor,ColFactor>(derived()); } /** * \return an expression of the replication of each column (or row) of \c *this * * Example: \include DirectionWise_replicate_int.cpp * Output: \verbinclude DirectionWise_replicate_int.out * * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate */ template<typename ExpressionType, int Direction> const typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType VectorwiseOp<ExpressionType,Direction>::replicate(Index factor) const { return typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1); } } // end namespace Eigen #endif // EIGEN_REPLICATE_H
Unknown
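Replicate.h above defines the tiling expression returned by replicate(). A hedged usage sketch (not part of the dumped file) of the run-time, compile-time, and vectorwise forms:

// Hypothetical usage sketch for Replicate (not in the dumped file).
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Vector3d v(1, 2, 3);

  // Run-time factors: tile the 3x1 vector into a 3x4 matrix.
  Eigen::MatrixXd tiled = v.replicate(1, 4);

  // Compile-time factors via the template form.
  Eigen::MatrixXd tiled2 = v.replicate<1, 4>();

  // Column-wise replication through VectorwiseOp: stacks m vertically twice.
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(2, 3);
  Eigen::MatrixXd cols = m.colwise().replicate(2);

  std::cout << tiled.cols() << " " << (tiled - tiled2).norm() << " "
            << cols.rows() << "x" << cols.cols() << "\n";
}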
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/Select.h
.h
6,020
163
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SELECT_H #define EIGEN_SELECT_H namespace Eigen { /** \class Select * \ingroup Core_Module * * \brief Expression of a coefficient wise version of the C++ ternary operator ?: * * \param ConditionMatrixType the type of the \em condition expression which must be a boolean matrix * \param ThenMatrixType the type of the \em then expression * \param ElseMatrixType the type of the \em else expression * * This class represents an expression of a coefficient wise version of the C++ ternary operator ?:. * It is the return type of DenseBase::select() and most of the time this is the only way it is used. * * \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const */ namespace internal { template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType> struct traits<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> > : traits<ThenMatrixType> { typedef typename traits<ThenMatrixType>::Scalar Scalar; typedef Dense StorageKind; typedef typename traits<ThenMatrixType>::XprKind XprKind; typedef typename ConditionMatrixType::Nested ConditionMatrixNested; typedef typename ThenMatrixType::Nested ThenMatrixNested; typedef typename ElseMatrixType::Nested ElseMatrixNested; enum { RowsAtCompileTime = ConditionMatrixType::RowsAtCompileTime, ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime, Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & RowMajorBit }; }; } template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType> class Select : public internal::dense_xpr_base< Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >::type, internal::no_assignment_operator { public: typedef typename internal::dense_xpr_base<Select>::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Select) inline EIGEN_DEVICE_FUNC Select(const ConditionMatrixType& a_conditionMatrix, const ThenMatrixType& a_thenMatrix, const ElseMatrixType& a_elseMatrix) : m_condition(a_conditionMatrix), m_then(a_thenMatrix), m_else(a_elseMatrix) { eigen_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows()); eigen_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols()); } inline EIGEN_DEVICE_FUNC Index rows() const { return m_condition.rows(); } inline EIGEN_DEVICE_FUNC Index cols() const { return m_condition.cols(); } inline EIGEN_DEVICE_FUNC const Scalar coeff(Index i, Index j) const { if (m_condition.coeff(i,j)) return m_then.coeff(i,j); else return m_else.coeff(i,j); } inline EIGEN_DEVICE_FUNC const Scalar coeff(Index i) const { if (m_condition.coeff(i)) return m_then.coeff(i); else return m_else.coeff(i); } inline EIGEN_DEVICE_FUNC const ConditionMatrixType& conditionMatrix() const { return m_condition; } inline EIGEN_DEVICE_FUNC const ThenMatrixType& thenMatrix() const { return m_then; } inline EIGEN_DEVICE_FUNC const ElseMatrixType& elseMatrix() const { return m_else; } protected: typename ConditionMatrixType::Nested m_condition; typename ThenMatrixType::Nested m_then; typename 
ElseMatrixType::Nested m_else; }; /** \returns a matrix where each coefficient (i,j) is equal to \a thenMatrix(i,j) * if \c *this(i,j), and \a elseMatrix(i,j) otherwise. * * Example: \include MatrixBase_select.cpp * Output: \verbinclude MatrixBase_select.out * * \sa class Select */ template<typename Derived> template<typename ThenDerived,typename ElseDerived> inline const Select<Derived,ThenDerived,ElseDerived> DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix, const DenseBase<ElseDerived>& elseMatrix) const { return Select<Derived,ThenDerived,ElseDerived>(derived(), thenMatrix.derived(), elseMatrix.derived()); } /** Version of DenseBase::select(const DenseBase&, const DenseBase&) with * the \em else expression being a scalar value. * * \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select */ template<typename Derived> template<typename ThenDerived> inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType> DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix, const typename ThenDerived::Scalar& elseScalar) const { return Select<Derived,ThenDerived,typename ThenDerived::ConstantReturnType>( derived(), thenMatrix.derived(), ThenDerived::Constant(rows(),cols(),elseScalar)); } /** Version of DenseBase::select(const DenseBase&, const DenseBase&) with * the \em then expression being a scalar value. * * \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select */ template<typename Derived> template<typename ElseDerived> inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived > DenseBase<Derived>::select(const typename ElseDerived::Scalar& thenScalar, const DenseBase<ElseDerived>& elseMatrix) const { return Select<Derived,typename ElseDerived::ConstantReturnType,ElseDerived>( derived(), ElseDerived::Constant(rows(),cols(),thenScalar), elseMatrix.derived()); } } // end namespace Eigen #endif // EIGEN_SELECT_H
Unknown
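Select.h above implements the coefficient-wise ternary operator returned by DenseBase::select(). A hedged usage sketch (not part of the dumped file), including the scalar-argument overload defined at the end of the file:

// Hypothetical usage sketch for Select (not in the dumped file).
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::ArrayXXd a = Eigen::ArrayXXd::Random(2, 3);

  // Coefficient-wise ternary: keep positive entries, clamp the rest to zero.
  Eigen::ArrayXXd clamped = (a > 0.0).select(a, 0.0);

  // Both branches may also be full expressions of the same shape.
  Eigen::ArrayXXd b = Eigen::ArrayXXd::Constant(2, 3, 0.5);
  Eigen::ArrayXXd mixed = (a > b).select(a, -a);

  std::cout << clamped.minCoeff() << " " << mixed.minCoeff() << "\n";
}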
2D
JaeHyunLee94/mpm2d
external/eigen-3.3.9/Eigen/src/Core/CwiseTernaryOp.h
.h
8,256
198
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2016 Eugene Brevdo <ebrevdo@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CWISE_TERNARY_OP_H #define EIGEN_CWISE_TERNARY_OP_H namespace Eigen { namespace internal { template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3> struct traits<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > { // we must not inherit from traits<Arg1> since it has // the potential to cause problems with MSVC typedef typename remove_all<Arg1>::type Ancestor; typedef typename traits<Ancestor>::XprKind XprKind; enum { RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime, ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime, MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime, MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime }; // even though we require Arg1, Arg2, and Arg3 to have the same scalar type // (see CwiseTernaryOp constructor), // we still want to handle the case when the result type is different. typedef typename result_of<TernaryOp( const typename Arg1::Scalar&, const typename Arg2::Scalar&, const typename Arg3::Scalar&)>::type Scalar; typedef typename internal::traits<Arg1>::StorageKind StorageKind; typedef typename internal::traits<Arg1>::StorageIndex StorageIndex; typedef typename Arg1::Nested Arg1Nested; typedef typename Arg2::Nested Arg2Nested; typedef typename Arg3::Nested Arg3Nested; typedef typename remove_reference<Arg1Nested>::type _Arg1Nested; typedef typename remove_reference<Arg2Nested>::type _Arg2Nested; typedef typename remove_reference<Arg3Nested>::type _Arg3Nested; enum { Flags = _Arg1Nested::Flags & RowMajorBit }; }; } // end namespace internal template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3, typename StorageKind> class CwiseTernaryOpImpl; /** \class CwiseTernaryOp * \ingroup Core_Module * * \brief Generic expression where a coefficient-wise ternary operator is * applied to two expressions * * \tparam TernaryOp template functor implementing the operator * \tparam Arg1Type the type of the first argument * \tparam Arg2Type the type of the second argument * \tparam Arg3Type the type of the third argument * * This class represents an expression where a coefficient-wise ternary * operator is applied to three expressions. * It is the return type of ternary operators, by which we mean only those * ternary operators where * all three arguments are Eigen expressions. * For example, the return type of betainc(matrix1, matrix2, matrix3) is a * CwiseTernaryOp. * * Most of the time, this is the only way that it is used, so you typically * don't have to name * CwiseTernaryOp types explicitly. 
* * \sa MatrixBase::ternaryExpr(const MatrixBase<Argument2> &, const * MatrixBase<Argument3> &, const CustomTernaryOp &) const, class CwiseBinaryOp, * class CwiseUnaryOp, class CwiseNullaryOp */ template <typename TernaryOp, typename Arg1Type, typename Arg2Type, typename Arg3Type> class CwiseTernaryOp : public CwiseTernaryOpImpl< TernaryOp, Arg1Type, Arg2Type, Arg3Type, typename internal::traits<Arg1Type>::StorageKind>, internal::no_assignment_operator { public: typedef typename internal::remove_all<Arg1Type>::type Arg1; typedef typename internal::remove_all<Arg2Type>::type Arg2; typedef typename internal::remove_all<Arg3Type>::type Arg3; typedef typename CwiseTernaryOpImpl< TernaryOp, Arg1Type, Arg2Type, Arg3Type, typename internal::traits<Arg1Type>::StorageKind>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseTernaryOp) typedef typename internal::ref_selector<Arg1Type>::type Arg1Nested; typedef typename internal::ref_selector<Arg2Type>::type Arg2Nested; typedef typename internal::ref_selector<Arg3Type>::type Arg3Nested; typedef typename internal::remove_reference<Arg1Nested>::type _Arg1Nested; typedef typename internal::remove_reference<Arg2Nested>::type _Arg2Nested; typedef typename internal::remove_reference<Arg3Nested>::type _Arg3Nested; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CwiseTernaryOp(const Arg1& a1, const Arg2& a2, const Arg3& a3, const TernaryOp& func = TernaryOp()) : m_arg1(a1), m_arg2(a2), m_arg3(a3), m_functor(func) { // require the sizes to match EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg2) EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg3) // The index types should match EIGEN_STATIC_ASSERT((internal::is_same< typename internal::traits<Arg1Type>::StorageKind, typename internal::traits<Arg2Type>::StorageKind>::value), STORAGE_KIND_MUST_MATCH) EIGEN_STATIC_ASSERT((internal::is_same< typename internal::traits<Arg1Type>::StorageKind, typename internal::traits<Arg3Type>::StorageKind>::value), STORAGE_KIND_MUST_MATCH) eigen_assert(a1.rows() == a2.rows() && a1.cols() == a2.cols() && a1.rows() == a3.rows() && a1.cols() == a3.cols()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const { // return the fixed size type if available to enable compile time // optimizations if (internal::traits<typename internal::remove_all<Arg1Nested>::type>:: RowsAtCompileTime == Dynamic && internal::traits<typename internal::remove_all<Arg2Nested>::type>:: RowsAtCompileTime == Dynamic) return m_arg3.rows(); else if (internal::traits<typename internal::remove_all<Arg1Nested>::type>:: RowsAtCompileTime == Dynamic && internal::traits<typename internal::remove_all<Arg3Nested>::type>:: RowsAtCompileTime == Dynamic) return m_arg2.rows(); else return m_arg1.rows(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const { // return the fixed size type if available to enable compile time // optimizations if (internal::traits<typename internal::remove_all<Arg1Nested>::type>:: ColsAtCompileTime == Dynamic && internal::traits<typename internal::remove_all<Arg2Nested>::type>:: ColsAtCompileTime == Dynamic) return m_arg3.cols(); else if (internal::traits<typename internal::remove_all<Arg1Nested>::type>:: ColsAtCompileTime == Dynamic && internal::traits<typename internal::remove_all<Arg3Nested>::type>:: ColsAtCompileTime == Dynamic) return m_arg2.cols(); else return m_arg1.cols(); } /** \returns the first argument nested expression */ EIGEN_DEVICE_FUNC const _Arg1Nested& arg1() const { return m_arg1; } /** \returns the first argument nested expression */ EIGEN_DEVICE_FUNC const _Arg2Nested& arg2() 
const { return m_arg2; } /** \returns the third argument nested expression */ EIGEN_DEVICE_FUNC const _Arg3Nested& arg3() const { return m_arg3; } /** \returns the functor representing the ternary operation */ EIGEN_DEVICE_FUNC const TernaryOp& functor() const { return m_functor; } protected: Arg1Nested m_arg1; Arg2Nested m_arg2; Arg3Nested m_arg3; const TernaryOp m_functor; }; // Generic API dispatcher template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3, typename StorageKind> class CwiseTernaryOpImpl : public internal::generic_xpr_base< CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >::type { public: typedef typename internal::generic_xpr_base< CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >::type Base; }; } // end namespace Eigen #endif // EIGEN_CWISE_TERNARY_OP_H
Unknown
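CwiseTernaryOp.h above is the generic coefficient-wise ternary expression; betainc() is the main user named in its documentation. The sketch below is illustrative only and assumes the unsupported SpecialFunctions module supplies betainc() for array arguments in this Eigen version:

// Hypothetical usage sketch for CwiseTernaryOp via betainc() (not in the dumped file).
// Assumes unsupported/Eigen/SpecialFunctions provides the scalar functor.
#include <Eigen/Dense>
#include <unsupported/Eigen/SpecialFunctions>
#include <iostream>

int main() {
  Eigen::ArrayXd a = Eigen::ArrayXd::Constant(4, 2.0);
  Eigen::ArrayXd b = Eigen::ArrayXd::Constant(4, 3.0);
  Eigen::ArrayXd x = Eigen::ArrayXd::LinSpaced(4, 0.1, 0.9);

  // betainc(a, b, x) builds a CwiseTernaryOp over the three array arguments;
  // assignment evaluates it coefficient-wise.
  Eigen::ArrayXd y = Eigen::betainc(a, b, x);

  std::cout << y.transpose() << "\n";
}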