add Eigen as a dependency
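The vendored headers below are consumed through the usual Eigen include layout. A minimal usage sketch follows (illustrative only, not part of this commit's diff; the file name example.cpp, the compile command, and the assumption that the umbrella header unsupported/Eigen/IterativeSolvers is vendored alongside these sources are all assumptions). With external/include/eigen3 on the include path, for example g++ -I external/include/eigen3 example.cpp, the newly added iterative solvers become available:

// example.cpp -- hypothetical consumer of the vendored Eigen headers
#include <iostream>
#include <vector>
#include <Eigen/Sparse>
#include <unsupported/Eigen/IterativeSolvers>

int main()
{
  const int n = 100;
  // Assemble a simple sparse tridiagonal system A*x = b.
  std::vector<Eigen::Triplet<double> > triplets;
  for (int i = 0; i < n; ++i)
  {
    triplets.push_back(Eigen::Triplet<double>(i, i, 4.0));
    if (i > 0)     triplets.push_back(Eigen::Triplet<double>(i, i - 1, -1.0));
    if (i < n - 1) triplets.push_back(Eigen::Triplet<double>(i, i + 1, -1.0));
  }
  Eigen::SparseMatrix<double> A(n, n);
  A.setFromTriplets(triplets.begin(), triplets.end());
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  // Restarted GMRES from the vendored unsupported IterativeSolvers module.
  Eigen::GMRES<Eigen::SparseMatrix<double> > solver(A);
  solver.set_restart(30);
  Eigen::VectorXd x = solver.solve(b);

  std::cout << "#iterations: " << solver.iterations()
            << ", estimated error: " << solver.error() << std::endl;
  return 0;
}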

external/include/eigen3/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h (new file, vendored, 189 lines)
@@ -0,0 +1,189 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>

/* NOTE The functions of this file have been adapted from the GMM++ library */

//========================================================================
//
// Copyright (C) 2002-2007 Yves Renard
//
// This file is a part of GETFEM++
//
// Getfem++ is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation; version 2.1 of the License.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
// You should have received a copy of the GNU Lesser General Public
// License along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
// USA.
//
//========================================================================

#include "../../../../Eigen/src/Core/util/NonMPL2.h"

#ifndef EIGEN_CONSTRAINEDCG_H
#define EIGEN_CONSTRAINEDCG_H

#include <Eigen/Core>

namespace Eigen {

namespace internal {

/** \ingroup IterativeSolvers_Module
  * Compute the pseudo inverse of the non-square matrix C such that
  * \f$ CINV = (C * C^T)^{-1} * C \f$ based on a conjugate gradient method.
  *
  * This function is internally used by constrained_cg.
  */
template <typename CMatrix, typename CINVMatrix>
void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV)
{
  // optimisable : copie de la ligne, precalcul de C * trans(C).
  typedef typename CMatrix::Scalar Scalar;
  typedef typename CMatrix::Index Index;
  // FIXME use sparse vectors ?
  typedef Matrix<Scalar,Dynamic,1> TmpVec;

  Index rows = C.rows(), cols = C.cols();

  TmpVec d(rows), e(rows), l(cols), p(rows), q(rows), r(rows);
  Scalar rho, rho_1, alpha;
  d.setZero();

  typedef Triplet<double> T;
  std::vector<T> tripletList;

  for (Index i = 0; i < rows; ++i)
  {
    d[i] = 1.0;
    rho = 1.0;
    e.setZero();
    r = d;
    p = d;

    while (rho >= 1e-38)
    { /* conjugate gradient to compute e */
      /* which is the i-th row of inv(C * trans(C)) */
      l = C.transpose() * p;
      q = C * l;
      alpha = rho / p.dot(q);
      e += alpha * p;
      r += -alpha * q;
      rho_1 = rho;
      rho = r.dot(r);
      p = (rho/rho_1) * p + r;
    }

    l = C.transpose() * e; // l is the i-th row of CINV
    // FIXME add a generic "prune/filter" expression for both dense and sparse object to sparse
    for (Index j=0; j<l.size(); ++j)
      if (l[j]<1e-15)
        tripletList.push_back(T(i,j,l(j)));


    d[i] = 0.0;
  }
  CINV.setFromTriplets(tripletList.begin(), tripletList.end());
}



/** \ingroup IterativeSolvers_Module
  * Constrained conjugate gradient
  *
  * Computes the minimum of \f$ 1/2((Ax).x) - bx \f$ under the contraint \f$ Cx \le f \f$
  */
template<typename TMatrix, typename CMatrix,
         typename VectorX, typename VectorB, typename VectorF>
void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x,
                    const VectorB& b, const VectorF& f, IterationController &iter)
{
  using std::sqrt;
  typedef typename TMatrix::Scalar Scalar;
  typedef typename TMatrix::Index Index;
  typedef Matrix<Scalar,Dynamic,1> TmpVec;

  Scalar rho = 1.0, rho_1, lambda, gamma;
  Index xSize = x.size();
  TmpVec p(xSize), q(xSize), q2(xSize),
         r(xSize), old_z(xSize), z(xSize),
         memox(xSize);
  std::vector<bool> satured(C.rows());
  p.setZero();
  iter.setRhsNorm(sqrt(b.dot(b))); // gael vect_sp(PS, b, b)
  if (iter.rhsNorm() == 0.0) iter.setRhsNorm(1.0);

  SparseMatrix<Scalar,RowMajor> CINV(C.rows(), C.cols());
  pseudo_inverse(C, CINV);

  while(true)
  {
    // computation of residual
    old_z = z;
    memox = x;
    r = b;
    r += A * -x;
    z = r;
    bool transition = false;
    for (Index i = 0; i < C.rows(); ++i)
    {
      Scalar al = C.row(i).dot(x) - f.coeff(i);
      if (al >= -1.0E-15)
      {
        if (!satured[i])
        {
          satured[i] = true;
          transition = true;
        }
        Scalar bb = CINV.row(i).dot(z);
        if (bb > 0.0)
          // FIXME: we should allow that: z += -bb * C.row(i);
          for (typename CMatrix::InnerIterator it(C,i); it; ++it)
            z.coeffRef(it.index()) -= bb*it.value();
      }
      else
        satured[i] = false;
    }

    // descent direction
    rho_1 = rho;
    rho = r.dot(z);

    if (iter.finished(rho)) break;

    if (iter.noiseLevel() > 0 && transition) std::cerr << "CCG: transition\n";
    if (transition || iter.first()) gamma = 0.0;
    else gamma = (std::max)(0.0, (rho - old_z.dot(z)) / rho_1);
    p = z + gamma*p;

    ++iter;
    // one dimensionnal optimization
    q = A * p;
    lambda = rho / q.dot(p);
    for (Index i = 0; i < C.rows(); ++i)
    {
      if (!satured[i])
      {
        Scalar bb = C.row(i).dot(p) - f[i];
        if (bb > 0.0)
          lambda = (std::min)(lambda, (f.coeff(i)-C.row(i).dot(x)) / bb);
      }
    }
    x += lambda * p;
    memox -= x;
  }
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_CONSTRAINEDCG_H

external/include/eigen3/unsupported/Eigen/src/IterativeSolvers/DGMRES.h (new file, vendored, 510 lines)
@@ -0,0 +1,510 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_DGMRES_H
#define EIGEN_DGMRES_H

#include <Eigen/Eigenvalues>

namespace Eigen {

template< typename _MatrixType,
          typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
class DGMRES;

namespace internal {

template< typename _MatrixType, typename _Preconditioner>
struct traits<DGMRES<_MatrixType,_Preconditioner> >
{
  typedef _MatrixType MatrixType;
  typedef _Preconditioner Preconditioner;
};

/** \brief Computes a permutation vector to have a sorted sequence
  * \param vec The vector to reorder.
  * \param perm gives the sorted sequence on output. Must be initialized with 0..n-1
  * \param ncut Put the ncut smallest elements at the end of the vector
  * WARNING This is an expensive sort, so should be used only
  * for small size vectors
  * TODO Use modified QuickSplit or std::nth_element to get the smallest values
  */
template <typename VectorType, typename IndexType>
void sortWithPermutation (VectorType& vec, IndexType& perm, typename IndexType::Scalar& ncut)
{
  eigen_assert(vec.size() == perm.size());
  bool flag;
  for (Index k = 0; k < ncut; k++)
  {
    flag = false;
    for (Index j = 0; j < vec.size()-1; j++)
    {
      if ( vec(perm(j)) < vec(perm(j+1)) )
      {
        std::swap(perm(j),perm(j+1));
        flag = true;
      }
      if (!flag) break; // The vector is in sorted order
    }
  }
}

}
/**
  * \ingroup IterativeLInearSolvers_Module
  * \brief A Restarted GMRES with deflation.
  * This class implements a modification of the GMRES solver for
  * sparse linear systems. The basis is built with modified
  * Gram-Schmidt. At each restart, a few approximated eigenvectors
  * corresponding to the smallest eigenvalues are used to build a
  * preconditioner for the next cycle. This preconditioner
  * for deflation can be combined with any other preconditioner,
  * the IncompleteLUT for instance. The preconditioner is applied
  * at right of the matrix and the combination is multiplicative.
  *
  * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
  * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
  * Typical usage :
  * \code
  * SparseMatrix<double> A;
  * VectorXd x, b;
  * //Fill A and b ...
  * DGMRES<SparseMatrix<double> > solver;
  * solver.set_restart(30); // Set restarting value
  * solver.setEigenv(1); // Set the number of eigenvalues to deflate
  * solver.compute(A);
  * x = solver.solve(b);
  * \endcode
  *
  * DGMRES can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
  *
  * References :
  * [1] D. NUENTSA WAKAM and F. PACULL, Memory Efficient Hybrid
  *  Algebraic Solvers for Linear Systems Arising from Compressible
  *  Flows, Computers and Fluids, In Press,
  *  http://dx.doi.org/10.1016/j.compfluid.2012.03.023
  * [2] K. Burrage and J. Erhel, On the performance of various
  * adaptive preconditioned GMRES strategies, 5(1998), 101-121.
  * [3] J. Erhel, K. Burrage and B. Pohl, Restarted GMRES
  *  preconditioned by deflation,J. Computational and Applied
  *  Mathematics, 69(1996), 303-318.

  *
  */
template< typename _MatrixType, typename _Preconditioner>
class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
{
    typedef IterativeSolverBase<DGMRES> Base;
    using Base::matrix;
    using Base::m_error;
    using Base::m_iterations;
    using Base::m_info;
    using Base::m_isInitialized;
    using Base::m_tolerance;
  public:
    using Base::_solve_impl;
    typedef _MatrixType MatrixType;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    typedef typename MatrixType::RealScalar RealScalar;
    typedef _Preconditioner Preconditioner;
    typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
    typedef Matrix<RealScalar,Dynamic,Dynamic> DenseRealMatrix;
    typedef Matrix<Scalar,Dynamic,1> DenseVector;
    typedef Matrix<RealScalar,Dynamic,1> DenseRealVector;
    typedef Matrix<std::complex<RealScalar>, Dynamic, 1> ComplexVector;


    /** Default constructor. */
    DGMRES() : Base(),m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false) {}

    /** Initialize the solver with matrix \a A for further \c Ax=b solving.
      *
      * This constructor is a shortcut for the default constructor followed
      * by a call to compute().
      *
      * \warning this class stores a reference to the matrix A as well as some
      * precomputed values that depend on it. Therefore, if \a A is changed
      * this class becomes invalid. Call compute() to update it with the new
      * matrix A, or modify a copy of A.
      */
    template<typename MatrixDerived>
    explicit DGMRES(const EigenBase<MatrixDerived>& A) : Base(A.derived()), m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false) {}

    ~DGMRES() {}

    /** \internal */
    template<typename Rhs,typename Dest>
    void _solve_with_guess_impl(const Rhs& b, Dest& x) const
    {
      bool failed = false;
      for(Index j=0; j<b.cols(); ++j)
      {
        m_iterations = Base::maxIterations();
        m_error = Base::m_tolerance;

        typename Dest::ColXpr xj(x,j);
        dgmres(matrix(), b.col(j), xj, Base::m_preconditioner);
      }
      m_info = failed ? NumericalIssue
             : m_error <= Base::m_tolerance ? Success
             : NoConvergence;
      m_isInitialized = true;
    }

    /** \internal */
    template<typename Rhs,typename Dest>
    void _solve_impl(const Rhs& b, MatrixBase<Dest>& x) const
    {
      x = b;
      _solve_with_guess_impl(b,x.derived());
    }
    /**
      * Get the restart value
      */
    Index restart() { return m_restart; }

    /**
      * Set the restart value (default is 30)
      */
    void set_restart(const Index restart) { m_restart=restart; }

    /**
      * Set the number of eigenvalues to deflate at each restart
      */
    void setEigenv(const Index neig)
    {
      m_neig = neig;
      if (neig+1 > m_maxNeig) m_maxNeig = neig+1; // To allow for complex conjugates
    }

    /**
      * Get the size of the deflation subspace size
      */
    Index deflSize() {return m_r; }

    /**
      * Set the maximum size of the deflation subspace
      */
    void setMaxEigenv(const Index maxNeig) { m_maxNeig = maxNeig; }

  protected:
    // DGMRES algorithm
    template<typename Rhs, typename Dest>
    void dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x, const Preconditioner& precond) const;
    // Perform one cycle of GMRES
    template<typename Dest>
    Index dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, Index& nbIts) const;
    // Compute data to use for deflation
    Index dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, StorageIndex& neig) const;
    // Apply deflation to a vector
    template<typename RhsType, typename DestType>
    Index dgmresApplyDeflation(const RhsType& In, DestType& Out) const;
    ComplexVector schurValues(const ComplexSchur<DenseMatrix>& schurofH) const;
    ComplexVector schurValues(const RealSchur<DenseMatrix>& schurofH) const;
    // Init data for deflation
    void dgmresInitDeflation(Index& rows) const;
    mutable DenseMatrix m_V; // Krylov basis vectors
    mutable DenseMatrix m_H; // Hessenberg matrix
    mutable DenseMatrix m_Hes; // Initial hessenberg matrix wihout Givens rotations applied
    mutable Index m_restart; // Maximum size of the Krylov subspace
    mutable DenseMatrix m_U; // Vectors that form the basis of the invariant subspace
    mutable DenseMatrix m_MU; // matrix operator applied to m_U (for next cycles)
    mutable DenseMatrix m_T; /* T=U^T*M^{-1}*A*U */
    mutable PartialPivLU<DenseMatrix> m_luT; // LU factorization of m_T
    mutable StorageIndex m_neig; //Number of eigenvalues to extract at each restart
    mutable Index m_r; // Current number of deflated eigenvalues, size of m_U
    mutable Index m_maxNeig; // Maximum number of eigenvalues to deflate
    mutable RealScalar m_lambdaN; //Modulus of the largest eigenvalue of A
    mutable bool m_isDeflAllocated;
    mutable bool m_isDeflInitialized;

    //Adaptive strategy
    mutable RealScalar m_smv; // Smaller multiple of the remaining number of steps allowed
    mutable bool m_force; // Force the use of deflation at each restart

};
/**
  * \brief Perform several cycles of restarted GMRES with modified Gram Schmidt,
  *
  * A right preconditioner is used combined with deflation.
  *
  */
template< typename _MatrixType, typename _Preconditioner>
template<typename Rhs, typename Dest>
void DGMRES<_MatrixType, _Preconditioner>::dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x,
                                                  const Preconditioner& precond) const
{
  //Initialization
  Index n = mat.rows();
  DenseVector r0(n);
  Index nbIts = 0;
  m_H.resize(m_restart+1, m_restart);
  m_Hes.resize(m_restart, m_restart);
  m_V.resize(n,m_restart+1);
  //Initial residual vector and intial norm
  x = precond.solve(x);
  r0 = rhs - mat * x;
  RealScalar beta = r0.norm();
  RealScalar normRhs = rhs.norm();
  m_error = beta/normRhs;
  if(m_error < m_tolerance)
    m_info = Success;
  else
    m_info = NoConvergence;

  // Iterative process
  while (nbIts < m_iterations && m_info == NoConvergence)
  {
    dgmresCycle(mat, precond, x, r0, beta, normRhs, nbIts);

    // Compute the new residual vector for the restart
    if (nbIts < m_iterations && m_info == NoConvergence)
      r0 = rhs - mat * x;
  }
}

/**
  * \brief Perform one restart cycle of DGMRES
  * \param mat The coefficient matrix
  * \param precond The preconditioner
  * \param x the new approximated solution
  * \param r0 The initial residual vector
  * \param beta The norm of the residual computed so far
  * \param normRhs The norm of the right hand side vector
  * \param nbIts The number of iterations
  */
template< typename _MatrixType, typename _Preconditioner>
template<typename Dest>
Index DGMRES<_MatrixType, _Preconditioner>::dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, Index& nbIts) const
{
  //Initialization
  DenseVector g(m_restart+1); // Right hand side of the least square problem
  g.setZero();
  g(0) = Scalar(beta);
  m_V.col(0) = r0/beta;
  m_info = NoConvergence;
  std::vector<JacobiRotation<Scalar> >gr(m_restart); // Givens rotations
  Index it = 0; // Number of inner iterations
  Index n = mat.rows();
  DenseVector tv1(n), tv2(n); //Temporary vectors
  while (m_info == NoConvergence && it < m_restart && nbIts < m_iterations)
  {
    // Apply preconditioner(s) at right
    if (m_isDeflInitialized )
    {
      dgmresApplyDeflation(m_V.col(it), tv1); // Deflation
      tv2 = precond.solve(tv1);
    }
    else
    {
      tv2 = precond.solve(m_V.col(it)); // User's selected preconditioner
    }
    tv1 = mat * tv2;

    // Orthogonalize it with the previous basis in the basis using modified Gram-Schmidt
    Scalar coef;
    for (Index i = 0; i <= it; ++i)
    {
      coef = tv1.dot(m_V.col(i));
      tv1 = tv1 - coef * m_V.col(i);
      m_H(i,it) = coef;
      m_Hes(i,it) = coef;
    }
    // Normalize the vector
    coef = tv1.norm();
    m_V.col(it+1) = tv1/coef;
    m_H(it+1, it) = coef;
    // m_Hes(it+1,it) = coef;

    // FIXME Check for happy breakdown

    // Update Hessenberg matrix with Givens rotations
    for (Index i = 1; i <= it; ++i)
    {
      m_H.col(it).applyOnTheLeft(i-1,i,gr[i-1].adjoint());
    }
    // Compute the new plane rotation
    gr[it].makeGivens(m_H(it, it), m_H(it+1,it));
    // Apply the new rotation
    m_H.col(it).applyOnTheLeft(it,it+1,gr[it].adjoint());
    g.applyOnTheLeft(it,it+1, gr[it].adjoint());

    beta = std::abs(g(it+1));
    m_error = beta/normRhs;
    // std::cerr << nbIts << " Relative Residual Norm " << m_error << std::endl;
    it++; nbIts++;

    if (m_error < m_tolerance)
    {
      // The method has converged
      m_info = Success;
      break;
    }
  }

  // Compute the new coefficients by solving the least square problem
  // it++;
  //FIXME Check first if the matrix is singular ... zero diagonal
  DenseVector nrs(m_restart);
  nrs = m_H.topLeftCorner(it,it).template triangularView<Upper>().solve(g.head(it));

  // Form the new solution
  if (m_isDeflInitialized)
  {
    tv1 = m_V.leftCols(it) * nrs;
    dgmresApplyDeflation(tv1, tv2);
    x = x + precond.solve(tv2);
  }
  else
    x = x + precond.solve(m_V.leftCols(it) * nrs);

  // Go for a new cycle and compute data for deflation
  if(nbIts < m_iterations && m_info == NoConvergence && m_neig > 0 && (m_r+m_neig) < m_maxNeig)
    dgmresComputeDeflationData(mat, precond, it, m_neig);
  return 0;

}


template< typename _MatrixType, typename _Preconditioner>
void DGMRES<_MatrixType, _Preconditioner>::dgmresInitDeflation(Index& rows) const
{
  m_U.resize(rows, m_maxNeig);
  m_MU.resize(rows, m_maxNeig);
  m_T.resize(m_maxNeig, m_maxNeig);
  m_lambdaN = 0.0;
  m_isDeflAllocated = true;
}

template< typename _MatrixType, typename _Preconditioner>
inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const ComplexSchur<DenseMatrix>& schurofH) const
{
  return schurofH.matrixT().diagonal();
}

template< typename _MatrixType, typename _Preconditioner>
inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const RealSchur<DenseMatrix>& schurofH) const
{
  const DenseMatrix& T = schurofH.matrixT();
  Index it = T.rows();
  ComplexVector eig(it);
  Index j = 0;
  while (j < it-1)
  {
    if (T(j+1,j) ==Scalar(0))
    {
      eig(j) = std::complex<RealScalar>(T(j,j),RealScalar(0));
      j++;
    }
    else
    {
      eig(j) = std::complex<RealScalar>(T(j,j),T(j+1,j));
      eig(j+1) = std::complex<RealScalar>(T(j,j+1),T(j+1,j+1));
      j++;
    }
  }
  if (j < it-1) eig(j) = std::complex<RealScalar>(T(j,j),RealScalar(0));
  return eig;
}

template< typename _MatrixType, typename _Preconditioner>
Index DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, StorageIndex& neig) const
{
  // First, find the Schur form of the Hessenberg matrix H
  typename internal::conditional<NumTraits<Scalar>::IsComplex, ComplexSchur<DenseMatrix>, RealSchur<DenseMatrix> >::type schurofH;
  bool computeU = true;
  DenseMatrix matrixQ(it,it);
  matrixQ.setIdentity();
  schurofH.computeFromHessenberg(m_Hes.topLeftCorner(it,it), matrixQ, computeU);

  ComplexVector eig(it);
  Matrix<StorageIndex,Dynamic,1>perm(it);
  eig = this->schurValues(schurofH);

  // Reorder the absolute values of Schur values
  DenseRealVector modulEig(it);
  for (Index j=0; j<it; ++j) modulEig(j) = std::abs(eig(j));
  perm.setLinSpaced(it,0,internal::convert_index<StorageIndex>(it-1));
  internal::sortWithPermutation(modulEig, perm, neig);

  if (!m_lambdaN)
  {
    m_lambdaN = (std::max)(modulEig.maxCoeff(), m_lambdaN);
  }
  //Count the real number of extracted eigenvalues (with complex conjugates)
  Index nbrEig = 0;
  while (nbrEig < neig)
  {
    if(eig(perm(it-nbrEig-1)).imag() == RealScalar(0)) nbrEig++;
    else nbrEig += 2;
  }
  // Extract the Schur vectors corresponding to the smallest Ritz values
  DenseMatrix Sr(it, nbrEig);
  Sr.setZero();
  for (Index j = 0; j < nbrEig; j++)
  {
    Sr.col(j) = schurofH.matrixU().col(perm(it-j-1));
  }

  // Form the Schur vectors of the initial matrix using the Krylov basis
  DenseMatrix X;
  X = m_V.leftCols(it) * Sr;
  if (m_r)
  {
    // Orthogonalize X against m_U using modified Gram-Schmidt
    for (Index j = 0; j < nbrEig; j++)
      for (Index k =0; k < m_r; k++)
        X.col(j) = X.col(j) - (m_U.col(k).dot(X.col(j)))*m_U.col(k);
  }

  // Compute m_MX = A * M^-1 * X
  Index m = m_V.rows();
  if (!m_isDeflAllocated)
    dgmresInitDeflation(m);
  DenseMatrix MX(m, nbrEig);
  DenseVector tv1(m);
  for (Index j = 0; j < nbrEig; j++)
  {
    tv1 = mat * X.col(j);
    MX.col(j) = precond.solve(tv1);
  }

  //Update m_T = [U'MU U'MX; X'MU X'MX]
  m_T.block(m_r, m_r, nbrEig, nbrEig) = X.transpose() * MX;
  if(m_r)
  {
    m_T.block(0, m_r, m_r, nbrEig) = m_U.leftCols(m_r).transpose() * MX;
    m_T.block(m_r, 0, nbrEig, m_r) = X.transpose() * m_MU.leftCols(m_r);
  }

  // Save X into m_U and m_MX in m_MU
  for (Index j = 0; j < nbrEig; j++) m_U.col(m_r+j) = X.col(j);
  for (Index j = 0; j < nbrEig; j++) m_MU.col(m_r+j) = MX.col(j);
  // Increase the size of the invariant subspace
  m_r += nbrEig;

  // Factorize m_T into m_luT
  m_luT.compute(m_T.topLeftCorner(m_r, m_r));

  //FIXME CHeck if the factorization was correctly done (nonsingular matrix)
  m_isDeflInitialized = true;
  return 0;
}
template<typename _MatrixType, typename _Preconditioner>
template<typename RhsType, typename DestType>
Index DGMRES<_MatrixType, _Preconditioner>::dgmresApplyDeflation(const RhsType &x, DestType &y) const
{
  DenseVector x1 = m_U.leftCols(m_r).transpose() * x;
  y = x + m_U.leftCols(m_r) * ( m_lambdaN * m_luT.solve(x1) - x1);
  return 0;
}

} // end namespace Eigen
#endif

external/include/eigen3/unsupported/Eigen/src/IterativeSolvers/GMRES.h (new file, vendored, 343 lines)
@@ -0,0 +1,343 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2012, 2014 Kolja Brix <brix@igpm.rwth-aaachen.de>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GMRES_H
#define EIGEN_GMRES_H

namespace Eigen {

namespace internal {

/**
 * Generalized Minimal Residual Algorithm based on the
 * Arnoldi algorithm implemented with Householder reflections.
 *
 * Parameters:
 *  \param mat       matrix of linear system of equations
 *  \param rhs       right hand side vector of linear system of equations
 *  \param x         on input: initial guess, on output: solution
 *  \param precond   preconditioner used
 *  \param iters     on input: maximum number of iterations to perform
 *                   on output: number of iterations performed
 *  \param restart   number of iterations for a restart
 *  \param tol_error on input: relative residual tolerance
 *                   on output: residuum achieved
 *
 * \sa IterativeMethods::bicgstab()
 *
 *
 * For references, please see:
 *
 * Saad, Y. and Schultz, M. H.
 * GMRES: A Generalized Minimal Residual Algorithm for Solving Nonsymmetric Linear Systems.
 * SIAM J.Sci.Stat.Comp. 7, 1986, pp. 856 - 869.
 *
 * Saad, Y.
 * Iterative Methods for Sparse Linear Systems.
 * Society for Industrial and Applied Mathematics, Philadelphia, 2003.
 *
 * Walker, H. F.
 * Implementations of the GMRES method.
 * Comput.Phys.Comm. 53, 1989, pp. 311 - 320.
 *
 * Walker, H. F.
 * Implementation of the GMRES Method using Householder Transformations.
 * SIAM J.Sci.Stat.Comp. 9, 1988, pp. 152 - 163.
 *
 */
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Preconditioner & precond,
           Index &iters, const Index &restart, typename Dest::RealScalar & tol_error) {

  using std::sqrt;
  using std::abs;

  typedef typename Dest::RealScalar RealScalar;
  typedef typename Dest::Scalar Scalar;
  typedef Matrix < Scalar, Dynamic, 1 > VectorType;
  typedef Matrix < Scalar, Dynamic, Dynamic, ColMajor> FMatrixType;

  RealScalar tol = tol_error;
  const Index maxIters = iters;
  iters = 0;

  const Index m = mat.rows();

  // residual and preconditioned residual
  VectorType p0 = rhs - mat*x;
  VectorType r0 = precond.solve(p0);

  const RealScalar r0Norm = r0.norm();

  // is initial guess already good enough?
  if(r0Norm == 0)
  {
    tol_error = 0;
    return true;
  }

  // storage for Hessenberg matrix and Householder data
  FMatrixType H   = FMatrixType::Zero(m, restart + 1);
  VectorType w    = VectorType::Zero(restart + 1);
  VectorType tau  = VectorType::Zero(restart + 1);

  // storage for Jacobi rotations
  std::vector < JacobiRotation < Scalar > > G(restart);

  // storage for temporaries
  VectorType t(m), v(m), workspace(m), x_new(m);

  // generate first Householder vector
  Ref<VectorType> H0_tail = H.col(0).tail(m - 1);
  RealScalar beta;
  r0.makeHouseholder(H0_tail, tau.coeffRef(0), beta);
  w(0) = Scalar(beta);

  for (Index k = 1; k <= restart; ++k)
  {
    ++iters;

    v = VectorType::Unit(m, k - 1);

    // apply Householder reflections H_{1} ... H_{k-1} to v
    // TODO: use a HouseholderSequence
    for (Index i = k - 1; i >= 0; --i) {
      v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
    }

    // apply matrix M to v:  v = mat * v;
    t.noalias() = mat * v;
    v = precond.solve(t);

    // apply Householder reflections H_{k-1} ... H_{1} to v
    // TODO: use a HouseholderSequence
    for (Index i = 0; i < k; ++i) {
      v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
    }

    if (v.tail(m - k).norm() != 0.0)
    {
      if (k <= restart)
      {
        // generate new Householder vector
        Ref<VectorType> Hk_tail = H.col(k).tail(m - k - 1);
        v.tail(m - k).makeHouseholder(Hk_tail, tau.coeffRef(k), beta);

        // apply Householder reflection H_{k} to v
        v.tail(m - k).applyHouseholderOnTheLeft(Hk_tail, tau.coeffRef(k), workspace.data());
      }
    }

    if (k > 1)
    {
      for (Index i = 0; i < k - 1; ++i)
      {
        // apply old Givens rotations to v
        v.applyOnTheLeft(i, i + 1, G[i].adjoint());
      }
    }

    if (k<m && v(k) != (Scalar) 0)
    {
      // determine next Givens rotation
      G[k - 1].makeGivens(v(k - 1), v(k));

      // apply Givens rotation to v and w
      v.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
      w.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
    }

    // insert coefficients into upper matrix triangle
    H.col(k-1).head(k) = v.head(k);

    tol_error = abs(w(k)) / r0Norm;
    bool stop = (k==m || tol_error < tol || iters == maxIters);

    if (stop || k == restart)
    {
      // solve upper triangular system
      Ref<VectorType> y = w.head(k);
      H.topLeftCorner(k, k).template triangularView <Upper>().solveInPlace(y);

      // use Horner-like scheme to calculate solution vector
      x_new.setZero();
      for (Index i = k - 1; i >= 0; --i)
      {
        x_new(i) += y(i);
        // apply Householder reflection H_{i} to x_new
        x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
      }

      x += x_new;

      if(stop)
      {
        return true;
      }
      else
      {
        k=0;

        // reset data for restart
        p0.noalias() = rhs - mat*x;
        r0 = precond.solve(p0);

        // clear Hessenberg matrix and Householder data
        H.setZero();
        w.setZero();
        tau.setZero();

        // generate first Householder vector
        r0.makeHouseholder(H0_tail, tau.coeffRef(0), beta);
        w(0) = Scalar(beta);
      }
    }
  }

  return false;

}

}

template< typename _MatrixType,
          typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
class GMRES;

namespace internal {

template< typename _MatrixType, typename _Preconditioner>
struct traits<GMRES<_MatrixType,_Preconditioner> >
{
  typedef _MatrixType MatrixType;
  typedef _Preconditioner Preconditioner;
};

}

/** \ingroup IterativeLinearSolvers_Module
  * \brief A GMRES solver for sparse square problems
  *
  * This class allows to solve for A.x = b sparse linear problems using a generalized minimal
  * residual method. The vectors x and b can be either dense or sparse.
  *
  * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
  * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
  *
  * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
  * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
  * and NumTraits<Scalar>::epsilon() for the tolerance.
  *
  * This class can be used as the direct solver classes. Here is a typical usage example:
  * \code
  * int n = 10000;
  * VectorXd x(n), b(n);
  * SparseMatrix<double> A(n,n);
  * // fill A and b
  * GMRES<SparseMatrix<double> > solver(A);
  * x = solver.solve(b);
  * std::cout << "#iterations:     " << solver.iterations() << std::endl;
  * std::cout << "estimated error: " << solver.error()      << std::endl;
  * // update b, and solve again
  * x = solver.solve(b);
  * \endcode
  *
  * By default the iterations start with x=0 as an initial guess of the solution.
  * One can control the start using the solveWithGuess() method.
  *
  * GMRES can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
  *
  * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
  */
template< typename _MatrixType, typename _Preconditioner>
class GMRES : public IterativeSolverBase<GMRES<_MatrixType,_Preconditioner> >
{
  typedef IterativeSolverBase<GMRES> Base;
  using Base::matrix;
  using Base::m_error;
  using Base::m_iterations;
  using Base::m_info;
  using Base::m_isInitialized;

private:
  Index m_restart;

public:
  using Base::_solve_impl;
  typedef _MatrixType MatrixType;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename MatrixType::RealScalar RealScalar;
  typedef _Preconditioner Preconditioner;

public:

  /** Default constructor. */
  GMRES() : Base(), m_restart(30) {}

  /** Initialize the solver with matrix \a A for further \c Ax=b solving.
    *
    * This constructor is a shortcut for the default constructor followed
    * by a call to compute().
    *
    * \warning this class stores a reference to the matrix A as well as some
    * precomputed values that depend on it. Therefore, if \a A is changed
    * this class becomes invalid. Call compute() to update it with the new
    * matrix A, or modify a copy of A.
    */
  template<typename MatrixDerived>
  explicit GMRES(const EigenBase<MatrixDerived>& A) : Base(A.derived()), m_restart(30) {}

  ~GMRES() {}

  /** Get the number of iterations after that a restart is performed.
    */
  Index get_restart() { return m_restart; }

  /** Set the number of iterations after that a restart is performed.
    *  \param restart   number of iterations for a restart, default is 30.
    */
  void set_restart(const Index restart) { m_restart=restart; }

  /** \internal */
  template<typename Rhs,typename Dest>
  void _solve_with_guess_impl(const Rhs& b, Dest& x) const
  {
    bool failed = false;
    for(Index j=0; j<b.cols(); ++j)
    {
      m_iterations = Base::maxIterations();
      m_error = Base::m_tolerance;

      typename Dest::ColXpr xj(x,j);
      if(!internal::gmres(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_restart, m_error))
        failed = true;
    }
    m_info = failed ? NumericalIssue
           : m_error <= Base::m_tolerance ? Success
           : NoConvergence;
    m_isInitialized = true;
  }

  /** \internal */
  template<typename Rhs,typename Dest>
  void _solve_impl(const Rhs& b, MatrixBase<Dest> &x) const
  {
    x = b;
    if(x.squaredNorm() == 0) return; // Check Zero right hand side
    _solve_with_guess_impl(b,x.derived());
  }

protected:

};

} // end namespace Eigen

#endif // EIGEN_GMRES_H

external/include/eigen3/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h (new file, vendored, 90 lines)
@@ -0,0 +1,90 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_INCOMPLETE_LU_H
#define EIGEN_INCOMPLETE_LU_H

namespace Eigen {

template <typename _Scalar>
class IncompleteLU : public SparseSolverBase<IncompleteLU<_Scalar> >
{
  protected:
    typedef SparseSolverBase<IncompleteLU<_Scalar> > Base;
    using Base::m_isInitialized;

    typedef _Scalar Scalar;
    typedef Matrix<Scalar,Dynamic,1> Vector;
    typedef typename Vector::Index Index;
    typedef SparseMatrix<Scalar,RowMajor> FactorType;

  public:
    typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;

    IncompleteLU() {}

    template<typename MatrixType>
    IncompleteLU(const MatrixType& mat)
    {
      compute(mat);
    }

    Index rows() const { return m_lu.rows(); }
    Index cols() const { return m_lu.cols(); }

    template<typename MatrixType>
    IncompleteLU& compute(const MatrixType& mat)
    {
      m_lu = mat;
      int size = mat.cols();
      Vector diag(size);
      for(int i=0; i<size; ++i)
      {
        typename FactorType::InnerIterator k_it(m_lu,i);
        for(; k_it && k_it.index()<i; ++k_it)
        {
          int k = k_it.index();
          k_it.valueRef() /= diag(k);

          typename FactorType::InnerIterator j_it(k_it);
          typename FactorType::InnerIterator kj_it(m_lu, k);
          while(kj_it && kj_it.index()<=k) ++kj_it;
          for(++j_it; j_it; )
          {
            if(kj_it.index()==j_it.index())
            {
              j_it.valueRef() -= k_it.value() * kj_it.value();
              ++j_it;
              ++kj_it;
            }
            else if(kj_it.index()<j_it.index()) ++kj_it;
            else ++j_it;
          }
        }
        if(k_it && k_it.index()==i) diag(i) = k_it.value();
        else diag(i) = 1;
      }
      m_isInitialized = true;
      return *this;
    }

    template<typename Rhs, typename Dest>
    void _solve_impl(const Rhs& b, Dest& x) const
    {
      x = m_lu.template triangularView<UnitLower>().solve(b);
      x = m_lu.template triangularView<Upper>().solve(x);
    }

  protected:
    FactorType m_lu;
};

} // end namespace Eigen

#endif // EIGEN_INCOMPLETE_LU_H

external/include/eigen3/unsupported/Eigen/src/IterativeSolvers/IterationController.h (new file, vendored, 154 lines)
@@ -0,0 +1,154 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>

/* NOTE The class IterationController has been adapted from the iteration
 *      class of the GMM++ and ITL libraries.
 */

//=======================================================================
// Copyright (C) 1997-2001
// Authors: Andrew Lumsdaine <lums@osl.iu.edu>
//          Lie-Quan Lee     <llee@osl.iu.edu>
//
// This file is part of the Iterative Template Library
//
// You should have received a copy of the License Agreement for the
// Iterative Template Library along with the software;  see the
// file LICENSE.
//
// Permission to modify the code and to distribute modified code is
// granted, provided the text of this NOTICE is retained, a notice that
// the code was modified is included with the above COPYRIGHT NOTICE and
// with the COPYRIGHT NOTICE in the LICENSE file, and that the LICENSE
// file is distributed with the modified code.
//
// LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED.
// By way of example, but not limitation, Licensor MAKES NO
// REPRESENTATIONS OR WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY
// PARTICULAR PURPOSE OR THAT THE USE OF THE LICENSED SOFTWARE COMPONENTS
// OR DOCUMENTATION WILL NOT INFRINGE ANY PATENTS, COPYRIGHTS, TRADEMARKS
// OR OTHER RIGHTS.
//=======================================================================

//========================================================================
//
// Copyright (C) 2002-2007 Yves Renard
//
// This file is a part of GETFEM++
//
// Getfem++ is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation; version 2.1 of the License.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
// You should have received a copy of the GNU Lesser General Public
// License along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
// USA.
//
//========================================================================

#include "../../../../Eigen/src/Core/util/NonMPL2.h"

#ifndef EIGEN_ITERATION_CONTROLLER_H
#define EIGEN_ITERATION_CONTROLLER_H

namespace Eigen {

/** \ingroup IterativeSolvers_Module
  * \class IterationController
  *
  * \brief Controls the iterations of the iterative solvers
  *
  * This class has been adapted from the iteration class of GMM++ and ITL libraries.
  *
  */
class IterationController
{
  protected :
    double m_rhsn;        ///< Right hand side norm
    size_t m_maxiter;     ///< Max. number of iterations
    int m_noise;          ///< if noise > 0 iterations are printed
    double m_resmax;      ///< maximum residual
    double m_resminreach, m_resadd;
    size_t m_nit;         ///< iteration number
    double m_res;         ///< last computed residual
    bool m_written;
    void (*m_callback)(const IterationController&);
  public :

    void init()
    {
      m_nit = 0; m_res = 0.0; m_written = false;
      m_resminreach = 1E50; m_resadd = 0.0;
      m_callback = 0;
    }

    IterationController(double r = 1.0E-8, int noi = 0, size_t mit = size_t(-1))
      : m_rhsn(1.0), m_maxiter(mit), m_noise(noi), m_resmax(r) { init(); }

    void operator ++(int) { m_nit++; m_written = false; m_resadd += m_res; }
    void operator ++() { (*this)++; }

    bool first() { return m_nit == 0; }

    /* get/set the "noisyness" (verbosity) of the solvers */
    int noiseLevel() const { return m_noise; }
    void setNoiseLevel(int n) { m_noise = n; }
    void reduceNoiseLevel() { if (m_noise > 0) m_noise--; }

    double maxResidual() const { return m_resmax; }
    void setMaxResidual(double r) { m_resmax = r; }

    double residual() const { return m_res; }

    /* change the user-definable callback, called after each iteration */
    void setCallback(void (*t)(const IterationController&))
    {
      m_callback = t;
    }

    size_t iteration() const { return m_nit; }
    void setIteration(size_t i) { m_nit = i; }

    size_t maxIterarions() const { return m_maxiter; }
    void setMaxIterations(size_t i) { m_maxiter = i; }

    double rhsNorm() const { return m_rhsn; }
    void setRhsNorm(double r) { m_rhsn = r; }

    bool converged() const { return m_res <= m_rhsn * m_resmax; }
    bool converged(double nr)
    {
      using std::abs;
      m_res = abs(nr);
      m_resminreach = (std::min)(m_resminreach, m_res);
      return converged();
    }
    template<typename VectorType> bool converged(const VectorType &v)
    { return converged(v.squaredNorm()); }

    bool finished(double nr)
    {
      if (m_callback) m_callback(*this);
      if (m_noise > 0 && !m_written)
      {
        converged(nr);
        m_written = true;
      }
      return (m_nit >= m_maxiter || converged(nr));
    }
    template <typename VectorType>
    bool finished(const MatrixBase<VectorType> &v)
    { return finished(double(v.squaredNorm())); }

};

} // end namespace Eigen

#endif // EIGEN_ITERATION_CONTROLLER_H

external/include/eigen3/unsupported/Eigen/src/IterativeSolvers/MINRES.h (new file, vendored, 289 lines)
@@ -0,0 +1,289 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra.
|
||||
//
|
||||
// Copyright (C) 2012 Giacomo Po <gpo@ucla.edu>
|
||||
// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla
|
||||
// Public License v. 2.0. If a copy of the MPL was not distributed
|
||||
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
|
||||
#ifndef EIGEN_MINRES_H_
|
||||
#define EIGEN_MINRES_H_
|
||||
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
namespace internal {
|
||||
|
||||
/** \internal Low-level MINRES algorithm
|
||||
* \param mat The matrix A
|
||||
* \param rhs The right hand side vector b
|
||||
* \param x On input and initial solution, on output the computed solution.
|
||||
* \param precond A right preconditioner being able to efficiently solve for an
|
||||
* approximation of Ax=b (regardless of b)
|
||||
* \param iters On input the max number of iteration, on output the number of performed iterations.
|
||||
* \param tol_error On input the tolerance error, on output an estimation of the relative error.
|
||||
*/
|
||||
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
|
||||
EIGEN_DONT_INLINE
|
||||
void minres(const MatrixType& mat, const Rhs& rhs, Dest& x,
|
||||
const Preconditioner& precond, Index& iters,
|
||||
typename Dest::RealScalar& tol_error)
|
||||
{
|
||||
using std::sqrt;
|
||||
typedef typename Dest::RealScalar RealScalar;
|
||||
typedef typename Dest::Scalar Scalar;
|
||||
typedef Matrix<Scalar,Dynamic,1> VectorType;
|
||||
|
||||
// Check for zero rhs
|
||||
const RealScalar rhsNorm2(rhs.squaredNorm());
|
||||
if(rhsNorm2 == 0)
|
||||
{
|
||||
x.setZero();
|
||||
iters = 0;
|
||||
tol_error = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
// initialize
|
||||
const Index maxIters(iters); // initialize maxIters to iters
|
||||
const Index N(mat.cols()); // the size of the matrix
|
||||
const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2)
|
||||
|
||||
// Initialize preconditioned Lanczos
|
||||
VectorType v_old(N); // will be initialized inside loop
|
||||
VectorType v( VectorType::Zero(N) ); //initialize v
|
||||
VectorType v_new(rhs-mat*x); //initialize v_new
|
||||
RealScalar residualNorm2(v_new.squaredNorm());
|
||||
VectorType w(N); // will be initialized inside loop
|
||||
VectorType w_new(precond.solve(v_new)); // initialize w_new
|
||||
// RealScalar beta; // will be initialized inside loop
|
||||
RealScalar beta_new2(v_new.dot(w_new));
|
||||
eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
|
||||
RealScalar beta_new(sqrt(beta_new2));
|
||||
const RealScalar beta_one(beta_new);
|
||||
v_new /= beta_new;
|
||||
w_new /= beta_new;
|
||||
// Initialize other variables
|
||||
RealScalar c(1.0); // the cosine of the Givens rotation
|
||||
RealScalar c_old(1.0);
|
||||
RealScalar s(0.0); // the sine of the Givens rotation
|
||||
RealScalar s_old(0.0); // the sine of the Givens rotation
|
||||
VectorType p_oold(N); // will be initialized in loop
|
||||
VectorType p_old(VectorType::Zero(N)); // initialize p_old=0
|
||||
VectorType p(p_old); // initialize p=0
|
||||
RealScalar eta(1.0);
|
||||
|
||||
iters = 0; // reset iters
|
||||
while ( iters < maxIters )
|
||||
{
|
||||
// Preconditioned Lanczos
|
||||
/* Note that there are 4 variants on the Lanczos algorithm. These are
|
||||
* described in Paige, C. C. (1972). Computational variants of
|
||||
* the Lanczos method for the eigenproblem. IMA Journal of Applied
|
||||
* Mathematics, 10(3), 373–381. The current implementation corresponds
|
||||
* to the case A(2,7) in the paper. It also corresponds to
|
||||
* algorithm 6.14 in Y. Saad, Iterative Methods for Sparse Linear
|
||||
* Systems, 2003 p.173. For the preconditioned version see
|
||||
* A. Greenbaum, Iterative Methods for Solving Linear Systems, SIAM (1987).
|
||||
*/
|
||||
const RealScalar beta(beta_new);
|
||||
v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter
|
||||
// const VectorType v_old(v); // NOT SURE IF CREATING v_old EVERY ITERATION IS EFFICIENT
|
||||
v = v_new; // update
|
||||
w = w_new; // update
|
||||
// const VectorType w(w_new); // NOT SURE IF CREATING w EVERY ITERATION IS EFFICIENT
|
||||
v_new.noalias() = mat*w - beta*v_old; // compute v_new
|
||||
const RealScalar alpha = v_new.dot(w);
|
||||
v_new -= alpha*v; // overwrite v_new
|
||||
w_new = precond.solve(v_new); // overwrite w_new
|
||||
beta_new2 = v_new.dot(w_new); // compute beta_new
|
||||
eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
|
||||
beta_new = sqrt(beta_new2); // compute beta_new
|
||||
v_new /= beta_new; // overwrite v_new for next iteration
|
||||
w_new /= beta_new; // overwrite w_new for next iteration
|
||||
|
||||
// Givens rotation
|
||||
const RealScalar r2 =s*alpha+c*c_old*beta; // s, s_old, c and c_old are still from previous iteration
|
||||
const RealScalar r3 =s_old*beta; // s, s_old, c and c_old are still from previous iteration
|
||||
const RealScalar r1_hat=c*alpha-c_old*s*beta;
|
||||
const RealScalar r1 =sqrt( std::pow(r1_hat,2) + std::pow(beta_new,2) );
|
||||
c_old = c; // store for next iteration
|
||||
s_old = s; // store for next iteration
|
||||
c=r1_hat/r1; // new cosine
|
||||
s=beta_new/r1; // new sine
|
||||
|
||||
// Update solution
|
||||
p_oold = p_old;
|
||||
// const VectorType p_oold(p_old); // NOT SURE IF CREATING p_oold EVERY ITERATION IS EFFICIENT
|
||||
p_old = p;
|
||||
p.noalias()=(w-r2*p_old-r3*p_oold) /r1; // IS NOALIAS REQUIRED?
|
||||
x += beta_one*c*eta*p;
|
||||
|
||||
/* Update the squared residual. Note that this is the estimated residual.
|
||||
The real residual |Ax-b|^2 may be slightly larger */
|
||||
residualNorm2 *= s*s;
|
||||
|
||||
if ( residualNorm2 < threshold2)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
eta=-s*eta; // update eta
|
||||
iters++; // increment iteration number (for output purposes)
|
||||
}
|
||||
|
||||
/* Compute error. Note that this is the estimated error. The real
|
||||
error |Ax-b|/|b| may be slightly larger */
|
||||
tol_error = std::sqrt(residualNorm2 / rhsNorm2);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
template< typename _MatrixType, int _UpLo=Lower,
|
||||
typename _Preconditioner = IdentityPreconditioner>
|
||||
class MINRES;
|
||||
|
||||
namespace internal {
|
||||
|
||||
template< typename _MatrixType, int _UpLo, typename _Preconditioner>
|
||||
struct traits<MINRES<_MatrixType,_UpLo,_Preconditioner> >
|
||||
{
|
||||
typedef _MatrixType MatrixType;
|
||||
typedef _Preconditioner Preconditioner;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
/** \ingroup IterativeLinearSolvers_Module
|
||||
* \brief A minimal residual solver for sparse symmetric problems
|
||||
*
|
||||
* This class allows to solve for A.x = b sparse linear problems using the MINRES algorithm
|
||||
* of Paige and Saunders (1975). The sparse matrix A must be symmetric (possibly indefinite).
|
||||
* The vectors x and b can be either dense or sparse.
|
||||
*
|
||||
* \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
|
||||
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower,
|
||||
* Upper, or Lower|Upper in which the full matrix entries will be considered. Default is Lower.
|
||||
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
|
||||
*
|
||||
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
|
||||
* and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
|
||||
* and NumTraits<Scalar>::epsilon() for the tolerance.
|
||||
*
|
||||
* This class can be used as the direct solver classes. Here is a typical usage example:
|
||||
* \code
|
||||
* int n = 10000;
|
||||
* VectorXd x(n), b(n);
|
||||
* SparseMatrix<double> A(n,n);
|
||||
* // fill A and b
|
||||
* MINRES<SparseMatrix<double> > mr;
|
||||
* mr.compute(A);
|
||||
* x = mr.solve(b);
|
||||
* std::cout << "#iterations: " << mr.iterations() << std::endl;
|
||||
* std::cout << "estimated error: " << mr.error() << std::endl;
|
||||
* // update b, and solve again
|
||||
* x = mr.solve(b);
|
||||
* \endcode
|
||||
*
|
||||
* By default the iterations start with x=0 as an initial guess of the solution.
|
||||
* One can control the start using the solveWithGuess() method.
|
||||
  *
  * MINRES can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
  *
  * \sa class ConjugateGradient, BiCGSTAB, SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
  */
template< typename _MatrixType, int _UpLo, typename _Preconditioner>
class MINRES : public IterativeSolverBase<MINRES<_MatrixType,_UpLo,_Preconditioner> >
{

  typedef IterativeSolverBase<MINRES> Base;
  using Base::matrix;
  using Base::m_error;
  using Base::m_iterations;
  using Base::m_info;
  using Base::m_isInitialized;
public:
  using Base::_solve_impl;
  typedef _MatrixType MatrixType;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename MatrixType::RealScalar RealScalar;
  typedef _Preconditioner Preconditioner;

  enum {UpLo = _UpLo};

public:

  /** Default constructor. */
  MINRES() : Base() {}

  /** Initialize the solver with matrix \a A for further \c Ax=b solving.
    *
    * This constructor is a shortcut for the default constructor followed
    * by a call to compute().
    *
    * \warning this class stores a reference to the matrix A as well as some
    * precomputed values that depend on it. Therefore, if \a A is changed
    * this class becomes invalid. Call compute() to update it with the new
    * matrix A, or modify a copy of A.
    */
  template<typename MatrixDerived>
  explicit MINRES(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}

  /** Destructor. */
  ~MINRES(){}

  /** \internal */
  template<typename Rhs,typename Dest>
  void _solve_with_guess_impl(const Rhs& b, Dest& x) const
  {
    typedef typename Base::MatrixWrapper MatrixWrapper;
    typedef typename Base::ActualMatrixType ActualMatrixType;
    enum {
      TransposeInput  =   (!MatrixWrapper::MatrixFree)
                      &&  (UpLo==(Lower|Upper))
                      &&  (!MatrixType::IsRowMajor)
                      &&  (!NumTraits<Scalar>::IsComplex)
    };
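    // When the full matrix is used (Lower|Upper), stored column-major with real
    // coefficients, the wrapper below feeds the solver the transposed expression:
    // for a symmetric matrix this is mathematically identical, but it exposes a
    // row-major view that is typically more efficient for the sparse
    // matrix-vector products performed at each iteration.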
    typedef typename internal::conditional<TransposeInput,Transpose<const ActualMatrixType>, ActualMatrixType const&>::type RowMajorWrapper;
    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MatrixWrapper::MatrixFree,UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY);
    typedef typename internal::conditional<UpLo==(Lower|Upper),
                                           RowMajorWrapper,
                                           typename MatrixWrapper::template ConstSelfAdjointViewReturnType<UpLo>::Type
                                          >::type SelfAdjointWrapper;

    m_iterations = Base::maxIterations();
    m_error = Base::m_tolerance;
    RowMajorWrapper row_mat(matrix());
    for(int j=0; j<b.cols(); ++j)
    {
      m_iterations = Base::maxIterations();
      m_error = Base::m_tolerance;

      typename Dest::ColXpr xj(x,j);
      internal::minres(SelfAdjointWrapper(row_mat), b.col(j), xj,
                       Base::m_preconditioner, m_iterations, m_error);
    }

    m_isInitialized = true;
    m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
  }

  /** \internal */
  template<typename Rhs,typename Dest>
  void _solve_impl(const Rhs& b, MatrixBase<Dest> &x) const
  {
    x.setZero();
    _solve_with_guess_impl(b,x.derived());
  }

protected:

};

} // end namespace Eigen

#endif // EIGEN_MINRES_H

187
external/include/eigen3/unsupported/Eigen/src/IterativeSolvers/Scaling.h
vendored
Normal file
187
external/include/eigen3/unsupported/Eigen/src/IterativeSolvers/Scaling.h
vendored
Normal file
@@ -0,0 +1,187 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Desire NUENTSA WAKAM <desire.nuentsa_wakam@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_ITERSCALING_H
#define EIGEN_ITERSCALING_H

namespace Eigen {

/**
  * \ingroup IterativeSolvers_Module
  * \brief Iterative scaling algorithm to equilibrate row and column norms in matrices
  *
  * This class can be used as a preprocessing tool to accelerate the convergence of iterative methods.
  *
  * This feature is useful to limit the amount of pivoting during LU/ILU factorization.
  * The scaling strategy as presented here preserves the symmetry of the problem.
  * NOTE It is assumed that the matrix does not have empty rows or columns.
  *
  * Example with key steps
  * \code
  * VectorXd x(n), b(n);
  * SparseMatrix<double> A;
  * // fill A and b;
  * IterScaling<SparseMatrix<double> > scal;
  * // Compute the left and right scaling vectors. The matrix is equilibrated at output
  * scal.computeRef(A);
  * // Scale the right hand side
  * b = scal.LeftScaling().cwiseProduct(b);
  * // Now, solve the equilibrated linear system with any available solver
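  * // For instance (illustrative only; any sparse direct or iterative solver works):
  * SparseLU<SparseMatrix<double> > lu(A);
  * x = lu.solve(b);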
  *
  * // Scale back the computed solution
  * x = scal.RightScaling().cwiseProduct(x);
  * \endcode
  *
  * \tparam _MatrixType the type of the matrix. It should be a real square sparse matrix.
  *
  * References : D. Ruiz and B. Ucar, A Symmetry Preserving Algorithm for Matrix Scaling, INRIA Research report RR-7552
  *
  * \sa \ref IncompleteLUT
  */
template<typename _MatrixType>
class IterScaling
{
  public:
    typedef _MatrixType MatrixType;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::Index Index;

  public:
    IterScaling() { init(); }

    IterScaling(const MatrixType& matrix)
    {
      init();
      compute(matrix);
    }

    ~IterScaling() { }

    /**
     * Compute the left and right diagonal matrices to scale the input matrix @p mat
     *
     * FIXME This algorithm will be modified such that the diagonal elements are permuted on the diagonal.
     *
     * \sa LeftScaling() RightScaling()
     */
    void compute (const MatrixType& mat)
    {
      using std::abs;
      int m = mat.rows();
      int n = mat.cols();
      eigen_assert((m>0 && m == n) && "Please give a non-empty matrix");
      m_left.resize(m);
      m_right.resize(n);
      m_left.setOnes();
      m_right.setOnes();
      m_matrix = mat;
      VectorXd Dr, Dc, DrRes, DcRes; // Temporary left and right scaling vectors
      Dr.resize(m); Dc.resize(n);
      DrRes.resize(m); DcRes.resize(n);
      double EpsRow = 1.0, EpsCol = 1.0;
      int its = 0;
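      // Ruiz-style equilibration: each pass divides every row and every column by
      // the square root of its current maximum absolute value, which drives the
      // infinity norm of all rows and columns towards 1 while keeping the scaling
      // symmetric (see the reference in the class documentation).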
      do
      { // Iterate until the infinity norm of each row and column is approximately 1
        // Get the maximum value in each row and column
        Dr.setZero(); Dc.setZero();
        for (int k=0; k<m_matrix.outerSize(); ++k)
        {
          for (typename MatrixType::InnerIterator it(m_matrix, k); it; ++it)
          {
            if ( Dr(it.row()) < abs(it.value()) )
              Dr(it.row()) = abs(it.value());

            if ( Dc(it.col()) < abs(it.value()) )
              Dc(it.col()) = abs(it.value());
          }
        }
        for (int i = 0; i < m; ++i)
        {
          Dr(i) = std::sqrt(Dr(i));
          Dc(i) = std::sqrt(Dc(i));
        }
        // Save the scaling factors
        for (int i = 0; i < m; ++i)
        {
          m_left(i) /= Dr(i);
          m_right(i) /= Dc(i);
        }
        // Scale the rows and the columns of the matrix
        DrRes.setZero(); DcRes.setZero();
        for (int k=0; k<m_matrix.outerSize(); ++k)
        {
          for (typename MatrixType::InnerIterator it(m_matrix, k); it; ++it)
          {
            it.valueRef() = it.value()/( Dr(it.row()) * Dc(it.col()) );
            // Accumulate the norms of the row and column vectors
            if ( DrRes(it.row()) < abs(it.value()) )
              DrRes(it.row()) = abs(it.value());

            if ( DcRes(it.col()) < abs(it.value()) )
              DcRes(it.col()) = abs(it.value());
          }
        }
        DrRes.array() = (1-DrRes.array()).abs();
        EpsRow = DrRes.maxCoeff();
        DcRes.array() = (1-DcRes.array()).abs();
        EpsCol = DcRes.maxCoeff();
        its++;
      } while ( (EpsRow > m_tol || EpsCol > m_tol) && (its < m_maxits) );
      m_isInitialized = true;
    }
    /** Compute the left and right scaling vectors; the input matrix is
     * overwritten with its equilibrated version on output.
     *
     * \sa compute()
     */
    void computeRef (MatrixType& mat)
    {
      compute (mat);
      mat = m_matrix;
    }
    /** Get the vector to scale the rows of the matrix
     */
    VectorXd& LeftScaling()
    {
      return m_left;
    }

    /** Get the vector to scale the columns of the matrix
     */
    VectorXd& RightScaling()
    {
      return m_right;
    }

    /** Set the tolerance for the convergence of the iterative scaling algorithm
     */
    void setTolerance(double tol)
    {
      m_tol = tol;
    }

  protected:

    void init()
    {
      m_tol = 1e-10;
      m_maxits = 5;
      m_isInitialized = false;
    }

    MatrixType m_matrix;
    mutable ComputationInfo m_info;
    bool m_isInitialized;
    VectorXd m_left;  // Left scaling vector
    VectorXd m_right; // Right scaling vector
    double m_tol;
    int m_maxits;     // Maximum number of iterations allowed
};
} // end namespace Eigen

#endif // EIGEN_ITERSCALING_H