Commit 96279a72 authored by ralfh's avatar ralfh

Reformatting of comments

parent c8f59f62
# include <Eigen/Dense>
# include <Eigen/SVD>
# include <Eigen/QR>
#include <Eigen/Dense>
#include <Eigen/QR>
#include <Eigen/SVD>
using Eigen::VectorXd;
using Eigen::MatrixXd;
using Eigen::VectorXd;
/* SAM_LISTING_BEGIN_0 */
// Gaussian elimination with \com{partial pivoting}, \cref{cpp:gepiv}
void lu_solve(const MatrixXd& A, const VectorXd& b, VectorXd& x) {
void lu_solve(const MatrixXd &A, const VectorXd &b, VectorXd &x) {
x = A.lu().solve(b); // 'lu()' is short for 'partialPivLu()'
}
// Gaussian elimination with total pivoting
// Solves A*x = b; full pivoting is slower but more robust than partial.
void fullpivlu_solve(const MatrixXd &A, const VectorXd &b, VectorXd &x) {
  // fullPivLu(): LU decomposition with both row and column pivoting
  x = A.fullPivLu().solve(b);
}
// An elimination solver based on Householder transformations
// Solves A*x = b via QR decomposition; result is written into x.
void qr_solve(const MatrixXd &A, const VectorXd &b, VectorXd &x) {
  Eigen::HouseholderQR<MatrixXd> solver(A);  // see \cref{sec:QR}
  x = solver.solve(b);
}
// Use singular value decomposition (SVD)
void svd_solve(const MatrixXd& A, const VectorXd& b, VectorXd& x) {
void svd_solve(const MatrixXd &A, const VectorXd &b, VectorXd &x) {
// SVD based solvers, see \cref{sec:SVD}
x = A.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(b);
}
/* SAM_LISTING_END_0 */
# include <Eigen/Sparse>
# include <Eigen/SparseLU>
#include <Eigen/Sparse>
#include <Eigen/SparseLU>
// or for QR: \#include <Eigen/SparseQR>
# include <Eigen/IterativeLinearSolvers> // use only if A is SPD!
#include <Eigen/IterativeLinearSolvers> // use only if A is SPD!
/* SAM_LISTING_BEGIN_0 */
using SparseMatrix = Eigen::SparseMatrix<double>;
// Perform sparse elimination
void sparse_solve(const SparseMatrix& A, const VectorXd& b, VectorXd& x) {
void sparse_solve(const SparseMatrix &A, const VectorXd &b, VectorXd &x) {
Eigen::SparseLU<SparseMatrix> solver(A);
if (solver.info() != Eigen::Success) {
throw "Matrix factorization failed";
}
x = solver.solve(b);
}
/* SAM_LISTING_END_0 */
// Solve LSE, if the system matrix A is symmetric, positive definite,
// using conjugate gradient (CG) iterative solver
void sparseSpd_solve(const SparseMatrix& A, const VectorXd& b, VectorXd& x) {
void sparseSpd_solve(const SparseMatrix &A, const VectorXd &b, VectorXd &x) {
Eigen::ConjugateGradient<SparseMatrix> solver(A);
x = solver.solve(b);
}
......@@ -6,11 +6,11 @@
/// Do not remove this header.
//////////////////////////////////////////////////////////////////////////
# include <iostream>
# include "./denseSolve.hpp"
# include "./sparseSolve.hpp"
#include "./denseSolve.hpp"
#include "./sparseSolve.hpp"
#include <iostream>
void test(const MatrixXd& A, const VectorXd& b) {
void test(const MatrixXd &A, const VectorXd &b) {
VectorXd xLu, xQr, xSvd, xSp, xSpd;
lu_solve(A, b, xLu);
qr_solve(A, b, xQr);
......@@ -22,23 +22,22 @@ void test(const MatrixXd& A, const VectorXd& b) {
MatrixXd A_spd = upper.toDenseMatrix() + upper.toDenseMatrix().transpose();
// A_spd now is hermitian, lets make it strongly diagonally dominant
for (int i = 0; i < A.rows(); ++i) {
A_spd(i,i) = 2*(A_spd.row(i).cwiseAbs().sum());
A_spd(i, i) = 2 * (A_spd.row(i).cwiseAbs().sum());
}
sparseSpd_solve(A_spd.sparseView(), b, xSpd);
std::cout << "-- Error w/ size n = " << b.size() << " ------------\n"
<< "LU: " << (A*xLu - b).norm() << "\n"
<< "QR: " << (A*xQr - b).norm() << "\n"
<< "SVD: " << (A*xSvd - b).norm() << "\n"
<< "SPD: " << (A_spd*xSpd - b).norm() << "\n"
<< "Sparse: " << (A*xSp - b).norm() << "\n";
<< "LU: " << (A * xLu - b).norm() << "\n"
<< "QR: " << (A * xQr - b).norm() << "\n"
<< "SVD: " << (A * xSvd - b).norm() << "\n"
<< "SPD: " << (A_spd * xSpd - b).norm() << "\n"
<< "Sparse: " << (A * xSp - b).norm() << "\n";
}
int main() {
  // Run the solver comparison for problem sizes n = 5, 10, 20, 40
  for (int n = 5; n < 50; n *= 2) {
    test(Eigen::MatrixXd::Random(n, n), Eigen::VectorXd::Random(n));
  }
  return 0;
}
......@@ -6,25 +6,30 @@
/// Do not remove this header.
//////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <Eigen/Dense>
#include <iostream>
using namespace std;
using namespace Eigen;
int main () {
MatrixXd A(3,3);
A.setRandom();
/* SAM_LISTING_BEGIN_0 */
Eigen::PartialPivLU<MatrixXd> lu(A);
MatrixXd L = MatrixXd::Identity(3,3);
L.triangularView<StrictlyLower>() += lu.matrixLU();
MatrixXd U = lu.matrixLU().triangularView<Upper>();
MatrixXd P = lu.permutationP();
/* SAM_LISTING_END_0 */
std::cout << A << std::endl;
std::cout << L << std::endl;
std::cout << U << std::endl;
std::cout << P*L*U-A << std::endl;
return 0;
int main() {
MatrixXd A(3, 3);
A.setRandom();
/* SAM_LISTING_BEGIN_0 */
const Eigen::MatrixXd::Index n = A.cols();
assert(n == A.rows()); // ensure square matrix
Eigen::PartialPivLU<MatrixXd> lu(A);
// Normalized lower-triangule factor
MatrixXd L = MatrixXd::Identity(n, n);
L.triangularView<StrictlyLower>() += lu.matrixLU();
// Upper triangular factor
MatrixXd U = lu.matrixLU().triangularView<Upper>();
// Permutation matrix, see \cref{def:permmat}
MatrixXd P = lu.permutationP();
/* SAM_LISTING_END_0 */
std::cout << "A = " << A << std::endl;
std::cout << "L = " << L << std::endl;
std::cout << "U = " << U << std::endl;
// Verify correctness of decomposition
std::cout << "residual matrix = " << P * L * U - A << std::endl;
return 0;
}
......@@ -13,13 +13,13 @@
using namespace Eigen;
/* SAM_LISTING_BEGIN_0 */
// Efficient solver for an "arrow" LSE: diagonal entries d, rightmost column c,
// bottom row b^T, bottom-right entry alpha; right-hand side y (length n+1).
// Exploits the block structure instead of dense elimination.
VectorXd arrowsys_fast(const VectorXd &d, const VectorXd &c, const VectorXd &b,
                       const double alpha, const VectorXd &y) {
  int n = d.size();
  VectorXd z = c.array() / d.array();          // \Blue{$\Vz = \VD^{-1}\Vc$}
  VectorXd w = y.head(n).array() / d.array();  // \Blue{$\Vw = \VD^{-1}\Vy_1$}
  // Schur-complement step for the last unknown
  double xi = (y(n) - b.dot(w)) / (alpha - b.dot(z));
  return (VectorXd(n + 1) << w - xi * z, xi).finished();
}
/* SAM_LISTING_END_0 */
......@@ -13,13 +13,15 @@
using namespace Eigen;
/* SAM_LISTING_BEGIN_0 */
// Reference solver for an "arrow" LSE: assembles the full (n+1)x(n+1) dense
// matrix and applies Gaussian elimination. O(n^3) — for comparison only.
VectorXd arrowsys_slow(const VectorXd &d, const VectorXd &c, const VectorXd &b,
                       const double alpha, const VectorXd &y) {
  int n = d.size();
  MatrixXd A(n + 1, n + 1);  // Empty dense matrix
  A.setZero();               // Initialize with all zeros.
  A.diagonal().head(n) = d;  // Initialize matrix diagonal from a vector.
  A.col(n).head(n) = c;      // Set rightmost column $\cob{\Vc}$.
  A.row(n).head(n) = b;      // Set bottom row $\cob{\Vb^{\top}}$.
  A(n, n) = alpha;           // Set bottom-right entry $\cob{\alpha}$.
  return A.lu().solve(y);    // Gaussian elimination
}
/* SAM_LISTING_END_0 */
......@@ -11,8 +11,8 @@
#include <vector>
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <Eigen/Sparse>
using namespace Eigen;
......@@ -21,33 +21,43 @@ using namespace Eigen;
#include "arrowsys_sparse.hpp"
#include "timer.h"
/* SAM_LISTING_BEGIN_0 */
// Runtime comparison of four solvers for the arrow system over growing n.
// Returns a table with columns: n, min runtime of the dense-slow, dense-fast,
// SparseLU, and BiCGSTAB solvers, and the discrepancy between the two sparse
// solutions as a consistency check.
MatrixXd arrowsystiming() {
  std::vector<int> n = {8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
  int nruns = 3;  // repeat each measurement; keep the minimum
  MatrixXd times(n.size(), 6);
  for (std::size_t i = 0; i < n.size(); ++i) {
    Timer t1, t2, t3, t4;  // timer class
    double alpha = 2;
    VectorXd b = VectorXd::Ones(n[i], 1);
    VectorXd c = VectorXd::LinSpaced(n[i], 1, n[i]);
    VectorXd d = -b;
    // y(k) = (-1)^(k+1): alternating-sign right-hand side
    VectorXd y = VectorXd::Constant(n[i] + 1, -1)
                     .binaryExpr(VectorXd::LinSpaced(n[i] + 1, 1, n[i] + 1),
                                 [](double x, double y) { return pow(x, y); })
                     .array();
    VectorXd x1(n[i] + 1), x2(n[i] + 1), x3(n[i] + 1), x4(n[i] + 1);
    for (int j = 0; j < nruns; ++j) {
      t1.start();
      x1 = arrowsys_slow(d, c, b, alpha, y);
      t1.stop();  // BUG FIX: was 't2.stop()' — t1 was never stopped and t2
                  // was stopped before being started, corrupting both timings
      t2.start();
      x2 = arrowsys_fast(d, c, b, alpha, y);
      t2.stop();
      t3.start();
      x3 = arrowsys_sparse<SparseLU<SparseMatrix<double>>>(d, c, b, alpha, y);
      t3.stop();
      t4.start();
      x4 = arrowsys_sparse<BiCGSTAB<SparseMatrix<double>>>(d, c, b, alpha, y);
      t4.stop();
    }
    times(i, 0) = n[i];
    times(i, 1) = t1.min();
    times(i, 2) = t2.min();
    times(i, 3) = t3.min();
    times(i, 4) = t4.min();
    times(i, 5) = (x4 - x3).norm();  // agreement of the two sparse solvers
  }
  return times;
}
/* SAM_LISTING_END_0 */
......@@ -16,12 +16,12 @@ using namespace Eigen;
//! in-situ Gaussian elimination, no pivoting
//! right hand side in rightmost column of \Blue{$\VA$}
//! back substitution is not done in this code!
void blockgs(MatrixXd &A){
void blockgs(Eigen::MatrixXd &A){
int n = A.rows();
for(int i = 1; i < n; ++i){
// \Red{rank-1 modification} of \Blue{$\VC$}
A.bottomRightCorner(n-i,n-i+1) -= A.col(i-1).tail(n-i) * A.row(i-1).tail(n-i+1) / A(i-1,i-1);
A.col(i-1).tail(n-i).setZero();// set $\Vd=0$
// \Red{rank-1 modification} of \Blue{$\VC$}
A.bottomRightCorner(n-i,n-i+1) -= A.col(i-1).tail(n-i) * A.row(i-1).tail(n-i+1) / A(i-1,i-1);
A.col(i - 1).tail(n - i).setZero(); // set $\Vd=0$ \Label[line]{bgscpp:1}
}
}
/* SAM_LISTING_END_0 */
......@@ -6,8 +6,8 @@
/// Do not remove this header.
//////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <cmath>
#include <iostream>
#include <limits>
#include <Eigen/Dense>
......@@ -16,35 +16,42 @@
using namespace std;
using namespace Eigen;
int main () {
/* SAM_LISTING_BEGIN_0 */
int n = 10;
VectorXd u = VectorXd::LinSpaced(n,1,n) / 3.0;
VectorXd v = u.cwiseInverse().array() * VectorXd::LinSpaced(n,1,n).unaryExpr( [](double x){return pow(-1,x);} ).array();
VectorXd x = VectorXd::Ones(n);
double nx = x.lpNorm<Infinity>();
VectorXd expo = VectorXd::LinSpaced(19,-5,-14 );
Eigen::MatrixXd res(expo.size(),4);
for(int i = 0; i <= expo.size(); ++i){
double epsilon = std::pow(10, expo(i));
MatrixXd A = u*v.transpose() + epsilon * MatrixXd::Identity(n,n);
VectorXd b = A * x;
double nb = b.lpNorm<Infinity>();
VectorXd xt = A.lu().solve(b);
VectorXd r = b - A*xt;
res(i,0) = epsilon; res(i,1) = (x-xt).lpNorm<Infinity>()/nx;
res(i,2) = r.lpNorm<Infinity>()/nb;
// L-infinity condition number
res(i,3) = A.inverse().cwiseAbs().rowwise().sum().maxCoeff() * A.cwiseAbs().rowwise().sum().maxCoeff();
}
/* SAM_LISTING_END_0 */
// Plotting
mgl::Figure fig1;
fig1.setlog(true, true);
fig1.plot(res.col(0),res.col(1), " +r-").label("relative error");
fig1.plot(res.col(0),res.col(2), " *b-").label("relative residual");
fig1.xlabel("\\epsilon");// not nice
fig1.legend(0.05,0.5);
fig1.save("gausstab");
return 0;
int main() {
/* SAM_LISTING_BEGIN_0 */
int n = 10;
// Initialize vectors $\cob{\Vu}$ and $\cob{\Vv}$
VectorXd u = VectorXd::LinSpaced(n, 1, n) / 3.0;
VectorXd v = u.cwiseInverse().array() *
VectorXd::LinSpaced(n, 1, n)
.unaryExpr([](double x) { return pow(-1, x); })
.array();
VectorXd x = VectorXd::Ones(n);
double nx = x.lpNorm<Infinity>();
VectorXd expo = VectorXd::LinSpaced(19, -5, -14);
Eigen::MatrixXd res(expo.size(), 4);
for (int i = 0; i <= expo.size(); ++i) {
// Build coefficient matrix $\cob{\VA}$
double epsilon = std::pow(10, expo(i));
MatrixXd A = u * v.transpose() + epsilon * MatrixXd::Identity(n, n);
VectorXd b = A * x; // right-hand-side vector
double nb = b.lpNorm<Infinity>(); // maximum norm
VectorXd xt = A.lu().solve(b); // Gaussian elimination
VectorXd r = b - A * xt; // \com{residual vector}
res(i, 0) = epsilon;
res(i, 1) = (x - xt).lpNorm<Infinity>() / nx;
res(i, 2) = r.lpNorm<Infinity>() / nb;
// L-infinity condition number
res(i, 3) = A.inverse().cwiseAbs().rowwise().sum().maxCoeff() *
A.cwiseAbs().rowwise().sum().maxCoeff();
}
/* SAM_LISTING_END_0 */
// Plotting
mgl::Figure fig1;
fig1.setlog(true, true);
fig1.plot(res.col(0), res.col(1), " +r-").label("relative error");
fig1.plot(res.col(0), res.col(2), " *b-").label("relative residual");
fig1.xlabel("\\epsilon"); // not nice
fig1.legend(0.05, 0.5);
fig1.save("gausstab");
return 0;
}
......@@ -16,17 +16,20 @@ using namespace Eigen;
/* SAM_LISTING_BEGIN_0 */
//! Algorithm of Crout: LU-factorization of \Blue{$\VA\in\bbK^{n,n}$}
void lufak(const MatrixXd& A, MatrixXd& L, MatrixXd& U){
int n = A.rows(); assert( n == A.cols());
L = MatrixXd::Identity(n,n);
U = MatrixXd::Zero(n,n);
for(int k = 0; k < n; ++k){
// Compute row of \Blue{$\VU$}
for(int j = k; j < n; ++j)
U(k,j) = A(k,j) - (L.block(k,0,1,k) * U.block(0,j,k,1))(0,0);
// Compute column of \Blue{$\VL$}
for(int i = k+1; i < n; ++i)
L(i,k) = (A(i,k) - (L.block(i,0,1,k) * U.block(0,k,k,1))(0,0))/U(k,k);
std::pair<MatrixXd, MatrixXd> lufak(const MatrixXd &A) {
int n = A.rows();
assert(n == A.cols()); // Ensure matrix is square
MatrixXd L{MatrixXd::Identity(n, n)};
MatrixXd U{MatrixXd::Zero(n, n)};
for (int k = 0; k < n; ++k) {
// Compute row of \Blue{$\VU$}
for (int j = k; j < n; ++j)
U(k, j) = A(k, j) - (L.block(k, 0, 1, k) * U.block(0, j, k, 1))(0, 0);
// Compute column of \Blue{$\VL$}
for (int i = k + 1; i < n; ++i)
L(i, k) = (A(i, k) - (L.block(i, 0, 1, k) * U.block(0, k, k, 1))(0, 0)) /
U(k, k);
}
return { L, U };
}
/* SAM_LISTING_END_0 */
......@@ -6,20 +6,16 @@
/// Do not remove this header.
//////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <Eigen/Dense>
#include <iostream>
#include "lufak.hpp"
int main() {
  int n = 3;
  Eigen::MatrixXd A(n, n);
  A << 1, 0, 2, -1, 4, 1, -2, 1, 2;
  // Factorize A = L*U by Crout's algorithm (no pivoting)
  auto [L, U] = lufak(A);
  std::cout << "L=\n" << L << std::endl << "U=\n" << U << std::endl;
  return 0;
}
......@@ -16,15 +16,17 @@ using namespace Eigen;
/* SAM_LISTING_BEGIN_0 */
//! Multiplication of normalized lower/upper triangular matrices
void lumult(const MatrixXd& L, const MatrixXd& U, MatrixXd& A){
Eigen::MatrixXd lumult(const MatrixXd &L, const MatrixXd &U) {
int n = L.rows();
assert( n == L.cols() && n == U.cols() && n == U.rows() );
A = MatrixXd::Zero(n,n);
for(int k = 0; k < n; ++k){
for(int j = k; j < n; ++j)
A(k,j) = U(k,j) + (L.block(k,0,1,k) * U.block(0,j,k,1))(0,0);
for(int i = k+1; i < n; ++i)
A(i,k) = (L.block(i,0,1,k) * U.block(0,k,k,1))(0,0) + L(i,k)*U(k,k);
assert(n == L.cols() && n == U.cols() && n == U.rows());
Eigen::MatrixXd A{MatrixXd::Zero(n, n)};
for (int k = 0; k < n; ++k) {
for (int j = k; j < n; ++j)
A(k, j) = U(k, j) + (L.block(k, 0, 1, k) * U.block(0, j, k, 1))(0, 0);
for (int i = k + 1; i < n; ++i)
A(i, k) =
(L.block(i, 0, 1, k) * U.block(0, k, k, 1))(0, 0) + L(i, k) * U(k, k);
}
return A;
}
/* SAM_LISTING_END_0 */
......@@ -12,26 +12,21 @@
#include "lumult.hpp"
int main() {
  int n = 3;
  Eigen::MatrixXd L(n, n);
  Eigen::MatrixXd U(n, n);
  L << 1, 0, 0, -1, 1, 0, -2, 0.25, 1;
  U << 1, 0, 2, 0, 4, 3, 0, 0, 5.25;
  Eigen::MatrixXd A = lumult(L, U);
  std::cout << "L=\n" << L << std::endl << "U=\n" << U << std::endl;
  std::cout << "A=\n" << A << std::endl;
  // Compare against the known product; the printed norm should be ~0
  std::cout
      << "|A-L*U| = "
      << (A - (Eigen::MatrixXd(n, n) << 1, 0, 2, -1, 4, 1, -2, 1, 2).finished())
             .norm()
      << std::endl;
  return 0;
}
......@@ -16,24 +16,25 @@
using namespace std;
using namespace Eigen;
int main() {
  /* SAM_LISTING_BEGIN_0 */
  double epsilon = 5.0e-17;
  MatrixXd A(2, 2), D(2, 2);
  A << epsilon, 1.0, 1.0, 1.0;
  D << 1. / epsilon, 0.0, 0.0, 1.0;
  VectorXd b(2), x2(2);
  b << 1.0, 2.0;
  // Row scaling: multiply the first equation by 1/epsilon
  A = D * A;
  b = D * b;
  VectorXd x1 = A.fullPivLu().solve(b);
  gausselimsolve(A, b, x2);  // see Code~\ref{cpp:gausselim}
  auto [L, U] = lufak(A);    // see Code~\ref{cpp:lufak}
  // Forward/backward substitution with the computed factors
  VectorXd z = L.lu().solve(b);
  VectorXd x3 = U.lu().solve(z);
  cout << "x1 = \n"
       << x1 << "\nx2 = \n"
       << x2 << "\nx3 = \n"
       << x3 << std::endl;
  /* SAM_LISTING_END_0 */
  return 0;
}
......@@ -16,20 +16,21 @@
using namespace std;
using namespace Eigen;
int main() {
  /* SAM_LISTING_BEGIN_0 */
  // Nearly singular pivot: demonstrates instability of elimination
  // without pivoting for a tiny leading entry
  MatrixXd A(2, 2);
  A << 5.0e-17, 1.0, 1.0, 1.0;
  VectorXd b(2), x2(2);
  b << 1.0, 2.0;
  VectorXd x1 = A.fullPivLu().solve(b);
  gausselimsolve(A, b, x2);  // see Code~\ref{cpp:gausselim}
  auto [L, U] = lufak(A);    // see Code~\ref{cpp:lufak}
  // Forward/backward substitution with the computed factors
  VectorXd z = L.lu().solve(b);
  VectorXd x3 = U.lu().solve(z);
  cout << "x1 = \n"
       << x1 << "\nx2 = \n"
       << x2 << "\nx3 = \n"
       << x3 << std::endl;
  /* SAM_LISTING_END_0 */
  return 0;
}
......@@ -9,8 +9,8 @@
#pragma once
#include <iostream>
#include <cassert>
#include <iostream>
#include <limits>
#include <Eigen/Dense>
......@@ -20,23 +20,30 @@ using namespace Eigen;
/* SAM_LISTING_BEGIN_0 */
// Solving rank-1 updated LSE based on \eqref{eq:smwx}
// lu: LU decomposition of A; u, v: rank-1 update vectors; b: right-hand side.
// Throws std::runtime_error when the update makes the system nearly singular.
template <class LUDec>
VectorXd smw(const LUDec &lu, const VectorXd &u, const VectorXd &v,
             const VectorXd &b) {
  VectorXd z = lu.solve(b);       // $\cob{\Vz=\VA^{-1}\Vb}$ \Label[line]{smw:1}
  VectorXd w = lu.solve(u);       // $\cob{\Vw=\VA^{-1}\Vu}$ \Label[line]{smw:2}
  double alpha = 1.0 + v.dot(w);  // Compute denominator of \eqref{eq:smwx}
  if (std::abs(alpha) < std::numeric_limits<double>::epsilon())
    throw std::runtime_error("A nearly singular");
  else
    return (z - w * v.dot(z) / alpha);  // see \eqref{eq:smwx}
}
/* SAM_LISTING_END_0 */
// Old version
/* SAM_LISTING_BEGIN_1 */
VectorXd smw_old(const MatrixXd &L, const MatrixXd &U, const MatrixXd &u, const VectorXd &v, const VectorXd &b){
VectorXd z = U.triangularView<Upper>().solve( L.triangularView<Lower>().solve(b) );//\label{smw:1}
VectorXd w = U.triangularView<Upper>().solve( L.triangularView<Lower>().solve(u) );