Commit c8f59f62 authored by ralfh's avatar ralfh

Revision of comments

parent b3ed61d5
......@@ -8,8 +8,8 @@
#pragma once
#include <iostream>
#include <cmath>
#include <iostream>
#include <Eigen/Dense>
......@@ -23,30 +23,32 @@ using namespace Eigen;
/* SAM_LISTING_BEGIN_0 */
//! Eigen Function for testing the computation of the zeros of a parabola
void compzeros(){
void compzeros() {
int n = 100;
MatrixXd res(n,4);
VectorXd gamma = VectorXd::LinSpaced(n, 2,992);
for(int i = 0; i < n; ++i){
double alpha = -(gamma(i) + 1./gamma(i));
MatrixXd res(n, 4);
VectorXd gamma = VectorXd::LinSpaced(n, 2, 992);
for (int i = 0; i < n; ++i) {
double alpha = -(gamma(i) + 1. / gamma(i));
double beta = 1.;
Vector2d z1 = zerosquadpol(alpha, beta);
Vector2d z1 = zerosquadpol(alpha, beta);
Vector2d z2 = zerosquadpolstab(alpha, beta);
double ztrue = 1./gamma(i), z2true = gamma(i);
res(i,0) = gamma(i);
res(i,1) = std::abs((z1(0)-ztrue)/ztrue);
res(i,2) = std::abs((z2(0)-ztrue)/ztrue);
res(i,3) = std::abs((z1(1)-z2true)/z2true);
double ztrue = 1. / gamma(i), z2true = gamma(i);
res(i, 0) = gamma(i);
res(i, 1) = std::abs((z1(0) - ztrue) / ztrue);
res(i, 2) = std::abs((z2(0) - ztrue) / ztrue);
res(i, 3) = std::abs((z1(1) - z2true) / z2true);
}
// Graphical output of relative error of roots computed by unstable implementation
/* SAM_LISTING_END_0 */
// Graphical output of relative error of roots computed by unstable
// implementation
mgl::Figure fig1;
fig1.setFontSize(3);
fig1.title("Roots of a parabola computed in an unstable manner");
fig1.plot(res.col(0),res.col(1), " +r").label("small root");
fig1.plot(res.col(0),res.col(3), " *b").label("large root");
fig1.plot(res.col(0), res.col(1), " +r").label("small root");
fig1.plot(res.col(0), res.col(3), " *b").label("large root");
fig1.xlabel("\\gamma");
fig1.ylabel("relative errors in \\xi_1, \\xi_2");
fig1.legend(0.05,0.95);
fig1.legend(0.05, 0.95);
fig1.save("zqperrinstab");
// Graphical output of relative errors (comparison), small roots
mgl::Figure fig2;
......@@ -55,7 +57,6 @@ void compzeros(){
fig2.plot(res.col(0), res.col(3), " *m").label("stable");
fig2.xlabel("\\gamma");
fig2.ylabel("relative errors in \\xi_1");
fig2.legend(0.05,0.95);
fig2.legend(0.05, 0.95);
fig2.save("zqperr");
}
/* SAM_LISTING_END_0 */
......@@ -25,16 +25,16 @@ void gausselimsolve(const MatrixXd &A, const VectorXd& b,
for(int i = 0; i < n-1; ++i) {
double pivot = Ab(i,i);
for(int k = i+1; k < n; ++k) {
double fac = Ab(k,i)/pivot;
double fac = Ab(k, i) / pivot; // the multiplier \Label[line]{cppgse:fac}
Ab.block(k,i+1,1,n-i)-= fac * Ab.block(i,i+1,1,n-i); //\Label[line]{cppgse:vec}
}
}
// \com{\Hyperlink{RUECKSUBST}{Back substitution}} (\textit{cf.} step \ding{193} in Ex.~\ref{ex:GE})
// {\Hyperlink{RUECKSUBST}{\com{Back substitution}}} (\textit{cf.} step \ding{193} in Ex.~\ref{ex:GE})
Ab(n-1,n) = Ab(n-1,n) / Ab(n-1,n-1);
for(int i = n-2; i >= 0; --i) {
for(int l = i+1; l < n; ++l) Ab(i,n) -= Ab(l,n)*Ab(i,l);
Ab(i,n) /= Ab(i,i);
}
x = Ab.rightCols(1); // \Label[line]{cppgse:last}
x = Ab.rightCols(1); // Solution in rightmost column! \Label[line]{cppgse:last}
}
/* SAM_LISTING_END_0 */
......@@ -6,8 +6,8 @@
/// Do not remove this header.
//////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <Eigen/Dense>
#include <iostream>
/**
* \brief Given a matrix $\VA$ with linearly independent columns, returns
......@@ -18,22 +18,24 @@
* \return Matrix with ONB of $span(a_1, \cdots, a_n)$
*/
/* SAM_LISTING_BEGIN_0 */
template <class Matrix> Matrix gramschmidt(const Matrix &A) {
  Matrix Q = A;
  // First vector just gets normalized, Line 1 of \eqref{GS}
  Q.col(0).normalize();
  for (unsigned int j = 1; j < A.cols(); ++j) {
    // Replace inner loop over each previous vector in Q with fast
    // matrix-vector multiplication (Lines 4, 5 of \eqref{GS}).
    // adjoint() == transpose() for real matrices, and stays correct
    // for complex-valued Matrix types.
    Q.col(j) -= Q.leftCols(j) *
                (Q.leftCols(j).adjoint() * A.col(j)); // \Label{gscpp:op}
    // Normalize vector, if possible.
    // Otherwise columns of A must have been linearly dependent
    if (Q.col(j).norm() <= 10e-9 * A.col(j).norm()) { // \Label{gscpp:1}
      std::cerr << "Gram-Schmidt failed: A has lin. dep columns." << std::endl;
      break;
    } else {
      Q.col(j).normalize();
    } // Line 7 of \eqref{GS}
  }
  return Q;
}
/* SAM_LISTING_END_0 */
......@@ -12,7 +12,7 @@
#include "gsroundoff.hpp"
int main () {
unsigned int n = 10;
const int n = 10;
Eigen::MatrixXd H(n,n);
// Initialize Hilbert matrix
for(int i = 1; i <=n; ++i){
......
......@@ -15,31 +15,37 @@
using namespace Eigen;
//! Compares the 2-norm and max-norm condition numbers of a 2x2 matrix
//! whose columns are unit vectors enclosing the angle phi.
void linesec() {
  /* SAM_LISTING_BEGIN_0 */
  VectorXd phi = VectorXd::LinSpaced(50, M_PI / 200, M_PI / 2);
  MatrixXd res(phi.size(), 3);  // columns: angle, L2 cond., Linf cond.
  Matrix2d A;
  A(0, 0) = 1;
  A(1, 0) = 0;
  for (int i = 0; i < phi.size(); ++i) {
    A(0, 1) = std::cos(phi(i));
    A(1, 1) = std::sin(phi(i));
    // L2 condition number is the quotient of the maximal
    // and minimal singular value of A
    JacobiSVD<MatrixXd> svd(A);
    double C2 = svd.singularValues()(0) / // \Label[line]{cmc:1}
                svd.singularValues()(svd.singularValues().size() - 1);
    // L-infinity condition number
    double Cinf =
        A.inverse().cwiseAbs().rowwise().sum().maxCoeff() *
        A.cwiseAbs().rowwise().sum().maxCoeff(); // \Label[line]{cmc:2}
    res(i, 0) = phi(i);
    res(i, 1) = C2;
    res(i, 2) = Cinf;
  }
  /* SAM_LISTING_END_0 */
  // Plot
  mgl::Figure fig;
  fig.plot(res.col(0), res.col(1), "r").label("2-norm");
  fig.plot(res.col(0), res.col(2), "=b").label("max-norm");
  fig.xlabel("angle of n_1, n_2");
  fig.ylabel("condition numbers");
  fig.legend(1, 1);
  fig.save("linsec");
}
......@@ -8,9 +8,9 @@
#pragma once
#include <iostream>
#include <iomanip>
#include <cmath>
#include <iomanip>
#include <iostream>
#include <Eigen/Dense>
#include <figure/figure.hpp>
......@@ -19,55 +19,63 @@
using namespace Eigen;
/* SAM_LISTING_BEGIN_0 */
//! script for timing different implementations of matrix multiplications
//! no BLAS is used in Eigen!
void mmtiming(){
/* SAM_LISTING_BEGIN_0 */
void mmtiming() {
int nruns = 3, minExp = 2, maxExp = 10;
MatrixXd timings(maxExp-minExp+1,5);
for(int p = 0; p <= maxExp-minExp; ++p){
Timer t1, t2, t3, t4; // timer class
int n = std::pow(2, minExp + p);
MatrixXd A = MatrixXd::Random(n,n);
MatrixXd B = MatrixXd::Random(n,n);
MatrixXd C = MatrixXd::Zero(n,n);
for(int q = 0; q < nruns; ++q){
// Loop based implementation no template magic
t1.start();
for(int i = 0; i < n; ++i)
for(int j = 0; j < n; ++j)
for(int k = 0; k < n; ++k)
C(i,j) += A(i,k)*B(k,j);
t1.stop();
// dot product based implementation little template magic
t2.start();
for(int i = 0; i < n; ++i)
for(int j = 0; j < n; ++j)
C(i,j) = A.row(i).dot( B.col(j) );
t2.stop();
// matrix-vector based implementation middle template magic
t3.start();
for(int j = 0; j < n; ++j)
C.col(j) = A * B.col(j);
t3.stop();
// Eigen matrix multiplication template magic optimized
t4.start();
C = A * B;
t4.stop();
}
timings(p,0)=n; timings(p,1)=t1.min(); timings(p,2)=t2.min();
timings(p,3)=t3.min(); timings(p,4)=t4.min();
MatrixXd timings(maxExp - minExp + 1, 5);
for (int p = 0; p <= maxExp - minExp; ++p) {
Timer t1, t2, t3, t4; // timer class
int n = std::pow(2, minExp + p);
MatrixXd A = MatrixXd::Random(n, n);
MatrixXd B = MatrixXd::Random(n, n);
MatrixXd C = MatrixXd::Zero(n, n);
for (int q = 0; q < nruns; ++q) {
// Loop based implementation no template magic
t1.start();
for (int i = 0; i < n; ++i)
for (int j = 0; j < n; ++j)
for (int k = 0; k < n; ++k)
C(i, j) += A(i, k) * B(k, j);
t1.stop();
// dot product based implementation little template magic
t2.start();
for (int i = 0; i < n; ++i)
for (int j = 0; j < n; ++j)
C(i, j) = A.row(i).dot(B.col(j));
t2.stop();
// matrix-vector based implementation middle template magic
t3.start();
for (int j = 0; j < n; ++j)
C.col(j) = A * B.col(j);
t3.stop();
// Eigen matrix multiplication template magic optimized
t4.start();
C = A * B;
t4.stop();
}
timings(p, 0) = n;
timings(p, 1) = t1.min();
timings(p, 2) = t2.min();
timings(p, 3) = t3.min();
timings(p, 4) = t4.min();
}
std::cout << std::scientific << std::setprecision(3) << timings << std::endl;
//Plotting
/* SAM_LISTING_END_0 */
// Plotting
mgl::Figure fig;
fig.setFontSize(4);
fig.setlog(true, true);
fig.plot(timings.col(0),timings.col(1), " +r-").label("loop implementation");
fig.plot(timings.col(0),timings.col(2)," *m-").label("dot-product implementation");
fig.plot(timings.col(0),timings.col(3)," ^b-").label("matrix-vector implementation");
fig.plot(timings.col(0),timings.col(4)," ok-").label("Eigen matrix product");
fig.xlabel("matrix size n"); fig.ylabel("time [s]");
fig.legend(0.05,0.95); fig.save("mmtiming");
fig.plot(timings.col(0), timings.col(1), " +r-").label("loop implementation");
fig.plot(timings.col(0), timings.col(2), " *m-")
.label("dot-product implementation");
fig.plot(timings.col(0), timings.col(3), " ^b-")
.label("matrix-vector implementation");
fig.plot(timings.col(0), timings.col(4), " ok-")
.label("Eigen matrix product");
fig.xlabel("matrix size n");
fig.ylabel("time [s]");
fig.legend(0.05, 0.95);
fig.save("mmtiming");
}
/* SAM_LISTING_END_0 */
......@@ -20,14 +20,15 @@ using namespace Eigen;
//! formula $\xi_{1,2} = \frac{1}{2}(-\alpha\pm\sqrt{\alpha^2-4\beta})$. However
//! this implementation is \emph{vulnerable to round-off}! The zeros are
//! returned in a column vector
Vector2d zerosquadpol(double alpha, double beta) {
  Vector2d z;
  double D = std::pow(alpha, 2) - 4 * beta; // discriminant
  if (D < 0)
    throw "no real zeros";
  else {
    // The famous discriminant formula; cancellation-prone when the
    // roots differ strongly in magnitude (see zerosquadpolstab)
    double wD = std::sqrt(D);
    z << (-alpha - wD) / 2, (-alpha + wD) / 2; // \Label[line]{zsq:1}
  }
  return z;
}
......
......@@ -17,25 +17,25 @@ using namespace Eigen;
/* SAM_LISTING_BEGIN_0 */
//! \cpp function computing the zeros of a quadratic polynomial
//! $\xi\to \xi^2+\alpha\xi+\beta$ by means of the familiar discriminant
//! formula $\xi_{1,2} = \frac{1}{2}(-\alpha\pm\sqrt{\alpha^2-4\beta})$.
//! formula $\xi_{1,2} = \frac{1}{2}(-\alpha\pm\sqrt{\alpha^2-4\beta})$.
//! This is a stable implementation based on Vieta's theorem.
//! The zeros are returned in a column vector
VectorXd zerosquadpolstab(double alpha, double beta){
VectorXd zerosquadpolstab(double alpha, double beta) {
Vector2d z(2);
double D = std::pow(alpha,2) -4*beta; // discriminant
if(D < 0) throw "no real zeros";
else{
double D = std::pow(alpha, 2) - 4 * beta; // discriminant
if (D < 0)
throw "no real zeros";
else {
double wD = std::sqrt(D);
// Use discriminant formula only for zero far away from $0$
// Use discriminant formula only for zero far away from $0$
// in order to \com{avoid cancellation}. For the other zero
// use Vieta's formula.
if(alpha >= 0){
double t = 0.5*(-alpha-wD); // \Label[line]{zqs:11}
z << t, beta/t;
}
else{
double t = 0.5*(-alpha+wD); // \Label[line]{zqs:12}
z << beta/t, t;
// use Vieta's formula.
if (alpha >= 0) {
double t = 0.5 * (-alpha - wD); // \Label[line]{zqs:11}
z << t, beta / t;
} else {
double t = 0.5 * (-alpha + wD); // \Label[line]{zqs:12}
z << beta / t, t;
}
}
return z;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment