Commit b9817f75 authored by ralfh's avatar ralfh

Reformatting of code

parent 023c6ad2
#include <iostream>
#include <iomanip>
#include <iostream>
#include <Eigen/Dense>
......@@ -17,20 +17,15 @@ using namespace Eigen;
/* SAM_LISTING_BEGIN_0 */
/**
 * Computes y = A*A*x where A is the n x n "arrow matrix"
 *     A = [ diag(d(0..n-2))   a(0..n-2) ]
 *         [ a(0..n-2)^T       d(n-1)    ]
 * by explicitly assembling the dense matrix A. This is the naive,
 * inefficient reference implementation (dense products: O(n^3) work).
 *
 * @param[in]  d  Diagonal entries of A (size n)
 * @param[in]  a  Last row/column of A (size n; first n-1 entries used)
 * @param[in]  x  Vector to multiply (size n)
 * @param[out] y  Result vector y = A*A*x
 */
void arrow_matrix_2_times_x(const VectorXd &d, const VectorXd &a,
                            const VectorXd &x, VectorXd &y) {
  assert(d.size() == a.size() && a.size() == x.size() &&
         "Vector size must be the same!");
  const int n = d.size();
  // Assemble A from its four blocks: diagonal part, arrow column,
  // arrow row, and the bottom-right corner entry.
  VectorXd d_head = d.head(n - 1);
  VectorXd a_head = a.head(n - 1);
  MatrixXd d_diag = d_head.asDiagonal();
  MatrixXd A(n, n);
  A << d_diag, a_head, a_head.transpose(), d(n - 1);
  y = A * A * x;
}
/* SAM_LISTING_END_0 */
......@@ -42,15 +37,13 @@ void arrow_matrix_2_times_x(const VectorXd &d, const VectorXd &a,
* @param[out] y The vector y = A*A*x
*/
/* SAM_LISTING_BEGIN_1 */
void efficient_arrow_matrix_2_times_x(const VectorXd &d,
const VectorXd &a,
const VectorXd &x,
VectorXd &y) {
assert(d.size() == a.size() && a.size() == x.size() &&
void efficient_arrow_matrix_2_times_x(const VectorXd &d, const VectorXd &a,
const VectorXd &x, VectorXd &y) {
assert(d.size() == a.size() && a.size() == x.size() &&
"Vector size must be the same!");
int n = d.size();
int n = d.size();
// TODO: Implement an efficient version of arrow\_matrix\_2\_times\_x
// TODO: Implement an efficient version of arrow\_matrix\_2\_times\_x
}
/* SAM_LISTING_END_1 */
......@@ -58,40 +51,38 @@ void efficient_arrow_matrix_2_times_x(const VectorXd &d,
* Repeat tests 10 times, and output the minimal runtime
* amongst all times. Test both the inefficient and the efficient
* versions.
*/
*/
// Benchmarks the naive and the efficient arrow-matrix product,
// repeating each measurement and reporting the minimal runtime.
void runtime_arrow_matrix() {
  // TODO: your code here, time the codes
}
/**
 * Driver: checks that the efficient arrow-matrix implementation agrees
 * with the naive one, then runs the timing comparison.
 * Returns 0 on success, non-zero if the two results differ too much.
 */
int main(void) {
  // Test vectors (note: the functions take the diagonal d FIRST,
  // then the arrow vector a)
  VectorXd d(5);
  d << 1., 3., 4., 5., 6.;
  VectorXd a(5);
  a << 1., 2., 3., 4., 5.;
  VectorXd x(5);
  x << -5., 4., 6., -8., 5.;
  VectorXd yi;
  // Run both functions with identical arguments
  arrow_matrix_2_times_x(d, a, x, yi);
  VectorXd ye(yi.size());
  efficient_arrow_matrix_2_times_x(d, a, x, ye);
  // Compute discrepancy between the two implementations
  double err = (yi - ye).norm();
  // Output error
  std::cout << "--> Correctness test." << std::endl;
  std::cout << "Error: " << err << std::endl;
  // Print out runtime
  std::cout << "--> Runtime test." << std::endl;
  runtime_arrow_matrix();
  // Final check: fail (non-zero exit status) if the error is too big.
  // Exact equality cannot be expected: the two algorithms perform the
  // floating-point operations in a different order.
  const double eps = 1e-10;
  return err < eps ? 0 : 1;
}
......@@ -11,55 +11,46 @@ using namespace Eigen;
* \return Matrix with ONB of $span(a_1, \cdots, a_n)$ as columns
*/
/* SAM_LISTING_BEGIN_1 */
/**
 * Orthonormalizes the columns of A using the (classical) Gram-Schmidt
 * procedure.
 *
 * @param[in] A  Matrix whose columns span the subspace of interest
 * @return Matrix whose columns form an ONB of span(a_1, ..., a_n);
 *         if A has (almost) linearly dependent columns the procedure
 *         stops early and the remaining columns are left unnormalized.
 */
MatrixXd gram_schmidt(const MatrixXd &A) {
  // Q starts as a copy of A and is orthonormalized column by column
  MatrixXd Q(A);
  // The first vector just gets normalized
  Q.col(0).normalize();
  // Iterate over all other columns of A
  for (Eigen::Index j = 1; j < A.cols(); ++j) {
    // Subtract the components along the already-orthonormalized
    // columns (see Eigen documentation for col and leftCols)
    Q.col(j) -= Q.leftCols(j) * (Q.leftCols(j).transpose() * A.col(j));
    // Normalize vector, if possible
    // (otherwise it means columns of $\mathbf{A}$ are
    // almost linearly dependant)
    double eps = std::numeric_limits<double>::denorm_min();
    if (Q.col(j).norm() <= eps * A.col(j).norm()) {
      std::cerr << "Gram-Schmidt failed because "
                << "A has (almost) linear dependant "
                << "columns. Bye." << std::endl;
      break;
    } else {
      Q.col(j).normalize();
    }
  }
  return Q;  // Efficient due to return-value optimization
}
/* SAM_LISTING_END_1 */
/* SAM_LISTING_BEGIN_2 */
/**
 * Orthonormality test for gram_schmidt: measures how far Q^T*Q is from
 * the identity for a random input matrix.
 * Returns 0 on success, non-zero if Q is not (numerically) orthonormal.
 */
int main(void) {
  // Orthonormality test
  const unsigned int n = 9;
  MatrixXd A = MatrixXd::Random(n, n);
  MatrixXd Q = gram_schmidt(A);
  // Compute how far is $\mathbf{Q}^\top*\mathbf{Q}$ from the identity,
  // i.e. "How far is Q from being orthonormal?"
  double err = (Q.transpose() * Q - MatrixXd::Identity(n, n)).norm();
  // Error has to be small, but not zero (why?)
  std::cout << "Error is: " << err << std::endl;
  // Fail (non-zero exit status) if the error is too big; rounding makes
  // a tiny but non-zero error unavoidable, so compare against a small
  // tolerance rather than zero.
  const double eps = 1e-9;
  return err < eps ? 0 : 1;
}
/* SAM_LISTING_END_2 */
#include <iostream>
#include <iomanip>
#include <iostream>
#include <vector>
......@@ -16,18 +16,17 @@ using namespace Eigen;
* \param[out] C Kronecker product of A and B of dim $n^2 \times n^2$
*/
/* SAM_LISTING_BEGIN_1 */
void kron(const MatrixXd & A, const MatrixXd & B,
MatrixXd & C) {
// Allocate enough space for the matrix
C = MatrixXd(A.rows()*B.rows(), A.cols()*B.cols());
for(unsigned int i = 0; i < A.rows(); ++i) {
for(unsigned int j = 0; j < A.cols(); ++j) {
// We use eigen block operations to set the values of
// each $n \times n$ block.
C.block(i*B.rows(),j*B.cols(), B.rows(), B.cols())
= A(i,j)*B; // $\in \mathbb{R}^{(n \times n)}$
}
void kron(const MatrixXd &A, const MatrixXd &B, MatrixXd &C) {
// Allocate enough space for the matrix
C = MatrixXd(A.rows() * B.rows(), A.cols() * B.cols());
for (unsigned int i = 0; i < A.rows(); ++i) {
for (unsigned int j = 0; j < A.cols(); ++j) {
// We use eigen block operations to set the values of
// each $n \times n$ block.
C.block(i * B.rows(), j * B.cols(), B.rows(), B.cols()) =
A(i, j) * B; // $\in \mathbb{R}^{(n \times n)}$
}
}
}
/* SAM_LISTING_END_1 */
......@@ -39,32 +38,30 @@ void kron(const MatrixXd & A, const MatrixXd & B,
* \param[out] y Vector y = kron(A,B)*x
*/
/* SAM_LISTING_BEGIN_2 */
void kron_mult(const MatrixXd &A, const MatrixXd &B,
const VectorXd &x, VectorXd &y) {
assert(A.rows() == A.cols() &&
A.rows() == B.rows() &&
B.rows() == B.cols() &&
"Matrices A and B must be square matrices with same size!");
assert(x.size() == A.cols()*A.cols() &&
"Vector x must have length A.cols()^2");
unsigned int n = A.rows();
// Allocate space for output
y = VectorXd::Zero(n*n);
// Note: this is like doing a matrix-vector multiplication
// where the entries of the matrix are smaller matrices
// and entries of the vector are smaller vectors
// Loop over all segments of x ($\tilde{x}$)
for(unsigned int j = 0; j < n; ++j) {
// Reuse computation of z
VectorXd z = B * x.segment(j*n, n);
// Loop over all segments of y
for(unsigned int i = 0; i < n; ++i) {
y.segment(i*n, n) += A(i, j)*z;
}
void kron_mult(const MatrixXd &A, const MatrixXd &B, const VectorXd &x,
VectorXd &y) {
assert(A.rows() == A.cols() && A.rows() == B.rows() && B.rows() == B.cols() &&
"Matrices A and B must be square matrices with same size!");
assert(x.size() == A.cols() * A.cols() &&
"Vector x must have length A.cols()^2");
unsigned int n = A.rows();
// Allocate space for output
y = VectorXd::Zero(n * n);
// Note: this is like doing a matrix-vector multiplication
// where the entries of the matrix are smaller matrices
// and entries of the vector are smaller vectors
// Loop over all segments of x ($\tilde{x}$)
for (unsigned int j = 0; j < n; ++j) {
// Reuse computation of z
VectorXd z = B * x.segment(j * n, n);
// Loop over all segments of y
for (unsigned int i = 0; i < n; ++i) {
y.segment(i * n, n) += A(i, j) * z;
}
}
}
/* SAM_LISTING_END_2 */
......@@ -78,102 +75,93 @@ void kron_mult(const MatrixXd &A, const MatrixXd &B,
* \param[out] y Vector y = kron(A,B)*x
*/
/* SAM_LISTING_BEGIN_3 */
void kron_reshape(const MatrixXd & A, const MatrixXd & B,
const VectorXd & x, VectorXd & y) {
assert(A.rows() == A.cols() && A.rows() == B.rows() && B.rows() == B.cols() &&
"Matrices A and B must be square matrices with same size!");
unsigned int n = A.rows();
MatrixXd t = B * MatrixXd::Map(x.data(), n, n) * A.transpose();
y = MatrixXd::Map(t.data(), n*n, 1);
void kron_reshape(const MatrixXd &A, const MatrixXd &B, const VectorXd &x,
VectorXd &y) {
assert(A.rows() == A.cols() && A.rows() == B.rows() && B.rows() == B.cols() &&
"Matrices A and B must be square matrices with same size!");
unsigned int n = A.rows();
MatrixXd t = B * MatrixXd::Map(x.data(), n, n) * A.transpose();
y = MatrixXd::Map(t.data(), n * n, 1);
}
/* SAM_LISTING_END_3 */
/**
 * Driver: verifies that kron, kron_mult, and kron_reshape agree on a
 * small example, then measures the runtime of all three approaches for
 * growing matrix sizes M = 2, 4, ..., 256.
 */
int main(void) {
  // Testing correctness of Kron
  MatrixXd A(2, 2);
  A << 1, 2, 3, 4;
  MatrixXd B(2, 2);
  B << 5, 6, 7, 8;
  MatrixXd C;
  VectorXd x = Eigen::VectorXd::Random(4);
  VectorXd y;
  std::cout
      << "Testing kron, kron_mult, and kron_reshape with small matrices..."
      << std::endl;
  // Compute using kron
  kron(A, B, C);
  y = C * x;
  std::cout << "kron(A,B) = " << std::endl << C << std::endl;
  std::cout << "Using kron: y = " << std::endl << y << std::endl;
  // Compute using kron_mult
  kron_mult(A, B, x, y);
  std::cout << "Using kron_mult: y =" << std::endl << y << std::endl;
  // Compute using kron_reshape
  kron_reshape(A, B, x, y);
  std::cout << "Using kron_reshape: y =" << std::endl << y << std::endl;
  // Compute runtime of different implementations of kron
  /* SAM_LISTING_BEGIN_4 */
  // We repeat each runtime measurment 10 times
  // (this is done in order to remove outliers)
  const unsigned int repeats = 10;
  std::cout << "Runtime for each implementation." << std::endl;
  std::cout << std::setw(5) << "n" << std::setw(15) << "kron" << std::setw(15)
            << "kron_mult" << std::setw(15) << "kron_reshape" << std::endl;
  // Loop from $M = 2,\dots,2^8$
  for (unsigned int M = 2; M <= (1 << 8); M = M << 1) {
    Timer tm_kron, tm_kron_mult, tm_kron_map;
    // Run experiments "repeats" times
    for (unsigned int r = 0; r < repeats; ++r) {
      // Random matrices for testing
      A = MatrixXd::Random(M, M);
      B = MatrixXd::Random(M, M);
      x = VectorXd::Random(M * M);
      // Do not want to use kron for large values of M
      // (the explicit product needs O(M^4) memory)
      if (M < (1 << 6)) {
        // Kron using direct implementation
        tm_kron.start();
        kron(A, B, C);
        y = C * x;
        tm_kron.stop();
      }
      // Kron matrix-vector multiplication
      tm_kron_mult.start();
      kron_mult(A, B, x, y);
      tm_kron_mult.stop();
      // Kron using reshape
      tm_kron_map.start();
      kron_reshape(A, B, x, y);
      tm_kron_map.stop();
    }
    // kron was skipped for large M: report NaN there
    double kron_time = (M < (1 << 6)) ? tm_kron.min() : std::nan("");
    std::cout << std::setw(5) << M << std::scientific << std::setprecision(3)
              << std::setw(15) << kron_time << std::setw(15)
              << tm_kron_mult.min() << std::setw(15) << tm_kron_map.min()
              << std::endl;
  }
  /* SAM_LISTING_END_4 */
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment