Commit 2d8c20e5 authored by Jason Rhinelander

Switched OLS solver to SVD decomposition

This is supposed to be slightly more accurate, and the resulting code is
(ignoring the work Eigen does internally) simpler.
parent 8bbbc592
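
For context on what the diff below swaps, here is a minimal standalone sketch (not part of the commit; the random data is a placeholder) of the same three-regressor least-squares solve done both ways with Eigen: the old normal-equations route through an LLT (Cholesky) factorization of XᵀX, and the new route that hands X straight to JacobiSVD.

```cpp
#include <Eigen/Core>
#include <Eigen/Cholesky>
#include <Eigen/SVD>
#include <iostream>
using namespace Eigen;

int main() {
    // Placeholder data: any tall design matrix with 3 regressors and a response.
    MatrixX3d X = MatrixX3d::Random(10, 3);
    VectorXd y = VectorXd::Random(10);

    // Old route: form the normal equations (X^T X) beta = X^T y and solve them
    // with a Cholesky (LLT) factorization of the 3x3 matrix X^T X.
    Matrix3Xd Xt = X.transpose();
    LLT<Matrix3d> cholXtX(Xt * X);
    VectorXd beta_llt = cholXtX.solve(Xt * y);

    // New route: least squares directly on X via a thin SVD; X^T X is never formed.
    JacobiSVD<MatrixXd> svd(X, ComputeThinU | ComputeThinV);
    VectorXd beta_svd = svd.solve(y);

    // On a well-conditioned problem the two solutions agree to roundoff.
    std::cout << (beta_llt - beta_svd).norm() << "\n";
}
```

The SVD route needs no explicit Xᵀ or XᵀX objects, which is where the "simpler code" claim comes from, and it avoids squaring the condition number of X.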
@@ -4,7 +4,7 @@ project(fracdist CXX)
 set(fracdist_VMAJ 1)
 set(fracdist_VMIN 0)
-set(fracdist_VPAT 3)
+set(fracdist_VPAT 4)
 set(fracdist_description "fractional unit roots/cointegration pvalue and critical value finder")
 set(fracdist_author "Jason Rhinelander <jason@imaginary.ca>")
 set(fracdist_homepage "https://github.com/jagerman/fracdist")
......
 #include <fracdist/common.hpp>
 #include <boost/math/distributions/chi_squared.hpp>
 #include <Eigen/Core>
-#include <Eigen/Cholesky>
+#include <Eigen/SVD>
-using Eigen::MatrixX3d;
-using Eigen::Matrix3Xd;
-using Eigen::Matrix3d;
-using Eigen::VectorXd;
-using Eigen::RowVector3d;
-using Eigen::LLT;
+using namespace Eigen;
 namespace fracdist {
@@ -125,9 +120,8 @@ const std::array<double, p_length> quantiles(const unsigned int &q, const double
 // The interpolated F' is then the fitted value from the regression evaluted at the desired
 // b.
-// The regressors don't change, so calculate the X and cholesky decomposition of XtX
-// matrices just once:
+// The regressors don't change, so calculate the X and SVD decomposition just once:
 MatrixX3d X(blast-bfirst+1, 3);
 for (size_t i = bfirst; i <= blast; i++) {
 X(i-bfirst, 0) = bweights[i];
@@ -135,8 +129,7 @@ const std::array<double, p_length> quantiles(const unsigned int &q, const double
 X(i-bfirst, 2) = bweights[i] * bvalues[i] * bvalues[i];
 }
-Matrix3Xd Xt = X.transpose();
-LLT<Matrix3d> cholXtX(Xt * X);
+JacobiSVD<MatrixXd> svd(X, ComputeThinU | ComputeThinV);
 RowVector3d wantx;
 wantx(0) = 1.0;
@@ -152,7 +145,7 @@ const std::array<double, p_length> quantiles(const unsigned int &q, const double
 y(j-bfirst) = bweights[j] * bmap[j][i];
 }
-result[i] = wantx * cholXtX.solve(Xt * y);
+result[i] = wantx * svd.solve(y);
 }
 qcache_store(q, b, constant, interp, result);
......
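
The quantiles() change above keeps the surrounding weighted-regression logic intact and only swaps the solver. As a reading aid, here is a hypothetical self-contained helper (the function name and plain-vector inputs are mine, not fracdist's) following the same pattern: weight the [1, b, b²] regressors and the response, factor X once with JacobiSVD, and evaluate the fitted quadratic at the target b. In the real code the single svd object is built once and reused for every response column inside the loop.

```cpp
#include <Eigen/Core>
#include <Eigen/SVD>
#include <vector>
using namespace Eigen;

// Weighted quadratic fit of y_raw on [1, b, b^2], evaluated at b_want.
double weighted_quadratic_fit(const std::vector<double> &b,
                              const std::vector<double> &w,
                              const std::vector<double> &y_raw,
                              double b_want) {
    const size_t n = b.size();
    MatrixX3d X(n, 3);
    VectorXd y(n);
    for (size_t i = 0; i < n; i++) {
        // Both the regressors and the response are scaled by the weight, which
        // folds the weights into an ordinary least-squares problem.
        X(i, 0) = w[i];
        X(i, 1) = w[i] * b[i];
        X(i, 2) = w[i] * b[i] * b[i];
        y(i) = w[i] * y_raw[i];
    }
    JacobiSVD<MatrixXd> svd(X, ComputeThinU | ComputeThinV);
    RowVector3d wantx(1.0, b_want, b_want * b_want);
    return wantx * svd.solve(y);  // fitted value at b_want
}
```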
@@ -2,14 +2,9 @@
 #include <boost/math/distributions/chi_squared.hpp>
 #include <sstream>
 #include <Eigen/Core>
-#include <Eigen/Cholesky>
+#include <Eigen/SVD>
-using Eigen::MatrixX3d;
-using Eigen::Matrix3Xd;
-using Eigen::Matrix3d;
-using Eigen::VectorXd;
-using Eigen::RowVector3d;
-using Eigen::LLT;
+using namespace Eigen;
 namespace fracdist {
@@ -73,9 +68,8 @@ double critical_advanced(double test_level, const unsigned int &q, const double
 data(2) = chisqinv_actual*chisqinv_actual;
 // Get the fitted value from the regression using the inverse of our actual test level
-Matrix3Xd Xt = X.transpose();
-LLT<Matrix3d> cholXtX(Xt * X);
-double fitted = data * cholXtX.solve(Xt * y);
+JacobiSVD<MatrixXd> svd(X, ComputeThinU | ComputeThinV);
+double fitted = data * svd.solve(y);
 // Negative critical values are impossible; if we somehow got a negative prediction, truncate it
 if (fitted < 0) fitted = 0;
......
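
The fits in critical_advanced() above (and the matching pvalue_advanced() change further down) regress on [1, x, x²] with x confined to a narrow range, which is exactly the nearly collinear setting where the "slightly more accurate" claim matters: forming XᵀX roughly squares the condition number, while the SVD works on X directly. A small synthetic demonstration of that effect (not from the commit):

```cpp
#include <Eigen/Core>
#include <Eigen/Cholesky>
#include <Eigen/SVD>
#include <iostream>
using namespace Eigen;

int main() {
    const int n = 200;
    // Quadratic regressors over a narrow range: the three columns are nearly collinear.
    VectorXd x = VectorXd::LinSpaced(n, 1.0, 1.01);
    MatrixX3d X(n, 3);
    X.col(0).setOnes();
    X.col(1) = x;
    X.col(2) = x.cwiseProduct(x);
    Vector3d beta_true(0.5, -2.0, 3.0);
    VectorXd y = X * beta_true;  // zero-residual problem with a known answer

    // Normal equations + Cholesky: forward error scales roughly with cond(X)^2.
    Matrix3Xd Xt = X.transpose();
    LLT<Matrix3d> cholXtX(Xt * X);
    Vector3d beta_llt = cholXtX.solve(Xt * y);

    // SVD on X itself: forward error scales roughly with cond(X) here.
    JacobiSVD<MatrixXd> svd(X, ComputeThinU | ComputeThinV);
    Vector3d beta_svd = svd.solve(y);

    // The Cholesky error is typically several orders of magnitude larger on this data.
    std::cout << "LLT error: " << (beta_llt - beta_true).norm() << "\n"
              << "SVD error: " << (beta_svd - beta_true).norm() << "\n";
}
```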
 #include <fracdist/pvalue.hpp>
 #include <boost/math/distributions/chi_squared.hpp>
 #include <Eigen/Core>
-#include <Eigen/Cholesky>
+#include <Eigen/SVD>
-using Eigen::Matrix3d;
-using Eigen::MatrixX3d;
-using Eigen::Matrix3Xd;
-using Eigen::VectorXd;
-using Eigen::RowVector3d;
-using Eigen::LLT;
+using namespace Eigen;
 namespace fracdist {
@@ -70,9 +65,8 @@ double pvalue_advanced(const double &test_stat, const unsigned int &q, const dou
 data(1) = test_stat;
 data(2) = test_stat*test_stat;
-Matrix3Xd Xt = X.transpose();
-LLT<Matrix3d> cholXtX(Xt * X);
-double fitted = data * cholXtX.solve(Xt * y);
+JacobiSVD<MatrixXd> svd(X, ComputeThinU | ComputeThinV);
+double fitted = data * svd.solve(y);
 // A negative isn't valid, so if we predicted one anyway, truncate it at 0 (which corresponds to
 // a pvalue of 1).
......