Commit 0f0d82fe authored by Jason Rhinelander's avatar Jason Rhinelander

Did truncated t sampling as per R-Y et al. (2004)

This is about to get replaced, however, as it produces biased results!
parent 7382d976
......@@ -220,15 +220,16 @@ void Reader::interOptimize() {
auto max = demand_belief_.argmaxP(book->quality(), book->lifeSales(), book->age(), authored_books - 1, market_books, cost_unit);
const double &p = max.first;
const double &q = max.second;
#ifdef ERIS_DEBUG
if (p > 1000) {
ERIS_DBG("Found p=" << p << " > 1000");
ERIS_DBGVAR(id());
ERIS_DBGVAR(book->id());
ERIS_DBGVAR(p);
ERIS_DBGVAR(q);
ERIS_DBGVAR(demand_belief_.draw_discards);
ERIS_DBGVAR(demand_belief_.draw_success_cumulative);
ERIS_DBGVAR(demand_belief_.draw_discards_cumulative);
ERIS_DBGVAR(demand_belief_.draw_rejection_discards_last);
ERIS_DBGVAR(demand_belief_.draw_rejection_discards);
ERIS_DBGVAR(demand_belief_.draw_rejection_success);
std::cerr << "\n VectorXd beta(10); beta << ";
for (int i = 0; i < demand_belief_.beta().size(); i++) {
if (i > 0) std::cerr << ", ";
......@@ -250,6 +251,7 @@ void Reader::interOptimize() {
<< ", otherbooks = " << authored_books - 1 << ", marketbooks = " << market_books << "; } book;\n";
}
#endif
const double profit = (p - cost_unit) * q - cost_fixed;
if (profit > 0) {
......
#include "creativity/belief/Linear.hpp"
#include <eris/debug.hpp>
#include <eris/Random.hpp>
#include <Eigen/QR>
namespace creativity { namespace belief {
using namespace Eigen;
using eris::Random;
constexpr double Linear::NONINFORMATIVE_N, Linear::NONINFORMATIVE_S2, Linear::NONINFORMATIVE_Vc;
......@@ -13,11 +13,9 @@ Linear::Linear(
const Ref<const VectorXd> beta,
double s2,
const Ref<const MatrixXd> V,
double n,
std::shared_ptr<MatrixXd> V_inv,
std::shared_ptr<MatrixXd> V_chol_L
double n
)
: beta_(beta), s2_{s2}, V_(V), V_inv_{std::move(V_inv)}, V_chol_L_{std::move(V_chol_L)}, n_{n}, K_(beta_.rows())
: beta_(beta), s2_{s2}, V_(V.selfadjointView<Lower>()), n_{n}, K_(beta_.rows())
{
// Check that the given matrices conform
checkLogic();
......@@ -78,15 +76,21 @@ const MatrixXd& Linear::V() const { NO_EMPTY_MODEL; return V_; }
const MatrixXd& Linear::Vinv() const {
NO_EMPTY_MODEL;
if (not V_inv_)
V_inv_ = std::make_shared<MatrixXd>(V_.colPivHouseholderQr().inverse());
V_inv_ = std::make_shared<MatrixXd>(V_.colPivHouseholderQr().inverse().selfadjointView<Lower>());
return *V_inv_;
}
const MatrixXd& Linear::VcholL() const {
NO_EMPTY_MODEL;
if (not V_chol_L_)
V_chol_L_ = std::make_shared<MatrixXd>(V_.llt().matrixL());
V_chol_L_ = std::make_shared<MatrixXd>(V_.selfadjointView<Lower>().llt().matrixL());
return *V_chol_L_;
}
const MatrixXd& Linear::VcholLinv() const {
NO_EMPTY_MODEL;
if (not V_chol_L_inv_)
V_chol_L_inv_ = std::make_shared<MatrixXd>(VcholL().colPivHouseholderQr().inverse());
return *V_chol_L_inv_;
}
const bool& Linear::noninformative() const { NO_EMPTY_MODEL; return noninformative_; }
......@@ -102,26 +106,52 @@ const VectorXd& Linear::draw() {
if (last_draw_.size() != K_ + 1) last_draw_.resize(K_ + 1);
auto &rng = eris::Random::rng();
// beta is distributed as t(beta, s^2*V, n)
// That can be generated as beta + y*sqrt(n/q) where y ~ N(0, s^2*V), and q ~ chisq(n)
auto &rng = Random::rng();
// (beta,h) is distributed as a normal-gamma(beta, V, s2^{-1}, n), in Koop's Gamma distribution
// notation, or NG(beta, V, n/2, 2*s2^{-1}/n) in the more common G(shape,scale) notation
// (which std::gamma_distribution wants).
//
// Proof:
// Let $G_{k\theta}(k,\theta)$ be the shape ($k$), scale ($\theta$) notation. This has mean $k\theta$ and
// variance $k\theta^2$.
//
// Let $G_{Koop}(\mu,\nu)$ be Koop's notation, where $\mu$ is the mean and $\nu$ is the degrees of
// freedom, which has variance $\frac{2\mu^2}{\nu}$. Equating means and variances:
//
// \[
// k\theta = \mu
// k\theta^2 = \frac{2\mu^2}{\nu}
// \theta = \frac{2\mu}{\nu}
// k = \frac{2}{\nu}
// \]
// where the third equation follows from the first divided by the second, and fourth follows
// from the first divided by the third. Thus
// \[
// G_{Koop}(\mu,\nu) = G_{k\theta}(\frac{2}{\nu},\frac{2\mu}{\nu})
// \]
// To draw this, first draw a gamma-distributed "h" value (store its inverse)
last_draw_[K_] = 1.0 / std::gamma_distribution<double>(n_/2, 2/(s2_*n_))(rng);
// Now use that to draw a multivariate normal conditional on h, with mean beta and variance
// h^{-1} V; this is the beta portion of the draw:
last_draw_.head(K_) = multivariateNormal(beta_, VcholL(), std::sqrt(last_draw_[K_]));
// To generate y ~ N(0, SIGMA), generate N(0,1) and multiply by L from the Cholesky
// decomposition of SIGMA, or in other words, s*L where LL'=V (so SIGMA=s^2*V)
VectorXd y(K_);
std::normal_distribution<double> stdnorm(0, 1);
for (unsigned int i = 0; i < K_; i++) y[i] = stdnorm(rng);
std::chi_squared_distribution<double> rchisqn(n_);
return last_draw_;
}
last_draw_.head(K_) = beta_ + sqrt(s2_ * n_ / rchisqn(rng)) * VcholL() * y;
VectorXd Linear::multivariateNormal(const Ref<const VectorXd> &mu, const Ref<const MatrixXd> &L, double s) {
if (mu.rows() != L.rows() or L.rows() != L.cols())
throw std::logic_error("multivariateNormal() called with non-conforming mu and L");
// h has distribution Gamma(n/2, 2/(n*s^2)), and s^2 is the inverse of h:
last_draw_[K_] = 1.0 / (std::gamma_distribution<double>(n_/2, 2/(s2_*n_))(rng));
// To draw such a normal, we need the lower-triangle Cholesky decomposition L of V, and a vector
// of K random \f$N(\mu=0, \sigma^2=h^{-1})\f$ values. Then \f$beta + Lz\f$ yields a \f$beta\f$
// draw of the desired distribution.
VectorXd z(mu.size());
for (unsigned int i = 0; i < z.size(); i++) z[i] = Random::rstdnorm();
return last_draw_;
return mu + L * (s * z);
}
const VectorXd& Linear::lastDraw() const {
......@@ -141,13 +171,23 @@ void Linear::discardForce(unsigned int burn) {
}
std::ostream& operator<<(std::ostream &os, const Linear &b) {
if (b.K() == 0)
os << "Linear model with no parameters (default constructed)";
else
os << "Linear model with " << b.K() << " parameters, beta_ =\n" << b.beta_;
b.print(os);
return os;
}
void Linear::print(std::ostream &os) const {
os << print_name();
if (K_ == 0) os << " model with no parameters (default constructed)";
else {
if (noninformative()) os << " (noninformative)";
os << " model: K=" << K_ << ", n=" << n_ << ", s2=" << s2_ <<
"\n beta = " << beta_.transpose().format(IOFormat(StreamPrecision, 0, ", ")) <<
"\n V = " << V_.format(IOFormat(6, 0, " ", "\n ")) << "\n";
}
}
std::string Linear::print_name() const { return "Linear"; }
void Linear::verifyParameters() const { NO_EMPTY_MODEL; }
// Called on an lvalue object, creates a new object with *this as prior
......@@ -194,10 +234,16 @@ void Linear::updateInPlace(const Ref<const VectorXd> &y, const Ref<const MatrixX
VectorXd beta_diff = beta_post - beta_;
beta_ = std::move(beta_post);
s2_ = (n_prior * s2_ + residualspost.squaredNorm() + beta_diff.transpose() * Vinv() * beta_diff) / n_;
//ERIS_DBG("orig nSSR: " << residualspost.squaredNorm()
/*ERIS_DBG("s2_ orig method: " << s2_);
ERIS_DBG("s2_ Koop: " << s2_alt);*/
V_inv_ = std::move(V_post_inv);
beta_ = std::move(beta_post);
if (V_chol_L_) V_chol_L_.reset(); // This will have to be recalculated
// The decompositions will have to be recalculated:
if (V_chol_L_) V_chol_L_.reset();
if (V_chol_L_inv_) V_chol_L_inv_.reset();
if (noninformative_) noninformative_ = false; // If we just updated a noninformative model, we aren't noninformative anymore
}
......@@ -231,14 +277,21 @@ void Linear::weakenInPlace(const double precision_scale) {
V_inv_ = std::make_shared<Eigen::MatrixXd>(*V_inv_ * precision_scale);
}
// Likewise for the Cholesky decomposition
// Likewise for the Cholesky decomposition (and its inverse)
if (V_chol_L_) {
if (V_chol_L_.unique())
*V_chol_L_ /= std::sqrt(precision_scale);
else
V_chol_L_ = std::make_shared<Eigen::MatrixXd>(*V_chol_L_ / std::sqrt(precision_scale));
}
if (V_chol_L_inv_) {
if (V_chol_L_inv_.unique())
*V_chol_L_inv_ *= std::sqrt(precision_scale);
else
V_chol_L_inv_ = std::make_shared<Eigen::MatrixXd>(*V_chol_L_inv_ * std::sqrt(precision_scale));
}
// And of course V gets scaled
V_ /= precision_scale;
return;
......
......@@ -8,9 +8,6 @@
namespace creativity { namespace belief {
/** Base class for a linear model with a natural conjugate, normal-gamma prior.
*
* FIXME: change (where appropriate) to use selfadjointView (esp. for Cholesky decomp, and possibly
* for inverse).
*/
class Linear {
public:
......@@ -50,33 +47,19 @@ class Linear {
*/
explicit Linear(unsigned int K);
// NB: if changing these constants, also change the above constructor documentation
static constexpr double
/** The value of `n` for a default noninformative model constructed using
* `Linear(unsigned int)`.
*/
//
NONINFORMATIVE_N = 1e-3,
/// The value of `s2` for a noninformative model constructed using `Linear(unsigned int)`
NONINFORMATIVE_S2 = 1.0,
/// The constant for the diagonals of the V matrix for a noninformative model
NONINFORMATIVE_Vc = 1e+8;
/** Constructs a Linear model with the given parameters. These parameters will be those
* used for the prior when updating.
*
* \param beta the coefficient mean parameters (which, because of restrictions, might not be
* the actual means).
*
* \param s2 the \f$\sigma^2\f$ value of the error term variance. Typically the \f$\sigma^2\f$ estimate.
*
* \param V the model's V matrix (where \f$s^2 V\f$ is the variance matrix of \f$\beta\f$).
* Note: only the lower triangle of the matrix will be used.
*
* \param n the number of data points supporting the other values (which can be a
* non-integer value).
* \param V_inv A shared pointer to the inverse of `V`, if already calculated. If the
* inverse has not already been calculated, it is better to omit this argument: the inverse
* will be calculated when needed.
* \param V_chol_L A shared pointer to the `L` matrix of the cholesky decomposition of `V`
* (where LL' = V). If the decomposition has not already been calculated, it is better to
* omit this argument: the decomposition will be calculated when needed.
*
* \throws std::runtime_error if any of (`K >= 1`, `V.rows() == V.cols()`, `K == V.rows()`)
* are not satisfied (where `K` is determined by the number of rows of `beta`).
......@@ -85,10 +68,8 @@ class Linear {
const Eigen::Ref<const Eigen::VectorXd> beta,
double s2,
const Eigen::Ref<const Eigen::MatrixXd> V,
double n,
std::shared_ptr<Eigen::MatrixXd> V_inv = nullptr,
std::shared_ptr<Eigen::MatrixXd> V_chol_L = nullptr
);
double n
);
/** Constructs a Linear model from std::vector<double>s containing the coefficients of beta
* and the lower triangle of V.
......@@ -125,6 +106,19 @@ class Linear {
/// Virtual destructor
virtual ~Linear() = default;
// NB: if changing these constants, also change the single-int, non-informative constructor documentation
static constexpr double
/** The value of `n` for a default noninformative model constructed using
* `Linear(unsigned int)`.
*/
//
NONINFORMATIVE_N = 1e-3,
/// The value of `s2` for a noninformative model constructed using `Linear(unsigned int)`
NONINFORMATIVE_S2 = 1.0,
/// The constant for the diagonals of the V matrix for a noninformative model
NONINFORMATIVE_Vc = 1e+8;
/** Virtual method called during construction to verify the model size. If this returns a
* non-zero value, the given parameters (beta, V for the regular constructor, K for the
* noninformative constructor) must agree with the returned value. If this returns 0, beta
......@@ -132,7 +126,6 @@ class Linear {
*/
virtual unsigned int fixedModelSize() const;
#define NO_EMPTY_MODEL if (K_ == 0) { throw std::logic_error("Cannot use default constructed model object as a model"); }
/** Accesses the base distribution means value of beta. Note that this is *not* necessarily
* the mean of beta and should not be used for prediction; rather it simply returns the
* distribution parameter value used, which may well not be the mean if any of the beta
......@@ -159,6 +152,10 @@ class Linear {
*/
const Eigen::MatrixXd& VcholL() const;
/** Accesses (calculating if not previously calculated) the inverse of `VcholL()`. Note that
* if VcholL() hasn't been calculated yet, this will calculate it. */
const Eigen::MatrixXd& VcholLinv() const;
/** Given a row vector of values \f$X^*\f$, predicts \f$y^*\f$ using the current model
* values. The default implementation provided by this class simply returns the mean \f$X^*
* \beta\f$ (the mean of the multivariate \f$t\f$ density for an unrestricted, natural
......@@ -172,10 +169,14 @@ class Linear {
*/
virtual double predict(const Eigen::Ref<const Eigen::RowVectorXd> &Xi);
/** Draws a vector of \f$\beta\f$ values and \f$s^2\f$ values distributed according to the
* model's structure. The default implementation simply returns the posterior beta and s^2
* values, which is suitable for a natural conjugate, normal-gamma distribution without
* any model restrictions.
/** Draws a vector of \f$\beta\f$ values and \f$h^{-1} = \sigma^2\f$ values distributed
* according to the model's parameters. The first `K()` values are the drawn \f$\beta\f$
* values, the last value is the drawn \f$h^{-1}\f$ value.
*
* In particular, this uses a gamma distribution to first draw an h value, then uses that h
* value to draw multivariate normal beta values. This means the \f$\beta\f$ values will have a
* multivariate t distribution with mean `beta()`, covariance parameter `s2()*V()`, and
* degrees of freedom parameter `n()`.
*
* \returns a const reference to the vector of values. This same vector is accessible by
* calling lastDraw(). Note that this vector is reused for subsequent draw() calls and so
......@@ -185,6 +186,36 @@ class Linear {
*/
virtual const Eigen::VectorXd& draw();
/** Draws a multivariate normal with mean \f$\mu\f$ covariance \f$s^2LL^\top\f$ (i.e. takes
* a constant and a Cholesky decomposition).
*
* \param mu the vector means
* \param L the Cholesky decomposition matrix
* \param s a standard deviation multiplier for the Cholesky decomposition matrix. Typically
* a \f$\sigma\f$ (NOT \f$\sigma^2\f$) value. If omitted, defaults to 1 (so that you can
* just pass the Cholesky decomposition of the full covariance matrix).
*
* \returns the random multivariate normal vector.
*
* \throws std::logic_error if mu and L have non-conforming sizes
*/
static Eigen::VectorXd multivariateNormal(
const Eigen::Ref<const Eigen::VectorXd> &mu,
const Eigen::Ref<const Eigen::MatrixXd> &L,
double s = 1.0);
/** Exception class thrown when draw() is unable to produce an admissible draw. Not thrown
* by this class (draws never fail) but available for subclass use.
*/
class draw_failure : public std::runtime_error {
public:
/** Constructor.
* \param what the exception message.
*/
draw_failure(const std::string &what) : std::runtime_error(what) {}
};
/** Returns a reference to the vector of \f$\beta\f$ and \f$s^2\f$ values generated by the
* last call to draw(). If draw() has not yet been called, the vector will be empty.
*/
......@@ -212,6 +243,16 @@ class Linear {
*/
friend std::ostream& operator << (std::ostream &os, const Linear &b);
/** Prints the Linear model to the given output stream. Called internally by operator<<,
* but subclassable. The model_base parameter is used for the first word of the output.
*/
virtual void print(std::ostream &os) const;
/** The display name of the model to use when printing it. Defaults to "Linear" but
* subclasses should override.
*/
virtual std::string print_name() const;
/** Using the calling object as a prior, uses the provided data to create a new Linear
* model.
*
......@@ -261,6 +302,7 @@ class Linear {
Linear weaken(double precision_scale) &&;
protected:
/** Weakens the current linear model. This functionality should only be used internally and
* by subclasses as required for move and copy update methods; weakening should be
* considered (externally) as a type of construction of a new object.
......@@ -305,11 +347,15 @@ class Linear {
*/
Eigen::MatrixXd V_;
/** The cached inverse of the prior V matrix, which isn't set until/unless needed. */
mutable std::shared_ptr<Eigen::MatrixXd> V_inv_;
mutable std::shared_ptr<Eigen::MatrixXd>
/// The cached inverse of the prior V matrix, which isn't set until/unless needed.
V_inv_,
/** The cached "L" matrix of the cholesky decomposition of V, where LL' = V. */
mutable std::shared_ptr<Eigen::MatrixXd> V_chol_L_;
/// The cached "L" matrix of the cholesky decomposition of V, where LL' = V.
V_chol_L_,
/// The cached inverse of the "L" matrix of the cholesky decomposition of V.
V_chol_L_inv_;
/// The number of data points supporting this model, which need not be an integer.
double n_;
......@@ -334,6 +380,4 @@ class Linear {
void checkLogic();
};
#undef NO_EMPTY_MODEL
}}
This diff is collapsed.
This diff is collapsed.
......@@ -6,6 +6,7 @@
using namespace eris;
using namespace creativity::state;
using namespace creativity::belief;
namespace creativity { namespace gui {
......@@ -148,17 +149,19 @@ InfoWindow::InfoWindow(std::shared_ptr<const State> state, std::shared_ptr<Gtk::
data_append(grid_profit, "p_" + std::to_string(i), BETA "[" + p_vars[i] + "]");
data_append(grid_profit, "_p_draws", "# successful draws");
data_append(grid_profit, "_p_discards", "# discarded draws");
data_append(grid_profit, "_p_drawtype", "Draw method");
matrix_at(grid_profit, "p_V", "s<sup>2</sup><b>V</b>", 2, 2, p_vars.size(), p_vars.size());
auto &grid_demand = new_tab_grid(beliefs, "Demand");
labels_append(grid_demand, "Dependent variable", "<i>quantityDemanded</i>");
data_append(grid_demand, "d_n", "n");
data_append(grid_demand, "d_s2", "s<sup>2</sup>");
std::vector<std::string> d_vars{{"constant", "price", "quality", "quality<sup>2</sup>", "prevSales", "noSales", "age", "I(onlyBook)", "otherBooks", "marketBooks"}};
std::vector<std::string> d_vars{{"constant", "price", "quality", "quality<sup>2</sup>", "prevSales", "age", "I(onlyBook)", "otherBooks", "marketBooks"}};
for (size_t i = 0; i < d_vars.size(); i++)
data_append(grid_demand, "d_" + std::to_string(i), BETA "[" + d_vars[i] + "]");
data_append(grid_demand, "_d_draws", "# successful draws");
data_append(grid_demand, "_d_discards", "# discarded draws");
data_append(grid_demand, "_d_drawtype", "Draw method");
matrix_at(grid_demand, "d_V", "s<sup>2</sup><b>V</b>", 2, 2, d_vars.size(), d_vars.size());
comment_append(grid_demand, "<i>NB: This regression is for single-period demand.</i>", p_vars.size() + 3);
......@@ -188,6 +191,7 @@ InfoWindow::InfoWindow(std::shared_ptr<const State> state, std::shared_ptr<Gtk::
data_append(grid_pextrap, "pe_" + std::to_string(i), BETA "[" + p_vars[i] + "]");
data_append(grid_pextrap, "_pe_draws", "# successful draws");
data_append(grid_pextrap, "_pe_discards", "# discarded draws");
data_append(grid_pextrap, "_pe_drawtype", "Draw method");
matrix_at(grid_pextrap, "pe_V", "s<sup>2</sup><b>V</b>", 2, 2, p_vars.size(), p_vars.size());
comment_append(grid_pextrap, "<i>NB: This is the same model as the Profit belief, but its data also includes extrapolated values for "
"still-on-market books using ProfitStream beliefs, while Profit beliefs only include books once they leave the market.</i>",
......@@ -311,12 +315,15 @@ void InfoWindow::refresh(std::shared_ptr<const State> state) {
UPDATE_LIN_RB("d_", demand);
UPDATE_LIN_RB("pe_", profit_extrap);
updateValue("_p_draws", r.profit.draw_success_cumulative);
updateValue("_p_discards", r.profit.draw_discards_cumulative);
updateValue("_pe_draws", r.profit_extrap.draw_success_cumulative);
updateValue("_pe_discards", r.profit_extrap.draw_discards_cumulative);
updateValue("_d_draws", r.demand.draw_success_cumulative);
updateValue("_d_discards", r.demand.draw_discards_cumulative);
updateValue("_p_draws", r.profit.draw_rejection_success);
updateValue("_p_discards", r.profit.draw_rejection_discards);
updateValue("_p_drawtype", r.profit.last_draw_mode == LinearRestricted::DrawMode::Gibbs ? "gibbs" : "rejection");
updateValue("_pe_draws", r.profit_extrap.draw_rejection_success);
updateValue("_pe_discards", r.profit_extrap.draw_rejection_discards);
updateValue("_pe_drawtype", r.profit_extrap.last_draw_mode == LinearRestricted::DrawMode::Gibbs ? "gibbs" : "rejection");
updateValue("_d_draws", r.demand.draw_rejection_success);
updateValue("_d_discards", r.demand.draw_rejection_discards);
updateValue("_d_drawtype", r.demand.last_draw_mode == LinearRestricted::DrawMode::Gibbs ? "gibbs" : "rejection");
for (unsigned long a : Reader::profit_stream_ages) {
std::string code_prefix = "ps" + std::to_string(a) + "_";
......
......@@ -643,8 +643,8 @@ std::pair<eris_id_t, ReaderState> FileStorage::readReader(eris_time_t t) const {
r.profit = belief.noninformative
? Profit(settings_.dimensions, belief.K)
: Profit(settings_.dimensions, belief.beta, belief.s2, belief.V, belief.n);
r.profit.draw_success_cumulative = belief.draw_success_cumulative;
r.profit.draw_discards_cumulative = belief.draw_discards_cumulative;
r.profit.draw_rejection_success = belief.draw_success_cumulative;
r.profit.draw_rejection_discards = belief.draw_discards_cumulative;
}
belief = readBelief();
......@@ -652,8 +652,8 @@ std::pair<eris_id_t, ReaderState> FileStorage::readReader(eris_time_t t) const {
r.profit_extrap = belief.noninformative
? Profit(settings_.dimensions, belief.K)
: Profit(settings_.dimensions, belief.beta, belief.s2, belief.V, belief.n);
r.profit_extrap.draw_success_cumulative = belief.draw_success_cumulative;
r.profit_extrap.draw_discards_cumulative = belief.draw_discards_cumulative;
r.profit_extrap.draw_rejection_success = belief.draw_success_cumulative;
r.profit_extrap.draw_rejection_discards = belief.draw_discards_cumulative;
}
belief = readBelief();
......@@ -661,8 +661,8 @@ std::pair<eris_id_t, ReaderState> FileStorage::readReader(eris_time_t t) const {
r.demand = belief.noninformative
? Demand(settings_.dimensions, belief.K)
: Demand(settings_.dimensions, belief.beta, belief.s2, belief.V, belief.n);
r.demand.draw_success_cumulative = belief.draw_success_cumulative;
r.demand.draw_discards_cumulative = belief.draw_discards_cumulative;
r.demand.draw_rejection_success = belief.draw_success_cumulative;
r.demand.draw_rejection_discards = belief.draw_discards_cumulative;
}
belief = readBelief();
......@@ -713,9 +713,10 @@ FileStorage::belief_data FileStorage::readBelief() const {
// Next up is a status field
uint8_t status = read_u8();
// Currently only the first bit is used: if set, this is a restricted belief (with draw
// The lowest-value bit indicates this is a restricted belief
// information)
bool restricted_model = status & 1;
bool last_draw_was_gibbs = status & 2;
// The first K elements are beta values
belief.beta = VectorXd(k);
......@@ -740,6 +741,7 @@ FileStorage::belief_data FileStorage::readBelief() const {
if (restricted_model) {
belief.draw_success_cumulative = read_u32();
belief.draw_discards_cumulative = read_u32();
belief.draw_gibbs = last_draw_was_gibbs;
}
return belief;
......@@ -830,12 +832,16 @@ void FileStorage::writeBelief(const Linear &m) {
write_i8(k);
bool restricted_model = false;
// First up is the status field. Currently we just have one status bit for a restricted model
// First up is the status field. Currently we have one status bit for a restricted model, and
// one for the last draw mode
const LinearRestricted *lr = dynamic_cast<const LinearRestricted*>(&m);
if (lr) restricted_model = true;
uint8_t status = 0;
if (restricted_model) status |= 1;
if (restricted_model) {
status |= 1;
if (lr->last_draw_mode == LinearRestricted::DrawMode::Gibbs) status |= 2;
}
// Status field
write_u8(status);
......@@ -858,8 +864,8 @@ void FileStorage::writeBelief(const Linear &m) {
}
if (restricted_model) {
write_u32(lr->draw_success_cumulative);
write_u32(lr->draw_discards_cumulative);
write_u32(lr->draw_rejection_success);
write_u32(lr->draw_rejection_discards);
}
FILESTORAGE_DEBUG_WRITE_CHECK(2+8*(2+k+k*(k+1)/2) + (restricted_model ? 4*2 : 0))
......
......@@ -496,7 +496,7 @@ class FileStorage final : public StorageBackend {
double s2; ///< belief::Linear s2 value
double n; ///< belief::Linear n value
Eigen::MatrixXd V; ///< belief::Linear V matrix
bool draw_data = false; ///< If true, the belief came from a LinearRestricted, and so has draw stats
bool draw_gibbs; ///< If true, the last draw from this belief used Gibbs sampling (false = no draws, or rejection sampling)
uint32_t draw_success_cumulative, ///< For a restricted belief, the number of successful draws
draw_discards_cumulative; ///< For a restricted belief, the number of discarded draws
} belief_data;
......
......@@ -58,7 +58,7 @@ void PsqlStorage::initialConnection() {
conn_->prepare("exists_library_book", "SELECT COUNT(*) FROM library WHERE simulation = $1 AND reader_eris_id = $2 AND book_eris_id = $3");
conn_->prepare("insert_library_book", "INSERT INTO library (simulation,reader_eris_id,book_eris_id,type,acquired,quality) VALUES ($1,$2,$3,$4,$5,$6)");
conn_->prepare("insert_belief_unrestr", "INSERT INTO belief (reader, type, k, noninformative, s2, n, beta, v_lower) VALUES ($1,$2,$3,$4,$5,$6,$7,$8)");
conn_->prepare("insert_belief_restr", "INSERT INTO belief (reader, type, k, noninformative, s2, n, beta, v_lower, draw_successes, draw_discards) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10)");
conn_->prepare("insert_belief_restr", "INSERT INTO belief (reader, type, k, noninformative, s2, n, beta, v_lower, last_draw_type, draw_successes, draw_discards) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11)");
conn_->prepare("insert_book", "INSERT INTO book (state,eris_id,author_eris_id,created,position,quality,price,revenue,revenue_lifetime,sales,sales_lifetime,pirated,pirated_lifetime,lifetime) VALUES "
"($1" ",$2" ",$3" ",$4" ",$5" ",$6" ",$7" ",$8" ",$9" ",$10"",$11" ",$12" ",$13" ",$14)");
}
......@@ -245,7 +245,7 @@ void PsqlStorage::insertBelief(eris_id_t dbid, const std::string &type, const Li
auto insert = trans.prepared(r_belief ? "insert_belief_restr" : "insert_belief_unrestr")(dbid)(type)(belief.K())(belief.noninformative());
if (belief.noninformative()) {
insert()()()();
if (r_belief) insert()();
if (r_belief) insert()()();
}
else {
insert(belief.s2())(belief.n());
......@@ -263,8 +263,9 @@ void PsqlStorage::insertBelief(eris_id_t dbid, const std::string &type, const Li
insert(createDoubleArray(coef.cbegin(), coef.cend()));
if (r_belief) {
insert(r_belief->draw_success_cumulative);
insert(r_belief->draw_discards_cumulative);
insert(r_belief->last_draw_mode == LinearRestricted::DrawMode::Gibbs ? "gibbs" :
r_belief->last_draw_mode == LinearRestricted::DrawMode::Rejection ? "rejection" : "none");
insert(r_belief->draw_rejection_success)(r_belief->draw_rejection_discards);
}
}
insert.exec();
......@@ -337,18 +338,33 @@ std::pair<eris::eris_id_t, ReaderState> PsqlStorage::readReader(const pqxx::tupl
auto V = parseDoubleArray(b["v_lower"].c_str(), k*(k+1)/2);
if (type == "profit") {
r.profit = Profit(dimensions_, beta, s2, V, n);
r.profit.draw_success_cumulative = b["draw_successes"].as<unsigned long>();
r.profit.draw_discards_cumulative = b["draw_discards"].as<unsigned long>();
std::string last_draw_type = std::string(b["last_draw_type"].c_str());
r.profit.last_draw_mode =
last_draw_type == "gibbs" ?