remove unnecessary parameter in computeMean method

ygeunkim committed Oct 29, 2024
1 parent 9d308b4 commit cbbbcb4

Showing 5 changed files with 57 additions and 51 deletions.
4 changes: 2 additions & 2 deletions inst/include/bvharconfig.h
@@ -21,7 +21,7 @@ struct RegParams {
   Eigen::VectorXd _sig_shp, _sig_scl, _mean_non;
   double _sd_non;
   bool _mean;
-  int _dim, _dim_design, _num_design, _num_lowerchol, _num_coef, _num_alpha;
+  int _dim, _dim_design, _num_design, _num_lowerchol, _num_coef, _num_alpha, _nrow;
 
   RegParams(
     int num_iter, const Eigen::MatrixXd& x, const Eigen::MatrixXd& y,
@@ -35,7 +35,7 @@ struct RegParams {
     _sd_non(CAST_DOUBLE(intercept["sd_non"])), _mean(include_mean),
     _dim(y.cols()), _dim_design(x.cols()), _num_design(y.rows()),
     _num_lowerchol(_dim * (_dim - 1) / 2), _num_coef(_dim * _dim_design),
-    _num_alpha(_mean ? _num_coef - _dim : _num_coef) {}
+    _num_alpha(_mean ? _num_coef - _dim : _num_coef), _nrow(_num_alpha / _dim) {}
 };
 
 struct RegInits {
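
The new _nrow member caches _num_alpha / _dim, the number of rows in the lag-coefficient block of the coefficient matrix, so downstream samplers no longer recompute the quotient at every use. A minimal sketch of the bookkeeping, with hypothetical dimensions (the identifiers mirror the struct members, but nothing here is the package's API):

#include <iostream>

int main() {
  int dim = 3;           // number of series (m)
  int lag = 2;           // VAR lag order (p)
  bool include_mean = true;
  int dim_design = dim * lag + (include_mean ? 1 : 0);       // columns of the design matrix X
  int num_coef = dim * dim_design;                           // all coefficients
  int num_alpha = include_mean ? num_coef - dim : num_coef;  // lag coefficients only
  int nrow = num_alpha / dim;                                // rows of the lag block: m * p
  std::cout << "nrow = " << nrow << "\n";                    // prints nrow = 6
  return 0;
}

Since _num_alpha is _dim * _dim_design - _dim with an intercept (or _dim * _dim_design without), the division is always exact.
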
17 changes: 9 additions & 8 deletions inst/include/mcmcreg.h
@@ -21,7 +21,7 @@ class McmcReg {
     x(params._x), y(params._y),
     num_iter(params._iter), dim(params._dim), dim_design(params._dim_design), num_design(params._num_design),
     num_lowerchol(params._num_lowerchol), num_coef(params._num_coef),
-    num_alpha(params._num_alpha),
+    num_alpha(params._num_alpha), nrow_coef(params._nrow),
     reg_record(num_iter, dim, num_design, num_coef, num_lowerchol),
     sparse_record(num_iter, dim, num_design, num_alpha, num_lowerchol),
     mcmc_step(0), rng(seed),
@@ -39,13 +39,13 @@ class McmcReg {
     response_contem(Eigen::VectorXd::Zero(num_design)),
     // response_contem(Eigen::MatrixXd::Zero(num_design, dim)),
     sqrt_sv(Eigen::MatrixXd::Zero(num_design, dim)),
-    sparse_coef(Eigen::MatrixXd::Zero(num_alpha / dim, dim)), sparse_contem(Eigen::VectorXd::Zero(num_lowerchol)),
+    sparse_coef(Eigen::MatrixXd::Zero(nrow_coef, dim)), sparse_contem(Eigen::VectorXd::Zero(num_lowerchol)),
     prior_sig_shp(params._sig_shp), prior_sig_scl(params._sig_scl) {
     if (include_mean) {
       prior_alpha_mean.tail(dim) = params._mean_non;
       prior_alpha_prec.tail(dim) = 1 / (params._sd_non * Eigen::VectorXd::Ones(dim)).array().square();
     }
-    coef_vec.head(num_alpha) = coef_mat.topRows(num_alpha / dim).reshaped();
+    coef_vec.head(num_alpha) = coef_mat.topRows(nrow_coef).reshaped();
     if (include_mean) {
       coef_vec.tail(dim) = coef_mat.bottomRows(1).transpose();
     }
@@ -98,6 +98,7 @@ class McmcReg {
   int num_lowerchol;
   int num_coef;
   int num_alpha;
+  int nrow_coef;
   LdltRecords reg_record;
   SparseRecords sparse_record;
   std::atomic<int> mcmc_step; // MCMC step
@@ -129,14 +130,14 @@ class McmcReg {
       if (include_mean) {
         Eigen::VectorXd prior_mean_j(dim_design);
         Eigen::VectorXd prior_prec_j(dim_design);
-        prior_mean_j << prior_alpha_mean.segment(j * num_alpha / dim, num_alpha / dim), prior_alpha_mean.tail(dim)[j];
-        prior_prec_j << prior_alpha_prec.segment(j * num_alpha / dim, num_alpha / dim), prior_alpha_prec.tail(dim)[j];
+        prior_mean_j << prior_alpha_mean.segment(j * nrow_coef, nrow_coef), prior_alpha_mean.tail(dim)[j];
+        prior_prec_j << prior_alpha_prec.segment(j * nrow_coef, nrow_coef), prior_alpha_prec.tail(dim)[j];
         draw_coef(
           coef_mat.col(j), design_coef,
           (((y - x * coef_mat) * chol_lower_j.transpose()).array() / sqrt_sv_j.array()).reshaped(),
           prior_mean_j, prior_prec_j, rng
         );
-        coef_vec.head(num_alpha) = coef_mat.topRows(num_alpha / dim).reshaped();
+        coef_vec.head(num_alpha) = coef_mat.topRows(nrow_coef).reshaped();
         coef_vec.tail(dim) = coef_mat.bottomRows(1).transpose();
       } else {
         draw_coef(
@@ -147,9 +148,9 @@ class McmcReg {
           prior_alpha_prec.segment(dim_design * j, dim_design), // Prior precision of j-th column of A
           rng
         );
-        coef_vec.head(num_alpha) = coef_mat.topRows(num_alpha / dim).reshaped();
+        coef_vec = coef_mat.reshaped();
       }
-      draw_savs(sparse_coef.col(j), coef_mat.col(j).head(num_alpha / dim), design_coef);
+      draw_savs(sparse_coef.col(j), coef_mat.col(j).head(nrow_coef), design_coef);
     }
   }
   void updateDiag() {
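
In the no-intercept branch, num_alpha == num_coef, so the old coef_mat.topRows(num_alpha / dim).reshaped() covered the whole matrix anyway; the new coef_vec = coef_mat.reshaped() says the same thing directly. A sketch of the column-major stacking that reshaped() performs (assuming Eigen 3.4 or later, where the no-argument overload exists; the values are made up):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd coef_mat(2, 3); // nrow_coef = 2, dim = 3, no intercept row
  coef_mat << 1, 3, 5,
              2, 4, 6;
  // Column-major vectorization: columns are stacked top to bottom.
  Eigen::VectorXd coef_vec = coef_mat.reshaped();
  std::cout << coef_vec.transpose() << "\n"; // 1 2 3 4 5 6
  return 0;
}
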
17 changes: 9 additions & 8 deletions inst/include/mcmcsv.h
@@ -21,7 +21,7 @@ class McmcSv {
     x(params._x), y(params._y),
     num_iter(params._iter), dim(params._dim), dim_design(params._dim_design), num_design(params._num_design),
     num_lowerchol(params._num_lowerchol), num_coef(params._num_coef),
-    num_alpha(params._num_alpha),
+    num_alpha(params._num_alpha), nrow_coef(params._nrow),
     sv_record(num_iter, dim, num_design, num_coef, num_lowerchol),
     sparse_record(num_iter, dim, num_design, num_alpha, num_lowerchol),
     mcmc_step(0), rng(seed),
@@ -39,14 +39,14 @@ class McmcSv {
     ortho_latent(Eigen::MatrixXd::Zero(num_design, dim)),
     response_contem(Eigen::VectorXd::Zero(num_design)),
     sqrt_sv(Eigen::MatrixXd::Zero(num_design, dim)),
-    sparse_coef(Eigen::MatrixXd::Zero(num_alpha / dim, dim)), sparse_contem(Eigen::VectorXd::Zero(num_lowerchol)),
+    sparse_coef(Eigen::MatrixXd::Zero(nrow_coef, dim)), sparse_contem(Eigen::VectorXd::Zero(num_lowerchol)),
     prior_sig_shp(params._sig_shp), prior_sig_scl(params._sig_scl),
     prior_init_mean(params._init_mean), prior_init_prec(params._init_prec) {
     if (include_mean) {
       prior_alpha_mean.tail(dim) = params._mean_non;
       prior_alpha_prec.tail(dim) = 1 / (params._sd_non * Eigen::VectorXd::Ones(dim)).array().square();
     }
-    coef_vec.head(num_alpha) = coef_mat.topRows(num_alpha / dim).reshaped();
+    coef_vec.head(num_alpha) = coef_mat.topRows(nrow_coef).reshaped();
     if (include_mean) {
       coef_vec.tail(dim) = coef_mat.bottomRows(1).transpose();
     }
@@ -92,6 +92,7 @@ class McmcSv {
   int num_lowerchol;
   int num_coef;
   int num_alpha;
+  int nrow_coef;
   SvRecords sv_record;
   SparseRecords sparse_record;
   std::atomic<int> mcmc_step; // MCMC step
@@ -124,14 +125,14 @@ class McmcSv {
      if (include_mean) {
        Eigen::VectorXd prior_mean_j(dim_design);
        Eigen::VectorXd prior_prec_j(dim_design);
-        prior_mean_j << prior_alpha_mean.segment(j * num_alpha / dim, num_alpha / dim), prior_alpha_mean.tail(dim)[j];
-        prior_prec_j << prior_alpha_prec.segment(j * num_alpha / dim, num_alpha / dim), prior_alpha_prec.tail(dim)[j];
+        prior_mean_j << prior_alpha_mean.segment(j * nrow_coef, nrow_coef), prior_alpha_mean.tail(dim)[j];
+        prior_prec_j << prior_alpha_prec.segment(j * nrow_coef, nrow_coef), prior_alpha_prec.tail(dim)[j];
        draw_coef(
          coef_mat.col(j), design_coef,
          (((y - x * coef_mat) * chol_lower_j.transpose()).array() * sqrt_sv_j.array()).reshaped(),
          prior_mean_j, prior_prec_j, rng
        );
-        coef_vec.head(num_alpha) = coef_mat.topRows(num_alpha / dim).reshaped();
+        coef_vec.head(num_alpha) = coef_mat.topRows(nrow_coef).reshaped();
        coef_vec.tail(dim) = coef_mat.bottomRows(1).transpose();
      } else {
        draw_coef(
@@ -142,9 +143,9 @@ class McmcSv {
          prior_alpha_prec.segment(dim_design * j, dim_design), // Prior precision of j-th column of A
          rng
        );
-        coef_vec.head(num_alpha) = coef_mat.topRows(num_alpha / dim).reshaped();
+        coef_vec = coef_mat.reshaped();
      }
-      draw_savs(sparse_coef.col(j), coef_mat.col(j).head(num_alpha / dim), design_coef);
+      draw_savs(sparse_coef.col(j), coef_mat.col(j).head(nrow_coef), design_coef);
    }
  }
  void updateState() {
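
The same renaming applies to the prior slicing: prior_alpha_mean stacks nrow_coef lag-prior entries per equation, with the dim intercept priors at the tail, and equation j picks out its own segment plus its own intercept entry. A toy illustration with made-up values (not the package's defaults):

#include <Eigen/Dense>
#include <iostream>

int main() {
  int dim = 2, nrow_coef = 3, j = 1;
  int dim_design = nrow_coef + 1; // lag block plus intercept
  Eigen::VectorXd prior_alpha_mean(dim * nrow_coef + dim);
  prior_alpha_mean << 1, 2, 3, 4, 5, 6, 10, 20; // last dim entries: intercept priors
  Eigen::VectorXd prior_mean_j(dim_design);
  prior_mean_j << prior_alpha_mean.segment(j * nrow_coef, nrow_coef), // equation j's lag priors
                  prior_alpha_mean.tail(dim)[j];                      // its intercept prior
  std::cout << prior_mean_j.transpose() << "\n"; // 4 5 6 20
  return 0;
}
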
45 changes: 24 additions & 21 deletions inst/include/regforecaster.h
@@ -28,14 +28,15 @@ class RegForecaster {
     coef_mat(Eigen::MatrixXd::Zero(num_coef / dim, dim)),
     contem_mat(Eigen::MatrixXd::Identity(dim, dim)),
     standard_normal(Eigen::VectorXd::Zero(dim)),
+    tmp_vec(Eigen::VectorXd::Zero((var_lag - 1) * dim)),
     lpl(Eigen::VectorXd::Zero(step)) {
     last_pvec[dim_design - 1] = 1.0; // valid when include_mean = true
     last_pvec.head(var_lag * dim) = vectorize_eigen(response.colwise().reverse().topRows(var_lag).transpose().eval()); // [y_T^T, y_(T - 1)^T, ... y_(T - lag + 1)^T]
-    post_mean = last_pvec.head(dim); // y_T
-    tmp_vec = last_pvec.segment(dim, (var_lag - 1) * dim); // y_(T - 1), ... y_(T - lag + 1)
+    // post_mean = last_pvec.head(dim); // y_T
+    // tmp_vec = last_pvec.segment(dim, (var_lag - 1) * dim); // y_(T - 1), ... y_(T - lag + 1)
   }
   virtual ~RegForecaster() = default;
-  virtual void computeMean(int i) = 0;
+  virtual void computeMean() = 0;
   void updateParams(int i) {
     coef_mat.topRows(nrow_coef) = unvectorize(reg_record.coef_record.row(i).head(num_alpha).transpose(), dim);
     if (include_mean) {
@@ -44,28 +45,28 @@ class RegForecaster {
     sv_update = reg_record.fac_record.row(i).transpose().cwiseSqrt(); // D^1/2
     contem_mat = build_inv_lower(dim, reg_record.contem_coef_record.row(i)); // L
   }
-  void updateVariance(int i) {
+  void updateVariance() {
     // sv_update = reg_record.fac_record.row(i).transpose();
     // contem_mat = build_inv_lower(dim, reg_record.contem_coef_record.row(i));
-    for (int j = 0; j < dim; j++) {
+    for (int j = 0; j < dim; ++j) {
       standard_normal[j] = normal_rand(rng);
     }
     standard_normal.array() *= sv_update.array(); // D^(1/2) Z ~ N(0, D)
   }
   Eigen::MatrixXd forecastDensity() {
     std::lock_guard<std::mutex> lock(mtx);
     Eigen::MatrixXd predictive_distn(step, num_sim * dim); // rbind(step), cbind(sims)
-    Eigen::VectorXd obs_vec = last_pvec; // y_T y_(T - 1), ... y_(T - lag + 1)
+    Eigen::VectorXd obs_vec = last_pvec; // y_T, y_(T - 1), ... y_(T - lag + 1)
     for (int i = 0; i < num_sim; ++i) {
-      last_pvec = obs_vec;
-      post_mean = obs_vec.head(dim);
-      tmp_vec = obs_vec.segment(dim, (var_lag - 1) * dim);
+      last_pvec = obs_vec; // y_T, y_(T - 1), ... y_(T - lag + 1)
+      post_mean = obs_vec.head(dim); // y_T
+      tmp_vec = obs_vec.segment(dim, (var_lag - 1) * dim); // y_(T - 1), ... y_(T - lag + 1)
       updateParams(i);
       for (int h = 0; h < step; ++h) {
         last_pvec.segment(dim, (var_lag - 1) * dim) = tmp_vec;
         last_pvec.head(dim) = post_mean;
-        computeMean(i);
-        updateVariance(i);
+        computeMean();
+        updateVariance();
         post_mean += contem_mat.triangularView<Eigen::UnitLower>().solve(standard_normal); // N(post_mean, L^-1 D L^T-1)
         predictive_distn.block(h, i * dim, 1, dim) = post_mean.transpose(); // hat(Y_{T + h}^{(i)})
         tmp_vec = last_pvec.head((var_lag - 1) * dim);
@@ -76,7 +77,7 @@ class RegForecaster {
   Eigen::MatrixXd forecastDensity(const Eigen::VectorXd& valid_vec) {
     std::lock_guard<std::mutex> lock(mtx);
     Eigen::MatrixXd predictive_distn(step, num_sim * dim); // rbind(step), cbind(sims)
-    Eigen::VectorXd obs_vec = last_pvec; // y_T y_(T - 1), ... y_(T - lag + 1)
+    Eigen::VectorXd obs_vec = last_pvec; // y_T, y_(T - 1), ... y_(T - lag + 1)
     for (int i = 0; i < num_sim; ++i) {
       last_pvec = obs_vec;
       post_mean = obs_vec.head(dim);
@@ -85,11 +86,11 @@ class RegForecaster {
       for (int h = 0; h < step; ++h) {
         last_pvec.segment(dim, (var_lag - 1) * dim) = tmp_vec;
         last_pvec.head(dim) = post_mean;
-        computeMean(i);
-        updateVariance(i);
+        computeMean();
+        updateVariance();
         post_mean += contem_mat.triangularView<Eigen::UnitLower>().solve(standard_normal); // N(post_mean, L^-1 D L^T-1)
         predictive_distn.block(h, i * dim, 1, dim) = post_mean.transpose(); // hat(Y_{T + h}^{(i)})
-        lpl[h] += sv_update.array().log().sum() / 2 - dim * log(2 * M_PI) / 2 - (sv_update.cwiseInverse().array() * (contem_mat * (post_mean - valid_vec)).array()).matrix().squaredNorm() / 2;
+        lpl[h] += sv_update.array().log().sum() - dim * log(2 * M_PI) / 2 - (sv_update.cwiseInverse().array() * (contem_mat * (post_mean - valid_vec)).array()).matrix().squaredNorm() / 2;
         tmp_vec = last_pvec.head((var_lag - 1) * dim);
       }
     }
@@ -141,8 +142,9 @@ class RegVarForecaster : public RegForecaster {
     }
   }
   virtual ~RegVarForecaster() = default;
-  void computeMean(int i) override {
-    post_mean = last_pvec.transpose() * coef_mat;
+  void computeMean() override {
+    // post_mean = last_pvec.transpose() * coef_mat;
+    post_mean = coef_mat.transpose() * last_pvec;
   }
 };
 
@@ -159,8 +161,9 @@ class RegVharForecaster : public RegForecaster {
     }
   }
   virtual ~RegVharForecaster() = default;
-  void computeMean(int i) override {
-    post_mean = last_pvec.transpose() * har_trans.transpose() * coef_mat;
+  void computeMean() override {
+    // post_mean = last_pvec.transpose() * har_trans.transpose() * coef_mat;
+    post_mean = coef_mat.transpose() * har_trans * last_pvec;
   }
 protected:
   Eigen::MatrixXd har_trans;
@@ -174,7 +177,7 @@ class RegVarSelectForecaster : public RegVarForecaster {
   RegVarSelectForecaster(const LdltRecords& records, const Eigen::MatrixXd& selection, int step, const Eigen::MatrixXd& response_mat, int lag, bool include_mean, bool filter_stable, unsigned int seed)
   : RegVarForecaster(records, step, response_mat, lag, include_mean, filter_stable, seed), activity_graph(selection) {}
   virtual ~RegVarSelectForecaster() = default;
-  void computeMean(int i) override {
+  void computeMean() override {
     post_mean = last_pvec.transpose() * (activity_graph.array() * coef_mat.array()).matrix();
   }
 private:
@@ -189,7 +192,7 @@ class RegVharSelectForecaster : public RegVharForecaster {
   RegVharSelectForecaster(const LdltRecords& records, const Eigen::MatrixXd& selection, int step, const Eigen::MatrixXd& response_mat, const Eigen::MatrixXd& har_trans, int month, bool include_mean, bool filter_stable, unsigned int seed)
   : RegVharForecaster(records, step, response_mat, har_trans, month, include_mean, filter_stable, seed), activity_graph(selection) {}
   virtual ~RegVharSelectForecaster() = default;
-  void computeMean(int i) override {
+  void computeMean() override {
     post_mean = last_pvec.transpose() * har_trans.transpose() * (activity_graph.array() * coef_mat.array()).matrix();
   }
 private:
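
The removed int i is the point of the commit: the draw index is consumed entirely by updateParams(i), which loads the i-th posterior draw into coef_mat, so every computeMean override read only member state and ignored its parameter. The switch to coef_mat.transpose() * last_pvec is the same product written so the result is a column vector, matching post_mean's shape directly. A stripped-down sketch of the pattern — a hypothetical class, not the library's full interface:

#include <Eigen/Dense>
#include <iostream>

class Forecaster {
public:
  Forecaster(int dim_design, int dim)
  : coef_mat(Eigen::MatrixXd::Zero(dim_design, dim)),
    last_pvec(Eigen::VectorXd::Ones(dim_design)),
    post_mean(Eigen::VectorXd::Zero(dim)) {}
  virtual ~Forecaster() = default;
  void step(int i) {
    updateParams(i); // the draw index is consumed here...
    computeMean();   // ...so the mean update needs no argument
  }
  Eigen::VectorXd mean() const { return post_mean; }
protected:
  virtual void updateParams(int /*draw*/) {} // would load the i-th posterior draw
  virtual void computeMean() = 0;            // pure function of member state
  Eigen::MatrixXd coef_mat;
  Eigen::VectorXd last_pvec, post_mean;
};

class VarForecaster : public Forecaster {
public:
  using Forecaster::Forecaster;
protected:
  void computeMean() override {
    // Same value as (last_pvec.transpose() * coef_mat).transpose()
    post_mean = coef_mat.transpose() * last_pvec;
  }
};

int main() {
  VarForecaster fc(4, 2);
  fc.step(0);
  std::cout << fc.mean().transpose() << "\n"; // 0 0 for the zero draw
  return 0;
}
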
25 changes: 13 additions & 12 deletions inst/include/svforecaster.h
@@ -29,14 +29,15 @@ class SvForecaster {
     contem_mat(Eigen::MatrixXd::Identity(dim, dim)),
     h_last_record(sv_record.lvol_record.rightCols(dim)),
     standard_normal(Eigen::VectorXd::Zero(dim)),
+    tmp_vec(Eigen::VectorXd::Zero((var_lag - 1) * dim)),
     lpl(Eigen::VectorXd::Zero(step)) {
     last_pvec[dim_design - 1] = 1.0; // valid when include_mean = true
     last_pvec.head(var_lag * dim) = vectorize_eigen(response.colwise().reverse().topRows(var_lag).transpose().eval()); // [y_T^T, y_(T - 1)^T, ... y_(T - lag + 1)^T]
-    post_mean = last_pvec.head(dim); // y_T
-    tmp_vec = last_pvec.segment(dim, (var_lag - 1) * dim); // y_(T - 1), ... y_(T - lag + 1)
+    // post_mean = last_pvec.head(dim); // y_T
+    // tmp_vec = last_pvec.segment(dim, (var_lag - 1) * dim); // y_(T - 1), ... y_(T - lag + 1)
   }
   virtual ~SvForecaster() = default;
-  virtual void computeMean(int i) = 0;
+  virtual void computeMean() = 0;
   void updateParams(int i) {
     coef_mat.topRows(nrow_coef) = unvectorize(sv_record.coef_record.row(i).head(num_alpha).transpose(), dim);
     if (include_mean) {
@@ -54,7 +55,7 @@ class SvForecaster {
     // }
     contem_mat = build_inv_lower(dim, sv_record.contem_coef_record.row(i)); // L
   }
-  void updateVariance(int i) {
+  void updateVariance() {
     for (int j = 0; j < dim; j++) {
       standard_normal[j] = normal_rand(rng);
     }
@@ -73,7 +74,7 @@ class SvForecaster {
       for (int h = 0; h < step; ++h) {
         last_pvec.segment(dim, (var_lag - 1) * dim) = tmp_vec;
         last_pvec.head(dim) = post_mean;
-        computeMean(i);
+        computeMean();
         if (sv) {
           for (int j = 0; j < dim; j++) {
             standard_normal[j] = normal_rand(rng);
@@ -84,7 +85,7 @@ class SvForecaster {
         } else {
           sv_update = h_last_record.row(i).transpose();
         }
-        updateVariance(i);
+        updateVariance();
         post_mean += contem_mat.triangularView<Eigen::UnitLower>().solve(standard_normal); // N(post_mean, L^-1 D L^T-1)
         predictive_distn.block(h, i * dim, 1, dim) = post_mean.transpose(); // hat(Y_{T + h}^{(i)})
         tmp_vec = last_pvec.head((var_lag - 1) * dim);
@@ -104,7 +105,7 @@ class SvForecaster {
       for (int h = 0; h < step; ++h) {
         last_pvec.segment(dim, (var_lag - 1) * dim) = tmp_vec;
         last_pvec.head(dim) = post_mean;
-        computeMean(i);
+        computeMean();
         if (sv) {
           for (int j = 0; j < dim; j++) {
             standard_normal[j] = normal_rand(rng);
@@ -115,7 +116,7 @@ class SvForecaster {
         } else {
           sv_update = h_last_record.row(i).transpose();
         }
-        updateVariance(i);
+        updateVariance();
         post_mean += contem_mat.triangularView<Eigen::UnitLower>().solve(standard_normal); // N(post_mean, L^-1 D L^T-1)
         predictive_distn.block(h, i * dim, 1, dim) = post_mean.transpose(); // hat(Y_{T + h}^{(i)})
         lpl[h] += sv_update.sum() / 2 - dim * log(2 * M_PI) / 2 - ((-sv_update / 2).array().exp() * (contem_mat * (post_mean - valid_vec)).array()).matrix().squaredNorm() / 2;
@@ -171,7 +172,7 @@ class SvVarForecaster : public SvForecaster {
     }
   }
   virtual ~SvVarForecaster() = default;
-  void computeMean(int i) override {
+  void computeMean() override {
     post_mean = last_pvec.transpose() * coef_mat;
   }
 };
 
@@ -189,7 +190,7 @@ class SvVharForecaster : public SvForecaster {
     }
   }
   virtual ~SvVharForecaster() = default;
-  void computeMean(int i) override {
+  void computeMean() override {
     post_mean = last_pvec.transpose() * har_trans.transpose() * coef_mat;
   }
 protected:
   Eigen::MatrixXd har_trans;
@@ -204,7 +205,7 @@ class SvVarSelectForecaster : public SvVarForecaster {
   SvVarSelectForecaster(const SvRecords& records, const Eigen::MatrixXd& selection, int step, const Eigen::MatrixXd& response_mat, int lag, bool include_mean, bool filter_stable, unsigned int seed)
   : SvVarForecaster(records, step, response_mat, lag, include_mean, filter_stable, seed), activity_graph(selection) {}
   virtual ~SvVarSelectForecaster() = default;
-  void computeMean(int i) override {
+  void computeMean() override {
     post_mean = last_pvec.transpose() * (activity_graph.array() * coef_mat.array()).matrix();
   }
 private:
@@ -219,7 +220,7 @@ class SvVharSelectForecaster : public SvVharForecaster {
   SvVharSelectForecaster(const SvRecords& records, const Eigen::MatrixXd& selection, int step, const Eigen::MatrixXd& response_mat, const Eigen::MatrixXd& har_trans, int month, bool include_mean, bool filter_stable, unsigned int seed)
   : SvVharForecaster(records, step, response_mat, har_trans, month, include_mean, filter_stable, seed), activity_graph(selection) {}
   virtual ~SvVharSelectForecaster() = default;
-  void computeMean(int i) override {
+  void computeMean() override {
     post_mean = last_pvec.transpose() * har_trans.transpose() * (activity_graph.array() * coef_mat.array()).matrix();
  }
 private:
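
With tmp_vec now sized and zeroed in the member initializer list, the constructor no longer seeds it (or post_mean) from the observed history; instead the forecasters reset last_pvec, post_mean, and tmp_vec from obs_vec at the top of each simulated path, so every path starts from the data rather than from the previous path's terminal state. A sketch of that per-path reset, with hypothetical names and the actual posterior draw elided:

#include <Eigen/Dense>

void forecast_paths(const Eigen::VectorXd& obs_vec, int num_sim, int step,
                    int dim, int var_lag) {
  Eigen::VectorXd last_pvec(obs_vec.size());
  Eigen::VectorXd post_mean(dim);
  Eigen::VectorXd tmp_vec((var_lag - 1) * dim);
  for (int i = 0; i < num_sim; ++i) {
    // Reset the recursion state from the observed history for every path.
    last_pvec = obs_vec;                                  // y_T, ..., y_(T - lag + 1)
    post_mean = obs_vec.head(dim);                        // y_T
    tmp_vec = obs_vec.segment(dim, (var_lag - 1) * dim);  // older lags
    for (int h = 0; h < step; ++h) {
      last_pvec.segment(dim, (var_lag - 1) * dim) = tmp_vec;
      last_pvec.head(dim) = post_mean;
      // ... draw post_mean for horizon h + 1 from the posterior here ...
      tmp_vec = last_pvec.head((var_lag - 1) * dim);      // shift the lag window
    }
  }
}

int main() {
  Eigen::VectorXd obs = Eigen::VectorXd::Ones(6); // dim 2, lag 3: y_T, y_(T-1), y_(T-2)
  forecast_paths(obs, 2, 4, 2, 3);
  return 0;
}
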
