From 7bc747d1b4f34b43b7c2ad8e7643b3e64f0dcc21 Mon Sep 17 00:00:00 2001
From: Kinto
Date: Wed, 30 Aug 2023 10:44:19 +1000
Subject: [PATCH] Cleanup

---
 README.md        |  2 +-
 README.qmd       |  2 +-
 models/mrp2.stan | 60 ------------------------------------------------
 3 files changed, 2 insertions(+), 62 deletions(-)
 delete mode 100644 models/mrp2.stan

diff --git a/README.md b/README.md
index f40afbb..958a76a 100644
--- a/README.md
+++ b/README.md
@@ -152,7 +152,7 @@ therefore, predict an ALP loss in 2019.
 
 Given that this is exactly the opposite of what one might guess looking
 at raw polls alone, and that 2019 was a notorious miss in terms of
-election predictions – with many analysts predicting an ALP win, we can
+election predictions, with many analysts predicting an ALP win, we can
 feel very satisfied with this result.
 
 # Diagnostics.
diff --git a/README.qmd b/README.qmd
index 11ea965..403b02d 100644
--- a/README.qmd
+++ b/README.qmd
@@ -148,7 +148,7 @@ ps_sim |>
 ```
 
 Thus we see that of our 10,000 simulations, the ALP manages to achieve a majority (i.e. win >75 seats) in under 40% of them. We would, therefore, predict an ALP loss in 2019.
-Given that this is exactly the opposite of what one might guess looking at raw polls alone, and that 2019 was a notorious miss in terms of election predictions -- with many analysts predicting an ALP win, we can feel very satisfied with this result.
+Given that this is exactly the opposite of what one might guess looking at raw polls alone, and that 2019 was a notorious miss in terms of election predictions, with many analysts predicting an ALP win, we can feel very satisfied with this result.
 
 # Diagnostics.
 
diff --git a/models/mrp2.stan b/models/mrp2.stan
deleted file mode 100644
index ce77b1f..0000000
--- a/models/mrp2.stan
+++ /dev/null
@@ -1,60 +0,0 @@
-//
-// MRP component of model.stan.
-//
-// Note that in the main model we use the national polls effectively to produce
-// an estimate of national swing. We then take the previous TPP result for
-// each division modified by the national swing as a rough prior for the expected
-// TPP result.
-//
-// In this model, we simply take the prior TPP result for each division as
-// our rough prior.
-
-data {
-  int n_records;
-  int n_covariates;
-  int n_divisions;
-  array[n_records] int record_division;  # map obs to division
-  array[n_divisions] real tpp_div_prev;
-
-  array[n_records] int sex_record;
-  array[n_records] int age_record;
-  array[n_records] int educ_record;
-  array[n_records] int tpp_record;
-}
-
-parameters {
-  real epsilon;
-  array[4] real b_age;
-  array[4] real b_educ;
-  array[n_divisions] real tpp_div_curr;
-}
-
-transformed parameters {
-  array[2] real b_sex = {epsilon, -epsilon};
-  array[n_records] real eta;
-
-  // Offsets
-  array[4] real o_age = {-2, -1, 1, 2};
-  array[4] real o_educ = {-2, -1, 1, 2};
-
-  for (n in 1:n_records) {
-    int d = record_division[n];
-    int a = age_record[n];
-    int s = sex_record[n];
-    int u = educ_record[n];
-
-    eta[n] = inv_logit(tpp_div_curr[d]/100) + b_age[a] + o_age[a] + b_sex[s] + b_educ[u] + o_educ[u];
-  }
-}
-
-model {
-  for (n in 1:n_records) {
-    int d = record_division[n];
-    tpp_div_curr[d] ~ normal(tpp_div_prev[d], 5);
-    tpp_record[n] ~ bernoulli_logit(eta[n]);
-  }
-
-  epsilon ~ normal(0, 1);
-  b_age ~ normal(0, 2);
-  b_educ ~ normal(0, 2);
-}
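
For reference, the structure described in the removed mrp2.stan (a per-division TPP baseline anchored to the previous election, plus age, sex and education effects feeding a bernoulli_logit likelihood) can be written with every term on the log-odds scale and with each prior stated once outside the record loop. The sketch below is hypothetical and illustrative only: it is not the model.stan that the repository actually uses, it omits the unused n_covariates input and the fixed age/education offsets for brevity, and the bounds on tpp_div_curr and tpp_div_prev are an added assumption.

// Hypothetical sketch only; not the model.stan used by this repository.
// Same data layout as the removed mrp2.stan, but all terms are kept on the
// log-odds scale and priors are vectorized outside the loop over records.
data {
  int<lower=1> n_records;
  int<lower=1> n_divisions;
  array[n_records] int<lower=1, upper=n_divisions> record_division;  // map obs to division
  array[n_divisions] real<lower=0, upper=100> tpp_div_prev;          // previous TPP (%) per division

  array[n_records] int<lower=1, upper=2> sex_record;
  array[n_records] int<lower=1, upper=4> age_record;
  array[n_records] int<lower=1, upper=4> educ_record;
  array[n_records] int<lower=0, upper=1> tpp_record;                 // observed two-party-preferred vote
}

parameters {
  real epsilon;
  array[4] real b_age;
  array[4] real b_educ;
  array[n_divisions] real<lower=0, upper=100> tpp_div_curr;          // current TPP (%) per division
}

transformed parameters {
  array[2] real b_sex = {epsilon, -epsilon};
  vector[n_records] eta;

  for (n in 1:n_records) {
    int d = record_division[n];
    // logit() keeps the division baseline on the same scale as the demographic
    // effects; bernoulli_logit() below then applies inv_logit exactly once.
    eta[n] = logit(tpp_div_curr[d] / 100)
             + b_age[age_record[n]]
             + b_sex[sex_record[n]]
             + b_educ[educ_record[n]];
  }
}

model {
  // Each prior appears once (the removed file restated the division prior
  // inside the loop over records, once per observation).
  epsilon ~ normal(0, 1);
  b_age ~ normal(0, 2);
  b_educ ~ normal(0, 2);
  tpp_div_curr ~ normal(tpp_div_prev, 5);

  // Vectorized likelihood over all survey records.
  tpp_record ~ bernoulli_logit(eta);
}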