# 2022_11_21_jack_scratchpad.R
#an exploration of late payments and how to handle them
prop_late_csv <- read.csv(file = "C:/Users/Jack Li/Downloads/2022_03_21.csv")
prop_late_csv$proportion_late <- prop_late_csv$late/prop_late_csv$total #grabbing the proportion of late payments
#there are results over 1 - we have to adjust for that.
prop_late_csv$propL_adjusted <- prop_late_csv$proportion_late
for (i in seq_len(nrow(prop_late_csv))) {
  if (prop_late_csv$proportion_late[i] > 1) {
    same_household_max <- max(prop_late_csv$proportion_late[prop_late_csv$name == prop_late_csv$name[i] & prop_late_csv$proportion_late <= 1])
    prop_late_csv$propL_adjusted[i] <- same_household_max
    print(paste("Changed", i, "from", prop_late_csv$proportion_late[i], "to", same_household_max))
  }
} #a crude form of adjustment that replaces any proportion over 1 with the highest proportion of late payments <= 1 from the same building and does nothing else to the proportions.
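#for reference, the same adjustment can be done without an explicit loop - a sketch using base R's ave(); propL_vectorized is just a name for a check column:
capped <- ifelse(prop_late_csv$proportion_late <= 1, prop_late_csv$proportion_late, NA)
building_max <- ave(capped, prop_late_csv$name, FUN = function(v) max(v, na.rm = TRUE))
prop_late_csv$propL_vectorized <- ifelse(prop_late_csv$proportion_late > 1, building_max, prop_late_csv$proportion_late)
all.equal(prop_late_csv$propL_adjusted, prop_late_csv$propL_vectorized) #should be TRUE if this matches the loop above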
#Lower Wilson score intervals
#Wilson scores are a type of binomial confidence interval that adjust for both the observed sample size and the observed proportion, which keeps them well behaved even for very small samples.
#we can take the lower bound of the 95% Wilson confidence interval - using the lower bound gives the benefit of the doubt to households with very few recorded payments, even if a decently high proportion of those payments are late.
#in other words, it treats 1 month late out of 2 total more leniently than 15 months late out of 30 - both have a point estimate of 0.5, but the lower bound for the former is 0.025 versus 0.331 for the latter.
#this interval can be tuned by the confidence level - I started with alpha = 0.05 as the default 95% CI, but if those lower bounds feel too generous, let me know and I can pick an alpha that makes a little "more sense". setting alpha = 0.5 instead, for example, raises the lower bound for 1 late out of 2 total to ~0.35, which may be more sensible.
library(Hmisc)
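#a quick spot check of the numbers quoted above - binconf() returns the point estimate, lower, and upper bound; index [2] grabs the lower bound:
binconf(1, 2, alpha = 0.05, method = "wilson")[2] #1 late out of 2 total -> lower bound ~0.025
binconf(15, 30, alpha = 0.05, method = "wilson")[2] #15 late out of 30 total -> lower bound ~0.331
binconf(1, 2, alpha = 0.5, method = "wilson")[2] #same 1 out of 2 at alpha = 0.5 -> lower bound ~0.35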
prop_late_csv$wilson_prop_late <- numeric(nrow(prop_late_csv))
for (i in seq_len(nrow(prop_late_csv))) {
  x <- prop_late_csv$late[i]
  n <- prop_late_csv$total[i]
  if (x > n) n <- x #if there are x late payments, there must be at least x payments in total.
  prop_late_csv$wilson_prop_late[i] <- binconf(x, n, alpha = 0.05, method = "wilson")[2] #lower bound of the interval
  #for a handful of observations the Wilson lower bound dips VERY slightly below 0 due to floating-point imprecision - those get bumped up to 0, since negative proportions are theoretically impossible.
  if (prop_late_csv$wilson_prop_late[i] < 0) {
    prop_late_csv$wilson_prop_late[i] <- 0
  }
}
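#binconf() also accepts vectors, so the loop above can be condensed - a sketch with the same x > n guard and the same clamping at 0:
n_guarded <- pmax(prop_late_csv$total, prop_late_csv$late)
wilson_check <- pmax(binconf(prop_late_csv$late, n_guarded, alpha = 0.05, method = "wilson")[, "Lower"], 0)
all.equal(prop_late_csv$wilson_prop_late, unname(wilson_check)) #should be TRUE if the two approaches agree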
library(ggplot2)
#Mean, median, IQR, SD per building
#boxplots, violin plots
tapply(prop_late_csv$propL_adjusted, prop_late_csv$name, summary)
tapply(prop_late_csv$wilson_prop_late, prop_late_csv$name, summary)
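#summary() covers the mean, median, and IQR mentioned above but not the SD, so grab that separately:
tapply(prop_late_csv$propL_adjusted, prop_late_csv$name, sd)
tapply(prop_late_csv$wilson_prop_late, prop_late_csv$name, sd)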
windows()
ggplot(data = prop_late_csv, aes(x = name, y = propL_adjusted)) + geom_violin() + theme_classic(base_size = 10) + geom_boxplot(width = 0.1) + ylim(0, 1) #a rudimentary violin plot of the results when simply pushing everything above 1 down to the building maximum <= 1
windows()
ggplot(data = prop_late_csv, aes(x = name, y = wilson_prop_late)) + geom_violin() + theme_classic(base_size = 10) + geom_boxplot(width = 0.1) + ylim(0, 1) #a rudimentary violin plot of the results when using the Wilson lower bounds.
#you'll notice in the summary statistics and the violin plots that the Wilson approach tends to shrink most proportions toward zero (by nature of using the lower bound instead of the point estimate).
#that could make interpretation a little tricky, since our response variable is the lower bound of a confidence interval rather than a point estimate. nonetheless, if we plan to categorize tenants by quartiles and/or relative to each other, this should not be a major issue (see the sketch below).
#I am more partial to the Wilson implementation since it accounts for the different sample size behind each estimate, and I think I can adjust it easily to be more useful - but I do understand that shifting the estimates down is troublesome and may be a bit harder to explain. let me know what you think.
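#if we do go the quartile route, something like this would bucket tenants relative to their building (a sketch - late_quartile is just an illustrative name; heavy ties at 0 will collapse the lower buckets):
prop_late_csv$late_quartile <- ave(prop_late_csv$wilson_prop_late, prop_late_csv$name,
                                   FUN = function(v) findInterval(v, quantile(v, c(0.25, 0.5, 0.75))) + 1)
table(prop_late_csv$name, prop_late_csv$late_quartile)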