diff --git a/MetaWin_mac.spec b/MetaWin_mac.spec
index 2e111cd..b19c3d0 100644
--- a/MetaWin_mac.spec
+++ b/MetaWin_mac.spec
@@ -42,6 +42,7 @@ added_files = [("resources/images/exit@256px.png", "resources/images"),
("resources/images/cloud-filled-download-filled@256px.png", "resources/images"),
("resources/images/flag-united-states@256px.png", "resources/images"),
("resources/images/flag-spain@256px.png", "resources/images"),
+ ("resources/images/letter-t@256px.png", "resources/images"),
("resources/images/metawin3icon.png", "resources/images"),
("resources/images/draw_forest.png", "resources/images"),
diff --git a/MetaWin_windows.spec b/MetaWin_windows.spec
index ab5a447..e212679 100644
--- a/MetaWin_windows.spec
+++ b/MetaWin_windows.spec
@@ -42,6 +42,7 @@ added_files = [("resources/images/exit@256px.png", "resources/images"),
("resources/images/cloud-filled-download-filled@256px.png", "resources/images"),
("resources/images/flag-united-states@256px.png", "resources/images"),
("resources/images/flag-spain@256px.png", "resources/images"),
+ ("resources/images/letter-t@256px.png", "resources/images"),
("resources/images/metawin3icon.png", "resources/images"),
("resources/images/draw_forest.png", "resources/images"),
diff --git a/resources/images/letter-t@256px.png b/resources/images/letter-t@256px.png
new file mode 100644
index 0000000..bec0fd4
Binary files /dev/null and b/resources/images/letter-t@256px.png differ
diff --git a/resources/metawin_help.html b/resources/metawin_help.html
index 67afe4f..70e4eb7 100644
--- a/resources/metawin_help.html
+++ b/resources/metawin_help.html
@@ -58,6 +58,7 @@
Help Table of Contents
Phylogeny Tab
+ Analysis Options
Additional Options
@@ -353,21 +354,6 @@ Decimal Places
places used to display these values is automatically determined by the software based on the number of desired
replicates.
- Significance Level
- By default, significance levels for generating confidence intervals and certain types of tests are
- based on a standard value of 5% (α = 0.05). This value can be changed by
- choosing from the
- menu or
- from the toolbar on the
- left side of the Output Tab. Valid options are numbers between 0.01 and 1.0).
- Note that changing this value will
- only affect future output; vales already computed will not be changed. If you change the significance
- level to use in the output, MetaWin will attempt to remember this
- choice the next time you run the program.
- This choice also effects a few of the direct figures you can draw, such as
- Forest Plots
- and Normal Quantile Plots.
-
Font
You can change the font and it's properties used in the Output Tab by choosing
from the
@@ -470,6 +456,40 @@
Phylogeny Tab
Meta-Analysis option becomes available.
+ Analysis Options
+ Significance Level
+ By default, significance levels for generating confidence intervals and certain types of tests are
+ based on a standard value of 5% (α = 0.05). This value can be changed by
+ choosing from the
+ menu or
+ from the toolbar on the
+ left side of the Output Tab. Valid options are numbers between 0.01 and 1.0.
+ Note that changing this value will
+ only affect future output; values already computed will not be changed. If you change the significance
+ level to use in the output, MetaWin will attempt to remember this
+ choice the next time you run the program.
+ This choice also affects a few of the direct figures you can draw, such as
+ Forest Plots
+ and Normal Quantile Plots.
+
+ Confidence Interval Distribution
+ When determining confidence intervals around means using standard distributions (rather than a
+ bootstrap procedure), the traditional approach in meta-analysis has generally been to use a Normal
+ distribution. In earlier versions of MetaWin, we used Student's t
+ distribution instead, because we thought it useful to account for the uncertainty in estimation due
+ to the small number of studies often found in meta-analyses. With this new version, the user can
+ specify which distribution they wish to use, with the Normal distribution set as the default.
+
+ To change the distribution, choose the item under the
+ menu, which will toggle between the two
+ distributions. The current distribution is indicated by the icon to the left of the menu option
+ (either a Z or a t), as well as by the text
+ of the menu item, which specifies both the distribution being changed "from" and the one being changed "to".
+ MetaWin will attempt to remember this choice the next time you run the
+ program.
+ The specified distribution is also listed as one of the user-specified parameters at the beginning of
+ analysis output.
+
Additional Options
Check for Updates
@@ -889,6 +909,7 @@ Output
→ Fixed Effects Model
+→ Standard confidence intervals around means based on Normal distribution
→ Use bootstrap for confidence intervals around means: 999 iterations
→ Citations: Adams et al. (1997), Dixon (1993)
@@ -1119,6 +1140,9 @@ Output
→ Fixed Effects Model
+→ Standard confidence intervals around means based on Normal distribution
+
+
85 studies will be included in this analysis
@@ -1194,6 +1218,7 @@
Output
→ Fixed Effects Model
+→ Standard confidence intervals around means based on Normal distribution
→ Use bootstrap for confidence intervals around means: 999 iterations
→ Citations: Adams et al. (1997), Dixon (1993)
@@ -1371,6 +1396,7 @@ Output
→ Fixed Effects Model
+→ Standard confidence intervals around means based on Normal distribution
→ Use bootstrap for confidence intervals around means: 999 iterations
→ Citations: Adams et al. (1997), Dixon (1993)
@@ -1577,6 +1603,7 @@ Output
→ Fixed Effects Model
+→ Standard confidence intervals around means based on Normal distribution
→ Use bootstrap for confidence intervals around means: 999 iterations
→ Citations: Adams et al. (1997), Dixon (1993)
@@ -1768,6 +1795,7 @@ Output
→ Citations: Adams et al. (1997), Dixon (1993)
+→ Standard confidence intervals around means based on Normal distribution
→ Use randomization to test model structure: 999 iterations
→ Citation: Adams et al. (1997)
@@ -1987,6 +2015,7 @@ Output
→ Fixed Effects Model
+→ Standard confidence intervals around means based on Normal distribution
→ Use bootstrap for confidence intervals around means: 999 iterations
→ Citations: Adams et al. (1997), Dixon (1993)
@@ -2216,6 +2245,7 @@ Output
→ Fixed Effects Model
+→ Standard confidence intervals around means based on Normal distribution
→ Use bootstrap for confidence intervals around means: 999 iterations
→ Citations: Adams et al. (1997), Dixon (1993)
@@ -2559,7 +2589,7 @@ References
analysis of data from retrospective studies of disease. Journal of the National Cancer
Institute 22:719–748.
Mengersen, K., and J. Gurevitch (2013)
- Using other Metricsmof effect size in meta-analysis. Pp. 72–85 in Handbook of
+ Using other metrics of effect size in meta-analysis. Pp. 72–85 in Handbook of
Meta-analysis in Ecology and Evolution, J. Koricheva, J. Gurevitch and K.L. Mengersen, eds.
Princeton University Press: Princeton, NJ.
Normand, S.-L.T. (1999) Meta-analysis: Formulating, evaluating, combining,
diff --git a/src/MetaWinAnalysis.py b/src/MetaWinAnalysis.py
index 2c19072..cbb4295 100644
--- a/src/MetaWinAnalysis.py
+++ b/src/MetaWinAnalysis.py
@@ -56,6 +56,7 @@ def __init__(self):
self.create_graph = False
self.k_estimator = "L"
self.cor_test = "tau"
+ self.norm_ci = True
def report_choices(self):
output_blocks = []
@@ -168,14 +169,25 @@ def report_choices(self):
output.append("→ {}".format(get_text("Fixed Effects Model")))
output_blocks.append(output)
+ output = []
+ if self.norm_ci:
+ output.append("→ {}".format(get_text("ci from norm")))
+ else:
+ output.append("→ {}".format(get_text("ci from t")))
if self.bootstrap_mean is not None:
- output_blocks.append(["→ {}: {} {}".format(get_text("Use bootstrap for confidence intervals around "
- "means"), self.bootstrap_mean,
- get_text("iterations")),
- "→ {}: ".format(get_text("Citations")) + get_citation("Adams_et_1997") + ", " +
- get_citation("Dixon_1993")])
+ output.extend(["→ {}: {} {}".format(get_text("Use bootstrap for confidence intervals around means"),
+ self.bootstrap_mean, get_text("iterations")),
+ "→ {}: ".format(get_text("Citations")) + get_citation("Adams_et_1997") + ", " +
+ get_citation("Dixon_1993")])
citations.append("Adams_et_1997")
citations.append("Dixon_1993")
+ output_blocks.append(output)
+
if self.structure == RANKCOR:
output_blocks.append(["→ {}: {} {}".format(get_text("Randomization to test correlation"),
self.randomize_model, get_text("iterations"))])
@@ -1773,7 +1785,8 @@ def add_resampling_options_to_dialog(sender, test_model: bool = False):
return randomization_group
-def do_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, tree: Optional = None):
+def do_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, tree: Optional = None,
+ norm_ci: bool = True):
"""
primary function controlling the execution of an analysis
@@ -1781,48 +1794,51 @@ def do_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05
results
"""
output_blocks = [["{}
".format(get_text("Analysis"))]]
+ options.norm_ci = norm_ci
output, all_citations = options.report_choices()
output_blocks.extend(output)
if options.structure == SIMPLE_MA:
(output, figure, fig_caption, chart_data, analysis_values,
- citations) = MetaWinAnalysisFunctions.simple_meta_analysis(data, options, decimal_places, alpha)
+ citations) = MetaWinAnalysisFunctions.simple_meta_analysis(data, options, decimal_places, alpha, norm_ci)
elif options.structure == GROUPED_MA:
(output, figure, fig_caption, chart_data, analysis_values,
- citations) = MetaWinAnalysisFunctions.grouped_meta_analysis(data, options, decimal_places, alpha)
+ citations) = MetaWinAnalysisFunctions.grouped_meta_analysis(data, options, decimal_places, alpha, norm_ci)
elif options.structure == CUMULATIVE_MA:
output, figure, fig_caption, chart_data = MetaWinAnalysisFunctions.cumulative_meta_analysis(data, options,
decimal_places,
- alpha)
+ alpha, norm_ci)
analysis_values = None
citations = []
elif options.structure == REGRESSION_MA:
(output, figure, fig_caption, chart_data, analysis_values,
- citations) = MetaWinAnalysisFunctions.regression_meta_analysis(data, options, decimal_places, alpha)
+ citations) = MetaWinAnalysisFunctions.regression_meta_analysis(data, options, decimal_places, alpha, norm_ci)
elif options.structure == COMPLEX_MA:
output, analysis_values, citations = MetaWinAnalysisFunctions.complex_meta_analysis(data, options,
- decimal_places, alpha)
+ decimal_places, alpha,
+ norm_ci)
figure = None
fig_caption = None
chart_data = None
elif options.structure == NESTED_MA:
(output, figure, fig_caption, chart_data, analysis_values,
- citations) = MetaWinAnalysisFunctions.nested_meta_analysis(data, options, decimal_places, alpha)
+ citations) = MetaWinAnalysisFunctions.nested_meta_analysis(data, options, decimal_places, alpha, norm_ci)
elif options.structure == TRIM_FILL:
(output, figure, fig_caption, chart_data, analysis_values,
- citations) = MetaWinAnalysisFunctions.trim_and_fill_analysis(data, options, decimal_places, alpha)
+ citations) = MetaWinAnalysisFunctions.trim_and_fill_analysis(data, options, decimal_places, alpha, norm_ci)
elif options.structure == JACKKNIFE:
(output, figure, fig_caption,
- chart_data, citations) = MetaWinAnalysisFunctions.jackknife_meta_analysis(data, options, decimal_places, alpha)
+ chart_data, citations) = MetaWinAnalysisFunctions.jackknife_meta_analysis(data, options, decimal_places,
+ alpha, norm_ci)
analysis_values = None
elif options.structure == PHYLOGENETIC_MA:
output, citations = MetaWinAnalysisFunctions.phylogenetic_meta_analysis(data, options, tree, decimal_places,
- alpha)
+ alpha, norm_ci)
analysis_values = None
figure = None
fig_caption = None
chart_data = None
elif options.structure == RANKCOR:
- output, citations = MetaWinAnalysisFunctions.rank_correlation_analysis(data, options, decimal_places, alpha)
+ output, citations = MetaWinAnalysisFunctions.rank_correlation_analysis(data, options, decimal_places)
figure = None
fig_caption = None
chart_data = None
@@ -1841,7 +1857,7 @@ def do_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05
def meta_analysis(sender, data, last_effect, last_var, decimal_places: int = 4, alpha: float = 0.05,
- tree: Optional = None):
+ tree: Optional = None, norm_ci: bool = True):
"""
primary function for calling various dialogs to retrieve user choices about how to run various analyses
"""
@@ -1894,7 +1910,7 @@ def meta_analysis(sender, data, last_effect, last_var, decimal_places: int = 4,
if meta_analysis_options.structure is not None:
output, figure, fig_caption, chart_data, _ = do_meta_analysis(data, meta_analysis_options, decimal_places,
- alpha, tree)
+ alpha, tree, norm_ci)
sender.last_effect = meta_analysis_options.effect_data
sender.last_var = meta_analysis_options.effect_vars
return output, figure, fig_caption, chart_data
diff --git a/src/MetaWinAnalysisFunctions.py b/src/MetaWinAnalysisFunctions.py
index 575ca51..741c4d7 100644
--- a/src/MetaWinAnalysisFunctions.py
+++ b/src/MetaWinAnalysisFunctions.py
@@ -544,7 +544,7 @@ def caption_bootstrap_text(bs_n: int):
# ---------- basic meta-analysis ----------
-def simple_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05):
+def simple_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True):
# filter and prepare data for analysis
effect_sizes = options.effect_data
variances = options.effect_vars
@@ -598,8 +598,10 @@ def simple_meta_analysis(data, options, decimal_places: int = 4, alpha: float =
p = 1 - scipy.stats.chi2.cdf(qt, df=df)
- # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e))
- lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e))
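+ # CI half-width is z_(alpha/2)*sqrt(var_e) under the Normal option vs t_(alpha/2, df)*sqrt(var_e) under Student's t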
+ if norm_ci:
+ lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e))
+ else:
+ lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e))
lower_bs_ci, upper_bs_ci, lower_bias_ci, upper_bias_ci = bootstrap_means(options.bootstrap_mean, boot_data,
mean_e, pooled_var,
@@ -617,6 +619,7 @@ def simple_meta_analysis(data, options, decimal_places: int = 4, alpha: float =
# create chart data
forest_data = [mean_data]
for i in range(n):
+ # confidence intervals for individual studies always use the Normal distribution
tmp_lower, tmp_upper = scipy.stats.norm.interval(alpha=1-alpha, loc=e_data[i], scale=math.sqrt(v_data[i]))
study_data = mean_data_tuple(study_names[i], plot_order, 0, e_data[i], None, 0, 0, tmp_lower, tmp_upper,
None, None, None, None)
@@ -675,7 +678,7 @@ def check_data_for_group(output_blocks, n, group_cnts, group_label) -> bool:
# return True
-def grouped_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05):
+def grouped_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True):
# filter and prepare data for analysis
effect_sizes = options.effect_data
variances = options.effect_vars
@@ -720,7 +723,7 @@ def grouped_meta_analysis(data, options, decimal_places: int = 4, alpha: float =
citations = []
if check_data_for_group(output_blocks, n, group_cnts, options.groups.label):
output_blocks.append([get_text("{} studies will be included in this analysis").format(n)])
- # do enough to calculated the pooled variance
+ # do enough to calculate the pooled variance
group_w_sums = []
qe = 0
for group in group_names:
@@ -760,10 +763,12 @@ def grouped_meta_analysis(data, options, decimal_places: int = 4, alpha: float =
group_median = median_effect(group_e, group_w)
qe += group_qw
group_p = 1 - scipy.stats.chi2.cdf(group_qw, df=group_df)
- # group_lower, group_upper = scipy.stats.t.interval(alpha=1 - alpha, df=group_df, loc=group_mean,
- # scale=math.sqrt(group_var))
- group_lower, group_upper = scipy.stats.norm.interval(alpha=1 - alpha, loc=group_mean,
- scale=math.sqrt(group_var))
+ if norm_ci:
+ group_lower, group_upper = scipy.stats.norm.interval(alpha=1 - alpha, loc=group_mean,
+ scale=math.sqrt(group_var))
+ else:
+ group_lower, group_upper = scipy.stats.t.interval(alpha=1 - alpha, df=group_df, loc=group_mean,
+ scale=math.sqrt(group_var))
(group_lower_bs, group_upper_bs,
group_lower_bias, group_upper_bias) = bootstrap_means(options.bootstrap_mean, group_boot,
group_mean, pooled_var, options.random_effects,
@@ -779,8 +784,10 @@ def grouped_meta_analysis(data, options, decimal_places: int = 4, alpha: float =
chart_order += 1
mean_v = numpy.sum(v_data) / n
- # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1 - alpha, df=n-1, loc=mean_e, scale=math.sqrt(var_e))
- lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1 - alpha, loc=mean_e, scale=math.sqrt(var_e))
+ if norm_ci:
+ lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1 - alpha, loc=mean_e, scale=math.sqrt(var_e))
+ else:
+ lower_ci, upper_ci = scipy.stats.t.interval(alpha=1 - alpha, df=n-1, loc=mean_e, scale=math.sqrt(var_e))
lower_bs_ci, upper_bs_ci, lower_bias_ci, upper_bias_ci = bootstrap_means(options.bootstrap_mean, boot_data,
mean_e, pooled_var,
options.random_effects, alpha)
@@ -877,7 +884,7 @@ def grouped_meta_analysis(data, options, decimal_places: int = 4, alpha: float =
# ---------- cumulative meta-analysis ----------
-def cumulative_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05):
+def cumulative_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True):
# filter and prepare data for analysis
effect_sizes = options.effect_data
variances = options.effect_vars
@@ -939,8 +946,10 @@ def cumulative_meta_analysis(data, options, decimal_places: int = 4, alpha: floa
ws_data = numpy.reciprocal(tmp_v + pooled_var)
mean_e, var_e, qt, *_ = mean_effect_var_and_q(tmp_e, ws_data)
p = 1 - scipy.stats.chi2.cdf(qt, df=df)
- # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e))
- lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e))
+ if norm_ci:
+ lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e))
+ else:
+ lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e))
lower_bs_ci, upper_bs_ci, lower_bias_ci, upper_bias_ci = bootstrap_means(options.bootstrap_mean, tmp_boot,
mean_e, pooled_var,
options.random_effects, alpha)
@@ -988,7 +997,7 @@ def calculate_regression_ma_values(e_data, w_data, x_data, sum_w, sum_we, qt):
return qm, qe, b1_slope, b0_intercept, var_b1, var_b0, sum_wx, sum_wx2
-def regression_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05):
+def regression_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True):
# filter and prepare data for analysis
effect_sizes = options.effect_data
variances = options.effect_vars
@@ -1054,8 +1063,10 @@ def regression_meta_analysis(data, options, decimal_places: int = 4, alpha: floa
median_e = median_effect(e_data, ws_data)
mean_v = numpy.sum(v_data) / n
- # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1 - alpha, df=n-1, loc=mean_e, scale=math.sqrt(var_e))
- lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1 - alpha, loc=mean_e, scale=math.sqrt(var_e))
+ if norm_ci:
+ lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1 - alpha, loc=mean_e, scale=math.sqrt(var_e))
+ else:
+ lower_ci, upper_ci = scipy.stats.t.interval(alpha=1 - alpha, df=n-1, loc=mean_e, scale=math.sqrt(var_e))
lower_bs_ci, upper_bs_ci, lower_bias_ci, upper_bias_ci = bootstrap_means(options.bootstrap_mean, boot_data,
mean_e, pooled_var,
options.random_effects, alpha)
@@ -1182,7 +1193,7 @@ def calculate_glm(e: numpy.array, x: numpy.array, w: numpy.array):
return qm, qe, beta, xtwxinv
-def complex_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05):
+def complex_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True):
# filter and prepare data for analysis
effect_sizes = options.effect_data
variances = options.effect_vars
@@ -1306,8 +1317,10 @@ def complex_meta_analysis(data, options, decimal_places: int = 4, alpha: float =
# basic global calcs
mean_e, var_e, _, _, _, _ = mean_effect_var_and_q(e_data, ws_data)
mean_v = numpy.sum(v_data) / n
- # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1 - alpha, df=df, loc=mean_e, scale=math.sqrt(var_e))
- lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1 - alpha, loc=mean_e, scale=math.sqrt(var_e))
+ if norm_ci:
+ lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1 - alpha, loc=mean_e, scale=math.sqrt(var_e))
+ else:
+ lower_ci, upper_ci = scipy.stats.t.interval(alpha=1 - alpha, df=df, loc=mean_e, scale=math.sqrt(var_e))
lower_bs_ci, upper_bs_ci, lower_bias_ci, upper_bias_ci = bootstrap_means(options.bootstrap_mean, boot_data,
mean_e, pooled_var,
options.random_effects, alpha)
@@ -1433,7 +1446,8 @@ def qe(self, index):
sn += cn
return sq, sn
- def group_calculations(self, e, w, chart_order: int, boot_data, bootstrap_mean, alpha: float = 0.05):
+ def group_calculations(self, e, w, chart_order: int, boot_data, bootstrap_mean, alpha: float = 0.05,
+ norm_ci: bool = True):
chart_order += 1
mean_output = []
het_output = []
@@ -1447,10 +1461,12 @@ def group_calculations(self, e, w, chart_order: int, boot_data, bootstrap_mean,
group_median = median_effect(group_e, group_w)
group_p = 1 - scipy.stats.chi2.cdf(self.qw, df=group_df)
- # group_lower, group_upper = scipy.stats.t.interval(alpha=1 - alpha, df=group_df, loc=self.mean,
- # scale=math.sqrt(group_var))
- group_lower, group_upper = scipy.stats.norm.interval(alpha=1 - alpha, loc=self.mean,
- scale=math.sqrt(group_var))
+ if norm_ci:
+ group_lower, group_upper = scipy.stats.norm.interval(alpha=1 - alpha, loc=self.mean,
+ scale=math.sqrt(group_var))
+ else:
+ group_lower, group_upper = scipy.stats.t.interval(alpha=1 - alpha, df=group_df, loc=self.mean,
+ scale=math.sqrt(group_var))
(group_lower_bs, group_upper_bs,
group_lower_bias, group_upper_bias) = bootstrap_means(bootstrap_mean, group_boot, self.mean,
0, False, alpha)
@@ -1518,7 +1534,7 @@ def find_next_nested_level(index, group_data, parent) -> list:
return group_list
-def nested_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05):
+def nested_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True):
# filter and prepare data for analysis
effect_sizes = options.effect_data
variances = options.effect_vars
@@ -1578,8 +1594,10 @@ def nested_meta_analysis(data, options, decimal_places: int = 4, alpha: float =
mean_e, var_e, qt, sum_w, sum_w2, sum_ew = mean_effect_var_and_q(e_data, w_data)
median_e = median_effect(e_data, w_data)
mean_v = numpy.sum(v_data) / n
- # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1 - alpha, df=n-1, loc=mean_e, scale=math.sqrt(var_e))
- lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1 - alpha, loc=mean_e, scale=math.sqrt(var_e))
+ if norm_ci:
+ lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1 - alpha, loc=mean_e, scale=math.sqrt(var_e))
+ else:
+ lower_ci, upper_ci = scipy.stats.t.interval(alpha=1 - alpha, df=n-1, loc=mean_e, scale=math.sqrt(var_e))
lower_bs_ci, upper_bs_ci, lower_bias_ci, upper_bias_ci = bootstrap_means(options.bootstrap_mean, boot_data,
mean_e, 0, False, alpha)
global_mean_data = mean_data_tuple(get_text("Global"), 0, n, mean_e, median_e, var_e, mean_v, lower_ci,
@@ -1697,7 +1715,7 @@ def nested_meta_analysis(data, options, decimal_places: int = 4, alpha: float =
# ---------- trim-and-fill analysis ----------
-def trim_and_fill_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05):
+def trim_and_fill_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True):
# filter and prepare data for analysis
effect_sizes = options.effect_data
variances = options.effect_vars
@@ -1747,8 +1765,10 @@ def trim_and_fill_analysis(data, options, decimal_places: int = 4, alpha: float
output_blocks.append([get_text("Estimate of pooled variance") + ": " +
format(pooled_var, inline_float(decimal_places))])
- # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e))
- lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e))
+ if norm_ci:
+ lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e))
+ else:
+ lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e))
original_mean_data = mean_data_tuple(get_text("Original Mean"), 0, n, mean_e, median_e, var_e, mean_v,
lower_ci, upper_ci, 0, 0, 0, 0)
original_mean = mean_e
@@ -1832,8 +1852,10 @@ def trim_and_fill_analysis(data, options, decimal_places: int = 4, alpha: float
ws = numpy.reciprocal(tmp_data[:, 2] + pooled_var)
mean_e, var_e, *_ = mean_effect_var_and_q(tmp_data[:, 0], ws)
- # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e))
- lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e))
+ if norm_ci:
+ lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e))
+ else:
+ lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e))
trim_mean_data = mean_data_tuple(get_text("Trim and Fill Mean"), 0, n+trim_n, mean_e, median_e, var_e, mean_v,
lower_ci, upper_ci, 0, 0, 0, 0)
@@ -1882,7 +1904,7 @@ def phylogenetic_correlation(tip_names, root):
return p
-def phylogenetic_meta_analysis(data, options, tree, decimal_places: int = 4, alpha: float = 0.05):
+def phylogenetic_meta_analysis(data, options, tree, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True):
# filter and prepare data for analysis
effect_sizes = options.effect_data
variances = options.effect_vars
@@ -2136,7 +2158,7 @@ def phylogenetic_meta_analysis(data, options, tree, decimal_places: int = 4, alp
# ---------- jackknife meta-analysis ----------
-def jackknife_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05):
+def jackknife_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True):
# filter and prepare data for analysis
effect_sizes = options.effect_data
variances = options.effect_vars
@@ -2190,8 +2212,10 @@ def jackknife_meta_analysis(data, options, decimal_places: int = 4, alpha: float
format(pooled_var, inline_float(decimal_places))])
p = 1 - scipy.stats.chi2.cdf(qt, df=df)
- # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e))
- lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e))
+ if norm_ci:
+ lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e))
+ else:
+ lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e))
lower_bs_ci, upper_bs_ci, lower_bias_ci, upper_bias_ci = bootstrap_means(options.bootstrap_mean, boot_data,
mean_e, pooled_var,
@@ -2231,8 +2255,10 @@ def jackknife_meta_analysis(data, options, decimal_places: int = 4, alpha: float
mean_e, var_e, qt, *_ = mean_effect_var_and_q(tmp_e, ws_data)
median_e = median_effect(tmp_e, ws_data)
p = 1 - scipy.stats.chi2.cdf(qt, df=df)
- # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e))
- lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e))
+ if norm_ci:
+ lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e))
+ else:
+ lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e))
lower_bs_ci, upper_bs_ci, lower_bias_ci, upper_bias_ci = bootstrap_means(options.bootstrap_mean, tmp_boot,
mean_e, pooled_var,
options.random_effects, alpha)
@@ -2323,7 +2349,7 @@ def kendalls_tau(e_ranks, x_ranks):
return tau
-def rank_correlation_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05):
+def rank_correlation_analysis(data, options, decimal_places: int = 4):
# filter and prepare data for analysis
effect_sizes = options.effect_data
variances = options.effect_vars
diff --git a/src/MetaWinCharts.py b/src/MetaWinCharts.py
index 58e4ff2..10c51ad 100644
--- a/src/MetaWinCharts.py
+++ b/src/MetaWinCharts.py
@@ -499,10 +499,6 @@ def export_to_list(self):
return outlist
-def show_color_dialog():
- pass
-
-
def base_figure():
"""
create the baseline figure used for all plots
@@ -664,7 +660,6 @@ def add_quantile_axes_to_chart(x_data, y_data, slope: float, intercept: float, c
mse = numpy.sum(numpy.square(y_data - y_pred))/(n - 2) # mean square error
ss_x = numpy.sum(numpy.square(x_data - x_mean)) # sum of squares of x
- # t_score = -scipy.stats.t.ppf(0.025, n-2)
t_score = -scipy.stats.t.ppf(alpha/2, n-2)
nsteps = 100
p = [(i + 0.5)/nsteps for i in range(nsteps)]
diff --git a/src/MetaWinConfig.py b/src/MetaWinConfig.py
index bae4be9..f2b3e62 100644
--- a/src/MetaWinConfig.py
+++ b/src/MetaWinConfig.py
@@ -46,7 +46,8 @@ def default_config() -> dict:
"filtered row color": "lightpink",
"filtered col color": "red",
"auto update check": True,
- "alpha": 0.05
+ "alpha": 0.05,
+ "confidence interval distribution": "Normal"
}
@@ -86,6 +87,10 @@ def validate_config(key, value):
return value
except ValueError:
return None
+ elif key == "confidence interval distribution":
+ if value == "Students t":
+ return value
+ return "Normal"
return value
@@ -103,5 +108,6 @@ def export_config(main_window) -> None:
outfile.write("filtered col color={}\n".format(main_window.filtered_col_color))
outfile.write("auto update check={}\n".format(main_window.auto_update_check))
outfile.write("alpha={}\n".format(main_window.alpha))
+ outfile.write("confidence interval distribution={}".format(main_window.confidence_interval_dist))
except IOError:
pass
diff --git a/src/MetaWinConstants.py b/src/MetaWinConstants.py
index 9d6af76..bf4efea 100644
--- a/src/MetaWinConstants.py
+++ b/src/MetaWinConstants.py
@@ -68,6 +68,8 @@ def resource_path(relative_path: str, inc_file: bool = False) -> str:
radial_plot_icon = resource_path(icon_path + "draw-radial-plot@256px.png")
forest_plot_icon = resource_path(icon_path + "chart-forest-plot@256px.png")
normal_quantile_icon = resource_path(icon_path + "letter-z-2@256px.png")
+norm_dist_icon = resource_path(icon_path + "letter-z-2@256px.png")
+t_dist_icon = resource_path(icon_path + "letter-t@256px.png")
gear_icon = resource_path(icon_path + "gear-filled@256px.png")
clear_filter_icon = resource_path(icon_path + "filter-filled-eraser@256px.png")
font_icon = resource_path(icon_path + "text-fonts@256px.png")
@@ -210,7 +212,7 @@ def resource_path(relative_path: str, inc_file: bool = False) -> str:
"from retrospective studies of disease. Journal of the National Cancer "
"Institute 22:719–748.", "Mantel and Haenszel (1959)"],
- "Mengerson_Gurevitch_2013": ["Mengersen, K., and J. Gurevitch (2013) Using other Metricsmof effect size in "
+ "Mengerson_Gurevitch_2013": ["Mengersen, K., and J. Gurevitch (2013) Using other metrics of effect size in "
"meta-analysis. Pp. 72–85 in Handbook of Meta-analysis in Ecology and "
"Evolution, J. Koricheva, J. Gurevitch and K.L. Mengersen, eds. "
"Princeton University Press: Princeton, NJ.", "Mengersen and Gurevitch (2013)"],
diff --git a/src/MetaWinLanguage.py b/src/MetaWinLanguage.py
index 611f401..2f5f335 100644
--- a/src/MetaWinLanguage.py
+++ b/src/MetaWinLanguage.py
@@ -14,6 +14,7 @@
"About MetaWin": "About MetaWin",
"Additional Options": "Additional Options",
"Analysis": "Analysis",
+ "Analysis Options": "Analysis Options",
"available": "available",
"Automatically check for udpates": "Automatically check for udpates",
"Axes Titles": "Axes Titles",
@@ -32,6 +33,8 @@
"Categorical Independent Variables(s)": "Categorical Independent Variable(s)",
"Check for updates": "Check for updates",
"Choose an Analysis": "Choose an Analysis",
+ "ci from norm": "Standard confidence intervals around means based on Normal distribution",
+ "ci from t": "Standard confidence intervals around means based on Student\'s t distribution",
"Citation": "Citation",
"Citations": "Citations",
"Clear Data": "Clear Data",
@@ -217,6 +220,8 @@
"No Response": "No Response",
"No Weighting": "No Weighting",
"None": "None",
+ "normal to t": "Change Confidence Interval Distribution from Normal to Student\'s t",
+ "t to normal": "Change Confidence Interval Distribution from Student\'s t to Normal",
"Normal Quantile": "Normal Quantile",
"Normal Quantile Plot": "Normal Quantile Plot",
"normal_quantile_caption": "Normal Quantile plot following {}. The "
diff --git a/src/MetaWinMain.py b/src/MetaWinMain.py
index bd4055c..2270a55 100644
--- a/src/MetaWinMain.py
+++ b/src/MetaWinMain.py
@@ -48,6 +48,7 @@ def __init__(self, config: dict):
self.filtered_col_color = config["filtered col color"]
self.auto_update_check = config["auto update check"]
self.alpha = config["alpha"]
+ self.confidence_interval_dist = config["confidence interval distribution"]
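+ # expected values are "Normal" or "Students t" (see MetaWinConfig.validate_config)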
self.help = MetaWinConstants.help_index["metawin"]
self.localization_help = MetaWinConstants.help_index["localization"]
self.main_area = None
@@ -68,6 +69,7 @@ def __init__(self, config: dict):
self.tree_area_action = None
self.tree_toolbar = None
self.auto_update_check_action = None
+ self.conf_int_action = None
# self.language_actions = None
self.language_box = None
self.output_saved = True
@@ -167,15 +169,23 @@ def init_ui(self):
self.data_toolbar_action.triggered.connect(self.show_data_toolbar_click)
data_options_menu.addAction(self.data_toolbar_action)
options_menu.addMenu(data_options_menu)
+ # analysis options submenu
+ analysis_options_menu = QMenu(get_text("Analysis Options"), self)
+ # analysis_options_menu.setIcon(QIcon(MetaWinConstants.output_icon))
+ output_alpha_action = QAction(QIcon(MetaWinConstants.alpha_icon), get_text("Significance Level"), self)
+ output_alpha_action.triggered.connect(self.set_alpha_significance)
+ analysis_options_menu.addAction(output_alpha_action)
+ self.conf_int_action = QAction("tmp", self)
+ self.update_conf_int_action()
+ self.conf_int_action.triggered.connect(self.change_conf_int_distribution)
+ analysis_options_menu.addAction(self.conf_int_action)
+ options_menu.addMenu(analysis_options_menu)
# output options submenu
output_options_menu = QMenu(get_text("Output Options"), self)
output_options_menu.setIcon(QIcon(MetaWinConstants.output_icon))
output_decimal_action = QAction(QIcon(MetaWinConstants.decimal_icon), get_text("Decimal Places"), self)
output_decimal_action.triggered.connect(self.set_output_decimal_places)
output_options_menu.addAction(output_decimal_action)
- output_alpha_action = QAction(QIcon(MetaWinConstants.alpha_icon), get_text("Significance Level"), self)
- output_alpha_action.triggered.connect(self.set_alpha_significance)
- output_options_menu.addAction(output_alpha_action)
output_font_action = QAction(QIcon(MetaWinConstants.font_icon), get_text("Font"), self)
output_font_action.triggered.connect(self.set_output_font)
output_options_menu.addAction(output_font_action)
@@ -682,9 +692,13 @@ def save_data(self) -> bool:
def meta_analysis(self) -> None:
if self.data is not None:
+ if self.confidence_interval_dist == "Students t":
+ norm_ci = False
+ else:
+ norm_ci = True
output, figure, fig_caption, chart_data = MetaWinAnalysis.meta_analysis(self, self.data, self.last_effect,
self.last_var, self.output_decimals,
- self.alpha, self.phylogeny)
+ self.alpha, self.phylogeny, norm_ci)
if output is not None:
self.write_multi_output_blocks(output)
self.main_area.setCurrentIndex(1)
@@ -839,3 +853,18 @@ def edit_graph(self):
if figure is not None:
caption = self.chart_caption
self.show_figure(figure, caption, self.chart_data)
+
+ def change_conf_int_distribution(self):
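+ # toggle the stored preference between "Normal" and "Students t", then refresh the menu text/icon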
+ if self.confidence_interval_dist == "Normal":
+ self.confidence_interval_dist = "Students t"
+ else:
+ self.confidence_interval_dist = "Normal"
+ self.update_conf_int_action()
+
+ def update_conf_int_action(self):
+ if self.confidence_interval_dist == "Normal":
+ self.conf_int_action.setText(get_text("normal to t"))
+ self.conf_int_action.setIcon(QIcon(MetaWinConstants.norm_dist_icon))
+ else:
+ self.conf_int_action.setText(get_text("t to normal"))
+ self.conf_int_action.setIcon(QIcon(MetaWinConstants.t_dist_icon))