From e59cbda08943f7f360db2772c6b0ba43fb314ae4 Mon Sep 17 00:00:00 2001 From: Michael Rosenberg Date: Tue, 30 Aug 2022 10:45:32 -0400 Subject: [PATCH 1/6] fixed typo in reference --- resources/metawin_help.html | 2 +- src/MetaWinConstants.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/resources/metawin_help.html b/resources/metawin_help.html index 67afe4f..656d831 100644 --- a/resources/metawin_help.html +++ b/resources/metawin_help.html @@ -2559,7 +2559,7 @@

References

analysis of data from retrospective studies of disease. Journal of the National Cancer Institute 22:719–748.
  • Mengersen, K., and J. Gurevitch (2013) - Using other Metricsmof effect size in meta-analysis. Pp. 72–85 in Handbook of + Using other metrics of effect size in meta-analysis. Pp. 72–85 in Handbook of Meta-analysis in Ecology and Evolution, J. Koricheva, J. Gurevitch and K.L. Mengersen, eds. Princeton University Press: Princeton, NJ.
  • Normand, S.-L.T. (1999) Meta-analysis: Formulating, evaluating, combining, diff --git a/src/MetaWinConstants.py b/src/MetaWinConstants.py index 9d6af76..6d47e2c 100644 --- a/src/MetaWinConstants.py +++ b/src/MetaWinConstants.py @@ -210,7 +210,7 @@ def resource_path(relative_path: str, inc_file: bool = False) -> str: "from retrospective studies of disease. Journal of the National Cancer " "Institute 22:719–748.", "Mantel and Haenszel (1959)"], - "Mengerson_Gurevitch_2013": ["Mengersen, K., and J. Gurevitch (2013) Using other Metricsmof effect size in " + "Mengerson_Gurevitch_2013": ["Mengersen, K., and J. Gurevitch (2013) Using other metrics of effect size in " "meta-analysis. Pp. 72–85 in Handbook of Meta-analysis in Ecology and " "Evolution, J. Koricheva, J. Gurevitch and K.L. Mengersen, eds. " "Princeton University Press: Princeton, NJ.", "Mengersen and Gurevitch (2013)"], From 96a5fe7bbb69099b63ade6a5271504404afbfac7 Mon Sep 17 00:00:00 2001 From: Michael Rosenberg Date: Thu, 1 Sep 2022 14:01:02 -0400 Subject: [PATCH 2/6] preparing option for normal or t-dist --- src/MetaWinAnalysisFunctions.py | 91 +++++++++++++++++++++------------ src/MetaWinCharts.py | 1 - 2 files changed, 57 insertions(+), 35 deletions(-) diff --git a/src/MetaWinAnalysisFunctions.py b/src/MetaWinAnalysisFunctions.py index 575ca51..4e27c60 100644 --- a/src/MetaWinAnalysisFunctions.py +++ b/src/MetaWinAnalysisFunctions.py @@ -544,7 +544,7 @@ def caption_bootstrap_text(bs_n: int): # ---------- basic meta-analysis ---------- -def simple_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05): +def simple_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True): # filter and prepare data for analysis effect_sizes = options.effect_data variances = options.effect_vars @@ -598,8 +598,10 @@ def simple_meta_analysis(data, options, decimal_places: int = 4, alpha: float = p = 1 - scipy.stats.chi2.cdf(qt, df=df) - # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e)) - lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e)) + if norm_ci: + lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e)) + else: + lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e)) lower_bs_ci, upper_bs_ci, lower_bias_ci, upper_bias_ci = bootstrap_means(options.bootstrap_mean, boot_data, mean_e, pooled_var, @@ -617,6 +619,7 @@ def simple_meta_analysis(data, options, decimal_places: int = 4, alpha: float = # create chart data forest_data = [mean_data] for i in range(n): + # individual study data has to use normal dist tmp_lower, tmp_upper = scipy.stats.norm.interval(alpha=1-alpha, loc=e_data[i], scale=math.sqrt(v_data[i])) study_data = mean_data_tuple(study_names[i], plot_order, 0, e_data[i], None, 0, 0, tmp_lower, tmp_upper, None, None, None, None) @@ -675,7 +678,7 @@ def check_data_for_group(output_blocks, n, group_cnts, group_label) -> bool: # return True -def grouped_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05): +def grouped_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True): # filter and prepare data for analysis effect_sizes = options.effect_data variances = options.effect_vars @@ -720,7 +723,7 @@ def grouped_meta_analysis(data, options, decimal_places: int = 4, alpha: float = citations = [] if check_data_for_group(output_blocks, n, 
group_cnts, options.groups.label): output_blocks.append([get_text("{} studies will be included in this analysis").format(n)]) - # do enough to calculated the pooled variance + # do enough to calculate the pooled variance group_w_sums = [] qe = 0 for group in group_names: @@ -760,10 +763,12 @@ def grouped_meta_analysis(data, options, decimal_places: int = 4, alpha: float = group_median = median_effect(group_e, group_w) qe += group_qw group_p = 1 - scipy.stats.chi2.cdf(group_qw, df=group_df) - # group_lower, group_upper = scipy.stats.t.interval(alpha=1 - alpha, df=group_df, loc=group_mean, - # scale=math.sqrt(group_var)) - group_lower, group_upper = scipy.stats.norm.interval(alpha=1 - alpha, loc=group_mean, - scale=math.sqrt(group_var)) + if norm_ci: + group_lower, group_upper = scipy.stats.norm.interval(alpha=1 - alpha, loc=group_mean, + scale=math.sqrt(group_var)) + else: + group_lower, group_upper = scipy.stats.t.interval(alpha=1 - alpha, df=group_df, loc=group_mean, + scale=math.sqrt(group_var)) (group_lower_bs, group_upper_bs, group_lower_bias, group_upper_bias) = bootstrap_means(options.bootstrap_mean, group_boot, group_mean, pooled_var, options.random_effects, @@ -779,8 +784,10 @@ def grouped_meta_analysis(data, options, decimal_places: int = 4, alpha: float = chart_order += 1 mean_v = numpy.sum(v_data) / n - # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1 - alpha, df=n-1, loc=mean_e, scale=math.sqrt(var_e)) - lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1 - alpha, loc=mean_e, scale=math.sqrt(var_e)) + if norm_ci: + lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1 - alpha, loc=mean_e, scale=math.sqrt(var_e)) + else: + lower_ci, upper_ci = scipy.stats.t.interval(alpha=1 - alpha, df=n-1, loc=mean_e, scale=math.sqrt(var_e)) lower_bs_ci, upper_bs_ci, lower_bias_ci, upper_bias_ci = bootstrap_means(options.bootstrap_mean, boot_data, mean_e, pooled_var, options.random_effects, alpha) @@ -877,7 +884,7 @@ def grouped_meta_analysis(data, options, decimal_places: int = 4, alpha: float = # ---------- cumulative meta-analysis ---------- -def cumulative_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05): +def cumulative_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True): # filter and prepare data for analysis effect_sizes = options.effect_data variances = options.effect_vars @@ -939,8 +946,10 @@ def cumulative_meta_analysis(data, options, decimal_places: int = 4, alpha: floa ws_data = numpy.reciprocal(tmp_v + pooled_var) mean_e, var_e, qt, *_ = mean_effect_var_and_q(tmp_e, ws_data) p = 1 - scipy.stats.chi2.cdf(qt, df=df) - # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e)) - lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e)) + if norm_ci: + lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e)) + else: + lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e)) lower_bs_ci, upper_bs_ci, lower_bias_ci, upper_bias_ci = bootstrap_means(options.bootstrap_mean, tmp_boot, mean_e, pooled_var, options.random_effects, alpha) @@ -988,7 +997,7 @@ def calculate_regression_ma_values(e_data, w_data, x_data, sum_w, sum_we, qt): return qm, qe, b1_slope, b0_intercept, var_b1, var_b0, sum_wx, sum_wx2 -def regression_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05): +def regression_meta_analysis(data, options, 
decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True): # filter and prepare data for analysis effect_sizes = options.effect_data variances = options.effect_vars @@ -1054,8 +1063,10 @@ def regression_meta_analysis(data, options, decimal_places: int = 4, alpha: floa median_e = median_effect(e_data, ws_data) mean_v = numpy.sum(v_data) / n - # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1 - alpha, df=n-1, loc=mean_e, scale=math.sqrt(var_e)) - lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1 - alpha, loc=mean_e, scale=math.sqrt(var_e)) + if norm_ci: + lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1 - alpha, loc=mean_e, scale=math.sqrt(var_e)) + else: + lower_ci, upper_ci = scipy.stats.t.interval(alpha=1 - alpha, df=n-1, loc=mean_e, scale=math.sqrt(var_e)) lower_bs_ci, upper_bs_ci, lower_bias_ci, upper_bias_ci = bootstrap_means(options.bootstrap_mean, boot_data, mean_e, pooled_var, options.random_effects, alpha) @@ -1182,7 +1193,7 @@ def calculate_glm(e: numpy.array, x: numpy.array, w: numpy.array): return qm, qe, beta, xtwxinv -def complex_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05): +def complex_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True): # filter and prepare data for analysis effect_sizes = options.effect_data variances = options.effect_vars @@ -1306,8 +1317,10 @@ def complex_meta_analysis(data, options, decimal_places: int = 4, alpha: float = # basic global calcs mean_e, var_e, _, _, _, _ = mean_effect_var_and_q(e_data, ws_data) mean_v = numpy.sum(v_data) / n - # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1 - alpha, df=df, loc=mean_e, scale=math.sqrt(var_e)) - lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1 - alpha, loc=mean_e, scale=math.sqrt(var_e)) + if norm_ci: + lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1 - alpha, loc=mean_e, scale=math.sqrt(var_e)) + else: + lower_ci, upper_ci = scipy.stats.t.interval(alpha=1 - alpha, df=df, loc=mean_e, scale=math.sqrt(var_e)) lower_bs_ci, upper_bs_ci, lower_bias_ci, upper_bias_ci = bootstrap_means(options.bootstrap_mean, boot_data, mean_e, pooled_var, options.random_effects, alpha) @@ -1518,7 +1531,7 @@ def find_next_nested_level(index, group_data, parent) -> list: return group_list -def nested_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05): +def nested_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True): # filter and prepare data for analysis effect_sizes = options.effect_data variances = options.effect_vars @@ -1578,8 +1591,10 @@ def nested_meta_analysis(data, options, decimal_places: int = 4, alpha: float = mean_e, var_e, qt, sum_w, sum_w2, sum_ew = mean_effect_var_and_q(e_data, w_data) median_e = median_effect(e_data, w_data) mean_v = numpy.sum(v_data) / n - # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1 - alpha, df=n-1, loc=mean_e, scale=math.sqrt(var_e)) - lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1 - alpha, loc=mean_e, scale=math.sqrt(var_e)) + if norm_ci: + lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1 - alpha, loc=mean_e, scale=math.sqrt(var_e)) + else: + lower_ci, upper_ci = scipy.stats.t.interval(alpha=1 - alpha, df=n-1, loc=mean_e, scale=math.sqrt(var_e)) lower_bs_ci, upper_bs_ci, lower_bias_ci, upper_bias_ci = bootstrap_means(options.bootstrap_mean, boot_data, mean_e, 0, False, alpha) global_mean_data = mean_data_tuple(get_text("Global"), 0, n, mean_e, median_e, var_e, mean_v, lower_ci, @@ -1697,7 
+1712,7 @@ def nested_meta_analysis(data, options, decimal_places: int = 4, alpha: float = # ---------- trim-and-fill analysis ---------- -def trim_and_fill_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05): +def trim_and_fill_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True): # filter and prepare data for analysis effect_sizes = options.effect_data variances = options.effect_vars @@ -1747,8 +1762,10 @@ def trim_and_fill_analysis(data, options, decimal_places: int = 4, alpha: float output_blocks.append([get_text("Estimate of pooled variance") + ": " + format(pooled_var, inline_float(decimal_places))]) - # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e)) - lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e)) + if norm_ci: + lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e)) + else: + lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e)) original_mean_data = mean_data_tuple(get_text("Original Mean"), 0, n, mean_e, median_e, var_e, mean_v, lower_ci, upper_ci, 0, 0, 0, 0) original_mean = mean_e @@ -1832,8 +1849,10 @@ def trim_and_fill_analysis(data, options, decimal_places: int = 4, alpha: float ws = numpy.reciprocal(tmp_data[:, 2] + pooled_var) mean_e, var_e, *_ = mean_effect_var_and_q(tmp_data[:, 0], ws) - # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e)) - lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e)) + if norm_ci: + lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e)) + else: + lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e)) trim_mean_data = mean_data_tuple(get_text("Trim and Fill Mean"), 0, n+trim_n, mean_e, median_e, var_e, mean_v, lower_ci, upper_ci, 0, 0, 0, 0) @@ -1882,7 +1901,7 @@ def phylogenetic_correlation(tip_names, root): return p -def phylogenetic_meta_analysis(data, options, tree, decimal_places: int = 4, alpha: float = 0.05): +def phylogenetic_meta_analysis(data, options, tree, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True): # filter and prepare data for analysis effect_sizes = options.effect_data variances = options.effect_vars @@ -2136,7 +2155,7 @@ def phylogenetic_meta_analysis(data, options, tree, decimal_places: int = 4, alp # ---------- jackknife meta-analysis ---------- -def jackknife_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05): +def jackknife_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, norm_ci: bool = True): # filter and prepare data for analysis effect_sizes = options.effect_data variances = options.effect_vars @@ -2190,8 +2209,10 @@ def jackknife_meta_analysis(data, options, decimal_places: int = 4, alpha: float format(pooled_var, inline_float(decimal_places))]) p = 1 - scipy.stats.chi2.cdf(qt, df=df) - # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e)) - lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e)) + if norm_ci: + lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e)) + else: + lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e)) 
lower_bs_ci, upper_bs_ci, lower_bias_ci, upper_bias_ci = bootstrap_means(options.bootstrap_mean, boot_data, mean_e, pooled_var, @@ -2231,8 +2252,10 @@ def jackknife_meta_analysis(data, options, decimal_places: int = 4, alpha: float mean_e, var_e, qt, *_ = mean_effect_var_and_q(tmp_e, ws_data) median_e = median_effect(tmp_e, ws_data) p = 1 - scipy.stats.chi2.cdf(qt, df=df) - # lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e)) - lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e)) + if norm_ci: + lower_ci, upper_ci = scipy.stats.norm.interval(alpha=1-alpha, loc=mean_e, scale=math.sqrt(var_e)) + else: + lower_ci, upper_ci = scipy.stats.t.interval(alpha=1-alpha, df=df, loc=mean_e, scale=math.sqrt(var_e)) lower_bs_ci, upper_bs_ci, lower_bias_ci, upper_bias_ci = bootstrap_means(options.bootstrap_mean, tmp_boot, mean_e, pooled_var, options.random_effects, alpha) diff --git a/src/MetaWinCharts.py b/src/MetaWinCharts.py index 58e4ff2..8f37fd6 100644 --- a/src/MetaWinCharts.py +++ b/src/MetaWinCharts.py @@ -664,7 +664,6 @@ def add_quantile_axes_to_chart(x_data, y_data, slope: float, intercept: float, c mse = numpy.sum(numpy.square(y_data - y_pred))/(n - 2) # mean square error ss_x = numpy.sum(numpy.square(x_data - x_mean)) # sum of squares of x - # t_score = -scipy.stats.t.ppf(0.025, n-2) t_score = -scipy.stats.t.ppf(alpha/2, n-2) nsteps = 100 p = [(i + 0.5)/nsteps for i in range(nsteps)] From 6a2252abef834530196e00f5959f4354b2a21a13 Mon Sep 17 00:00:00 2001 From: Michael Rosenberg Date: Thu, 1 Sep 2022 14:04:59 -0400 Subject: [PATCH 3/6] added missing norm vs t if --- src/MetaWinAnalysisFunctions.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/MetaWinAnalysisFunctions.py b/src/MetaWinAnalysisFunctions.py index 4e27c60..2c070e5 100644 --- a/src/MetaWinAnalysisFunctions.py +++ b/src/MetaWinAnalysisFunctions.py @@ -1446,7 +1446,8 @@ def qe(self, index): sn += cn return sq, sn - def group_calculations(self, e, w, chart_order: int, boot_data, bootstrap_mean, alpha: float = 0.05): + def group_calculations(self, e, w, chart_order: int, boot_data, bootstrap_mean, alpha: float = 0.05, + norm_ci: bool = True): chart_order += 1 mean_output = [] het_output = [] @@ -1460,10 +1461,12 @@ def group_calculations(self, e, w, chart_order: int, boot_data, bootstrap_mean, group_median = median_effect(group_e, group_w) group_p = 1 - scipy.stats.chi2.cdf(self.qw, df=group_df) - # group_lower, group_upper = scipy.stats.t.interval(alpha=1 - alpha, df=group_df, loc=self.mean, - # scale=math.sqrt(group_var)) - group_lower, group_upper = scipy.stats.norm.interval(alpha=1 - alpha, loc=self.mean, - scale=math.sqrt(group_var)) + if norm_ci: + group_lower, group_upper = scipy.stats.norm.interval(alpha=1 - alpha, loc=self.mean, + scale=math.sqrt(group_var)) + else: + group_lower, group_upper = scipy.stats.t.interval(alpha=1 - alpha, df=group_df, loc=self.mean, + scale=math.sqrt(group_var)) (group_lower_bs, group_upper_bs, group_lower_bias, group_upper_bias) = bootstrap_means(bootstrap_mean, group_boot, self.mean, 0, False, alpha) From bb3723f6112682df84fbf5599971290e7bebc7d0 Mon Sep 17 00:00:00 2001 From: msrosenberg Date: Sat, 3 Sep 2022 18:33:02 -0400 Subject: [PATCH 4/6] implemented swapping between t and normal dists for conf intervals --- MetaWin_mac.spec | 1 + MetaWin_windows.spec | 1 + resources/images/letter-t@256px.png | Bin 0 -> 3406 bytes src/MetaWinAnalysis.py | 52 
++++++++++++++++++---------- src/MetaWinAnalysisFunctions.py | 2 +- src/MetaWinConfig.py | 8 ++++- src/MetaWinConstants.py | 2 ++ src/MetaWinLanguage.py | 5 +++ src/MetaWinMain.py | 37 +++++++++++++++++--- 9 files changed, 84 insertions(+), 24 deletions(-) create mode 100644 resources/images/letter-t@256px.png diff --git a/MetaWin_mac.spec b/MetaWin_mac.spec index 2e111cd..b19c3d0 100644 --- a/MetaWin_mac.spec +++ b/MetaWin_mac.spec @@ -42,6 +42,7 @@ added_files = [("resources/images/exit@256px.png", "resources/images"), ("resources/images/cloud-filled-download-filled@256px.png", "resources/images"), ("resources/images/flag-united-states@256px.png", "resources/images"), ("resources/images/flag-spain@256px.png", "resources/images"), + ("resources/images/letter-t@256px.png", "resources/images"), ("resources/images/metawin3icon.png", "resources/images"), ("resources/images/draw_forest.png", "resources/images"), diff --git a/MetaWin_windows.spec b/MetaWin_windows.spec index ab5a447..e212679 100644 --- a/MetaWin_windows.spec +++ b/MetaWin_windows.spec @@ -42,6 +42,7 @@ added_files = [("resources/images/exit@256px.png", "resources/images"), ("resources/images/cloud-filled-download-filled@256px.png", "resources/images"), ("resources/images/flag-united-states@256px.png", "resources/images"), ("resources/images/flag-spain@256px.png", "resources/images"), + ("resources/images/letter-t@256px.png", "resources/images"), ("resources/images/metawin3icon.png", "resources/images"), ("resources/images/draw_forest.png", "resources/images"), diff --git a/resources/images/letter-t@256px.png b/resources/images/letter-t@256px.png new file mode 100644 index 0000000000000000000000000000000000000000..bec0fd467d4df7473b6bebbb0ca9283b996f10e4 GIT binary patch literal 3406 zcmai1c{o(-AAip|Gh>)RmTYCqGD>#YV_L-sNo9*6p;AP$#+h(Ywu_{)OyNf9#w9Ie zPMLCV%9cST=28)2xUzI*ey4ta|DNA@&hxzQ_x*g{@AiDp_dRchtFyhhn35O(K-^)^ zPB#D`dCM|9Y^%{`|0%X{z4!v~bxW@j4cENAbK zo33K+sXby5QaYE(Rq9u(X63SGl~hcPNHQ|zGc6&#5+L%F|&( z1hEiegPu-9rMy^bwxrNxU_>o4Xkq#8y@l1pJZ1Q=Vqt0bck2^(ANM6CYdYlp-`BB8 zA%<*W^=Yd1^u{haL4P6_Vy6xenu#6{)62GiO+g?;y%9PD z@5k{bYg>7Tj1G{9EiJCvvp8!l{IZ@%p}wyU@}KWot!Wj$Py}&XRN)}G<$4m@Tg$^Z z9)kj*j$nFaf1kyAXk55Vtv&UtUDLWTa4Q&$Un(XhFh%Wz8+UT)MR2SD(@uLq^zfB-D4>o!^RG^B8=piay5tSYu6A8~6SKu`kCXf_?lH zt%T>(-PeEk@X!SsGnh4@qPrD(h(bLOwQ6XF zu=;Amd}RQs_0I;soFpwt*EY6xo}q;AprU_U5trcb18rp|D2CM$`?=@K-k&j_FSG7& zTG^_Zb<9$bII{0>zdYw(Vdaa%;w#zH2CL=RoeWNz6@NWeTZ|-#KRCO+Z00-1z1oDO z3L6++2@{rMO+AV094LQej!#AE*4ZVw$qSwNH!6l5df=1DDWvpBguHMc-<+$$+H-q-&R*sV~PgIn$@{$F>%c0%1W+qVDwnP zj$f5V$!yi%(0|ZHso`(y@+N1V$}#sEgSXL}1S))OzFv7!XCuWM-Li3X$Th#N_yM0o z_Hmw?nhdZL223F;#BAAHNj27J=Ox>WgBEZ|yC}E3ague7rCT+c)VhTmctkw$z&C=I zTdOd3UZ|%r{n;G349e8>S7aEa?$4&|4M_;wxgD*~pnNAWb-Qm1qqt5S-#3pdUD#S3 zP{DnoMafd(@J`cD&9V>B@m`bU;RQ-m?$R}$p?o*N9(;5{I7EHja~rnK(VCaqM$Ymh z%+}~G^p^bVSVliVMI)%_v?XMrjj-1vY;S`RGBcD!w;Xc|;`#c0Q5y*BHxjt7KdBd4Bd}PFE&@>0n13^Ju_9?%%EoWSyB$`s` zaVm+fcvDIPM2(8_|LUQ2Z$Q|YuXm?CnkATNddebfDsHsN2$+VOU|T=Tg9Nd7uy9@j zT^-mSYWSk|VvJO}QZSfrJIFlXcBcd{EP3h=%o)6thWKQ*2o+8AU^GyzH(u{k=WpYO zZ`a#f1a(QSW7X#!|3dRcep4|7-~30%|EyIVD0OGZ1cCY1-E%{vhRXvnlNSL2C4J%y4+%Zq2+^t6n? 
z+d|8gVdqS^SA#@<_lGtJ&iI>SFf^`+un*sa#%Me%dc*rf{p!*@T;rqk=Dv{}Bu9(q zh9~c^r!tDEppTD&2Ho2t{LKQF(5Ckc68+jivPi=Dpi72v?=y&KFaQ?R_b=B7#-UeO#=8^wq~!Rr%2l zQSK=7L1@1=&kB`69|wVku?_r!3a>?^Q=8`o;o$F#nyB~yy|s8|#%@(u z)epcmi&V6+MtcY@ej>+2@b!-Yb6%)0%Y#s%VglYqoC~-UpK%UM7$I!EMzH@T6+Ihx zAG~AfVR4-xq-}X8IdC9L86`cz1ud>EO3m??#J1ty8}6zRmN{1)rPM-r9&0azK!vk% zYTl@LI@I+`eY&=qqBknZhuKeXZf%AF6gY5kD-dY~Rl~C3W{u!AK>>ViKLstQg&S5E zikW~3^L|L@HnGSWh;VU|k0iEv0Vu3N*ol*Wo`BBgJ|K3{@h$blBJJm(%CAp?UW*9$ z_)3#{b;srwgc(jnAKiiV_uM*45kSUZqVPQ&8s?^V;3=qj*U0sHm$93zgsVH{IRlEG zsi4V$p!dX<*hQg89dSTS5qy1wHcQZ(YX2*|-EKqtvo@a$iDyEtH#Y`+7fz&j)nnz@ zsOUX+ygZ!oT#4bs8$C?LxvE@D5k$SUuri)%TZ<+i1--!+#-SKzx5qb8k-X#8#w7Z= zPhRNJlThNC8KFo!sOsFrJQ1)JPgW7+T6QczoGup+s6Xb4{I3r8N}mA*Q-p0RpWhF! z*!l?VLU0Af-st-h55`(Nvivs)KPTg!yE?+w*JZWm9ako-7P~Vp;ZVdz%Sj=m@`R+8 z91MD&$uS*qZ?qy|fjSj^f+yK%?;spl6i+M)gOn@~wu<*(#{hg5^rSWKpqd~=6;?v@C<`6IS2 zT_01NhE{*YJuv|M z!=e!v8qW>Ep+<IK*87D zcqPw(1x=ot1{M9oI8BImI@TTooM#N&3n14H(GT}PG8qiRHKTa*e3{!1Ppzqg#+NJN zj;G)-byilb)nrKmylMqH!FsXQpiEJ@=QQ4njS;3Q3H{^`3L>>*?}PYo@t;C{cC!?g zxE=~o!CSL#hCNGBB1hsg?w(~ktGdF=K>8KE;)kU`61I=}Q!C8`1mRs(67TBvU{eQf z=E9&9I&X*Dkbn&|c~+^#ixRhta1D5Xc-Aa;+LX34L5MHDazz8ZSH8n48}~GE4(3N; zu@3io@d_^9@qvg@rGTe_+CDt+2#H*Cky`2lC5x@qrAoNV^)fMLbwlq^y^OED0H>$1 zXfl%}5(%Zv%ISB5EW`CrO`tz_P^~3Tk-r0rPLIqZea>HrE=lY= zO*2w;ed@>`#TL`O+48J1_;qTB{iX@yIfQS?KZIQpWJH7a#rT?_GJju=}p}lNpf`5(0r!rmifHH4r+e@NNSNE91UJK^F1L%sg; zMv4~m{n1}EV7gMV3@dE^s;$kxF;YwBb#lyq@`M-MTFqoSM;LwuSLB5=ESDo~H#QU_ zRqe02NA6FK3-uqCaD&))VLJ__iap8qNlR8zgi6kJ+G!igpjh)-kN2<2oF*Qt6UUvda4A}ubVRIk^c{q{^ee4QTufwnP8Jtuw&Zef_J zS(Z=QWtz&~X5~n&JLBlrOR-czTBtsZ!54cbFiWo2avN)GW773b<#^OSU3{;(|K#j% z)Yv!2ZVzpnf8A8*VV-;L$t&q5qN=*mwIPW?XeG7iuImo`Si7UWAmL+g%4;s3Zv5C0 zTv+sslM}K&!a&e99ussvYKuR9#vWXgWnl*QBVpP~iD!*IFUZd7C5g=HaY_an8qz{! zZ~XX82v+R&`LLbue&a}|DCWu4@!x7>8J$w(5|gd)Xhum{a*h1I(^E2+|L=9;e-SDO aLMZUuht`#0Q$Bt{0uH;JcmBSedHz4PPT?m2 literal 0 HcmV?d00001 diff --git a/src/MetaWinAnalysis.py b/src/MetaWinAnalysis.py index 2c19072..cbb4295 100644 --- a/src/MetaWinAnalysis.py +++ b/src/MetaWinAnalysis.py @@ -56,6 +56,7 @@ def __init__(self): self.create_graph = False self.k_estimator = "L" self.cor_test = "tau" + self.norm_ci = True def report_choices(self): output_blocks = [] @@ -168,14 +169,25 @@ def report_choices(self): output.append("→ {}".format(get_text("Fixed Effects Model"))) output_blocks.append(output) + output = [] + if self.norm_ci: + output.append("→ {}".format(get_text("ci from norm"))) + else: + output.append("→ {}".format(get_text("ci from t"))) if self.bootstrap_mean is not None: - output_blocks.append(["→ {}: {} {}".format(get_text("Use bootstrap for confidence intervals around " - "means"), self.bootstrap_mean, - get_text("iterations")), - "→ {}: ".format(get_text("Citations")) + get_citation("Adams_et_1997") + ", " + - get_citation("Dixon_1993")]) + output.extend(["→ {}: {} {}".format(get_text("Use bootstrap for confidence intervals around means"), + self.bootstrap_mean, get_text("iterations")), + "→ {}: ".format(get_text("Citations")) + get_citation("Adams_et_1997") + ", " + + get_citation("Dixon_1993")]) + # output_blocks.append(["→ {}: {} {}".format(get_text("Use bootstrap for confidence intervals around " + # "means"), self.bootstrap_mean, + # get_text("iterations")), + # "→ {}: ".format(get_text("Citations")) + get_citation("Adams_et_1997") + ", " + + # get_citation("Dixon_1993")]) citations.append("Adams_et_1997") citations.append("Dixon_1993") + output_blocks.append(output) + if self.structure == RANKCOR: output_blocks.append(["→ {}: {} 
{}".format(get_text("Randomization to test correlation"), self.randomize_model, get_text("iterations"))]) @@ -1773,7 +1785,8 @@ def add_resampling_options_to_dialog(sender, test_model: bool = False): return randomization_group -def do_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, tree: Optional = None): +def do_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05, tree: Optional = None, + norm_ci: bool = True): """ primary function controlling the execution of an analysis @@ -1781,48 +1794,51 @@ def do_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05 results """ output_blocks = [["

    {}

    ".format(get_text("Analysis"))]] + options.norm_ci = norm_ci output, all_citations = options.report_choices() output_blocks.extend(output) if options.structure == SIMPLE_MA: (output, figure, fig_caption, chart_data, analysis_values, - citations) = MetaWinAnalysisFunctions.simple_meta_analysis(data, options, decimal_places, alpha) + citations) = MetaWinAnalysisFunctions.simple_meta_analysis(data, options, decimal_places, alpha, norm_ci) elif options.structure == GROUPED_MA: (output, figure, fig_caption, chart_data, analysis_values, - citations) = MetaWinAnalysisFunctions.grouped_meta_analysis(data, options, decimal_places, alpha) + citations) = MetaWinAnalysisFunctions.grouped_meta_analysis(data, options, decimal_places, alpha, norm_ci) elif options.structure == CUMULATIVE_MA: output, figure, fig_caption, chart_data = MetaWinAnalysisFunctions.cumulative_meta_analysis(data, options, decimal_places, - alpha) + alpha, norm_ci) analysis_values = None citations = [] elif options.structure == REGRESSION_MA: (output, figure, fig_caption, chart_data, analysis_values, - citations) = MetaWinAnalysisFunctions.regression_meta_analysis(data, options, decimal_places, alpha) + citations) = MetaWinAnalysisFunctions.regression_meta_analysis(data, options, decimal_places, alpha, norm_ci) elif options.structure == COMPLEX_MA: output, analysis_values, citations = MetaWinAnalysisFunctions.complex_meta_analysis(data, options, - decimal_places, alpha) + decimal_places, alpha, + norm_ci) figure = None fig_caption = None chart_data = None elif options.structure == NESTED_MA: (output, figure, fig_caption, chart_data, analysis_values, - citations) = MetaWinAnalysisFunctions.nested_meta_analysis(data, options, decimal_places, alpha) + citations) = MetaWinAnalysisFunctions.nested_meta_analysis(data, options, decimal_places, alpha, norm_ci) elif options.structure == TRIM_FILL: (output, figure, fig_caption, chart_data, analysis_values, - citations) = MetaWinAnalysisFunctions.trim_and_fill_analysis(data, options, decimal_places, alpha) + citations) = MetaWinAnalysisFunctions.trim_and_fill_analysis(data, options, decimal_places, alpha, norm_ci) elif options.structure == JACKKNIFE: (output, figure, fig_caption, - chart_data, citations) = MetaWinAnalysisFunctions.jackknife_meta_analysis(data, options, decimal_places, alpha) + chart_data, citations) = MetaWinAnalysisFunctions.jackknife_meta_analysis(data, options, decimal_places, + alpha, norm_ci) analysis_values = None elif options.structure == PHYLOGENETIC_MA: output, citations = MetaWinAnalysisFunctions.phylogenetic_meta_analysis(data, options, tree, decimal_places, - alpha) + alpha, norm_ci) analysis_values = None figure = None fig_caption = None chart_data = None elif options.structure == RANKCOR: - output, citations = MetaWinAnalysisFunctions.rank_correlation_analysis(data, options, decimal_places, alpha) + output, citations = MetaWinAnalysisFunctions.rank_correlation_analysis(data, options, decimal_places) figure = None fig_caption = None chart_data = None @@ -1841,7 +1857,7 @@ def do_meta_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05 def meta_analysis(sender, data, last_effect, last_var, decimal_places: int = 4, alpha: float = 0.05, - tree: Optional = None): + tree: Optional = None, norm_ci: bool = True): """ primary function for calling various dialogs to retrieve user choices about how to run various analyses """ @@ -1894,7 +1910,7 @@ def meta_analysis(sender, data, last_effect, last_var, decimal_places: int = 4, if 
meta_analysis_options.structure is not None: output, figure, fig_caption, chart_data, _ = do_meta_analysis(data, meta_analysis_options, decimal_places, - alpha, tree) + alpha, tree, norm_ci) sender.last_effect = meta_analysis_options.effect_data sender.last_var = meta_analysis_options.effect_vars return output, figure, fig_caption, chart_data diff --git a/src/MetaWinAnalysisFunctions.py b/src/MetaWinAnalysisFunctions.py index 2c070e5..741c4d7 100644 --- a/src/MetaWinAnalysisFunctions.py +++ b/src/MetaWinAnalysisFunctions.py @@ -2349,7 +2349,7 @@ def kendalls_tau(e_ranks, x_ranks): return tau -def rank_correlation_analysis(data, options, decimal_places: int = 4, alpha: float = 0.05): +def rank_correlation_analysis(data, options, decimal_places: int = 4): # filter and prepare data for analysis effect_sizes = options.effect_data variances = options.effect_vars diff --git a/src/MetaWinConfig.py b/src/MetaWinConfig.py index bae4be9..f2b3e62 100644 --- a/src/MetaWinConfig.py +++ b/src/MetaWinConfig.py @@ -46,7 +46,8 @@ def default_config() -> dict: "filtered row color": "lightpink", "filtered col color": "red", "auto update check": True, - "alpha": 0.05 + "alpha": 0.05, + "confidence interval distribution": "Normal" } @@ -86,6 +87,10 @@ def validate_config(key, value): return value except ValueError: return None + elif key == "confidence interval distribution": + if value == "Students t": + return value + return "Normal" return value @@ -103,5 +108,6 @@ def export_config(main_window) -> None: outfile.write("filtered col color={}\n".format(main_window.filtered_col_color)) outfile.write("auto update check={}\n".format(main_window.auto_update_check)) outfile.write("alpha={}\n".format(main_window.alpha)) + outfile.write("confidence interval distribution={}".format(main_window.confidence_interval_dist)) except IOError: pass diff --git a/src/MetaWinConstants.py b/src/MetaWinConstants.py index 6d47e2c..bf4efea 100644 --- a/src/MetaWinConstants.py +++ b/src/MetaWinConstants.py @@ -68,6 +68,8 @@ def resource_path(relative_path: str, inc_file: bool = False) -> str: radial_plot_icon = resource_path(icon_path + "draw-radial-plot@256px.png") forest_plot_icon = resource_path(icon_path + "chart-forest-plot@256px.png") normal_quantile_icon = resource_path(icon_path + "letter-z-2@256px.png") +norm_dist_icon = resource_path(icon_path + "letter-z-2@256px.png") +t_dist_icon = resource_path(icon_path + "letter-t@256px.png") gear_icon = resource_path(icon_path + "gear-filled@256px.png") clear_filter_icon = resource_path(icon_path + "filter-filled-eraser@256px.png") font_icon = resource_path(icon_path + "text-fonts@256px.png") diff --git a/src/MetaWinLanguage.py b/src/MetaWinLanguage.py index 611f401..2f5f335 100644 --- a/src/MetaWinLanguage.py +++ b/src/MetaWinLanguage.py @@ -14,6 +14,7 @@ "About MetaWin": "About MetaWin", "Additional Options": "Additional Options", "Analysis": "Analysis", + "Analysis Options": "Analysis Options", "available": "available", "Automatically check for udpates": "Automatically check for udpates", "Axes Titles": "Axes Titles", @@ -32,6 +33,8 @@ "Categorical Independent Variables(s)": "Categorical Independent Variable(s)", "Check for updates": "Check for updates", "Choose an Analysis": "Choose an Analysis", + "ci from norm": "Standard confidence intervals around means based on Normal distribution", + "ci from t": "Standard confidence intervals around means based on Student\'s t distribution", "Citation": "Citation", "Citations": "Citations", "Clear Data": "Clear Data", @@ -217,6 +220,8 @@ 
"No Response": "No Response", "No Weighting": "No Weighting", "None": "None", + "normal to t": "Change Confidence Interval Distribution from Normal to Student\'s t", + "t to normal": "Change Confidence Interval Distribution from Student\'s t to Normal", "Normal Quantile": "Normal Quantile", "Normal Quantile Plot": "Normal Quantile Plot", "normal_quantile_caption": "Normal Quantile plot following {}. The " diff --git a/src/MetaWinMain.py b/src/MetaWinMain.py index bd4055c..2270a55 100644 --- a/src/MetaWinMain.py +++ b/src/MetaWinMain.py @@ -48,6 +48,7 @@ def __init__(self, config: dict): self.filtered_col_color = config["filtered col color"] self.auto_update_check = config["auto update check"] self.alpha = config["alpha"] + self.confidence_interval_dist = config["confidence interval distribution"] self.help = MetaWinConstants.help_index["metawin"] self.localization_help = MetaWinConstants.help_index["localization"] self.main_area = None @@ -68,6 +69,7 @@ def __init__(self, config: dict): self.tree_area_action = None self.tree_toolbar = None self.auto_update_check_action = None + self.conf_int_action = None # self.language_actions = None self.language_box = None self.output_saved = True @@ -167,15 +169,23 @@ def init_ui(self): self.data_toolbar_action.triggered.connect(self.show_data_toolbar_click) data_options_menu.addAction(self.data_toolbar_action) options_menu.addMenu(data_options_menu) + # analysis options submenu + analysis_options_menu = QMenu(get_text("Analysis Options"), self) + # analysis_options_menu.setIcon(QIcon(MetaWinConstants.output_icon)) + output_alpha_action = QAction(QIcon(MetaWinConstants.alpha_icon), get_text("Significance Level"), self) + output_alpha_action.triggered.connect(self.set_alpha_significance) + analysis_options_menu.addAction(output_alpha_action) + self.conf_int_action = QAction("tmp", self) + self.update_conf_int_action() + self.conf_int_action.triggered.connect(self.change_conf_int_distribution) + analysis_options_menu.addAction(self.conf_int_action) + options_menu.addMenu(analysis_options_menu) # output options submenu output_options_menu = QMenu(get_text("Output Options"), self) output_options_menu.setIcon(QIcon(MetaWinConstants.output_icon)) output_decimal_action = QAction(QIcon(MetaWinConstants.decimal_icon), get_text("Decimal Places"), self) output_decimal_action.triggered.connect(self.set_output_decimal_places) output_options_menu.addAction(output_decimal_action) - output_alpha_action = QAction(QIcon(MetaWinConstants.alpha_icon), get_text("Significance Level"), self) - output_alpha_action.triggered.connect(self.set_alpha_significance) - output_options_menu.addAction(output_alpha_action) output_font_action = QAction(QIcon(MetaWinConstants.font_icon), get_text("Font"), self) output_font_action.triggered.connect(self.set_output_font) output_options_menu.addAction(output_font_action) @@ -682,9 +692,13 @@ def save_data(self) -> bool: def meta_analysis(self) -> None: if self.data is not None: + if self.confidence_interval_dist == "Students t": + norm_ci = False + else: + norm_ci = True output, figure, fig_caption, chart_data = MetaWinAnalysis.meta_analysis(self, self.data, self.last_effect, self.last_var, self.output_decimals, - self.alpha, self.phylogeny) + self.alpha, self.phylogeny, norm_ci) if output is not None: self.write_multi_output_blocks(output) self.main_area.setCurrentIndex(1) @@ -839,3 +853,18 @@ def edit_graph(self): if figure is not None: caption = self.chart_caption self.show_figure(figure, caption, self.chart_data) + + def 
change_conf_int_distribution(self): + if self.confidence_interval_dist == "Normal": + self.confidence_interval_dist = "Students t" + else: + self.confidence_interval_dist = "Normal" + self.update_conf_int_action() + + def update_conf_int_action(self): + if self.confidence_interval_dist == "Normal": + self.conf_int_action.setText(get_text("normal to t")) + self.conf_int_action.setIcon(QIcon(MetaWinConstants.norm_dist_icon)) + else: + self.conf_int_action.setText(get_text("t to normal")) + self.conf_int_action.setIcon(QIcon(MetaWinConstants.t_dist_icon)) From 92b4cbc9b76934387efd4de83241338cf008603f Mon Sep 17 00:00:00 2001 From: Michael Rosenberg Date: Tue, 6 Sep 2022 10:59:45 -0400 Subject: [PATCH 5/6] updated help file to include distribution choice --- resources/metawin_help.html | 60 +++++++++++++++++++++++++++---------- 1 file changed, 45 insertions(+), 15 deletions(-) diff --git a/resources/metawin_help.html b/resources/metawin_help.html index 656d831..70e4eb7 100644 --- a/resources/metawin_help.html +++ b/resources/metawin_help.html @@ -58,6 +58,7 @@

    Help Table of Contents

  • Phylogeny Tab
  • +
  • Analysis Options
  • Additional Options
  • @@ -353,21 +354,6 @@

    Decimal Places

    places used to display these values is automatically determined by the software based on the number of desired replicates.

    -

    Significance Level

    -

    By default, significance levels for generating confidence intervals and certain types of tests are - based on a standard value of 5% (α = 0.05). This value can be changed by - choosing Significance Level from the - Options→Output Options menu or - from the toolbar on the - left side of the Output Tab. Valid options are numbers between 0.01 and 1.0). - Note that changing this value will - only affect future output; vales already computed will not be changed. If you change the significance - level to use in the output, MetaWin will attempt to remember this - choice the next time you run the program.

    -

    This choice also effects a few of the direct figures you can draw, such as - Forest Plots - and Normal Quantile Plots.

    -

    Font

    You can change the font and it's properties used in the Output Tab by choosing Font from the @@ -470,6 +456,40 @@

    Phylogeny Tab

    Meta-Analysis option becomes available.

    +

    Analysis Options

    +

    Significance Level

    +

    By default, significance levels for generating confidence intervals and certain types of tests are + based on a standard value of 5% (α = 0.05). This value can be changed by + choosing Significance Level from the + Options→Analysis Options menu or + from the toolbar on the + left side of the Output Tab. Valid options are numbers between 0.01 and 1.0. + Note that changing this value will + only affect future output; values already computed will not be changed. If you change the significance + level to use in the output, MetaWin will attempt to remember this + choice the next time you run the program.

    +

    This choice also affects a few of the direct figures you can draw, such as + Forest Plots + and Normal Quantile Plots.

    + +
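As a quick illustration (an editorial sketch with hypothetical values, not part of the patch), the chosen significance level maps directly onto the critical value used for the standard confidence intervals:

# Illustrative sketch only: a smaller alpha gives a larger critical value and a wider interval.
import scipy.stats

for alpha in (0.10, 0.05, 0.01):
    z = scipy.stats.norm.ppf(1 - alpha / 2)
    print(alpha, round(z, 3))   # 0.10 -> 1.645, 0.05 -> 1.96, 0.01 -> 2.576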

    Confidence Interval Distribution

    +

    When determining confidence intervals around means using standard distributions (rather than a + bootstrap procedure), the traditional approach in meta-analysis has generally been to use a Normal + distribution. In earlier versions of MetaWin, we used Student's t + distribution instead, because we thought it useful to account for the uncertainty in estimation due + to the small number of studies often found in meta-analyses. With this new version, the user can + specify which distribution they wish to use, with the Normal distribution set as default. +

    +

    To change the distribution, choose the item under the + Options→Analysis Options menu, which will toggle between the two + distributions. The current distribution is indicated by the icon to the left of the menu option + (either a Z or a t), as well as by the text + of the menu item which specifies the distributions being changed both "from" and "to". + MetaWin will attempt to remember this choice the next time you run the + program.

    +

    The specified distribution is also listed as one of the user-specified parameters at the beginning of + analysis output.

    +
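To see the practical difference, here is an illustrative sketch (not part of the patch; made-up numbers, but the same SciPy calls as MetaWinAnalysisFunctions.py — newer SciPy releases rename the first argument of interval() from alpha to confidence, so it is passed positionally here):

# Illustrative sketch only: the same pooled mean and variance, with the interval taken
# from the Normal distribution versus Student's t distribution.
import math
import scipy.stats

mean_e, var_e, n, alpha = 0.35, 0.01, 12, 0.05   # hypothetical pooled effect, variance, number of studies

z_lo, z_hi = scipy.stats.norm.interval(1 - alpha, loc=mean_e, scale=math.sqrt(var_e))
t_lo, t_hi = scipy.stats.t.interval(1 - alpha, df=n - 1, loc=mean_e, scale=math.sqrt(var_e))
# With only 12 studies the t-based interval is noticeably wider: roughly ±0.220 vs ±0.196 around the mean.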

    Additional Options

    Check for Updates

    @@ -889,6 +909,7 @@

    Output

    → Fixed Effects Model

    +→ Standard confidence intervals around means based on Normal distribution
    → Use bootstrap for confidence intervals around means: 999 iterations
    → Citations: Adams et al. (1997), Dixon (1993)

    @@ -1119,6 +1140,9 @@

    Output

    → Fixed Effects Model

    +→ Standard confidence intervals around means based on Normal distribution +

    +

    85 studies will be included in this analysis

    @@ -1194,6 +1218,7 @@

    Output

    → Fixed Effects Model

    +→ Standard confidence intervals around means based on Normal distribution
    → Use bootstrap for confidence intervals around means: 999 iterations
    → Citations: Adams et al. (1997), Dixon (1993)

    @@ -1371,6 +1396,7 @@

    Output

    → Fixed Effects Model

    +→ Standard confidence intervals around means based on Normal distribution
    → Use bootstrap for confidence intervals around means: 999 iterations
    → Citations: Adams et al. (1997), Dixon (1993)

    @@ -1577,6 +1603,7 @@

    Output

    → Fixed Effects Model

    +→ Standard confidence intervals around means based on Normal distribution
    → Use bootstrap for confidence intervals around means: 999 iterations
    → Citations: Adams et al. (1997), Dixon (1993)

    @@ -1768,6 +1795,7 @@

    Output

    → Citations: Adams et al. (1997), Dixon (1993)

    +→ Standard confidence intervals around means based on Normal distribution
    → Use randomization to test model structure: 999 iterations
    → Citation: Adams et al. (1997)

    @@ -1987,6 +2015,7 @@

    Output

    → Fixed Effects Model

    +→ Standard confidence intervals around means based on Normal distribution
    → Use bootstrap for confidence intervals around means: 999 iterations
    → Citations: Adams et al. (1997), Dixon (1993)

    @@ -2216,6 +2245,7 @@

    Output

    → Fixed Effects Model

    +→ Standard confidence intervals around means based on Normal distribution
    → Use bootstrap for confidence intervals around means: 999 iterations
    → Citations: Adams et al. (1997), Dixon (1993)

    From 28bfc02d280b684e19a3db39bcfa219cbab152d1 Mon Sep 17 00:00:00 2001 From: Michael Rosenberg Date: Tue, 6 Sep 2022 11:02:28 -0400 Subject: [PATCH 6/6] removed stray empty function --- src/MetaWinCharts.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/MetaWinCharts.py b/src/MetaWinCharts.py index 8f37fd6..10c51ad 100644 --- a/src/MetaWinCharts.py +++ b/src/MetaWinCharts.py @@ -499,10 +499,6 @@ def export_to_list(self): return outlist -def show_color_dialog(): - pass - - def base_figure(): """ create the baseline figure used for all plots
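A minimal sketch (hypothetical helper name, not part of the patches above) of how the new "confidence interval distribution" setting maps onto the norm_ci flag, following the MetaWinConfig.validate_config and MetaWinMain.meta_analysis changes in patch 4:

# Illustrative sketch only: anything other than the exact string "Students t" falls back to
# the default "Normal", and only "Students t" switches the analyses to t-based intervals.
def norm_ci_from_config(value: str) -> bool:
    dist = value if value == "Students t" else "Normal"
    return dist == "Normal"

assert norm_ci_from_config("Normal") is True
assert norm_ci_from_config("Students t") is False
assert norm_ci_from_config("Student's t") is True   # unrecognized spelling falls back to Normal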