Skip to content

Commit

Permalink
Merge branch 'main' of github.com:mind-inria/hidimstat into PR_cpi_lo…
Browse files Browse the repository at this point in the history
…co_pi
  • Loading branch information
jpaillard committed Feb 18, 2025
2 parents d269315 + 349ade8 commit 8446192
Show file tree
Hide file tree
Showing 8 changed files with 306 additions and 180 deletions.
7 changes: 4 additions & 3 deletions doc_conf/api.rst
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ Functions
knockoff_aggregation
model_x_knockoff
multivariate_1D_simulation
permutation_test_cv
permutation_importance
permutation_test
permutation_test_pval
reid
standardized_svr
zscore_from_pval
Expand All @@ -39,4 +39,5 @@ Classes
:toctree: generated/

LOCO
CPI
CPI
PermutationImportance
18 changes: 18 additions & 0 deletions doc_conf/references.bib
Original file line number Diff line number Diff line change
Expand Up @@ -265,6 +265,24 @@ @article{meinshausen2009p
publisher={Taylor \& Francis}
}

@book{westfall1993resampling,
title={Resampling-based multiple testing: Examples and methods for p-value adjustment},
author={Westfall, Peter H and Young, S Stanley},
volume={279},
year={1993},
publisher={John Wiley \& Sons}
}

@article{hirschhorn2005genome,
title={Genome-wide association studies for common diseases and complex traits},
author={Hirschhorn, Joel N and Daly, Mark J},
journal={Nature reviews genetics},
volume={6},
number={2},
pages={95--108},
year={2005},
publisher={Nature Publishing Group UK London}
}
@article{gaonkar_deriving_2012,
title = {Deriving statistical significance maps for {SVM} based image classification and group comparisons},
volume = {15},
Expand Down
26 changes: 19 additions & 7 deletions examples/plot_fmri_data_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,15 +52,18 @@
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_extraction import image
from sklearn.linear_model import Ridge
from sklearn.svm import LinearSVR
from sklearn.utils import Bunch

from hidimstat.ada_svr import ada_svr
from hidimstat.clustered_inference import clustered_inference
from hidimstat.ensemble_clustered_inference import ensemble_clustered_inference
from hidimstat.permutation_test import permutation_test, permutation_test_cv
from hidimstat.permutation_test import permutation_test, permutation_test_pval
from hidimstat.standardized_svr import standardized_svr
from hidimstat.stat_tools import pval_from_scale, zscore_from_pval

n_job = None


#############################################################################
# Function to fetch and preprocess Haxby dataset
Expand Down Expand Up @@ -151,19 +154,28 @@ def preprocess_haxby(subject=2, memory=None):

SVR_permutation_test_inference = False
if SVR_permutation_test_inference:
# We computed the regularization parameter by CV (C = 0.1)
pval_corr_svr_perm_test, one_minus_pval_corr_svr_perm_test = permutation_test_cv(
X, y, n_permutations=50, C=0.1
# It will be better to associate cross validation with the estimator
# but for the sake of time, this is not done.
estimator = LinearSVR()
weight_svr, weight_svr_distribution = permutation_test(
X, y, estimator, n_permutations=50
)
pval_corr_svr_perm_test, one_minus_pval_corr_svr_perm_test = permutation_test_pval(
weight_svr, weight_svr_distribution
)

# Another method is to compute the p-values by permutation test from the
# Ridge decoder. The solution provided by this method should be very close to
# the previous one and the computation time is much shorter: around 20 seconds.

# We computed the parameter from a cross validation (alpha = 0.0215)
# It will be better to use RidgeCV but for the sake of time, this is not done.
estimator = Ridge()
pval_corr_ridge_perm_test, one_minus_pval_corr_ridge_perm_test = permutation_test(
weight_ridge, weight_ridge_distribution = permutation_test(
X, y, estimator=estimator, n_permutations=200
)
pval_corr_ridge_perm_test, one_minus_pval_corr_ridge_perm_test = permutation_test_pval(
weight_ridge, weight_ridge_distribution
)

#############################################################################
# Now, let us run the algorithm introduced by Gaonkar et al. (c.f. References).
Expand Down Expand Up @@ -305,4 +317,4 @@ def plot_map(
# (EnCluDL) seems realistic as we recover the visual cortex and do not make
# spurious discoveries.

show()
# show()
6 changes: 5 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -80,4 +80,8 @@ where = ["src"]


[tool.hatch.version]
source = "vcs"
source = "vcs"

#pyproject.toml
[tool.pytest.ini_options]
addopts = "--ignore=src" # ignore src directory
5 changes: 3 additions & 2 deletions src/hidimstat/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from .multi_sample_split import aggregate_quantiles
from .noise_std import group_reid, reid
from .permutation_importance import PermutationImportance
from .permutation_test import permutation_test_cv
from .permutation_test import permutation_test, permutation_test_pval
from .scenario import multivariate_1D_simulation
from .standardized_svr import standardized_svr
from .stat_tools import zscore_from_pval
Expand All @@ -34,7 +34,8 @@
"knockoff_aggregation",
"model_x_knockoff",
"multivariate_1D_simulation",
"permutation_test_cv",
"permutation_test",
"permutation_test_pval",
"reid",
"standardized_svr",
"zscore_from_pval",
Expand Down
Loading

0 comments on commit 8446192

Please sign in to comment.