Update documentation
actions-user committed Feb 20, 2025
1 parent 635d003 commit d6e93f7
Showing 58 changed files with 258 additions and 435 deletions.
7 binary files not shown.
@@ -55,7 +55,7 @@
from sklearn.svm import LinearSVR
from sklearn.utils import Bunch

from hidimstat.ada_svr import ada_svr
from hidimstat.adaptative_permutation_threshold_SVR import ada_svr
from hidimstat.clustered_inference import clustered_inference
from hidimstat.ensemble_clustered_inference import ensemble_clustered_inference
from hidimstat.permutation_test import permutation_test, permutation_test_pval
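The hunk above renames the module that provides ``ada_svr``. As a minimal compatibility sketch (not part of the documented hidimstat API), downstream code could try the new module path first and fall back to the old one:

.. code-block:: python

    # Compatibility sketch: prefer the new module path introduced by this
    # commit, fall back to the old one for pre-rename hidimstat releases.
    try:
        from hidimstat.adaptative_permutation_threshold_SVR import ada_svr
    except ImportError:
        from hidimstat.ada_svr import ada_svr  # older releases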
@@ -22,7 +22,7 @@
},
"outputs": [],
"source": [
"import numpy as np\nimport pandas as pd\nfrom nilearn import datasets\nfrom nilearn.image import mean_img\nfrom nilearn.input_data import NiftiMasker\nfrom nilearn.plotting import plot_stat_map, show\nfrom sklearn.cluster import FeatureAgglomeration\nfrom sklearn.feature_extraction import image\nfrom sklearn.linear_model import Ridge\nfrom sklearn.svm import LinearSVR\nfrom sklearn.utils import Bunch\n\nfrom hidimstat.ada_svr import ada_svr\nfrom hidimstat.clustered_inference import clustered_inference\nfrom hidimstat.ensemble_clustered_inference import ensemble_clustered_inference\nfrom hidimstat.permutation_test import permutation_test, permutation_test_pval\nfrom hidimstat.standardized_svr import standardized_svr\nfrom hidimstat.stat_tools import pval_from_scale, zscore_from_pval\n\nn_job = None"
"import numpy as np\nimport pandas as pd\nfrom nilearn import datasets\nfrom nilearn.image import mean_img\nfrom nilearn.input_data import NiftiMasker\nfrom nilearn.plotting import plot_stat_map, show\nfrom sklearn.cluster import FeatureAgglomeration\nfrom sklearn.feature_extraction import image\nfrom sklearn.linear_model import Ridge\nfrom sklearn.svm import LinearSVR\nfrom sklearn.utils import Bunch\n\nfrom hidimstat.adaptative_permutation_threshold_SVR import ada_svr\nfrom hidimstat.clustered_inference import clustered_inference\nfrom hidimstat.ensemble_clustered_inference import ensemble_clustered_inference\nfrom hidimstat.permutation_test import permutation_test, permutation_test_pval\nfrom hidimstat.standardized_svr import standardized_svr\nfrom hidimstat.stat_tools import pval_from_scale, zscore_from_pval\n\nn_job = None"
]
},
{
Binary file not shown.
1 change: 0 additions & 1 deletion docs/_sources/api.rst.txt
@@ -15,7 +15,6 @@ Functions
.. autosummary::
:toctree: generated/

ada_svr
aggregate_quantiles
clustered_inference
data_simulation
18 changes: 9 additions & 9 deletions docs/_sources/auto_examples/index.rst.txt
@@ -87,35 +87,35 @@ Examples Gallery

.. raw:: html

<div class="sphx-glr-thumbcontainer" tooltip="This example compares several methods that estimate a decoder map support with statistical guarantees. More precisely, we aim at thresholding the weights of some estimated decoder maps according to the confidence we have that they are nonzero. Here, we work with the Haxby dataset and we focus on the &#x27;face vs house&#x27; contrast. Thus, we consider the labeled activation maps of a given subject and try to produce a brain map that corresponds to the discriminative pattern that enables the decoding of the two conditions.">
<div class="sphx-glr-thumbcontainer" tooltip="In this example, we illustrate how to measure variable importance in a classification context. The problem under consideration is a binary classification where the target variable is generated using a non-linear function of the features. This illustrates the importance of model-agnostic variable importance methods, which, unlike linear models, can capture non-linear relationships. The features are generated from a multivariate normal distribution with a Toeplitz correlation matrix. This second characteristic of the problem helps exemplify the benefits of the conditional permutation importance (CPI) method [:footciteChamma_NeurIPS2023] over the standard permutation importance (PI) method [:footcitebreimanRandomForests2001].">

.. only:: html

.. image:: /auto_examples/images/thumb/sphx_glr_plot_fmri_data_example_thumb.png
.. image:: /auto_examples/images/thumb/sphx_glr_plot_variable_importance_classif_thumb.png
:alt:

:ref:`sphx_glr_auto_examples_plot_fmri_data_example.py`
:ref:`sphx_glr_auto_examples_plot_variable_importance_classif.py`

.. raw:: html

<div class="sphx-glr-thumbnail-title">Support recovery on fMRI data</div>
<div class="sphx-glr-thumbnail-title">Measuring variable importance in classification</div>
</div>
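As a self-contained sketch of the setting this tooltip describes (correlated Gaussian features with a Toeplitz correlation matrix and a non-linear binary target), the snippet below computes standard permutation importance with scikit-learn. It is an illustrative stand-in, not the gallery example itself; the conditional permutation importance (CPI) method mentioned above comes from hidimstat and is not reproduced here.

.. code-block:: python

    # Rough sketch of the simulated setting, assuming standard numpy /
    # scikit-learn APIs; names and sizes here are illustrative only.
    import numpy as np
    from scipy.linalg import toeplitz
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.inspection import permutation_importance
    from sklearn.model_selection import train_test_split

    rng = np.random.default_rng(0)
    n_samples, n_features, rho = 400, 10, 0.6
    cov = toeplitz(rho ** np.arange(n_features))  # Toeplitz (AR(1)) correlation
    X = rng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
    y = (X[:, 0] * X[:, 1] + np.sin(X[:, 2])
         + 0.1 * rng.standard_normal(n_samples) > 0).astype(int)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = RandomForestClassifier(random_state=0).fit(X_train, y_train)
    # Standard (marginal) permutation importance; correlated features can
    # make it misleading, which motivates the conditional variant (CPI).
    pi = permutation_importance(clf, X_test, y_test, n_repeats=20, random_state=0)
    print(pi.importances_mean)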


.. raw:: html

<div class="sphx-glr-thumbcontainer" tooltip="In this example, we illustrate how to measure variable importance in a classification context. The problem under consideration is a binary classification where the target variable is generated using a non-linear function of the features. This illustrates the importance of model-agnostic variable importance methods, which, unlike linear models, can capture non-linear relationships. The features are generated from a multivariate normal distribution with a Toeplitz correlation matrix. This second characteristic of the problem helps exemplify the benefits of the conditional permutation importance (CPI) method [:footciteChamma_NeurIPS2023] over the standard permutation importance (PI) method [:footcitebreimanRandomForests2001].">
<div class="sphx-glr-thumbcontainer" tooltip="This example compares several methods that estimate a decoder map support with statistical guarantees. More precisely, we aim at thresholding the weights of some estimated decoder maps according to the confidence we have that they are nonzero. Here, we work with the Haxby dataset and we focus on the &#x27;face vs house&#x27; contrast. Thus, we consider the labeled activation maps of a given subject and try to produce a brain map that corresponds to the discriminative pattern that enables the decoding of the two conditions.">

.. only:: html

.. image:: /auto_examples/images/thumb/sphx_glr_plot_variable_importance_classif_thumb.png
.. image:: /auto_examples/images/thumb/sphx_glr_plot_fmri_data_example_thumb.png
:alt:

:ref:`sphx_glr_auto_examples_plot_variable_importance_classif.py`
:ref:`sphx_glr_auto_examples_plot_fmri_data_example.py`

.. raw:: html

<div class="sphx-glr-thumbnail-title">Measuring variable importance in classification</div>
<div class="sphx-glr-thumbnail-title">Support recovery on fMRI data</div>
</div>
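As a generic sketch of the thresholding idea in this tooltip, assuming decoder weights and their standard errors are available from some estimator (the variable names below are hypothetical and this is not the hidimstat fMRI pipeline), one could keep only the weights whose z-score survives a Bonferroni-corrected level:

.. code-block:: python

    # Generic weight-thresholding sketch; not the hidimstat API.
    import numpy as np
    from scipy import stats

    def threshold_weights(beta, beta_se, alpha=0.05):
        """Zero out weights that are not significantly nonzero."""
        z = beta / beta_se                    # z-scores of the weights
        pval = 2 * stats.norm.sf(np.abs(z))   # two-sided p-values
        corrected = alpha / beta.size         # Bonferroni correction
        return np.where(pval < corrected, beta, 0.0)

    beta = np.array([0.8, -0.05, 0.5, 0.01])
    beta_se = np.array([0.1, 0.1, 0.1, 0.1])
    print(threshold_weights(beta, beta_se))   # keeps 0.8 and 0.5 only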


@@ -133,8 +133,8 @@ Examples Gallery
/auto_examples/plot_knockoff_aggregation
/auto_examples/plot_diabetes_variable_importance_example
/auto_examples/plot_2D_simulation_example
/auto_examples/plot_fmri_data_example
/auto_examples/plot_variable_importance_classif
/auto_examples/plot_fmri_data_example


.. only:: html
@@ -554,7 +554,7 @@ randomization.

.. rst-class:: sphx-glr-timing

**Total running time of the script:** (1 minutes 5.593 seconds)
**Total running time of the script:** (1 minutes 5.804 seconds)

**Estimated memory usage:** 722 MB

2 changes: 1 addition & 1 deletion docs/_sources/auto_examples/plot_dcrt_example.rst.txt
@@ -162,7 +162,7 @@ Plotting the comparison

.. rst-class:: sphx-glr-timing

**Total running time of the script:** (1 minutes 2.729 seconds)
**Total running time of the script:** (1 minutes 2.893 seconds)

**Estimated memory usage:** 658 MB

@@ -491,7 +491,7 @@ Analyze the results
.. rst-class:: sphx-glr-timing

**Total running time of the script:** (0 minutes 8.186 seconds)
**Total running time of the script:** (0 minutes 8.329 seconds)

**Estimated memory usage:** 643 MB

26 changes: 18 additions & 8 deletions docs/_sources/auto_examples/plot_fmri_data_example.rst.txt
@@ -79,7 +79,7 @@ Imports needed for this script
from sklearn.svm import LinearSVR
from sklearn.utils import Bunch
from hidimstat.ada_svr import ada_svr
from hidimstat.adaptative_permutation_threshold_SVR import ada_svr
from hidimstat.clustered_inference import clustered_inference
from hidimstat.ensemble_clustered_inference import ensemble_clustered_inference
from hidimstat.permutation_test import permutation_test, permutation_test_pval
@@ -179,15 +179,25 @@ You may choose a subject in [1, 2, 3, 4, 5, 6]. By default subject=2.
[_add_readme_to_default_data_locations] Added README.md to /home/runner/nilearn_data
[get_dataset_dir] Dataset created in /home/runner/nilearn_data/haxby2001
[fetch_single_file] Downloading data from https://www.nitrc.org/frs/download.php/7868/mask.nii.gz ...
[fetch_single_file] ...done. (0 seconds, 0 min)
[fetch_single_file] ...done. (1 seconds, 0 min)
[fetch_single_file] Downloading data from http://data.pymvpa.org/datasets/haxby2001/MD5SUMS ...
[fetch_single_file] ...done. (0 seconds, 0 min)
[fetch_single_file] Downloading data from http://data.pymvpa.org/datasets/haxby2001/subj2-2010.01.14.tar.gz ...
[_chunk_report_] Downloaded 94011392 of 291168628 bytes (32.3%%, 2.1s remaining)
[_chunk_report_] Downloaded 212819968 of 291168628 bytes (73.1%%, 0.7s remaining)
[fetch_single_file] ...done. (3 seconds, 0 min)
[_chunk_report_] Downloaded 15261696 of 291168628 bytes (5.2%%, 18.2s remaining)
[_chunk_report_] Downloaded 53190656 of 291168628 bytes (18.3%%, 9.1s remaining)
[_chunk_report_] Downloaded 91701248 of 291168628 bytes (31.5%%, 6.6s remaining)
[_chunk_report_] Downloaded 129933312 of 291168628 bytes (44.6%%, 5.0s remaining)
[_chunk_report_] Downloaded 156491776 of 291168628 bytes (53.7%%, 4.4s remaining)
[_chunk_report_] Downloaded 178135040 of 291168628 bytes (61.2%%, 3.9s remaining)
[_chunk_report_] Downloaded 196550656 of 291168628 bytes (67.5%%, 3.4s remaining)
[_chunk_report_] Downloaded 213925888 of 291168628 bytes (73.5%%, 2.9s remaining)
[_chunk_report_] Downloaded 232153088 of 291168628 bytes (79.7%%, 2.3s remaining)
[_chunk_report_] Downloaded 250953728 of 291168628 bytes (86.2%%, 1.6s remaining)
[_chunk_report_] Downloaded 270131200 of 291168628 bytes (92.8%%, 0.9s remaining)
[_chunk_report_] Downloaded 289538048 of 291168628 bytes (99.4%%, 0.1s remaining)
[fetch_single_file] ...done. (12 seconds, 0 min)
[uncompress_file] Extracting data from /home/runner/nilearn_data/haxby2001/9cabe068089e791ef0c5fe930fc20e30/subj2-2010.01.14.tar.gz...
[uncompress_file] .. done.
@@ -375,7 +385,7 @@ However you might benefit from clustering randomization taking
.. code-block:: none
[Parallel(n_jobs=2)]: Using backend LokyBackend with 2 concurrent workers.
[Parallel(n_jobs=2)]: Done 5 out of 5 | elapsed: 32.0s finished
[Parallel(n_jobs=2)]: Done 5 out of 5 | elapsed: 34.1s finished
@@ -620,9 +630,9 @@ spurious discoveries.
.. rst-class:: sphx-glr-timing

**Total running time of the script:** (1 minutes 23.795 seconds)
**Total running time of the script:** (1 minutes 37.697 seconds)

**Estimated memory usage:** 3287 MB
**Estimated memory usage:** 3398 MB


.. _sphx_glr_download_auto_examples_plot_fmri_data_example.py:
4 changes: 2 additions & 2 deletions docs/_sources/auto_examples/plot_knockoff_aggregation.rst.txt
@@ -205,9 +205,9 @@ Imports needed for this script

.. rst-class:: sphx-glr-timing

**Total running time of the script:** (5 minutes 40.613 seconds)
**Total running time of the script:** (5 minutes 39.238 seconds)

**Estimated memory usage:** 841 MB
**Estimated memory usage:** 793 MB


.. _sphx_glr_download_auto_examples_plot_knockoff_aggregation.py:
@@ -176,7 +176,7 @@ Visualize the data
.. code-block:: none
[<matplotlib.axis.YTick object at 0x7fef2ae3e660>, <matplotlib.axis.YTick object at 0x7fef2ae3eae0>, <matplotlib.axis.YTick object at 0x7fef1c38e780>, <matplotlib.axis.YTick object at 0x7fef1c38e660>, <matplotlib.axis.YTick object at 0x7fef1c38e990>, <matplotlib.axis.YTick object at 0x7fef1c38e060>, <matplotlib.axis.YTick object at 0x7fef2aee7710>, <matplotlib.axis.YTick object at 0x7fef2aee4aa0>, <matplotlib.axis.YTick object at 0x7fef2aee48c0>, <matplotlib.axis.YTick object at 0x7fef1c38c7d0>]
[<matplotlib.axis.YTick object at 0x7f5052d70ad0>, <matplotlib.axis.YTick object at 0x7f514c368b00>, <matplotlib.axis.YTick object at 0x7f514c2b5b50>, <matplotlib.axis.YTick object at 0x7f514c2b5bb0>, <matplotlib.axis.YTick object at 0x7f514c36a5d0>, <matplotlib.axis.YTick object at 0x7f5067066810>, <matplotlib.axis.YTick object at 0x7f5067067500>, <matplotlib.axis.YTick object at 0x7f5067065760>, <matplotlib.axis.YTick object at 0x7f5064899970>, <matplotlib.axis.YTick object at 0x7f5067065d60>]
@@ -283,13 +283,6 @@ estimate the importance of the features.
.. rst-class:: sphx-glr-script-out

.. code-block:: none
/opt/hostedtoolcache/Python/3.12.9/x64/lib/python3.12/site-packages/joblib/externals/loky/process_executor.py:752: UserWarning: A worker stopped while some jobs were given to the executor. This can be caused by a too short worker timeout or by a memory leak.
warnings.warn(
@@ -422,16 +415,16 @@ the features.
.. code-block:: none
<matplotlib.legend.Legend object at 0x7fef2ae82540>
<matplotlib.legend.Legend object at 0x7f505c767590>
.. rst-class:: sphx-glr-timing

**Total running time of the script:** (0 minutes 42.709 seconds)
**Total running time of the script:** (0 minutes 33.797 seconds)

**Estimated memory usage:** 639 MB
**Estimated memory usage:** 640 MB


.. _sphx_glr_download_auto_examples_plot_variable_importance_classif.py:
26 changes: 13 additions & 13 deletions docs/_sources/auto_examples/sg_execution_times.rst.txt
@@ -6,7 +6,7 @@

Computation times
=================
**10:03.624** total execution time for 6 files **from auto_examples**:
**10:07.758** total execution time for 6 files **from auto_examples**:

.. container::

@@ -33,20 +33,20 @@ Computation times
- Time
- Mem (MB)
* - :ref:`sphx_glr_auto_examples_plot_knockoff_aggregation.py` (``plot_knockoff_aggregation.py``)
- 05:40.613
- 841.3
- 05:39.238
- 793.3
* - :ref:`sphx_glr_auto_examples_plot_fmri_data_example.py` (``plot_fmri_data_example.py``)
- 01:23.795
- 3287.3
- 01:37.697
- 3398.5
* - :ref:`sphx_glr_auto_examples_plot_2D_simulation_example.py` (``plot_2D_simulation_example.py``)
- 01:05.593
- 722.1
- 01:05.804
- 722.0
* - :ref:`sphx_glr_auto_examples_plot_dcrt_example.py` (``plot_dcrt_example.py``)
- 01:02.729
- 658.2
- 01:02.893
- 657.6
* - :ref:`sphx_glr_auto_examples_plot_variable_importance_classif.py` (``plot_variable_importance_classif.py``)
- 00:42.709
- 639.3
- 00:33.797
- 639.7
* - :ref:`sphx_glr_auto_examples_plot_diabetes_variable_importance_example.py` (``plot_diabetes_variable_importance_example.py``)
- 00:08.186
- 643.0
- 00:08.329
- 642.8
6 changes: 0 additions & 6 deletions docs/_sources/generated/hidimstat.ada_svr.rst.txt

This file was deleted.

26 changes: 13 additions & 13 deletions docs/_sources/sg_execution_times.rst.txt
@@ -6,7 +6,7 @@

Computation times
=================
**10:03.624** total execution time for 6 files **from all galleries**:
**10:07.758** total execution time for 6 files **from all galleries**:

.. container::

@@ -33,20 +33,20 @@ Computation times
- Time
- Mem (MB)
* - :ref:`sphx_glr_auto_examples_plot_knockoff_aggregation.py` (``../examples/plot_knockoff_aggregation.py``)
- 05:40.613
- 841.3
- 05:39.238
- 793.3
* - :ref:`sphx_glr_auto_examples_plot_fmri_data_example.py` (``../examples/plot_fmri_data_example.py``)
- 01:23.795
- 3287.3
- 01:37.697
- 3398.5
* - :ref:`sphx_glr_auto_examples_plot_2D_simulation_example.py` (``../examples/plot_2D_simulation_example.py``)
- 01:05.593
- 722.1
- 01:05.804
- 722.0
* - :ref:`sphx_glr_auto_examples_plot_dcrt_example.py` (``../examples/plot_dcrt_example.py``)
- 01:02.729
- 658.2
- 01:02.893
- 657.6
* - :ref:`sphx_glr_auto_examples_plot_variable_importance_classif.py` (``../examples/plot_variable_importance_classif.py``)
- 00:42.709
- 639.3
- 00:33.797
- 639.7
* - :ref:`sphx_glr_auto_examples_plot_diabetes_variable_importance_example.py` (``../examples/plot_diabetes_variable_importance_example.py``)
- 00:08.186
- 643.0
- 00:08.329
- 642.8
2 changes: 1 addition & 1 deletion docs/_static/documentation_options.js
@@ -1,5 +1,5 @@
const DOCUMENTATION_OPTIONS = {
VERSION: '0.1.dev1+g349ade8',
VERSION: '0.1.dev1+g635d003',
LANGUAGE: 'en',
COLLAPSE_INDEX: false,
BUILDER: 'html',
13 changes: 8 additions & 5 deletions docs/_static/searchtools.js
@@ -513,9 +513,11 @@ const Search = {
// perform the search on the required terms
searchTerms.forEach((word) => {
const files = [];
// find documents, if any, containing the query word in their text/title term indices
// use Object.hasOwnProperty to avoid mismatching against prototype properties
const arr = [
{ files: terms[word], score: Scorer.term },
{ files: titleTerms[word], score: Scorer.title },
{ files: terms.hasOwnProperty(word) ? terms[word] : undefined, score: Scorer.term },
{ files: titleTerms.hasOwnProperty(word) ? titleTerms[word] : undefined, score: Scorer.title },
];
// add support for partial matches
if (word.length > 2) {
@@ -547,8 +549,9 @@ const Search = {

// set score for the word in each file
recordFiles.forEach((file) => {
if (!scoreMap.has(file)) scoreMap.set(file, {});
scoreMap.get(file)[word] = record.score;
if (!scoreMap.has(file)) scoreMap.set(file, new Map());
const fileScores = scoreMap.get(file);
fileScores.set(word, record.score);
});
});

@@ -587,7 +590,7 @@ const Search = {
break;

// select one (max) score for the file.
const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w]));
const score = Math.max(...wordList.map((w) => scoreMap.get(file).get(w)));
// add result to the result list
results.push([
docNames[file],
