Docstrings
yger committed Mar 2, 2025
1 parent b913be2 commit 5d88e72
Showing 2 changed files with 12 additions and 7 deletions.
5 changes: 3 additions & 2 deletions src/spikeinterface/sorters/internal/spyking_circus2.py
@@ -75,8 +75,9 @@ class Spykingcircus2Sorter(ComponentsBasedSorter):
"matched_filtering": "Boolean to specify whether circus 2 should detect peaks via matched filtering (slightly slower)",
"cache_preprocessing": "How to cache the preprocessed recording. Mode can be memory, file, zarr, with extra arguments. In case of memory (default), \
memory_limit will control how much RAM can be used. In case of folder or zarr, delete_cache controls if cache is cleaned after sorting",
"chunk_preprocessing": "How much RAM (approximately) should be devoted to load data chunks. memory_limit will control how much RAM can be used\
as a fraction of available memory. Otherwise, use total_memory to fix a hard limit",
"chunk_preprocessing": "How much RAM (approximately) should be devoted to load all data chunks (given n_jobs).\
memory_limit will control how much RAM can be used as a fraction of available memory. Otherwise, use total_memory to fix a hard limit, with\
a string syntax (e.g. '1G', '500M')",
"multi_units_only": "Boolean to get only multi units activity (i.e. one template per electrode)",
"job_kwargs": "A dictionary to specify how many jobs and which parameters they should used",
"seed": "An int to control how chunks are shuffled while detecting peaks",
14 changes: 9 additions & 5 deletions src/spikeinterface/sortingcomponents/tools.py
@@ -305,7 +305,7 @@ def get_optimal_n_jobs(job_kwargs, ram_requested, memory_limit=0.25):
recording: Recording
The recording object
-ram_requested: dict
+ram_requested: int
The amount of RAM (in bytes) requested for the job
memory_limit: float
The memory limit in fraction of available memory
@@ -324,7 +324,8 @@ def get_optimal_n_jobs(job_kwargs, ram_requested, memory_limit=0.25):
n_jobs = int(min(n_jobs, memory_usage // ram_requested))
job_kwargs.update(dict(n_jobs=n_jobs))
else:
print("psutil is required to use only a fraction of available memory")
import warnings
warnings.warn("psutil is required to use only a fraction of available memory")
return job_kwargs
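
As a reading aid, a self-contained sketch of the logic in the hunk above: n_jobs is capped so that n_jobs * ram_requested stays within a fraction of the currently available RAM, and a warning (rather than a print, as of this commit) is emitted when psutil is missing. The real implementation is get_optimal_n_jobs in spikeinterface.sortingcomponents.tools; this standalone version only mirrors the visible lines and is not a drop-in replacement.

import warnings


def optimal_n_jobs_sketch(job_kwargs, ram_requested, memory_limit=0.25):
    """Cap n_jobs so that n_jobs * ram_requested fits in memory_limit * available RAM."""
    job_kwargs = dict(job_kwargs)
    try:
        import psutil
    except ImportError:
        warnings.warn("psutil is required to use only a fraction of available memory")
        return job_kwargs
    memory_usage = memory_limit * psutil.virtual_memory().available
    n_jobs = job_kwargs.get("n_jobs", 1)
    n_jobs = int(min(n_jobs, memory_usage // ram_requested))
    job_kwargs.update(dict(n_jobs=max(1, n_jobs)))
    return job_kwargs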


@@ -367,14 +368,17 @@ def cache_preprocessing(
if recording.get_total_memory_size() < memory_usage:
recording = recording.save_to_memory(format="memory", shared=True, **job_kwargs)
else:
print("Recording too large to be preloaded in RAM...")
import warnings
warnings.warn("Recording too large to be preloaded in RAM...")
else:
print("psutil is required to preload in memory given only a fraction of available memory")
import warnings
warnings.warn("psutil is required to preload in memory given only a fraction of available memory")
else:
if recording.get_total_memory_size() < total_memory:
recording = recording.save_to_memory(format="memory", shared=True, **job_kwargs)
else:
print("Recording too large to be preloaded in RAM...")
import warnings
warnings.warn("Recording too large to be preloaded in RAM...")
elif mode == "folder":
recording = recording.save_to_folder(**extra_kwargs)
elif mode == "zarr":
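
For illustration, a hedged call sketch for cache_preprocessing from spikeinterface.sortingcomponents.tools. Only the mode, memory_limit and total_memory behaviour is visible in this diff, so the keyword names and defaults used below are assumptions to check against the source.

# Hedged sketch: signature assumed from the hunk above; check tools.py for the
# exact arguments and defaults of cache_preprocessing.
from spikeinterface.core import generate_recording
from spikeinterface.sortingcomponents.tools import cache_preprocessing

recording = generate_recording(durations=[5.0])  # stand-in for a preprocessed recording

# Preload into shared memory only when the recording fits in 50% of the available RAM;
# otherwise the function warns (as added in this commit) and the recording is left lazy.
recording = cache_preprocessing(recording, mode="memory", memory_limit=0.5)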
