Skip to content

Commit

Permalink
Merge branch 'total_memory' of github.com:yger/spikeinterface into total_memory
Browse files Browse the repository at this point in the history
  • Loading branch information
yger committed Mar 3, 2025
2 parents b72ee86 + f2a3ac4 commit 151fefb
Show file tree
Hide file tree
Showing 2 changed files with 19 additions and 8 deletions.
5 changes: 3 additions & 2 deletions src/spikeinterface/sorters/internal/spyking_circus2.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,8 +75,9 @@ class Spykingcircus2Sorter(ComponentsBasedSorter):
"matched_filtering": "Boolean to specify whether circus 2 should detect peaks via matched filtering (slightly slower)",
"cache_preprocessing": "How to cache the preprocessed recording. Mode can be memory, file, zarr, with extra arguments. In case of memory (default), \
memory_limit will control how much RAM can be used. In case of folder or zarr, delete_cache controls if cache is cleaned after sorting",
"chunk_preprocessing": "How much RAM (approximately) should be devoted to load data chunks. memory_limit will control how much RAM can be used\
as a fraction of available memory. Otherwise, use total_memory to fix a hard limit",
"chunk_preprocessing": "How much RAM (approximately) should be devoted to load all data chunks (given n_jobs).\
memory_limit will control how much RAM can be used as a fraction of available memory. Otherwise, use total_memory to fix a hard limit, with\
a string syntax (e.g. '1G', '500M')",
"multi_units_only": "Boolean to get only multi units activity (i.e. one template per electrode)",
"job_kwargs": "A dictionary to specify how many jobs and which parameters they should used",
"seed": "An int to control how chunks are shuffled while detecting peaks",
Expand Down
22 changes: 16 additions & 6 deletions src/spikeinterface/sortingcomponents/tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -283,7 +283,9 @@ def set_optimal_chunk_size(recording, job_kwargs, memory_limit=0.5, total_memory
chunk_duration = chunk_size / recording.get_sampling_frequency()
job_kwargs = fix_job_kwargs(dict(chunk_duration=f"{chunk_duration}s"))
else:
print("psutil is required to use only a fraction of available memory")
import warnings

warnings.warn("psutil is required to use only a fraction of available memory")
else:
from spikeinterface.core.job_tools import convert_string_to_bytes

Expand All @@ -305,7 +307,7 @@ def get_optimal_n_jobs(job_kwargs, ram_requested, memory_limit=0.25):
recording: Recording
The recording object
ram_requested: dict
ram_requested: int
The amount of RAM (in bytes) requested for the job
memory_limit: float
The memory limit in fraction of available memory
Expand All @@ -324,7 +326,9 @@ def get_optimal_n_jobs(job_kwargs, ram_requested, memory_limit=0.25):
n_jobs = int(min(n_jobs, memory_usage // ram_requested))
job_kwargs.update(dict(n_jobs=n_jobs))
else:
print("psutil is required to use only a fraction of available memory")
import warnings

warnings.warn("psutil is required to use only a fraction of available memory")
return job_kwargs


Expand Down Expand Up @@ -367,14 +371,20 @@ def cache_preprocessing(
if recording.get_total_memory_size() < memory_usage:
recording = recording.save_to_memory(format="memory", shared=True, **job_kwargs)
else:
print("Recording too large to be preloaded in RAM...")
import warnings

warnings.warn("Recording too large to be preloaded in RAM...")
else:
print("psutil is required to preload in memory given only a fraction of available memory")
import warnings

warnings.warn("psutil is required to preload in memory given only a fraction of available memory")
else:
if recording.get_total_memory_size() < total_memory:
recording = recording.save_to_memory(format="memory", shared=True, **job_kwargs)
else:
print("Recording too large to be preloaded in RAM...")
import warnings

warnings.warn("Recording too large to be preloaded in RAM...")
elif mode == "folder":
recording = recording.save_to_folder(**extra_kwargs)
elif mode == "zarr":
Expand Down

0 comments on commit 151fefb

Please sign in to comment.