From 3f6ea2ec504a6aac2a3893914608dffb5cb96e22 Mon Sep 17 00:00:00 2001 From: Dale Black Date: Mon, 20 Jan 2025 11:55:31 -0800 Subject: [PATCH] benchmarks: report total oneAPI device memory instead of used memory --- benchmarks/setup.jl | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/benchmarks/setup.jl b/benchmarks/setup.jl index 31f0b19..084ec49 100644 --- a/benchmarks/setup.jl +++ b/benchmarks/setup.jl @@ -21,11 +21,11 @@ function monitor_gpu_memory(backend::String, duration=0.1) elseif backend == "oneAPI" # Get the first device since that's what we're using device = oneAPI.devices()[1] - # Get total memory from device properties - total_mem = oneAPI.oneL0.memory_properties(device)[1].totalSize - # Get free memory from device - free_mem = oneAPI.oneL0.memory_get_info(device).free - return Float64(total_mem - free_mem) / (1024 * 1024) + # Get memory properties + props = oneAPI.oneL0.memory_properties(device)[1] + @info props + # For now, just return total memory since free memory isn't easily accessible + return Float64(props.totalSize) / (1024 * 1024) elseif backend == "AMDGPU" free_mem, total_mem = AMDGPU.Runtime.Mem.info() # Use the correct memory info function return Float64(total_mem - free_mem) / (1024 * 1024)