diff --git a/benchmarks/setup.jl b/benchmarks/setup.jl
index 31f0b19..084ec49 100644
--- a/benchmarks/setup.jl
+++ b/benchmarks/setup.jl
@@ -21,11 +21,11 @@ function monitor_gpu_memory(backend::String, duration=0.1)
     elseif backend == "oneAPI"
         # Get the first device since that's what we're using
         device = oneAPI.devices()[1]
-        # Get total memory from device properties
-        total_mem = oneAPI.oneL0.memory_properties(device)[1].totalSize
-        # Get free memory from device
-        free_mem = oneAPI.oneL0.memory_get_info(device).free
-        return Float64(total_mem - free_mem) / (1024 * 1024)
+        # Get memory properties
+        props = oneAPI.oneL0.memory_properties(device)[1]
+        @info props
+        # For now, just return total memory since free memory isn't easily accessible
+        return Float64(props.totalSize) / (1024 * 1024)
     elseif backend == "AMDGPU"
         free_mem, total_mem = AMDGPU.Runtime.Mem.info() # Use the correct memory info function
         return Float64(total_mem - free_mem) / (1024 * 1024)