Use SFTDenseChunkMap on 64bits when vm_space is enabled (#1094)
This PR resolves the issues found in
mmtk/mmtk-julia#143.
```console
[2024-03-19T05:23:33Z INFO  mmtk::policy::vmspace] Set [78624DC00000, 786258000000) as VM region (heap available range [20000000000, 220000000000))
thread '<unnamed>' panicked at 'start = 78624DC00000 + bytes = 171966464 should be smaller than space_start 380000000000 + max extent 2199023255552, index 28, table size 31', /home/runner/.cargo/git/checkouts/mmtk-core-89cdc7bf360cce7f/46b0abe/src/policy/sft_map.rs:202:21
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
fatal runtime error: failed to initiate panic, error 5
```

In short, we should not use `SFTSpaceMap` when we have off-heap memory.
Julia sets up its VM space at an address range outside our heap range,
which breaks the address-to-index computation in `SFTSpaceMap`. Using
`SFTDenseChunkMap` solves the issue (a simplified sketch of the failing
computation follows the change list below).

Changes:
* Use `SFTDenseChunkMap` if `vm_space` is enabled.
* Add a range check for `SFTSpaceMap::has_sft_entry()`. The
address/index computation is only correct if the address is in the
range.
* Add a simple test case for
mmtk/mmtk-julia#143.
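
For intuition, here is a minimal sketch of why an out-of-heap region trips `SFTSpaceMap`, using the numbers from the panic above. The exact shift and mask in mmtk-core differ; this model only assumes each table entry covers a `2^41`-byte extent and that high address bits are masked into the table:

```rust
fn main() {
    // Assumed model: index = (addr >> log_max_space_extent) & table_mask.
    const LOG_MAX_SPACE_EXTENT: u32 = 41; // max extent 2199023255552 = 2^41 (from the panic message)
    const TABLE_MASK: usize = 31; // "table size 31" in the panic message

    let vm_region_start: usize = 0x7862_4DC0_0000; // Julia's VM region, outside the heap range

    let index = (vm_region_start >> LOG_MAX_SPACE_EXTENT) & TABLE_MASK;
    let space_start = index << LOG_MAX_SPACE_EXTENT;

    // The high bits wrap around, so the off-heap region lands on entry 28 ...
    println!("index = {}", index); // prints 28, matching the panic message
    // ... whose nominal range starts at 0x380000000000 and does not actually
    // contain the VM region, hence the new range check in has_sft_entry().
    println!("space_start = {:#X}", space_start); // prints 0x380000000000
    assert!(vm_region_start >= space_start + (1usize << LOG_MAX_SPACE_EXTENT));
}
```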
qinsoon authored Mar 21, 2024 · 1 parent 00d316f · commit dd84218
Showing 10 changed files with 118 additions and 76 deletions.
55 changes: 34 additions & 21 deletions src/policy/sft_map.rs
@@ -67,17 +67,23 @@ pub trait SFTMap {

pub(crate) fn create_sft_map() -> Box<dyn SFTMap> {
cfg_if::cfg_if! {
- if #[cfg(all(feature = "malloc_mark_sweep", target_pointer_width = "64"))] {
- // 64-bit malloc mark sweep needs a chunk-based SFT map, but the sparse map is not suitable for 64 bits.
- Box::new(dense_chunk_map::SFTDenseChunkMap::new())
- } else if #[cfg(target_pointer_width = "64")] {
+ if #[cfg(target_pointer_width = "64")] {
+ // For 64 bits, we generally want to use the space map, which requires contiguous spaces and no off-heap memory.
+ // If those requirements are not met, we have to choose a different SFT map implementation.
use crate::util::heap::layout::vm_layout::vm_layout;
- if vm_layout().force_use_contiguous_spaces {
- Box::new(space_map::SFTSpaceMap::new())
- } else {
+ if !vm_layout().force_use_contiguous_spaces {
+ // This is usually the case for compressed pointers. Use the 32-bit implementation.
Box::new(sparse_chunk_map::SFTSparseChunkMap::new())
+ } else if cfg!(any(feature = "malloc_mark_sweep", feature = "vm_space")) {
+ // We have off-heap memory (malloc'd objects, or the VM space). We have to use a chunk-based map.
+ Box::new(dense_chunk_map::SFTDenseChunkMap::new())
+ } else {
+ // We can use the space map.
+ Box::new(space_map::SFTSpaceMap::new())
+ }
} else if #[cfg(target_pointer_width = "32")] {
// Use sparse chunk map. As we have limited virtual address range on 32 bits,
// it is okay to have a sparse chunk map which maps every chunk into an index in the array.
Box::new(sparse_chunk_map::SFTSparseChunkMap::new())
} else {
compile_err!("Cannot figure out which SFT map to use.");
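
Note how the new selection logic mixes three kinds of checks: `cfg_if!` picks a branch at compile time, `vm_layout()` is consulted at run time, and `cfg!(...)` expands to a plain boolean usable inside an ordinary `if` chain. A standalone sketch of that pattern (the function and return values here are illustrative, not mmtk-core APIs):

```rust
// Illustrative only: mirrors the shape of create_sft_map()'s decision tree.
fn choose_sft_map_kind(force_contiguous: bool) -> &'static str {
    if !force_contiguous {
        // e.g. compressed pointers: fall back to the 32-bit style map
        "sparse chunk map"
    } else if cfg!(any(feature = "malloc_mark_sweep", feature = "vm_space")) {
        // off-heap memory (malloc'd objects or a runtime-provided VM space)
        "dense chunk map"
    } else {
        "space map"
    }
}

fn main() {
    println!("{}", choose_sft_map_kind(true));
}
```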
@@ -154,15 +160,17 @@ mod space_map {
/// Space map is a small table, and it has one entry for each MMTk space.
pub struct SFTSpaceMap {
sft: Vec<SFTRefStorage>,
+ space_address_start: Address,
+ space_address_end: Address,
}

unsafe impl Sync for SFTSpaceMap {}

impl SFTMap for SFTSpaceMap {
- fn has_sft_entry(&self, _addr: Address) -> bool {
- // Address::ZERO is mapped to index 0, and Address::MAX is mapped to index 31 (TABLE_SIZE-1).
- // So any address has an SFT entry.
- true
+ fn has_sft_entry(&self, addr: Address) -> bool {
+ // An arbitrary address from Address::ZERO to Address::MAX is cyclically mapped to an index between 0 and 31.
+ // Only addresses within the virtual address range we use have valid entries.
+ addr >= self.space_address_start && addr < self.space_address_end
}

fn get_side_metadata(&self) -> Option<&SideMetadataSpec> {
@@ -186,21 +194,23 @@ mod space_map {
start: Address,
bytes: usize,
) {
- let table_size = Self::addr_to_index(Address::MAX) + 1;
let index = Self::addr_to_index(start);
if cfg!(debug_assertions) {
// Make sure we only update from empty to a valid space, or overwrite the space
let old = self.sft[index].load();
assert!((*old).name() == EMPTY_SFT_NAME || (*old).name() == (*space).name());
// Make sure the range is in the space
let space_start = Self::index_to_space_start(index);
- // FIXME: Currently skip the check for the last space. The following works fine for MMTk internal spaces,
- // but the VM space is an exception. Any address after the last space is considered as the last space,
- // based on our indexing function. In that case, we cannot assume the end of the region is within the last space (with MAX_SPACE_EXTENT).
- if index != table_size - 1 {
- assert!(start >= space_start);
- assert!(start + bytes <= space_start + vm_layout().max_space_extent());
- }
+ assert!(start >= space_start);
+ assert!(
+ start + bytes <= space_start + vm_layout().max_space_extent(),
+ "The range of {} + {} bytes does not fall into the space range {} and {}, \
+ and it is probably outside the address range we use.",
+ start,
+ bytes,
+ space_start,
+ space_start + vm_layout().max_space_extent()
+ );
}

self.sft.get_unchecked(index).store(space);
@@ -216,12 +226,15 @@ mod space_map {
/// Create a new space map.
#[allow(clippy::assertions_on_constants)] // We assert to make sure the constants
pub fn new() -> Self {
+ use crate::util::heap::layout::heap_parameters::MAX_SPACES;
let table_size = Self::addr_to_index(Address::MAX) + 1;
- debug_assert!(table_size >= crate::util::heap::layout::heap_parameters::MAX_SPACES);
+ debug_assert!(table_size >= MAX_SPACES);
Self {
sft: std::iter::repeat_with(SFTRefStorage::default)
.take(table_size)
.collect(),
+ space_address_start: Self::index_to_space_range(1).0, // the start of the first space
+ space_address_end: Self::index_to_space_range(MAX_SPACES - 1).1, // the end of the last space
}
}

@@ -261,7 +274,7 @@ mod space_map {

let assert_for_index = |i: usize| {
let (start, end) = SFTSpaceMap::index_to_space_range(i);
- debug!("Space: Index#{} = [{}, {})", i, start, end);
+ println!("Space: Index#{} = [{}, {})", i, start, end);
assert_eq!(SFTSpaceMap::addr_to_index(start), i);
assert_eq!(SFTSpaceMap::addr_to_index(end - 1), i);
};
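
Under the same simplified model as the sketch in the description (one `2^41`-byte extent per index; the real constants come from `vm_layout()`), `index_to_space_range(i)` can be pictured as follows. The constant values and `MAX_SPACES = 16` are assumptions for illustration:

```rust
// Hypothetical stand-in for SFTSpaceMap::index_to_space_range().
fn index_to_space_range(i: usize) -> (usize, usize) {
    const LOG_MAX_SPACE_EXTENT: u32 = 41; // assumed, matching the 2^41 extent in the panic message
    (i << LOG_MAX_SPACE_EXTENT, (i + 1) << LOG_MAX_SPACE_EXTENT)
}

fn main() {
    // The constructor above derives its valid range from the first and last space:
    let (start, _) = index_to_space_range(1); // space_address_start
    let (_, end) = index_to_space_range(16 - 1); // space_address_end, assuming MAX_SPACES = 16
    println!("valid SFT range: [{:#X}, {:#X})", start, end);
}
```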
14 changes: 0 additions & 14 deletions src/policy/vmspace.rs
@@ -127,20 +127,6 @@ impl<VM: VMBinding> Space<VM> for VMSpace<VM> {
// The default implementation checks with the VM map. But the VM map has some assumptions about
// the address range for spaces, and the VM space may break those assumptions (as the space is
// mmapped by the runtime rather than by us). So we use the SFT here.

- // However, SFT map may not be an ideal solution either for 64 bits. The default
- // implementation of SFT map on 64 bits is `SFTSpaceMap`, which maps the entire address
- // space into an index between 0 and 31, and assumes any address with the same index
- // is in the same space (with the same SFT). MMTk spaces use 1-16. We guarantee that
- // the VM space does not overlap with the address range that MMTk spaces may use. So
- // any region used as VM space will have an index of 0, or 17-31, and all the addresses
- // that are mapped to the same index will be considered as in the VM space. That means,
- // after we map a region as VM space, the nearby addresses will also be considered
- // as in the VM space if we use the default `SFTSpaceMap`. We can guarantee the nearby
- // addresses are not MMTk spaces, but we cannot tell whether they are really in the VM space
- // or not.
- // A solution to this is to use `SFTDenseChunkMap` if `vm_space` is enabled on 64 bits.
- // `SFTDenseChunkMap` has an overhead of a few percent (~3%) compared to `SFTSpaceMap`.
SFT_MAP.get_checked(start).name() == self.name()
}
}
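
The deleted comment's rationale can be put in code: a space map derives the entry from high address bits, so every address, including off-heap ones, lands on *some* entry, while a dense chunk map only has entries for chunks a space has explicitly claimed. A rough sketch (the chunk size and data structure are assumptions, not mmtk-core internals):

```rust
use std::collections::HashSet;

const LOG_CHUNK_BYTES: u32 = 22; // assume 4 MB chunks

// Space-map style: any address maps to some entry, so an off-heap region
// adjacent to the VM space can be misclassified as belonging to it.
fn space_map_index(addr: usize, log_extent: u32, mask: usize) -> usize {
    (addr >> log_extent) & mask
}

// Dense-chunk-map style: unclaimed chunks have no entry at all, so
// off-heap addresses are correctly reported as not in any space.
fn dense_chunk_has_entry(claimed_chunks: &HashSet<usize>, addr: usize) -> bool {
    claimed_chunks.contains(&(addr >> LOG_CHUNK_BYTES))
}

fn main() {
    let mut claimed = HashSet::new();
    claimed.insert(0x7862_4DC0_0000usize >> LOG_CHUNK_BYTES); // the VM region's first chunk
    assert!(dense_chunk_has_entry(&claimed, 0x7862_4DC0_0000));
    assert!(!dense_chunk_has_entry(&claimed, 0x7000_0000_0000)); // unrelated address: no entry
    let _ = space_map_index(0x7000_0000_0000, 41, 31); // but the space map still yields an index
}
```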
20 changes: 14 additions & 6 deletions src/util/test_util/fixtures.rs
@@ -115,7 +115,7 @@ impl<T: FixtureContent> Default for SerialFixture<T> {
}

pub struct MMTKFixture {
- pub mmtk: &'static MMTK<MockVM>,
+ mmtk: *mut MMTK<MockVM>,
}

impl FixtureContent for MMTKFixture {
@@ -143,13 +143,21 @@ impl MMTKFixture {

let mmtk = memory_manager::mmtk_init(&builder);
let mmtk_ptr = Box::into_raw(mmtk);
- let mmtk_static: &'static MMTK<MockVM> = unsafe { &*mmtk_ptr };

if initialize_collection {
+ let mmtk_static: &'static MMTK<MockVM> = unsafe { &*mmtk_ptr };
memory_manager::initialize_collection(mmtk_static, VMThread::UNINITIALIZED);
}

- MMTKFixture { mmtk: mmtk_static }
+ MMTKFixture { mmtk: mmtk_ptr }
}

+ pub fn get_mmtk(&self) -> &'static MMTK<MockVM> {
+ unsafe { &*self.mmtk }
+ }
+
+ pub fn get_mmtk_mut(&mut self) -> &'static mut MMTK<MockVM> {
+ unsafe { &mut *self.mmtk }
+ }
}
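
The fixture now stores a raw pointer from `Box::into_raw` and hands out references through accessors. The general shape of this pattern, reduced to a toy example (this is not the mmtk-core code itself):

```rust
struct Fixture<T> {
    inner: *mut T, // leaked on purpose: tests need a stable 'static address
}

impl<T> Fixture<T> {
    fn new(value: T) -> Self {
        Self { inner: Box::into_raw(Box::new(value)) }
    }
    fn get(&self) -> &'static T {
        // Sound only because the test never frees `inner` and never holds
        // a mutable borrow while a shared reference is alive.
        unsafe { &*self.inner }
    }
    fn get_mut(&mut self) -> &'static mut T {
        unsafe { &mut *self.inner }
    }
}

fn main() {
    let mut f = Fixture::new(42);
    *f.get_mut() += 1;
    assert_eq!(*f.get(), 43);
}
```

Compared with holding a `&'static MMTK` field directly, the raw pointer lets the same fixture hand out both shared and mutable borrows, which the new `get_mmtk_mut` needs.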

Expand Down Expand Up @@ -186,7 +194,7 @@ impl MutatorFixture {
true,
);
let mutator =
- memory_manager::bind_mutator(mmtk.mmtk, VMMutatorThread(VMThread::UNINITIALIZED));
+ memory_manager::bind_mutator(mmtk.get_mmtk(), VMMutatorThread(VMThread::UNINITIALIZED));
Self { mmtk, mutator }
}

@@ -196,12 +204,12 @@
{
let mmtk = MMTKFixture::create_with_builder(with_builder, true);
let mutator =
- memory_manager::bind_mutator(mmtk.mmtk, VMMutatorThread(VMThread::UNINITIALIZED));
+ memory_manager::bind_mutator(mmtk.get_mmtk(), VMMutatorThread(VMThread::UNINITIALIZED));
Self { mmtk, mutator }
}

pub fn mmtk(&self) -> &'static MMTK<MockVM> {
- self.mmtk.mmtk
+ self.mmtk.get_mmtk()
}
}
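
Tests later in this commit consume these fixtures through a `with_fixture` helper on a static `SerialFixture`; the general shape of that helper (a guess at the pattern, not the exact mmtk-core API) is:

```rust
use std::sync::Mutex;

// Hypothetical serialized fixture: lazily create once, then lend to each test closure.
struct SerialFixture<T> {
    slot: Mutex<Option<T>>,
}

impl<T> SerialFixture<T> {
    const fn new() -> Self {
        Self { slot: Mutex::new(None) }
    }
    fn with_fixture(&self, create: fn() -> T, f: impl FnOnce(&T)) {
        let mut guard = self.slot.lock().unwrap(); // serializes tests on the same fixture
        let fixture = guard.get_or_insert_with(create);
        f(fixture);
    }
}
```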

@@ -25,7 +25,7 @@ pub fn allocate_without_initialize_collection() {

// Build mutator
let mut mutator = memory_manager::bind_mutator(
- fixture.mmtk,
+ fixture.get_mmtk(),
VMMutatorThread(VMThread::UNINITIALIZED),
);

8 changes: 5 additions & 3 deletions src/vm/tests/mock_tests/mock_test_allocator_info.rs
@@ -14,12 +14,14 @@ pub fn test_allocator_info() {
|| {
let fixture = MMTKFixture::create();

- let selector =
- memory_manager::get_allocator_mapping(fixture.mmtk, AllocationSemantics::Default);
+ let selector = memory_manager::get_allocator_mapping(
+ fixture.get_mmtk(),
+ AllocationSemantics::Default,
+ );
let base_offset = crate::plan::Mutator::<MockVM>::get_allocator_base_offset(selector);
let allocator_info = AllocatorInfo::new::<MockVM>(selector);

- match *fixture.mmtk.get_options().plan {
+ match *fixture.get_mmtk().get_options().plan {
PlanSelector::NoGC
| PlanSelector::Immix
| PlanSelector::SemiSpace
@@ -22,13 +22,15 @@ pub fn acquire_typed_allocator() {

// ANCHOR: avoid_resolving_allocator
// At boot time
- let selector =
- memory_manager::get_allocator_mapping(fixture.mmtk, AllocationSemantics::Default);
+ let selector = memory_manager::get_allocator_mapping(
+ fixture.get_mmtk(),
+ AllocationSemantics::Default,
+ );
unsafe {
DEFAULT_ALLOCATOR_OFFSET =
crate::plan::Mutator::<MockVM>::get_allocator_base_offset(selector);
}
let mutator = memory_manager::bind_mutator(fixture.mmtk, tls_opaque_pointer);
let mutator = memory_manager::bind_mutator(fixture.get_mmtk(), tls_opaque_pointer);

// At run time: allocate with the default semantics without resolving allocator
let default_allocator: &mut BumpAllocator<MockVM> = {
8 changes: 4 additions & 4 deletions src/vm/tests/mock_tests/mock_test_doc_mutator_storage.rs
@@ -28,7 +28,7 @@ pub fn boxed_pointer() {
}

// Bind an MMTk mutator
let mutator = memory_manager::bind_mutator(fixture.mmtk, tls_opaque_pointer);
let mutator = memory_manager::bind_mutator(fixture.get_mmtk(), tls_opaque_pointer);
// Store the pointer in TLS
let mut storage = MutatorInTLS { ptr: mutator };

@@ -58,7 +58,7 @@ pub fn embed_mutator_struct() {
}

// Bind an MMTk mutator
let mutator = memory_manager::bind_mutator(fixture.mmtk, tls_opaque_pointer);
let mutator = memory_manager::bind_mutator(fixture.get_mmtk(), tls_opaque_pointer);
// Store the struct (or use memcpy for non-Rust code)
let mut storage = MutatorInTLS { embed: *mutator };
// Allocate
@@ -94,7 +94,7 @@ pub fn embed_fastpath_struct() {
}

// Bind an MMTk mutator
let mutator = memory_manager::bind_mutator(fixture.mmtk, tls_opaque_pointer);
let mutator = memory_manager::bind_mutator(fixture.get_mmtk(), tls_opaque_pointer);
// Create a fastpath BumpPointer with default(). The BumpPointer from default() is guaranteed to fail on the first allocation,
// so the allocation goes to the slowpath and we will get an allocation buffer from MMTk.
let default_bump_pointer = BumpPointer::default();
@@ -116,7 +116,7 @@ pub fn embed_fastpath_struct() {
} else {
use crate::util::alloc::Allocator;
let selector = memory_manager::get_allocator_mapping(
- fixture.mmtk,
+ fixture.get_mmtk(),
AllocationSemantics::Default,
);
let default_allocator = unsafe {
48 changes: 24 additions & 24 deletions src/vm/tests/mock_tests/mock_test_malloc_counted.rs
@@ -12,15 +12,15 @@ pub fn malloc_free() {
default_setup,
|| {
MMTK.with_fixture(|fixture| {
- let bytes_before = memory_manager::get_malloc_bytes(&fixture.mmtk);
+ let bytes_before = memory_manager::get_malloc_bytes(fixture.get_mmtk());

- let res = memory_manager::counted_malloc(&fixture.mmtk, 8);
+ let res = memory_manager::counted_malloc(fixture.get_mmtk(), 8);
assert!(!res.is_zero());
- let bytes_after_alloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+ let bytes_after_alloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
assert_eq!(bytes_before + 8, bytes_after_alloc);

- memory_manager::free_with_size(&fixture.mmtk, res, 8);
- let bytes_after_free = memory_manager::get_malloc_bytes(&fixture.mmtk);
+ memory_manager::free_with_size(fixture.get_mmtk(), res, 8);
+ let bytes_after_free = memory_manager::get_malloc_bytes(fixture.get_mmtk());
assert_eq!(bytes_before, bytes_after_free);
});
},
@@ -34,15 +34,15 @@
default_setup,
|| {
MMTK.with_fixture(|fixture| {
- let bytes_before = memory_manager::get_malloc_bytes(&fixture.mmtk);
+ let bytes_before = memory_manager::get_malloc_bytes(fixture.get_mmtk());

- let res = memory_manager::counted_calloc(&fixture.mmtk, 1, 8);
+ let res = memory_manager::counted_calloc(fixture.get_mmtk(), 1, 8);
assert!(!res.is_zero());
- let bytes_after_alloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+ let bytes_after_alloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
assert_eq!(bytes_before + 8, bytes_after_alloc);

- memory_manager::free_with_size(&fixture.mmtk, res, 8);
- let bytes_after_free = memory_manager::get_malloc_bytes(&fixture.mmtk);
+ memory_manager::free_with_size(fixture.get_mmtk(), res, 8);
+ let bytes_after_free = memory_manager::get_malloc_bytes(fixture.get_mmtk());
assert_eq!(bytes_before, bytes_after_free);
});
},
@@ -56,21 +56,21 @@
default_setup,
|| {
MMTK.with_fixture(|fixture| {
- let bytes_before = memory_manager::get_malloc_bytes(&fixture.mmtk);
+ let bytes_before = memory_manager::get_malloc_bytes(fixture.get_mmtk());

- let res1 = memory_manager::counted_malloc(&fixture.mmtk, 8);
+ let res1 = memory_manager::counted_malloc(fixture.get_mmtk(), 8);
assert!(!res1.is_zero());
- let bytes_after_alloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+ let bytes_after_alloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
assert_eq!(bytes_before + 8, bytes_after_alloc);

// grow to 16 bytes
- let res2 = memory_manager::realloc_with_old_size(&fixture.mmtk, res1, 16, 8);
+ let res2 = memory_manager::realloc_with_old_size(fixture.get_mmtk(), res1, 16, 8);
assert!(!res2.is_zero());
- let bytes_after_realloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+ let bytes_after_realloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
assert_eq!(bytes_before + 16, bytes_after_realloc);

- memory_manager::free_with_size(&fixture.mmtk, res2, 16);
- let bytes_after_free = memory_manager::get_malloc_bytes(&fixture.mmtk);
+ memory_manager::free_with_size(fixture.get_mmtk(), res2, 16);
+ let bytes_after_free = memory_manager::get_malloc_bytes(fixture.get_mmtk());
assert_eq!(bytes_before, bytes_after_free);
});
},
@@ -84,21 +84,21 @@
default_setup,
|| {
MMTK.with_fixture(|fixture| {
- let bytes_before = memory_manager::get_malloc_bytes(&fixture.mmtk);
+ let bytes_before = memory_manager::get_malloc_bytes(fixture.get_mmtk());

- let res1 = memory_manager::counted_malloc(&fixture.mmtk, 16);
+ let res1 = memory_manager::counted_malloc(fixture.get_mmtk(), 16);
assert!(!res1.is_zero());
- let bytes_after_alloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+ let bytes_after_alloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
assert_eq!(bytes_before + 16, bytes_after_alloc);

// shrink to 8 bytes
- let res2 = memory_manager::realloc_with_old_size(&fixture.mmtk, res1, 8, 16);
+ let res2 = memory_manager::realloc_with_old_size(fixture.get_mmtk(), res1, 8, 16);
assert!(!res2.is_zero());
- let bytes_after_realloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+ let bytes_after_realloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
assert_eq!(bytes_before + 8, bytes_after_realloc);

- memory_manager::free_with_size(&fixture.mmtk, res2, 8);
- let bytes_after_free = memory_manager::get_malloc_bytes(&fixture.mmtk);
+ memory_manager::free_with_size(fixture.get_mmtk(), res2, 8);
+ let bytes_after_free = memory_manager::get_malloc_bytes(fixture.get_mmtk());
assert_eq!(bytes_before, bytes_after_free);
});
},
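
The four tests above all check the same invariant: MMTk's malloc byte counter ends where it started once everything is freed. A minimal model of the assumed bookkeeping (not mmtk-core's implementation):

```rust
// Toy model of the counter these tests exercise.
struct MallocCounter {
    bytes: usize,
}

impl MallocCounter {
    fn malloc(&mut self, size: usize) {
        self.bytes += size;
    }
    fn realloc(&mut self, old_size: usize, new_size: usize) {
        self.bytes = self.bytes - old_size + new_size;
    }
    fn free(&mut self, size: usize) {
        self.bytes -= size;
    }
}

fn main() {
    let mut c = MallocCounter { bytes: 0 };
    c.malloc(16); // counted_malloc(.., 16)
    c.realloc(16, 8); // realloc_with_old_size(.., 8, 16): counter drops by 8
    c.free(8); // free_with_size(.., 8)
    assert_eq!(c.bytes, 0); // bytes_after_free == bytes_before
}
```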
