task: assign each task a unique portion of the kernel address space #588

Merged (5 commits) on Jan 13, 2025
fuzz/fuzz_targets/bitmap_allocator.rs (2 additions, 2 deletions)
@@ -149,8 +149,8 @@ impl TestBitmapAllocator for BitmapAllocator1024 {
 }
 
 fuzz_target!(|actions: Vec<BmaAction>| {
-    let mut bma64 = BitmapAllocator64::new();
-    let mut bma1024 = BitmapAllocator1024::new();
+    let mut bma64 = BitmapAllocator64::new_full();
+    let mut bma1024 = BitmapAllocator1024::new_full();
     for action in actions.iter() {
         let bma64_before = bma64;
         let bma1024_before = bma1024.clone();
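The fuzz target now starts both allocators in the fully-allocated state. A minimal sketch of what that distinction could look like, assuming a bitmap allocator where a set bit means "allocated"; the `Bitmap64` type and its methods below are illustrative stand-ins, not the crate's actual `BitmapAllocator` API:

```rust
/// Illustrative 64-slot bitmap allocator: a set bit marks an allocated slot.
struct Bitmap64 {
    bits: u64,
}

impl Bitmap64 {
    /// Hypothetical: every bit clear, i.e. every slot free.
    fn new_empty() -> Self {
        Self { bits: 0 }
    }

    /// Hypothetical: every bit set, i.e. every slot already allocated.
    /// Nothing can be handed out until a range is explicitly freed.
    fn new_full() -> Self {
        Self { bits: !0 }
    }

    /// Allocate the lowest free slot, if any.
    fn alloc(&mut self) -> Option<usize> {
        let idx = (!self.bits).trailing_zeros() as usize;
        if idx < 64 {
            self.bits |= 1u64 << idx;
            Some(idx)
        } else {
            None
        }
    }

    /// Mark a slot free again.
    fn free(&mut self, idx: usize) {
        debug_assert!(idx < 64);
        self.bits &= !(1u64 << idx);
    }
}

fn main() {
    let mut full = Bitmap64::new_full();
    assert_eq!(full.alloc(), None); // nothing available until something is freed
    full.free(5);
    assert_eq!(full.alloc(), Some(5));

    let mut empty = Bitmap64::new_empty();
    assert_eq!(empty.alloc(), Some(0)); // everything starts out available
}
```

One plausible reading of the change: a "full" default state is the safer starting point for an address-space bitmap, since nothing is usable until a range is deliberately released.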
kernel/src/cpu/percpu.rs (5 additions, 1 deletion)
@@ -560,7 +560,11 @@ impl PerCpu {
     }
 
     pub fn init_page_table(&self, pgtable: PageBox<PageTable>) -> Result<(), SvsmError> {
-        self.vm_range.initialize()?;
+        // SAFETY: The per-CPU address range is fully aligned to top-level
+        // paging boundaries.
+        unsafe {
+            self.vm_range.initialize()?;
+        }
         self.set_pgtable(PageBox::leak(pgtable));
 
         Ok(())
kernel/src/mm/address_space.rs (8 additions, 8 deletions)
@@ -133,7 +133,7 @@ pub const STACK_SIZE: usize = PAGE_SIZE * STACK_PAGES;
 pub const STACK_GUARD_SIZE: usize = STACK_SIZE;
 pub const STACK_TOTAL_SIZE: usize = STACK_SIZE + STACK_GUARD_SIZE;
 
-const fn virt_from_idx(idx: usize) -> VirtAddr {
+pub const fn virt_from_idx(idx: usize) -> VirtAddr {
     VirtAddr::new(idx << ((3 * 9) + 12))
 }
 
@@ -219,19 +219,19 @@ pub const SVSM_PERTASK_BASE: VirtAddr = virt_from_idx(PGTABLE_LVL3_IDX_PERTASK);
 pub const SVSM_PERTASK_END: VirtAddr = SVSM_PERTASK_BASE.const_add(SIZE_LEVEL3);
 
 /// Kernel stack for a task
-pub const SVSM_PERTASK_STACK_BASE: VirtAddr = SVSM_PERTASK_BASE;
+pub const SVSM_PERTASK_STACK_BASE_OFFSET: usize = 0;
 
 /// Kernel shadow stack for normal execution of a task
-pub const SVSM_PERTASK_SHADOW_STACK_BASE: VirtAddr =
-    SVSM_PERTASK_STACK_BASE.const_add(STACK_TOTAL_SIZE);
+pub const SVSM_PERTASK_SHADOW_STACK_BASE_OFFSET: usize =
+    SVSM_PERTASK_STACK_BASE_OFFSET + STACK_TOTAL_SIZE;
 
 /// Kernel shadow stack for exception handling
-pub const SVSM_PERTASK_EXCEPTION_SHADOW_STACK_BASE: VirtAddr =
-    SVSM_PERTASK_SHADOW_STACK_BASE.const_add(PAGE_SIZE);
+pub const SVSM_PERTASK_EXCEPTION_SHADOW_STACK_BASE_OFFSET: usize =
+    SVSM_PERTASK_SHADOW_STACK_BASE_OFFSET + PAGE_SIZE;
 
 /// SSE context save area for a task
-pub const SVSM_PERTASK_XSAVE_AREA_BASE: VirtAddr =
-    SVSM_PERTASK_EXCEPTION_SHADOW_STACK_BASE.const_add(PAGE_SIZE);
+pub const SVSM_PERTASK_XSAVE_AREA_BASE: usize =
+    SVSM_PERTASK_EXCEPTION_SHADOW_STACK_BASE_OFFSET + PAGE_SIZE;
 
 /// Page table self-map level 3 index
 pub const PGTABLE_LVL3_IDX_PTE_SELFMAP: usize = 493;
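The per-task constants change from absolute `VirtAddr` values into `usize` offsets, so the same layout can be placed at whichever top-level slot a given task is assigned. A short sketch of the arithmetic, assuming illustrative values for the stack size and an arbitrary top-level index; `pertask_addr`, the constant names, and the chosen index are hypothetical, and only `virt_from_idx` mirrors the shifted-index computation shown above:

```rust
const PAGE_SIZE: usize = 4096;
const STACK_SIZE: usize = 8 * PAGE_SIZE; // illustrative; the kernel uses PAGE_SIZE * STACK_PAGES
const STACK_TOTAL_SIZE: usize = 2 * STACK_SIZE; // stack plus a same-sized guard region

const STACK_BASE_OFFSET: usize = 0;
const SHADOW_STACK_BASE_OFFSET: usize = STACK_BASE_OFFSET + STACK_TOTAL_SIZE;
const EXCEPTION_SHADOW_STACK_BASE_OFFSET: usize = SHADOW_STACK_BASE_OFFSET + PAGE_SIZE;
const XSAVE_AREA_BASE_OFFSET: usize = EXCEPTION_SHADOW_STACK_BASE_OFFSET + PAGE_SIZE;

/// One level-3 (top-level) index covers 2^((3 * 9) + 12) = 2^39 bytes, i.e. 512 GiB.
const fn virt_from_idx(idx: usize) -> usize {
    idx << ((3 * 9) + 12)
}

/// Hypothetical helper: place an offset inside the top-level slot a task owns.
fn pertask_addr(task_slot_base: usize, offset: usize) -> usize {
    task_slot_base + offset
}

fn main() {
    // Arbitrary slot chosen for illustration, not the kernel's actual index.
    let base = virt_from_idx(508);
    println!("stack base: {:#x}", pertask_addr(base, STACK_BASE_OFFSET));
    println!("shadow stack: {:#x}", pertask_addr(base, SHADOW_STACK_BASE_OFFSET));
    println!("exception shadow stack: {:#x}", pertask_addr(base, EXCEPTION_SHADOW_STACK_BASE_OFFSET));
    println!("xsave area: {:#x}", pertask_addr(base, XSAVE_AREA_BASE_OFFSET));
}
```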
kernel/src/mm/virtualrange.rs (1 addition, 1 deletion)
@@ -36,7 +36,7 @@ impl VirtualRange {
             start_virt: VirtAddr::null(),
             page_count: 0,
             page_shift: PAGE_SHIFT,
-            bits: BitmapAllocator1024::new(),
+            bits: BitmapAllocator1024::new_full(),
         }
     }
 
kernel/src/mm/vm/range.rs (37 additions, 9 deletions)
@@ -9,6 +9,7 @@ use crate::cpu::{flush_tlb_global_percpu, flush_tlb_global_sync};
 use crate::error::SvsmError;
 use crate::locking::RWLock;
 use crate::mm::pagetable::{PTEntryFlags, PageTable, PageTablePart};
+use crate::mm::virt_from_idx;
 use crate::types::{PageSize, PAGE_SHIFT, PAGE_SIZE};
 use crate::utils::{align_down, align_up};
 
@@ -102,9 +103,12 @@ impl VMR {
     ///
     /// `Ok(())` on success, Err(SvsmError::Mem) on allocation error
     fn alloc_page_tables(&self, lazy: bool) -> Result<(), SvsmError> {
-        let start = VirtAddr::from(self.start_pfn << PAGE_SHIFT);
-        let end = VirtAddr::from(self.end_pfn << PAGE_SHIFT);
-        let count = end.to_pgtbl_idx::<3>() - start.to_pgtbl_idx::<3>();
+        let first = VirtAddr::from(self.start_pfn << PAGE_SHIFT);
+        let first_idx = first.to_pgtbl_idx::<3>();
+        let start = virt_from_idx(first_idx);
+        let last = VirtAddr::from(self.end_pfn << PAGE_SHIFT) - 1;
+        let last_idx = last.to_pgtbl_idx::<3>();
+        let count = last_idx + 1 - first_idx;
         let mut vec = self.pgtbl_parts.lock_write();
 
         for idx in 0..count {
@@ -144,37 +148,61 @@ impl VMR {
     /// Initialize this [`VMR`] by checking the `start` and `end` values and
     /// allocating the [`PageTablePart`]s required for the mappings.
     ///
+    /// # Safety
+    /// Callers must ensure that the bounds of the address range are
+    /// appropriately aligned to prevent the possibility that adjacent address
+    /// ranges may attempt to share top-level paging entries. If any overlap
+    /// is attempted, page tables may be corrupted.
+    ///
     /// # Arguments
     ///
     /// * `lazy` - When `true`, use lazy allocation of [`PageTablePart`] pages.
     ///
     /// # Returns
     ///
     /// `Ok(())` on success, Err(SvsmError::Mem) on allocation error
-    fn initialize_common(&self, lazy: bool) -> Result<(), SvsmError> {
+    unsafe fn initialize_common(&self, lazy: bool) -> Result<(), SvsmError> {
         let start = VirtAddr::from(self.start_pfn << PAGE_SHIFT);
         let end = VirtAddr::from(self.end_pfn << PAGE_SHIFT);
-        assert!(start < end && start.is_aligned(VMR_GRANULE) && end.is_aligned(VMR_GRANULE));
+        assert!(start < end);
 
         self.alloc_page_tables(lazy)
     }
 
     /// Initialize this [`VMR`] by calling `VMR::initialize_common` with `lazy = false`
     ///
+    /// # Safety
+    /// Callers must ensure that the bounds of the address range are
+    /// appropriately aligned to prevent the possibility that adjacent address
+    /// ranges may attempt to share top-level paging entries. If any overlap
+    /// is attempted, page tables may be corrupted.
+    ///
     /// # Returns
     ///
     /// `Ok(())` on success, Err(SvsmError::Mem) on allocation error
-    pub fn initialize(&self) -> Result<(), SvsmError> {
-        self.initialize_common(false)
+    pub unsafe fn initialize(&self) -> Result<(), SvsmError> {
+        // SAFETY: The caller takes responsibility for ensuring that the address
+        // bounds of the range have appropriate alignment with respect to
+        // the page table alignment boundaries.
+        unsafe { self.initialize_common(false) }
     }
 
     /// Initialize this [`VMR`] by calling `VMR::initialize_common` with `lazy = true`
     ///
+    /// # Safety
+    /// Callers must ensure that the bounds of the address range are
+    /// appropriately aligned to prevent the possibility that adjacent address
+    /// ranges may attempt to share top-level paging entries. If any overlap
+    /// is attempted, page tables may be corrupted.
+    ///
     /// # Returns
    ///
     /// `Ok(())` on success, Err(SvsmError::Mem) on allocation error
-    pub fn initialize_lazy(&self) -> Result<(), SvsmError> {
-        self.initialize_common(true)
+    pub unsafe fn initialize_lazy(&self) -> Result<(), SvsmError> {
+        // SAFETY: The caller takes responsibility for ensuring that the address
+        // bounds of the range have appropriate alignment with respect to
+        // the page table alignment boundaries.
+        unsafe { self.initialize_common(true) }
     }
 
     /// Returns the virtual start and end addresses for this region
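With the alignment assertion removed from `initialize_common`, meeting the documented bounds requirement becomes a caller obligation, as the new `# Safety` sections and the SAFETY comment in `percpu.rs` spell out. A sketch of one way a caller might discharge that obligation before writing its own SAFETY comment; the granule value and the `is_lvl3_aligned` helper are assumptions for illustration, not part of the PR:

```rust
// One top-level page-table entry spans 2^((3 * 9) + 12) bytes, i.e. 512 GiB.
const LVL3_GRANULE: usize = 1 << ((3 * 9) + 12);

/// Hypothetical helper: true if `addr` sits on a top-level paging boundary.
fn is_lvl3_aligned(addr: usize) -> bool {
    addr & (LVL3_GRANULE - 1) == 0
}

fn main() {
    let start = 254 * LVL3_GRANULE;
    let end = start + LVL3_GRANULE;

    // Aligning both bounds to the top-level granule is one way to guarantee
    // that this range cannot share a top-level entry with a neighbouring range.
    assert!(is_lvl3_aligned(start) && is_lvl3_aligned(end));

    // In real code the caller would then document the guarantee:
    // SAFETY: the range bounds are aligned to top-level paging boundaries,
    // so this VMR owns its PageTableParts exclusively.
    // unsafe { vmr.initialize()? }
}
```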