diff --git a/fuzz/fuzz_targets/bitmap_allocator.rs b/fuzz/fuzz_targets/bitmap_allocator.rs
index cb0c8a904..5ecdac49b 100644
--- a/fuzz/fuzz_targets/bitmap_allocator.rs
+++ b/fuzz/fuzz_targets/bitmap_allocator.rs
@@ -149,8 +149,8 @@ impl TestBitmapAllocator for BitmapAllocator1024 {
 }
 
 fuzz_target!(|actions: Vec| {
-    let mut bma64 = BitmapAllocator64::new();
-    let mut bma1024 = BitmapAllocator1024::new();
+    let mut bma64 = BitmapAllocator64::new_full();
+    let mut bma1024 = BitmapAllocator1024::new_full();
     for action in actions.iter() {
         let bma64_before = bma64;
         let bma1024_before = bma1024.clone();
diff --git a/kernel/src/cpu/percpu.rs b/kernel/src/cpu/percpu.rs
index e4d75c1ec..8425c1ce5 100644
--- a/kernel/src/cpu/percpu.rs
+++ b/kernel/src/cpu/percpu.rs
@@ -560,7 +560,11 @@ impl PerCpu {
     }
 
     pub fn init_page_table(&self, pgtable: PageBox<PageTable>) -> Result<(), SvsmError> {
-        self.vm_range.initialize()?;
+        // SAFETY: The per-CPU address range is fully aligned to top-level
+        // paging boundaries.
+        unsafe {
+            self.vm_range.initialize()?;
+        }
         self.set_pgtable(PageBox::leak(pgtable));
 
         Ok(())
diff --git a/kernel/src/mm/address_space.rs b/kernel/src/mm/address_space.rs
index cb5213e27..499b46dbb 100644
--- a/kernel/src/mm/address_space.rs
+++ b/kernel/src/mm/address_space.rs
@@ -133,7 +133,7 @@ pub const STACK_SIZE: usize = PAGE_SIZE * STACK_PAGES;
 pub const STACK_GUARD_SIZE: usize = STACK_SIZE;
 pub const STACK_TOTAL_SIZE: usize = STACK_SIZE + STACK_GUARD_SIZE;
 
-const fn virt_from_idx(idx: usize) -> VirtAddr {
+pub const fn virt_from_idx(idx: usize) -> VirtAddr {
     VirtAddr::new(idx << ((3 * 9) + 12))
 }
 
@@ -219,19 +219,19 @@ pub const SVSM_PERTASK_BASE: VirtAddr = virt_from_idx(PGTABLE_LVL3_IDX_PERTASK);
 pub const SVSM_PERTASK_END: VirtAddr = SVSM_PERTASK_BASE.const_add(SIZE_LEVEL3);
 
 /// Kernel stack for a task
-pub const SVSM_PERTASK_STACK_BASE: VirtAddr = SVSM_PERTASK_BASE;
+pub const SVSM_PERTASK_STACK_BASE_OFFSET: usize = 0;
 
 /// Kernel shadow stack for normal execution of a task
-pub const SVSM_PERTASK_SHADOW_STACK_BASE: VirtAddr =
-    SVSM_PERTASK_STACK_BASE.const_add(STACK_TOTAL_SIZE);
+pub const SVSM_PERTASK_SHADOW_STACK_BASE_OFFSET: usize =
+    SVSM_PERTASK_STACK_BASE_OFFSET + STACK_TOTAL_SIZE;
 
 /// Kernel shadow stack for exception handling
-pub const SVSM_PERTASK_EXCEPTION_SHADOW_STACK_BASE: VirtAddr =
-    SVSM_PERTASK_SHADOW_STACK_BASE.const_add(PAGE_SIZE);
+pub const SVSM_PERTASK_EXCEPTION_SHADOW_STACK_BASE_OFFSET: usize =
+    SVSM_PERTASK_SHADOW_STACK_BASE_OFFSET + PAGE_SIZE;
 
 /// SSE context save area for a task
-pub const SVSM_PERTASK_XSAVE_AREA_BASE: VirtAddr =
-    SVSM_PERTASK_EXCEPTION_SHADOW_STACK_BASE.const_add(PAGE_SIZE);
+pub const SVSM_PERTASK_XSAVE_AREA_BASE: usize =
+    SVSM_PERTASK_EXCEPTION_SHADOW_STACK_BASE_OFFSET + PAGE_SIZE;
 
 /// Page table self-map level 3 index
 pub const PGTABLE_LVL3_IDX_PTE_SELFMAP: usize = 493;
diff --git a/kernel/src/mm/virtualrange.rs b/kernel/src/mm/virtualrange.rs
index 3fb6ee297..6083011f7 100644
--- a/kernel/src/mm/virtualrange.rs
+++ b/kernel/src/mm/virtualrange.rs
@@ -36,7 +36,7 @@ impl VirtualRange {
             start_virt: VirtAddr::null(),
             page_count: 0,
             page_shift: PAGE_SHIFT,
-            bits: BitmapAllocator1024::new(),
+            bits: BitmapAllocator1024::new_full(),
         }
     }
 
diff --git a/kernel/src/mm/vm/range.rs b/kernel/src/mm/vm/range.rs
index ddd31d87d..ad2656aa9 100644
--- a/kernel/src/mm/vm/range.rs
+++ b/kernel/src/mm/vm/range.rs
@@ -9,6 +9,7 @@
 use crate::cpu::{flush_tlb_global_percpu, flush_tlb_global_sync};
 use crate::error::SvsmError;
 use crate::locking::RWLock;
 use crate::mm::pagetable::{PTEntryFlags, PageTable, PageTablePart};
+use crate::mm::virt_from_idx;
 use crate::types::{PageSize, PAGE_SHIFT, PAGE_SIZE};
 use crate::utils::{align_down, align_up};
 
@@ -102,9 +103,12 @@ impl VMR {
     ///
     /// `Ok(())` on success, Err(SvsmError::Mem) on allocation error
     fn alloc_page_tables(&self, lazy: bool) -> Result<(), SvsmError> {
-        let start = VirtAddr::from(self.start_pfn << PAGE_SHIFT);
-        let end = VirtAddr::from(self.end_pfn << PAGE_SHIFT);
-        let count = end.to_pgtbl_idx::<3>() - start.to_pgtbl_idx::<3>();
+        let first = VirtAddr::from(self.start_pfn << PAGE_SHIFT);
+        let first_idx = first.to_pgtbl_idx::<3>();
+        let start = virt_from_idx(first_idx);
+        let last = VirtAddr::from(self.end_pfn << PAGE_SHIFT) - 1;
+        let last_idx = last.to_pgtbl_idx::<3>();
+        let count = last_idx + 1 - first_idx;
         let mut vec = self.pgtbl_parts.lock_write();
 
         for idx in 0..count {
@@ -144,6 +148,12 @@ impl VMR {
     /// Initialize this [`VMR`] by checking the `start` and `end` values and
     /// allocating the [`PageTablePart`]s required for the mappings.
     ///
+    /// # Safety
+    /// Callers must ensure that the bounds of the address range are
+    /// appropriately aligned to prevent the possibility that adjacent address
+    /// ranges may attempt to share top-level paging entries. If any overlap
+    /// is attempted, page tables may be corrupted.
+    ///
     /// # Arguments
     ///
     /// * `lazy` - When `true`, use lazy allocation of [`PageTablePart`] pages.
@@ -151,30 +161,48 @@ impl VMR {
     /// # Returns
     ///
     /// `Ok(())` on success, Err(SvsmError::Mem) on allocation error
-    fn initialize_common(&self, lazy: bool) -> Result<(), SvsmError> {
+    unsafe fn initialize_common(&self, lazy: bool) -> Result<(), SvsmError> {
         let start = VirtAddr::from(self.start_pfn << PAGE_SHIFT);
         let end = VirtAddr::from(self.end_pfn << PAGE_SHIFT);
-        assert!(start < end && start.is_aligned(VMR_GRANULE) && end.is_aligned(VMR_GRANULE));
+        assert!(start < end);
         self.alloc_page_tables(lazy)
     }
 
     /// Initialize this [`VMR`] by calling `VMR::initialize_common` with `lazy = false`
     ///
+    /// # Safety
+    /// Callers must ensure that the bounds of the address range are
+    /// appropriately aligned to prevent the possibility that adjacent address
+    /// ranges may attempt to share top-level paging entries. If any overlap
+    /// is attempted, page tables may be corrupted.
+    ///
     /// # Returns
     ///
     /// `Ok(())` on success, Err(SvsmError::Mem) on allocation error
-    pub fn initialize(&self) -> Result<(), SvsmError> {
-        self.initialize_common(false)
+    pub unsafe fn initialize(&self) -> Result<(), SvsmError> {
+        // SAFETY: The caller takes responsibility for ensuring that the address
+        // bounds of the range have appropriate alignment with respect to
+        // the page table alignment boundaries.
+        unsafe { self.initialize_common(false) }
     }
 
     /// Initialize this [`VMR`] by calling `VMR::initialize_common` with `lazy = true`
    ///
+    /// # Safety
+    /// Callers must ensure that the bounds of the address range are
+    /// appropriately aligned to prevent the possibility that adjacent address
+    /// ranges may attempt to share top-level paging entries. If any overlap
+    /// is attempted, page tables may be corrupted.
+    ///
     /// # Returns
     ///
     /// `Ok(())` on success, Err(SvsmError::Mem) on allocation error
-    pub fn initialize_lazy(&self) -> Result<(), SvsmError> {
-        self.initialize_common(true)
+    pub unsafe fn initialize_lazy(&self) -> Result<(), SvsmError> {
+        // SAFETY: The caller takes responsibility for ensuring that the address
+        // bounds of the range have appropriate alignment with respect to
+        // the page table alignment boundaries.
+        unsafe { self.initialize_common(true) }
     }
 
     /// Returns the virtual start and end addresses for this region
diff --git a/kernel/src/task/tasks.rs b/kernel/src/task/tasks.rs
index e68d2ad7e..5c16f71db 100644
--- a/kernel/src/task/tasks.rs
+++ b/kernel/src/task/tasks.rs
@@ -20,8 +20,7 @@ use crate::cpu::irq_state::EFLAGS_IF;
 use crate::cpu::percpu::{current_task, PerCpu};
 use crate::cpu::shadow_stack::is_cet_ss_supported;
 use crate::cpu::sse::{get_xsave_area_size, sse_restore_context};
-use crate::cpu::X86ExceptionContext;
-use crate::cpu::{irqs_enable, X86GeneralRegs};
+use crate::cpu::{irqs_enable, X86ExceptionContext, X86GeneralRegs};
 use crate::error::SvsmError;
 use crate::fs::{opendir, stdout_open, Directory, FileHandle};
 use crate::locking::{RWLock, SpinLock};
@@ -30,20 +29,55 @@ use crate::mm::vm::{
     Mapping, ShadowStackInit, VMFileMappingFlags, VMKernelShadowStack, VMKernelStack, VMR,
 };
 use crate::mm::{
-    mappings::create_anon_mapping, mappings::create_file_mapping, PageBox, VMMappingGuard,
-    SVSM_PERTASK_BASE, SVSM_PERTASK_END, SVSM_PERTASK_EXCEPTION_SHADOW_STACK_BASE,
-    SVSM_PERTASK_SHADOW_STACK_BASE, SVSM_PERTASK_STACK_BASE, USER_MEM_END, USER_MEM_START,
+    alloc::AllocError, mappings::create_anon_mapping, mappings::create_file_mapping, PageBox,
+    VMMappingGuard, SIZE_LEVEL3, SVSM_PERTASK_BASE, SVSM_PERTASK_END,
+    SVSM_PERTASK_EXCEPTION_SHADOW_STACK_BASE_OFFSET, SVSM_PERTASK_SHADOW_STACK_BASE_OFFSET,
+    SVSM_PERTASK_STACK_BASE_OFFSET, USER_MEM_END, USER_MEM_START,
 };
 use crate::platform::SVSM_PLATFORM;
 use crate::syscall::{Obj, ObjError, ObjHandle};
 use crate::types::{SVSM_USER_CS, SVSM_USER_DS};
+use crate::utils::bitmap_allocator::{BitmapAllocator, BitmapAllocator1024};
 use crate::utils::MemoryRegion;
 use intrusive_collections::{intrusive_adapter, LinkedListAtomicLink};
 
 use super::schedule::{current_task_terminated, schedule};
 
+pub static KTASK_VADDR_BITMAP: SpinLock<BitmapAllocator1024> =
+    SpinLock::new(BitmapAllocator1024::new_empty());
+
 pub const INITIAL_TASK_ID: u32 = 1;
 
+// The task virtual range guard manages the allocation of a task virtual
+// address range within the task address space. The address range is reserved
+// as long as the guard continues to exist.
+#[derive(Debug)]
+struct TaskVirtualRegionGuard {
+    index: usize,
+}
+
+impl TaskVirtualRegionGuard {
+    fn alloc() -> Result<Self, SvsmError> {
+        let index = KTASK_VADDR_BITMAP
+            .lock()
+            .alloc(1, 1)
+            .ok_or(SvsmError::Alloc(AllocError::OutOfMemory))?;
+        Ok(Self { index })
+    }
+
+    fn vaddr_region(&self) -> MemoryRegion<VirtAddr> {
+        const SPAN: usize = SIZE_LEVEL3 / BitmapAllocator1024::CAPACITY;
+        let base = SVSM_PERTASK_BASE + (self.index * SPAN);
+        MemoryRegion::<VirtAddr>::new(base, SPAN)
+    }
+}
+
+impl Drop for TaskVirtualRegionGuard {
+    fn drop(&mut self) {
+        KTASK_VADDR_BITMAP.lock().free(self.index, 1);
+    }
+}
+
 #[derive(PartialEq, Debug, Copy, Clone, Default)]
 pub enum TaskState {
     RUNNING,
@@ -137,6 +171,11 @@ pub struct Task {
     /// Page table that is loaded when the task is scheduled
     pub page_table: SpinLock<PageBox<PageTable>>,
 
+    /// Virtual address region that has been allocated for this task.
+    /// This is not referenced but must be stored so that it is dropped when
+    /// the Task is dropped.
+    _ktask_region: TaskVirtualRegionGuard,
+
     /// Task virtual memory range for use at CPL 0
     vm_kernel_range: VMR,
 
@@ -168,7 +207,10 @@ pub struct Task {
 // SAFETY: Send + Sync is required for Arc<Task> to implement Send. All members
 // of `Task` are Send + Sync except for the intrusive_collection links, which
 // are only Send. The only access to these is via the intrusive_adapter!
-// generated code which does not use them concurrently across threads.
+// generated code which does not use them concurrently across threads. The
+// kernel address cell is also not Sync, but this is only populated during
+// task creation, and can safely be accessed by multiple threads once it has
+// been populated.
 unsafe impl Sync for Task {}
 
 pub type TaskPointer = Arc<Task>;
@@ -192,170 +234,155 @@ impl fmt::Debug for Task {
     }
 }
 
+struct CreateTaskArguments {
+    // The entry point of the task. For user tasks, this is a user-mode
+    // address, and for kernel tasks, it is a kernel address.
+    entry: usize,
+
+    // The name of the task.
+    name: String,
+
+    // For a user task, supplies the `VMR` that will represent the user-mode
+    // address space.
+    vm_user_range: Option<VMR>,
+
+    // The root directory that will be associated with this task.
+    rootdir: Arc<Directory>,
+}
+
 impl Task {
-    pub fn create(
-        cpu: &PerCpu,
-        entry: extern "C" fn(),
-        name: String,
-    ) -> Result<TaskPointer, SvsmError> {
+    fn create_common(cpu: &PerCpu, args: CreateTaskArguments) -> Result<TaskPointer, SvsmError> {
         let mut pgtable = cpu.get_pgtable().clone_shared()?;
 
         cpu.populate_page_table(&mut pgtable);
 
-        let vm_kernel_range = VMR::new(SVSM_PERTASK_BASE, SVSM_PERTASK_END, PTEntryFlags::empty());
-        vm_kernel_range.initialize()?;
+        let ktask_region = TaskVirtualRegionGuard::alloc()?;
+        let vaddr_region = ktask_region.vaddr_region();
+        let vm_kernel_range = VMR::new(
+            vaddr_region.start(),
+            vaddr_region.end(),
+            PTEntryFlags::empty(),
+        );
+        // SAFETY: The selected kernel mode task address range is the only
+        // range that will live within the top-level entry associated with the
+        // task address space.
+        unsafe {
+            vm_kernel_range.initialize()?;
+        }
 
         let xsa = Self::allocate_xsave_area();
         let xsa_addr = u64::from(xsa.vaddr()) as usize;
 
+        // Determine which kernel-mode entry/exit routines will be used for
+        // this task.
+        let (entry_return, exit_return) = if args.vm_user_range.is_some() {
+            (return_new_task as usize, None)
+        } else {
+            (run_kernel_task as usize, Some(task_exit as usize))
+        };
+
         let mut shadow_stack_offset = VirtAddr::null();
         let mut exception_shadow_stack = VirtAddr::null();
         if is_cet_ss_supported() {
             let shadow_stack;
             (shadow_stack, shadow_stack_offset) = VMKernelShadowStack::new(
-                SVSM_PERTASK_SHADOW_STACK_BASE,
+                vaddr_region.start() + SVSM_PERTASK_SHADOW_STACK_BASE_OFFSET,
                 ShadowStackInit::Normal {
-                    entry_return: run_kernel_task as usize,
-                    exit_return: Some(task_exit as usize),
+                    entry_return,
+                    exit_return,
                 },
             )?;
             vm_kernel_range.insert_at(
-                SVSM_PERTASK_SHADOW_STACK_BASE,
+                vaddr_region.start() + SVSM_PERTASK_SHADOW_STACK_BASE_OFFSET,
                 Arc::new(Mapping::new(shadow_stack)),
             )?;
 
             let shadow_stack;
             (shadow_stack, exception_shadow_stack) = VMKernelShadowStack::new(
-                SVSM_PERTASK_EXCEPTION_SHADOW_STACK_BASE,
+                vaddr_region.start() + SVSM_PERTASK_EXCEPTION_SHADOW_STACK_BASE_OFFSET,
                 ShadowStackInit::Exception,
             )?;
             vm_kernel_range.insert_at(
-                SVSM_PERTASK_EXCEPTION_SHADOW_STACK_BASE,
+                vaddr_region.start() + SVSM_PERTASK_EXCEPTION_SHADOW_STACK_BASE_OFFSET,
                 Arc::new(Mapping::new(shadow_stack)),
             )?;
         }
 
-        let (stack, raw_bounds, rsp_offset) = Self::allocate_ktask_stack(cpu, entry, xsa_addr)?;
-        vm_kernel_range.insert_at(SVSM_PERTASK_STACK_BASE, stack)?;
+        // Call the correct stack creation routine for this task.
+        let (stack, raw_bounds, rsp_offset) = if args.vm_user_range.is_some() {
+            Self::allocate_utask_stack(cpu, args.entry, xsa_addr)?
+        } else {
+            Self::allocate_ktask_stack(cpu, args.entry, xsa_addr)?
+        };
+        let stack_start = vaddr_region.start() + SVSM_PERTASK_STACK_BASE_OFFSET;
+        vm_kernel_range.insert_at(stack_start, stack)?;
 
         vm_kernel_range.populate(&mut pgtable);
 
         // Remap at the per-task offset
-        let bounds = MemoryRegion::new(
-            SVSM_PERTASK_STACK_BASE + raw_bounds.start().into(),
-            raw_bounds.len(),
-        );
+        let bounds = MemoryRegion::new(stack_start + raw_bounds.start().into(), raw_bounds.len());
 
         Ok(Arc::new(Task {
             rsp: bounds
                 .end()
                 .checked_sub(rsp_offset)
-                .expect("Invalid stack offset from task::allocate_ktask_stack()")
+                .expect("Invalid stack offset from task stack allocator")
                 .bits() as u64,
             ssp: shadow_stack_offset,
             xsa,
             stack_bounds: bounds,
             exception_shadow_stack,
             page_table: SpinLock::new(pgtable),
+            _ktask_region: ktask_region,
             vm_kernel_range,
-            vm_user_range: None,
+            vm_user_range: args.vm_user_range,
             sched_state: RWLock::new(TaskSchedState {
                 idle_task: false,
                 state: TaskState::RUNNING,
                 cpu: cpu.get_apic_id(),
             }),
-            name,
+            name: args.name,
             id: TASK_ID_ALLOCATOR.next_id(),
-            rootdir: opendir("/")?,
+            rootdir: args.rootdir,
             list_link: LinkedListAtomicLink::default(),
             runlist_link: LinkedListAtomicLink::default(),
             objs: Arc::new(RWLock::new(BTreeMap::new())),
         }))
     }
 
+    pub fn create(
+        cpu: &PerCpu,
+        entry: extern "C" fn(),
+        name: String,
+    ) -> Result<TaskPointer, SvsmError> {
+        let create_args = CreateTaskArguments {
+            entry: entry as usize,
+            name,
+            vm_user_range: None,
+            rootdir: opendir("/")?,
+        };
+        Self::create_common(cpu, create_args)
+    }
+
     pub fn create_user(
         cpu: &PerCpu,
         user_entry: usize,
         root: Arc<Directory>,
         name: String,
     ) -> Result<TaskPointer, SvsmError> {
-        let mut pgtable = cpu.get_pgtable().clone_shared()?;
-
-        cpu.populate_page_table(&mut pgtable);
-
-        let vm_kernel_range = VMR::new(SVSM_PERTASK_BASE, SVSM_PERTASK_END, PTEntryFlags::empty());
-        vm_kernel_range.initialize()?;
-
-        let xsa = Self::allocate_xsave_area();
-        let xsa_addr = u64::from(xsa.vaddr()) as usize;
-
-        let mut shadow_stack_offset = VirtAddr::null();
-        let mut exception_shadow_stack = VirtAddr::null();
-        if is_cet_ss_supported() {
-            let shadow_stack;
-            (shadow_stack, shadow_stack_offset) = VMKernelShadowStack::new(
-                SVSM_PERTASK_SHADOW_STACK_BASE,
-                ShadowStackInit::Normal {
-                    entry_return: return_new_task as usize,
-                    exit_return: None,
-                },
-            )?;
-            vm_kernel_range.insert_at(
-                SVSM_PERTASK_SHADOW_STACK_BASE,
-                Arc::new(Mapping::new(shadow_stack)),
-            )?;
-
-            let shadow_stack;
-            (shadow_stack, exception_shadow_stack) = VMKernelShadowStack::new(
-                SVSM_PERTASK_EXCEPTION_SHADOW_STACK_BASE,
-                ShadowStackInit::Exception,
-            )?;
-            vm_kernel_range.insert_at(
-                SVSM_PERTASK_EXCEPTION_SHADOW_STACK_BASE,
-                Arc::new(Mapping::new(shadow_stack)),
-            )?;
-        }
-
-        let (stack, raw_bounds, stack_offset) =
-            Self::allocate_utask_stack(cpu, user_entry, xsa_addr)?;
-        vm_kernel_range.insert_at(SVSM_PERTASK_STACK_BASE, stack)?;
-
-        vm_kernel_range.populate(&mut pgtable);
-
         let vm_user_range = VMR::new(USER_MEM_START, USER_MEM_END, PTEntryFlags::USER);
-        vm_user_range.initialize_lazy()?;
-
-        // Remap at the per-task offset
-        let bounds = MemoryRegion::new(
-            SVSM_PERTASK_STACK_BASE + raw_bounds.start().into(),
-            raw_bounds.len(),
-        );
-
-        Ok(Arc::new(Task {
-            rsp: bounds
-                .end()
-                .checked_sub(stack_offset)
-                .expect("Invalid stack offset from task::allocate_utask_stack()")
-                .bits() as u64,
-            ssp: shadow_stack_offset,
-            xsa,
-            stack_bounds: bounds,
-            exception_shadow_stack,
-            page_table: SpinLock::new(pgtable),
-            vm_kernel_range,
-            vm_user_range: Some(vm_user_range),
-            sched_state: RWLock::new(TaskSchedState {
-                idle_task: false,
-                state: TaskState::RUNNING,
-                cpu: cpu.get_apic_id(),
-            }),
+        // SAFETY: the user address range is fully aligned to top-level paging
+        // boundaries.
+        unsafe {
+            vm_user_range.initialize_lazy()?;
+        }
+        let create_args = CreateTaskArguments {
+            entry: user_entry,
             name,
-            id: TASK_ID_ALLOCATOR.next_id(),
+            vm_user_range: Some(vm_user_range),
             rootdir: root,
-            list_link: LinkedListAtomicLink::default(),
-            runlist_link: LinkedListAtomicLink::default(),
-            objs: Arc::new(RWLock::new(BTreeMap::new())),
-        }))
+        };
+        Self::create_common(cpu, create_args)
     }
 
     pub fn stack_bounds(&self) -> MemoryRegion<VirtAddr> {
@@ -442,7 +469,7 @@ impl Task {
     fn allocate_ktask_stack(
         cpu: &PerCpu,
-        entry: extern "C" fn(),
+        entry: usize,
         xsa_addr: usize,
     ) -> Result<(Arc<Mapping>, MemoryRegion<VirtAddr>, usize), SvsmError> {
         let (mapping, bounds) = Task::allocate_stack_common()?;
@@ -465,7 +492,7 @@ impl Task {
             // task with interrupts disabled.
             (*task_context).flags = 2;
             // ret_addr
-            (*task_context).regs.rdi = entry as *const () as usize;
+            (*task_context).regs.rdi = entry;
             // xsave area addr
             (*task_context).regs.rsi = xsa_addr;
             (*task_context).ret_addr = run_kernel_task as *const () as u64;
@@ -789,7 +816,7 @@ extern "C" fn task_exit() {
     schedule();
 }
 
-#[cfg(test)]
+#[cfg(all(test, test_in_svsm))]
 mod tests {
     extern crate alloc;
     use crate::task::start_kernel_task;
diff --git a/kernel/src/utils/bitmap_allocator.rs b/kernel/src/utils/bitmap_allocator.rs
index 2965b095d..2a6063ef0 100644
--- a/kernel/src/utils/bitmap_allocator.rs
+++ b/kernel/src/utils/bitmap_allocator.rs
@@ -32,10 +32,14 @@ pub struct BitmapAllocator64 {
 }
 
 impl BitmapAllocator64 {
-    pub const fn new() -> Self {
+    pub const fn new_full() -> Self {
         Self { bits: u64::MAX }
     }
 
+    pub const fn new_empty() -> Self {
+        Self { bits: 0 }
+    }
+
     #[cfg(fuzzing)]
     pub fn get_bits(&self) -> u64 {
         self.bits
@@ -101,10 +105,17 @@ pub struct BitmapAllocatorTree {
 }
 
 impl BitmapAllocatorTree<BitmapAllocator64> {
-    pub const fn new() -> Self {
+    pub const fn new_full() -> Self {
         Self {
             bits: u16::MAX,
-            child: [BitmapAllocator64::new(); 16],
+            child: [BitmapAllocator64::new_full(); 16],
+        }
+    }
+
+    pub const fn new_empty() -> Self {
+        Self {
+            bits: 0,
+            child: [BitmapAllocator64::new_empty(); 16],
         }
     }
 
@@ -362,7 +373,7 @@ mod tests {
 
     #[test]
     fn tree_set_all() {
-        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new();
+        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new_full();
         b.set(0, 64 * 16, false);
         for i in 0..16 {
             assert_eq!(b.child[i].bits, 0);
@@ -373,7 +384,7 @@ mod tests {
 
     #[test]
     fn tree_clear_all() {
-        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new();
+        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new_full();
         b.set(0, 64 * 16, true);
         for i in 0..16 {
             assert_eq!(b.child[i].bits, u64::MAX);
@@ -384,7 +395,7 @@ mod tests {
 
     #[test]
     fn tree_set_some() {
-        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new();
+        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new_full();
 
         // First child
         b.set(0, BitmapAllocatorTree::<BitmapAllocator64>::CAPACITY, false);
@@ -430,7 +441,7 @@ mod tests {
 
     #[test]
     fn tree_alloc_simple() {
-        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new();
+        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new_full();
         b.set(0, BitmapAllocatorTree::<BitmapAllocator64>::CAPACITY, false);
         for i in 0..256 {
             assert_eq!(b.alloc(1, 0), Some(i));
@@ -438,9 +449,18 @@ mod tests {
         assert_eq!(b.used(), 256);
     }
 
+    #[test]
+    fn tree_alloc_empty_simple() {
+        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new_empty();
+        for i in 0..256 {
+            assert_eq!(b.alloc(1, 0), Some(i));
+        }
+        assert_eq!(b.used(), 256);
+    }
+
     #[test]
     fn tree_alloc_aligned() {
-        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new();
+        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new_full();
         b.set(0, BitmapAllocatorTree::<BitmapAllocator64>::CAPACITY, false);
         // Alignment of 1 << 5 bits : 32 bit alignment
         assert_eq!(b.alloc(1, 5), Some(0));
@@ -454,7 +474,7 @@
 
     #[test]
     fn tree_alloc_large_aligned() {
-        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new();
+        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new_full();
         b.set(0, BitmapAllocatorTree::<BitmapAllocator64>::CAPACITY, false);
         // Alignment of 1 << 4 bits : 16 bit alignment
         assert_eq!(b.alloc(500, 4), Some(0));
@@ -464,7 +484,7 @@
 
     #[test]
     fn tree_alloc_out_of_space() {
-        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new();
+        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new_full();
         b.set(0, BitmapAllocatorTree::<BitmapAllocator64>::CAPACITY, false);
         // Alignment of 1 << 4 bits : 16 bit alignment
         assert_eq!(b.alloc(1000, 4), Some(0));
@@ -477,7 +497,7 @@
 
     #[test]
     fn tree_free_space() {
-        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new();
+        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new_full();
         b.set(0, BitmapAllocatorTree::<BitmapAllocator64>::CAPACITY, false);
         // Alignment of 1 << 4 bits : 16 bit alignment
         assert_eq!(
@@ -492,7 +512,7 @@ mod tests {
 
     #[test]
     fn tree_free_multiple() {
-        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new();
+        let mut b = BitmapAllocatorTree::<BitmapAllocator64>::new_full();
         b.set(0, BitmapAllocatorTree::<BitmapAllocator64>::CAPACITY, true);
         b.free(0, 16);
         b.free(765, 16);
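
Note (illustrative, not part of the patch): the arithmetic behind TaskVirtualRegionGuard::vaddr_region() can be checked in isolation. One top-level paging entry covers 1 << ((3 * 9) + 12) bytes (SIZE_LEVEL3, matching virt_from_idx above), and splitting it across the BitmapAllocator1024::CAPACITY slots leaves a 512 MiB span per task, so each task's kernel VMR occupies a disjoint sub-range of the per-task top-level entry in that task's own page table. The base index 508 used below is hypothetical; PGTABLE_LVL3_IDX_PERTASK is not shown in this diff.

// Stand-alone sketch of the per-task span calculation (plain usize math,
// no SVSM types). Assumes a 4-level, 4 KiB-page layout as in virt_from_idx().
const LVL3_SHIFT: usize = (3 * 9) + 12; // one top-level entry spans 2^39 bytes
const SIZE_LEVEL3: usize = 1 << LVL3_SHIFT;
const CAPACITY: usize = 1024; // BitmapAllocator1024::CAPACITY
const SPAN: usize = SIZE_LEVEL3 / CAPACITY; // bytes reserved per task slot

const fn virt_from_idx(idx: usize) -> usize {
    idx << LVL3_SHIFT
}

fn vaddr_region(pertask_idx: usize, slot: usize) -> (usize, usize) {
    // Mirrors SVSM_PERTASK_BASE + (index * SPAN) from TaskVirtualRegionGuard.
    let base = virt_from_idx(pertask_idx) + slot * SPAN;
    (base, base + SPAN)
}

fn main() {
    assert_eq!(SPAN, 512 * 1024 * 1024); // 2^39 / 1024 slots = 512 MiB per task
    let (start, end) = vaddr_region(508, 3); // hypothetical index and slot
    println!("slot 3: {start:#x}..{end:#x}");
}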
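
Note (illustrative, not part of the patch): the new_full()/new_empty() split mirrors the two usage patterns in this diff. KTASK_VADDR_BITMAP starts empty so task slots can be handed out immediately, while VirtualRange keeps the old behaviour of starting fully reserved until a window is explicitly released. The sketch below shows that intent with a stand-alone single-word bitmap; it is not the kernel's BitmapAllocator trait, whose alloc() also takes an entry count and an alignment.

// Minimal sketch: a set bit means "in use", as the tests above imply
// (new_empty() can allocate at once, new_full() must have bits cleared first).
struct Bitmap64 {
    bits: u64,
}

impl Bitmap64 {
    const fn new_full() -> Self {
        Self { bits: u64::MAX } // everything reserved up front
    }
    const fn new_empty() -> Self {
        Self { bits: 0 } // everything available up front
    }
    fn alloc_one(&mut self) -> Option<usize> {
        let slot = self.bits.trailing_ones() as usize; // first clear bit
        if slot < 64 {
            self.bits |= 1 << slot;
            Some(slot)
        } else {
            None
        }
    }
    fn free(&mut self, slot: usize) {
        self.bits &= !(1 << slot);
    }
}

fn main() {
    // KTASK_VADDR_BITMAP-style use: start empty, hand out slots on demand.
    let mut tasks = Bitmap64::new_empty();
    assert_eq!(tasks.alloc_one(), Some(0));
    assert_eq!(tasks.alloc_one(), Some(1));
    tasks.free(0);
    assert_eq!(tasks.alloc_one(), Some(0));

    // VirtualRange-style use: start full; nothing can be handed out until a
    // window is explicitly released.
    let mut range = Bitmap64::new_full();
    assert_eq!(range.alloc_one(), None);
    range.free(5);
    assert_eq!(range.alloc_one(), Some(5));
}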