Skip to content

Commit

Permalink
Merge pull request syswonder#151 from ken4647/mp_el1
Browse files Browse the repository at this point in the history
Fix a bug causing an unexpected page fault during nested fork.
  • Loading branch information
ken4647 authored Dec 2, 2024
2 parents a98992a + e149cde commit 6bc5b32
Show file tree
Hide file tree
Showing 5 changed files with 20 additions and 15 deletions.
1 change: 0 additions & 1 deletion api/ruxos_posix_api/src/imp/pthread/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -310,7 +310,6 @@ pub unsafe fn sys_clone(
} else if (flags as u32 & ctypes::SIGCHLD) != 0 {
TID_TO_PTHREAD.read();
let pid = if let Some(task_ref) = ruxtask::fork_task() {
warn!("fork_task success, pid: {}", task_ref.id().as_u64());
task_ref.id().as_u64()
} else {
let children_ref = ruxtask::current();
Expand Down
8 changes: 3 additions & 5 deletions crates/driver_net/src/loopback.rs
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,6 @@ impl BaseDriverOps for LoopbackDevice {
}
}

use log::info;

impl NetDriverOps for LoopbackDevice {
#[inline]
fn mac_address(&self) -> EthernetAddress {
Expand All @@ -85,19 +83,19 @@ impl NetDriverOps for LoopbackDevice {
self.queue.len()
}

fn fill_rx_buffers(&mut self, buf_pool: &Arc<NetBufPool>) -> DevResult {
fn fill_rx_buffers(&mut self, _buf_pool: &Arc<NetBufPool>) -> DevResult {
Ok(())
}

fn recycle_rx_buffer(&mut self, rx_buf: NetBufPtr) -> DevResult {
fn recycle_rx_buffer(&mut self, _rx_buf: NetBufPtr) -> DevResult {
Ok(())
}

/// Reclaims transmit buffers that the device has finished sending.
///
/// The loopback device does not retain TX buffers after transmission, so
/// this is a no-op that always reports success.
fn recycle_tx_buffers(&mut self) -> DevResult {
Ok(())
}

fn prepare_tx_buffer(&self, tx_buf: &mut NetBuf, pkt_len: usize) -> DevResult {
fn prepare_tx_buffer(&self, _tx_buf: &mut NetBuf, _pkt_len: usize) -> DevResult {
Ok(())
}

Expand Down
4 changes: 1 addition & 3 deletions modules/ruxnet/src/smoltcp_impl/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,6 @@ pub use self::dns::dns_query;
pub use self::tcp::TcpSocket;
pub use self::udp::UdpSocket;

pub use driver_net::loopback::LoopbackDevice;

macro_rules! env_or_default {
($key:literal) => {
match option_env!($key) {
Expand Down Expand Up @@ -347,7 +345,7 @@ pub fn bench_receive() {
}

pub(crate) fn init() {
let mut socketset = SocketSetWrapper::new();
let socketset = SocketSetWrapper::new();

IFACE_LIST.init_by(Mutex::new(vec::Vec::new()));
SOCKET_SET.init_by(socketset);
Expand Down
1 change: 1 addition & 0 deletions modules/ruxtask/src/run_queue.rs
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,7 @@ impl AxRunQueue {
assert!(!curr.is_idle());

// we must not block current task with preemption disabled.
// only allow blocking current task with run_queue lock held.
#[cfg(feature = "preempt")]
assert!(curr.can_preempt(1));

Expand Down
21 changes: 15 additions & 6 deletions modules/ruxtask/src/task.rs
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,7 @@ pub struct TaskInner {
exit_code: AtomicI32,
wait_for_exit: WaitQueue,

stack_map_addr: SpinNoIrq<VirtAddr>,
kstack: SpinNoIrq<Arc<Option<TaskStack>>>,
ctx: UnsafeCell<TaskContext>,

Expand Down Expand Up @@ -235,6 +236,7 @@ impl TaskInner {
preempt_disable_count: AtomicUsize::new(0),
exit_code: AtomicI32::new(0),
wait_for_exit: WaitQueue::new(),
stack_map_addr: SpinNoIrq::new(VirtAddr::from(0)), // should be set later
kstack: SpinNoIrq::new(Arc::new(None)),
ctx: UnsafeCell::new(TaskContext::new()),
#[cfg(feature = "tls")]
Expand Down Expand Up @@ -279,6 +281,7 @@ impl TaskInner {
preempt_disable_count: AtomicUsize::new(0),
exit_code: AtomicI32::new(0),
wait_for_exit: WaitQueue::new(),
stack_map_addr: SpinNoIrq::new(VirtAddr::from(0)),
kstack: SpinNoIrq::new(Arc::new(None)),
ctx: UnsafeCell::new(TaskContext::new()),
#[cfg(feature = "tls")]
Expand All @@ -299,6 +302,7 @@ impl TaskInner {

pub fn set_stack_top(&self, begin: usize, size: usize) {
debug!("set_stack_top: begin={:#x}, size={:#x}", begin, size);
*self.stack_map_addr.lock() = VirtAddr::from(begin);
*self.kstack.lock() = Arc::new(Some(TaskStack {
ptr: NonNull::new(begin as *mut u8).unwrap(),
layout: Layout::from_size_align(size, PAGE_SIZE_4K).unwrap(),
Expand Down Expand Up @@ -406,14 +410,14 @@ impl TaskInner {

// Note: the stack region is mapped to the same position as the parent process's stack; be careful when updating the stack region for the forked process.
let (_, prev_flag, _) = cloned_page_table
.query(current_stack.end())
.query(*current().stack_map_addr.lock())
.expect("failed to query stack region when forking");
cloned_page_table
.unmap_region(current_stack.end(), align_up_4k(stack_size))
.unmap_region(*current().stack_map_addr.lock(), align_up_4k(stack_size))
.expect("failed to unmap stack region when forking");
cloned_page_table
.map_region(
current_stack.end(),
*current().stack_map_addr.lock(),
stack_paddr,
stack_size,
prev_flag,
Expand Down Expand Up @@ -477,10 +481,11 @@ impl TaskInner {
need_resched: AtomicBool::new(current_task.need_resched.load(Ordering::Relaxed)),
#[cfg(feature = "preempt")]
preempt_disable_count: AtomicUsize::new(
current_task.preempt_disable_count.load(Ordering::Relaxed),
current_task.preempt_disable_count.load(Ordering::Acquire),
),
exit_code: AtomicI32::new(0),
wait_for_exit: WaitQueue::new(),
stack_map_addr: SpinNoIrq::new(*current().stack_map_addr.lock()),
kstack: SpinNoIrq::new(Arc::new(Some(new_stack))),
ctx: UnsafeCell::new(TaskContext::new()),
#[cfg(feature = "tls")]
Expand Down Expand Up @@ -515,6 +520,7 @@ impl TaskInner {
.lock()
.insert(new_pid.as_u64(), task_ref.clone());

warn!("forked task: save_current_content {}", task_ref.id_name());
unsafe {
// copy the stack content from current stack to new stack
(*task_ref.ctx_mut_ptr()).save_current_content(
Expand Down Expand Up @@ -554,6 +560,7 @@ impl TaskInner {
preempt_disable_count: AtomicUsize::new(0),
exit_code: AtomicI32::new(0),
wait_for_exit: WaitQueue::new(),
stack_map_addr: SpinNoIrq::new(VirtAddr::from(0)), // set in set_stack_top
kstack: SpinNoIrq::new(Arc::new(None)),
ctx: UnsafeCell::new(TaskContext::new()),
#[cfg(feature = "tls")]
Expand Down Expand Up @@ -590,6 +597,7 @@ impl TaskInner {
let bindings = PROCESS_MAP.lock();
let (&_parent_id, &ref task_ref) = bindings.first_key_value().unwrap();
let idle_kstack = TaskStack::alloc(align_up_4k(IDLE_STACK_SIZE));
let idle_kstack_top = idle_kstack.top();

let mut t = Self {
parent_process: Some(Arc::downgrade(task_ref)),
Expand All @@ -609,7 +617,8 @@ impl TaskInner {
preempt_disable_count: AtomicUsize::new(0),
exit_code: AtomicI32::new(0),
wait_for_exit: WaitQueue::new(),
kstack: SpinNoIrq::new(Arc::new(None)),
stack_map_addr: SpinNoIrq::new(idle_kstack.end()),
kstack: SpinNoIrq::new(Arc::new(Some(idle_kstack))),
ctx: UnsafeCell::new(TaskContext::new()),
#[cfg(feature = "tls")]
tls: TlsArea::alloc(),
Expand All @@ -633,7 +642,7 @@ impl TaskInner {
debug!("new idle task: {}", t.id_name());
t.ctx
.get_mut()
.init(task_entry as usize, idle_kstack.top(), tls);
.init(task_entry as usize, idle_kstack_top, tls);

let task_ref = Arc::new(AxTask::new(t));

Expand Down

0 comments on commit 6bc5b32

Please sign in to comment.