From 7380ca72cf065e1950170b18f31fbc69f1458b9d Mon Sep 17 00:00:00 2001
From: WuZheng
Date: Mon, 11 Nov 2024 16:50:19 +0800
Subject: [PATCH 1/2] fix wrong aligned_size in .percpu and deadlock problem in fork_task().

---
 api/ruxos_posix_api/src/imp/pthread/mod.rs | 12 ++++++++
 api/ruxos_posix_api/src/imp/task.rs        | 19 +++++++++++-
 api/ruxos_posix_api/src/lib.rs             |  2 +-
 crates/percpu/src/imp.rs                   |  5 ++--
 modules/ruxtask/src/api.rs                 | 12 ++++++--
 modules/ruxtask/src/task.rs                | 35 +++++++++++++++-------
 ulib/ruxmusl/src/aarch64/mod.rs            |  3 ++
 ulib/ruxmusl/src/aarch64/syscall_id.rs     |  1 +
 8 files changed, 72 insertions(+), 17 deletions(-)

diff --git a/api/ruxos_posix_api/src/imp/pthread/mod.rs b/api/ruxos_posix_api/src/imp/pthread/mod.rs
index a9e7d1374..759512cf9 100644
--- a/api/ruxos_posix_api/src/imp/pthread/mod.rs
+++ b/api/ruxos_posix_api/src/imp/pthread/mod.rs
@@ -226,6 +226,18 @@ pub fn sys_pthread_exit(retval: *mut c_void) -> ! {
     Pthread::exit_current(retval);
 }
 
+/// Exits all threads in the current process. `status` is the exit status of the process.
+pub fn sys_exit_group(status: c_int) -> ! {
+    error!("sys_exit_group <= {:#?}", status);
+
+    // TODO: exit all threads, send signal to all threads
+
+    #[cfg(feature = "multitask")]
+    ruxtask::exit(status);
+    #[cfg(not(feature = "multitask"))]
+    ruxhal::misc::terminate();
+}
+
 /// Waits for the given thread to exit, and stores the return value in `retval`.
 pub unsafe fn sys_pthread_join(thread: ctypes::pthread_t, retval: *mut *mut c_void) -> c_int {
     debug!("sys_pthread_join <= {:#x}", retval as usize);
diff --git a/api/ruxos_posix_api/src/imp/task.rs b/api/ruxos_posix_api/src/imp/task.rs
index eb93268f1..b4227b8a1 100644
--- a/api/ruxos_posix_api/src/imp/task.rs
+++ b/api/ruxos_posix_api/src/imp/task.rs
@@ -63,7 +63,7 @@ pub fn sys_wait4(
 ) -> c_int {
     const WNOHANG: c_int = 0x00000001;
 
-    error!(
+    debug!(
         "sys_wait4 <= pid: {}, wstatus: {:?}, options: {}, rusage: {:?}",
         pid, wstatus, options, rusage
     );
@@ -73,6 +73,12 @@ pub fn sys_wait4(
         let mut process_map = PROCESS_MAP.lock();
         if let Some(task) = process_map.get(&(pid as u64)) {
             if task.state() == ruxtask::task::TaskState::Exited {
+                unsafe {
+                    // lower 8 bits of exit_code are the signal number, while upper 8 bits of exit_code are the exit status,
+                    // according to "bits/waitstatus.h" in the glibc source code.
+                    // TODO: add signal number to wstatus
+                    wstatus.write(task.exit_code() << 8);
+                }
                 process_map.remove(&(pid as u64));
                 return pid;
             } else if options & WNOHANG != 0 {
@@ -98,6 +104,13 @@ pub fn sys_wait4(
             if parent_pid == ruxtask::current().id().as_u64() {
                 if task.state() == ruxtask::task::TaskState::Exited {
                     // add to to_remove list
+                    let task_ref = process_map.get(child_pid).unwrap();
+                    unsafe {
+                        // lower 8 bits of exit_code are the signal number, while upper 8 bits of exit_code are the exit status,
+                        // according to "bits/waitstatus.h" in the glibc source code.
+                        // TODO: add signal number to wstatus
+                        wstatus.write(task.exit_code() << 8);
+                    }
                     let _ = to_remove.insert(*child_pid);
                     break;
                 }
@@ -108,6 +121,10 @@ pub fn sys_wait4(
         }
         // drop lock before yielding to other tasks
         drop(process_map);
+        // stop waiting if an exited child has been found
+        if to_remove.is_some() {
+            break;
+        }
         // for single-cpu system, we must yield to other tasks instead of dead-looping here.
         yield_now();
     }
diff --git a/api/ruxos_posix_api/src/lib.rs b/api/ruxos_posix_api/src/lib.rs
index 51c44653f..699eae360 100644
--- a/api/ruxos_posix_api/src/lib.rs
+++ b/api/ruxos_posix_api/src/lib.rs
@@ -117,7 +117,7 @@ pub use imp::pthread::sys_clone;
 #[cfg(all(feature = "multitask", feature = "musl"))]
 pub use imp::pthread::sys_set_tid_address;
 #[cfg(feature = "multitask")]
-pub use imp::pthread::{sys_pthread_create, sys_pthread_exit, sys_pthread_join, sys_pthread_self};
+pub use imp::pthread::{sys_pthread_create, sys_pthread_exit, sys_exit_group, sys_pthread_join, sys_pthread_self};
 #[cfg(feature = "fs")]
 pub use imp::execve::sys_execve;
diff --git a/crates/percpu/src/imp.rs b/crates/percpu/src/imp.rs
index aa7aeed99..f63c0b3dc 100644
--- a/crates/percpu/src/imp.rs
+++ b/crates/percpu/src/imp.rs
@@ -8,8 +8,9 @@
  */
 
 const fn align_up(val: usize) -> usize {
-    const PAGE_SIZE: usize = 0x1000;
-    (val + PAGE_SIZE - 1) & !(PAGE_SIZE - 1)
+    // should be the same as align_size in percpu.
+    const ALIGN_SIZE: usize = 64;
+    (val + ALIGN_SIZE - 1) & !(ALIGN_SIZE - 1)
 }
 
 #[cfg(not(target_os = "none"))]
diff --git a/modules/ruxtask/src/api.rs b/modules/ruxtask/src/api.rs
index edcd42f99..2ae4d1d4b 100644
--- a/modules/ruxtask/src/api.rs
+++ b/modules/ruxtask/src/api.rs
@@ -132,19 +132,25 @@ where
 }
 
 pub fn fork_task() -> Option<AxTaskRef> {
-    let current_process = current();
-    let current_id = current_process.id().as_u64();
+    let current_id = current().id().as_u64();
     let children_process = TaskInner::fork();
 
     // Judge whether the parent process is blocked, if yes, add it to the blocking queue of the child process
     if current().id().as_u64() == current_id {
         RUN_QUEUE.lock().add_task(children_process.clone());
+        return Some(children_process);
     }
-
+
+    unsafe { RUN_QUEUE.force_unlock(); }
+    // should not drop the children_process here, because it will be taken
+    // and dropped by the parent process
     let _ = ManuallyDrop::new(children_process);
 
+    #[cfg(feature = "irq")]
+    ruxhal::arch::enable_irqs();
+
     return None;
 }
diff --git a/modules/ruxtask/src/task.rs b/modules/ruxtask/src/task.rs
index 4099dbe61..d84b72c89 100644
--- a/modules/ruxtask/src/task.rs
+++ b/modules/ruxtask/src/task.rs
@@ -163,6 +163,11 @@ impl TaskInner {
         None
     }
 
+    /// Get the exit code of the task
+    pub fn exit_code(&self) -> i32 {
+        self.exit_code.load(Ordering::Acquire)
+    }
+
     /// Get process task
     pub fn process_task(&self) -> Arc<AxTask> {
         if let Some(process_task) = self.process_task.upgrade() {
@@ -581,16 +586,19 @@ impl TaskInner {
     }
 
     pub fn new_idle(name: String) -> AxTaskRef {
+        const IDLE_STACK_SIZE: usize = 4096;
         let bindings = PROCESS_MAP.lock();
         let (&_parent_id, &ref task_ref) = bindings.first_key_value().unwrap();
-        let t = Self {
+        let idle_kstack = TaskStack::alloc(align_up_4k(IDLE_STACK_SIZE));
+
+        let mut t = Self {
             parent_process: Some(Arc::downgrade(task_ref)),
             process_task: task_ref.process_task.clone(),
             id: TaskId::new(),
             name,
             is_idle: true,
             is_init: false,
-            entry: None,
+            entry: Some(Box::into_raw(Box::new(|| crate::run_idle()))),
             state: AtomicU8::new(TaskState::Ready as u8),
             in_wait_queue: AtomicBool::new(false),
             #[cfg(feature = "irq")]
@@ -617,7 +625,21 @@ impl TaskInner {
             mm: task_ref.mm.clone(),
         };
 
-        Arc::new(AxTask::new(t))
+        #[cfg(feature = "tls")]
+        let tls = VirtAddr::from(t.tls.tls_ptr() as usize);
+        #[cfg(not(feature = "tls"))]
+        let tls = VirtAddr::from(0);
+
+        debug!("new idle task: {}", t.id_name());
+        t.ctx.get_mut().init(
+            task_entry as usize,
+            idle_kstack.top(),
+            tls,
+        );
+
+        let task_ref = Arc::new(AxTask::new(t));
+
+        task_ref
     }
 
     /// Get task state
@@ -771,12 +793,6 @@ impl fmt::Debug for TaskInner {
     }
 }
 
-impl Drop for TaskInner {
-    fn drop(&mut self) {
-        error!("task drop: {}", self.id_name());
-    }
-}
-
 #[derive(Debug)]
 pub struct TaskStack {
     ptr: NonNull<u8>,
@@ -786,7 +802,6 @@ pub struct TaskStack {
 impl TaskStack {
     pub fn alloc(size: usize) -> Self {
         let layout = Layout::from_size_align(size, 8).unwrap();
-        debug!("taskStack::layout = {:?}", layout);
         Self {
             ptr: NonNull::new(unsafe { alloc::alloc::alloc(layout) }).unwrap(),
             layout,
diff --git a/ulib/ruxmusl/src/aarch64/mod.rs b/ulib/ruxmusl/src/aarch64/mod.rs
index c7a15f889..fb8f6f17b 100644
--- a/ulib/ruxmusl/src/aarch64/mod.rs
+++ b/ulib/ruxmusl/src/aarch64/mod.rs
@@ -201,6 +201,9 @@ pub fn syscall(syscall_id: SyscallId, args: [usize; 6]) -> isize {
         SyscallId::EXIT => {
             ruxos_posix_api::sys_pthread_exit(args[0] as *mut core::ffi::c_void) as _
         }
+        SyscallId::EXIT_GROUP => {
+            ruxos_posix_api::sys_exit_group(args[0] as c_int)
+        }
         #[cfg(feature = "multitask")]
         SyscallId::SET_TID_ADDRESS => ruxos_posix_api::sys_set_tid_address(args[0]) as _,
         #[cfg(feature = "multitask")]
diff --git a/ulib/ruxmusl/src/aarch64/syscall_id.rs b/ulib/ruxmusl/src/aarch64/syscall_id.rs
index c79257b66..c7faeb89d 100644
--- a/ulib/ruxmusl/src/aarch64/syscall_id.rs
+++ b/ulib/ruxmusl/src/aarch64/syscall_id.rs
@@ -73,6 +73,7 @@ pub enum SyscallId {
     FDATASYNC = 83,
     CAP_GET = 90,
     EXIT = 93,
+    EXIT_GROUP = 94,
     #[cfg(feature = "multitask")]
     SET_TID_ADDRESS = 96,
     #[cfg(feature = "multitask")]

From ee23e4def16a5643a0dff83358eaf189801dac53 Mon Sep 17 00:00:00 2001
From: WuZheng
Date: Thu, 14 Nov 2024 17:19:21 +0800
Subject: [PATCH 2/2] fix bugs for pipe's write end closure and exit group

---
 api/ruxos_posix_api/src/imp/pipe.rs        | 12 +++++++++---
 api/ruxos_posix_api/src/imp/pthread/mod.rs |  9 ++++++---
 api/ruxos_posix_api/src/imp/task.rs        |  3 +--
 modules/ruxtask/src/fs.rs                  | 10 ++++++++++
 4 files changed, 26 insertions(+), 8 deletions(-)

diff --git a/api/ruxos_posix_api/src/imp/pipe.rs b/api/ruxos_posix_api/src/imp/pipe.rs
index 8b61a3f1a..6fb29a04c 100644
--- a/api/ruxos_posix_api/src/imp/pipe.rs
+++ b/api/ruxos_posix_api/src/imp/pipe.rs
@@ -7,7 +7,7 @@
  * See the Mulan PSL v2 for more details.
  */
 
-use alloc::sync::Arc;
+use alloc::sync::{Weak, Arc};
 use core::ffi::c_int;
 
 use axerrno::{LinuxError, LinuxResult};
@@ -87,6 +87,8 @@ impl PipeRingBuffer {
 pub struct Pipe {
     readable: bool,
     buffer: Arc<Mutex<PipeRingBuffer>>,
+    // a weak reference held only by the write end, used to detect whether the write end has been closed
+    _write_end_closed: Option<Weak<Mutex<PipeRingBuffer>>>,
 }
 
 impl Pipe {
@@ -95,10 +97,12 @@ impl Pipe {
         let read_end = Pipe {
             readable: true,
             buffer: buffer.clone(),
+            _write_end_closed: None,
        };
         let write_end = Pipe {
             readable: false,
-            buffer,
+            buffer: buffer.clone(),
+            _write_end_closed: Some(Arc::downgrade(&buffer)),
         };
         (read_end, write_end)
     }
@@ -112,7 +116,9 @@ impl Pipe {
     }
 
     pub fn write_end_close(&self) -> bool {
-        Arc::strong_count(&self.buffer) == 1
+        let write_end_count = Arc::weak_count(&self.buffer);
+        // error!("Pipe::write_end_close <= buffer: {:#?} {:#?}", write_end_count, Arc::as_ptr(&self.buffer));
+        write_end_count == 0
     }
 }
diff --git a/api/ruxos_posix_api/src/imp/pthread/mod.rs b/api/ruxos_posix_api/src/imp/pthread/mod.rs
index 759512cf9..405d5b13a 100644
--- a/api/ruxos_posix_api/src/imp/pthread/mod.rs
+++ b/api/ruxos_posix_api/src/imp/pthread/mod.rs
@@ -12,7 +12,7 @@ use core::cell::UnsafeCell;
 use core::ffi::{c_int, c_void};
 
 use axerrno::{LinuxError, LinuxResult};
-use ruxtask::AxTaskRef;
+use ruxtask::{current, AxTaskRef};
 use spin::RwLock;
 
 use crate::ctypes;
@@ -228,9 +228,12 @@ pub fn sys_pthread_exit(retval: *mut c_void) -> ! {
 
 /// Exits all threads in the current process. `status` is the exit status of the process.
 pub fn sys_exit_group(status: c_int) -> ! {
-    error!("sys_exit_group <= {:#?}", status);
+    debug!("sys_exit_group <= status: {:#?}", status);
 
     // TODO: exit all threads, send signal to all threads
+
+    // drop all files opened by the current task
+    current().fs.lock().as_mut().unwrap().close_all_files();
 
     #[cfg(feature = "multitask")]
     ruxtask::exit(status);
@@ -322,7 +325,7 @@ pub unsafe fn sys_clone(
             TID_TO_PTHREAD.write().insert(tid, ForceSendSync(ptr));
             0
         };
-        warn!("will sys_clone <= pid: {}", pid);
+        debug!("will sys_clone <= pid: {}", pid);
         return Ok(pid);
     } else {
         debug!("ONLY support CLONE_THREAD and SIGCHLD");
diff --git a/api/ruxos_posix_api/src/imp/task.rs b/api/ruxos_posix_api/src/imp/task.rs
index b4227b8a1..17d365c83 100644
--- a/api/ruxos_posix_api/src/imp/task.rs
+++ b/api/ruxos_posix_api/src/imp/task.rs
@@ -54,7 +54,7 @@ pub fn sys_getppid() -> c_int {
 
 /// Wait for a child process to exit and return its status.
 ///
-/// TOSO, wstatus, options, and rusage are not implemented yet.
+/// TODO: `options` is only partially handled; `rusage` is not implemented yet.
 pub fn sys_wait4(
     pid: c_int,
     wstatus: *mut c_int,
@@ -104,7 +104,6 @@ pub fn sys_wait4(
             if parent_pid == ruxtask::current().id().as_u64() {
                 if task.state() == ruxtask::task::TaskState::Exited {
                     // add to to_remove list
-                    let task_ref = process_map.get(child_pid).unwrap();
                     unsafe {
                         // lower 8 bits of exit_code are the signal number, while upper 8 bits of exit_code are the exit status,
                         // according to "bits/waitstatus.h" in the glibc source code.
                         // TODO: add signal number to wstatus
diff --git a/modules/ruxtask/src/fs.rs b/modules/ruxtask/src/fs.rs
index 75d71a332..b603d237d 100644
--- a/modules/ruxtask/src/fs.rs
+++ b/modules/ruxtask/src/fs.rs
@@ -216,6 +216,16 @@ pub struct FileSystem {
     pub root_dir: Arc<RootDirectory>,
 }
 
+impl FileSystem {
+    pub fn close_all_files(&mut self) {
+        for fd in 0..self.fd_table.capacity() {
+            if self.fd_table.get(fd).is_some() {
+                self.fd_table.remove(fd).unwrap();
+            }
+        }
+    }
+}
+
 impl Clone for FileSystem {
     fn clone(&self) -> Self {
         let mut new_fd_table = FlattenObjects::new();
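
Note on the pipe fix in the second commit (not part of the patch itself): write_end_close() switches from an Arc::strong_count check to an Arc::weak_count check, relying on the fact that only the write end holds a Weak reference to the shared ring buffer; once the write end is dropped, the weak count observed from the read end falls to zero. Below is a minimal standalone sketch of that detection scheme. The Buffer type is a placeholder rather than the patch's PipeRingBuffer/Mutex types, and std is used only for illustration.

    use std::sync::{Arc, Weak};

    struct Buffer;

    fn main() {
        let buffer = Arc::new(Buffer);
        let read_end = Arc::clone(&buffer);                       // read end: strong reference only
        let write_marker: Weak<Buffer> = Arc::downgrade(&buffer); // held by the write end
        drop(buffer);

        // While the write end is alive, its Weak keeps the weak count above zero.
        assert_eq!(Arc::weak_count(&read_end), 1);

        // Dropping the write end drops its Weak, so the read end observes weak_count == 0,
        // which is exactly the condition the patched write_end_close() treats as "write end closed".
        drop(write_marker);
        assert_eq!(Arc::weak_count(&read_end), 0);
    }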