From c925368d6e81e74e879c477afcd2dd299b4bf144 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Sat, 25 Nov 2023 16:34:16 -0800 Subject: [PATCH 01/23] o General code cleanup --- fs/dev.h | 2 +- fs/dyndev.c | 45 ++------------------------------------------- fs/tty.c | 4 ++-- kernel/calls.c | 2 +- kernel/futex.c | 2 +- kernel/log.c | 4 ++-- kernel/task.c | 2 +- kernel/time.c | 2 +- 8 files changed, 11 insertions(+), 52 deletions(-) diff --git a/fs/dev.h b/fs/dev.h index 7b2ca6178f..79246f43ea 100644 --- a/fs/dev.h +++ b/fs/dev.h @@ -34,7 +34,7 @@ static inline dev_t_ dev_fake_from_real(dev_t dev) { #define DEV_CHAR 1 struct dev_ops { - intptr_t (*open)(int major, int minor, struct fd *fd); + int (*open)(int major, int minor, struct fd *fd); struct fd_ops fd; }; diff --git a/fs/dyndev.c b/fs/dyndev.c index b540ff66bc..dd434ebee1 100644 --- a/fs/dyndev.c +++ b/fs/dyndev.c @@ -50,7 +50,7 @@ int dyn_dev_register(struct dev_ops *ops, int type, int major, int minor) { return 0; } -static intptr_t dyn_open(int type, int major, int minor, struct fd *fd) { +static int dyn_open(int type, int major, int minor, struct fd *fd) { assert((type == DEV_CHAR) || (type == DEV_BLOCK)); assert(major == DYN_DEV_MAJOR || major == DEV_RTC_MAJOR); // mkemkemke // it's safe to access devs without locking (read-only) @@ -73,52 +73,11 @@ static intptr_t dyn_open(int type, int major, int minor, struct fd *fd) { return 0; } -static intptr_t dyn_open_char(int major, int minor, struct fd *fd) { +static int dyn_open_char(int major, int minor, struct fd *fd) { return dyn_open(DEV_CHAR, major, minor, fd); } -static intptr_t rtc_open(int major, int minor, struct fd *fd) { - //return &(intptr_t)(major, minor, fd); - return (intptr_t)fd; -} - -struct rtc_time { - int tm_sec; /* seconds */ - int tm_min; /* minutes */ - int tm_hour; /* hours */ - int tm_mday; /* day of the month */ - int tm_mon; /* month */ - int tm_year; /* year */ -}; - -intptr_t rtc_dev(void *buf, size_t count) { - if (count < sizeof(struct rtc_time)) { - errno = EFAULT; - return -1; - } - - time_t now; - struct tm *tm_now; - struct rtc_time emulatedRTC; - - time(&now); - tm_now = localtime(&now); - - emulatedRTC.tm_sec = tm_now->tm_sec; - emulatedRTC.tm_min = tm_now->tm_min; - emulatedRTC.tm_hour = tm_now->tm_hour; - emulatedRTC.tm_mday = tm_now->tm_mday; - emulatedRTC.tm_mon = tm_now->tm_mon; - emulatedRTC.tm_year = tm_now->tm_year + 1900; - - memcpy(buf, &emulatedRTC, sizeof(emulatedRTC)); - return sizeof(emulatedRTC); -} - struct dev_ops dyn_dev_char = { .open = dyn_open_char, }; -struct dev_ops rtc_dev_char = { - .open = rtc_dev, -}; diff --git a/fs/tty.c b/fs/tty.c index 211f759f77..8be108115a 100644 --- a/fs/tty.c +++ b/fs/tty.c @@ -161,7 +161,7 @@ int tty_open(struct tty *tty, struct fd *fd) { return 0; } -static intptr_t tty_device_open(int major, int minor, struct fd *fd) { +static int tty_device_open(int major, int minor, struct fd *fd) { struct tty *tty; if (major == TTY_ALTERNATE_MAJOR) { if (minor == DEV_TTY_MINOR) { @@ -189,7 +189,7 @@ static intptr_t tty_device_open(int major, int minor, struct fd *fd) { assert(driver != NULL); tty = tty_get(driver, major, minor); if (IS_ERR(tty)) - return PTR_ERR(tty); + return (int)PTR_ERR(tty); } if (tty->driver->ops->open) { diff --git a/kernel/calls.c b/kernel/calls.c index 02999adcfd..497067f525 100644 --- a/kernel/calls.c +++ b/kernel/calls.c @@ -380,7 +380,7 @@ void handle_illegal_instruction_interrupt(struct cpu_state *cpu) { deliver_signal(current, SIGILL_, info); } -void handle_timer_interrupt(struct 
cpu_state *cpu) { +void handle_timer_interrupt(__attribute__((unused)) struct cpu_state *cpu) { // For now we just return. return; } diff --git a/kernel/futex.c b/kernel/futex.c index f915a65eb3..f8d39ea06a 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -200,7 +200,7 @@ static int futex_cmp_requeue(addr_t uaddr1, dword_t val, addr_t uaddr2, dword_t err = _EAGAIN; } else { struct futex_wait *wait, *tmp_wait; - int requeued = 0; + dword_t requeued = 0; list_for_each_entry_safe(&futex1->queue, wait, tmp_wait, queue) { if (requeued >= val2) { break; diff --git a/kernel/log.c b/kernel/log.c index 8e16c076a7..9c730f21f2 100644 --- a/kernel/log.c +++ b/kernel/log.c @@ -85,7 +85,7 @@ static size_t syslog_read(addr_t buf_addr, size_t len, int flags) { } static size_t do_syslog(int type, addr_t buf_addr, int_t len) { - size_t res; + int res; switch (type) { case SYSLOG_ACTION_READ_: return syslog_read(buf_addr, len, 0); @@ -93,7 +93,7 @@ static size_t do_syslog(int type, addr_t buf_addr, int_t len) { return syslog_read(buf_addr, len, FIFO_LAST | FIFO_PEEK); case SYSLOG_ACTION_READ_CLEAR_: - res = syslog_read(buf_addr, len, FIFO_LAST | FIFO_PEEK); + res = (int)syslog_read(buf_addr, len, FIFO_LAST | FIFO_PEEK); if (res < 0) return res; fallthrough; diff --git a/kernel/task.c b/kernel/task.c index cd61d90e32..8315ed10ef 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -283,7 +283,7 @@ void update_thread_name(void) { result = snprintf(name, sizeof(name) - 1, "%.7s-%d", current->comm, current->pid); // Check if the output was truncated - if (result >= sizeof(name)) { + if (result >= (int)sizeof(name)) { // Handle truncation (e.g., by logging, adjusting the name format, etc.) // For this example, we just log a warning printk("WARNING: Thread name truncated in update_thread_name(%s).\n", name); diff --git a/kernel/time.c b/kernel/time.c index 511c4df99d..f78af239e2 100644 --- a/kernel/time.c +++ b/kernel/time.c @@ -48,7 +48,7 @@ static struct itimerspec_ timer_spec_from_real(struct timer_spec spec) { #include #include -dword_t sys_clock_nanosleep_time64(int clock_id, int flags, dword_t req_val, dword_t rem_val) { +dword_t sys_clock_nanosleep_time64(__attribute__((unused)) int clock_id, __attribute__((unused)) int flags, dword_t req_val, dword_t rem_val) { struct timespec req; struct timespec rem; From 85bf26584aed90e9a13572e47fd541d2dd5fdeba Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Sun, 26 Nov 2023 12:19:54 -0800 Subject: [PATCH 02/23] Code cleanup, stability fix (I believe) --- app/AppDelegate.m | 4 + app/PasteboardDevice.m | 2 +- emu/memory.c | 2 +- fs/adhoc.c | 7 +- fs/proc/pid.c | 2 + iSH-AOK.xcodeproj/project.pbxproj | 24 +-- .../xcshareddata/xcschemes/iSH.xcscheme | 1 + kernel/calls.c | 2 +- kernel/exec.c | 6 +- kernel/exit.c | 92 ++---------- kernel/fork.c | 8 +- kernel/log.c | 16 +- kernel/signal.c | 2 +- kernel/task.c | 9 +- kernel/task.h | 2 +- util/sync.c | 22 ++- util/sync.h | 140 +++++++++++------- 17 files changed, 168 insertions(+), 173 deletions(-) diff --git a/app/AppDelegate.m b/app/AppDelegate.m index 0ee63db801..e76eac8214 100644 --- a/app/AppDelegate.m +++ b/app/AppDelegate.m @@ -44,14 +44,17 @@ @interface AppDelegate () static void ios_handle_exit(struct task *task, int code) { // we are interested in init and in children of init // this is called with pids_lock as an implementation side effect, please do not cite as an example of good API design + lock(&task->general_lock, 0); complex_lockt(&pids_lock, 0, __FILE__, __LINE__); if(task->pid > MAX_PID) {// Corruption 
printk("ERROR: Insane PID in ios_handle_exit(%d)\n", task->pid); unlock(&pids_lock); + unlock(&task->general_lock); return; } if (task->parent != NULL && task->parent->parent != NULL) { unlock(&pids_lock); + unlock(&task->general_lock); return; } // pid should be saved now since task would be freed @@ -62,6 +65,7 @@ static void ios_handle_exit(struct task *task, int code) { // nanosleep(&lock_pause, NULL); // } unlock(&pids_lock); + unlock(&task->general_lock); dispatch_async(dispatch_get_main_queue(), ^{ [[NSNotificationCenter defaultCenter] postNotificationName:ProcessExitedNotification object:nil diff --git a/app/PasteboardDevice.m b/app/PasteboardDevice.m index 54f19b4a23..d6bfb6c1c9 100644 --- a/app/PasteboardDevice.m +++ b/app/PasteboardDevice.m @@ -223,7 +223,7 @@ static int clipboard_close(clip_fd *fd) { return 0; } -static intptr_t clipboard_open(int major, int minor, clip_fd *fd) { +static int clipboard_open(int major, int minor, clip_fd *fd) { // Zero fd_priv data memset(&fd_priv(fd), 0, sizeof(fd_priv(fd))); diff --git a/emu/memory.c b/emu/memory.c index 91a923ed14..7c4644df64 100644 --- a/emu/memory.c +++ b/emu/memory.c @@ -401,7 +401,7 @@ int mem_segv_reason(struct mem *mem, addr_t addr) { } size_t real_page_size; -__attribute__((constructor)) static void get_real_page_size() { +__attribute__((constructor)) static void get_real_page_size(void) { real_page_size = sysconf(_SC_PAGESIZE); } diff --git a/fs/adhoc.c b/fs/adhoc.c index a56a3bc15d..b2eef5a36e 100644 --- a/fs/adhoc.c +++ b/fs/adhoc.c @@ -40,11 +40,14 @@ static int adhoc_fsetattr(struct fd *fd, struct attr attr) { } static int adhoc_getpath(struct fd *fd, char *buf) { + // Need to specify max path size const char *type = "unknown"; // TODO allow this to be customized + size_t buf_size = 4096; // A size that should be sufficient for the formatted string + if (fd->stat.inode == 0) - sprintf(buf, "anon_inode:[%s]", type); + snprintf(buf, buf_size, "anon_inode:[%s]", type); else - sprintf(buf, "%s:[%lu]", type, (unsigned long) fd->stat.inode); + snprintf(buf, buf_size, "%s:[%lu]", type, (unsigned long) fd->stat.inode); return 0; } diff --git a/fs/proc/pid.c b/fs/proc/pid.c index aaaecadf95..76583565dd 100644 --- a/fs/proc/pid.c +++ b/fs/proc/pid.c @@ -36,6 +36,8 @@ static int proc_pid_stat_show(struct proc_entry *entry, struct proc_data *buf) { return _ESRCH; ////modify_critical_region_counter(task, 1, __FILE__, __LINE__); + if(!strcmp(task->general_lock.lname, "task_creat_gen")) // Work around. Sometimes the general lock is locked when it shouldn't be + unlock(&task->general_lock); lock(&task->general_lock, 0); lock(&task->group->lock, 0); // lock(&task->sighand->lock); //mkemke. Evil, but I'm tired of trying to track down why this is getting munged for now. 
diff --git a/iSH-AOK.xcodeproj/project.pbxproj b/iSH-AOK.xcodeproj/project.pbxproj index 6067b2d31d..5e5b12b36e 100644 --- a/iSH-AOK.xcodeproj/project.pbxproj +++ b/iSH-AOK.xcodeproj/project.pbxproj @@ -2555,7 +2555,7 @@ CLANG_ENABLE_MODULES = YES; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 502; + CURRENT_PROJECT_VERSION = 503; DEVELOPMENT_TEAM = UYU5FM4LQ4; LD_RUNPATH_SEARCH_PATHS = ( "$(inherited)", @@ -2581,7 +2581,7 @@ CLANG_ENABLE_MODULES = YES; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 502; + CURRENT_PROJECT_VERSION = 503; DEVELOPMENT_TEAM = UYU5FM4LQ4; LD_RUNPATH_SEARCH_PATHS = ( "$(inherited)", @@ -2616,7 +2616,7 @@ CLANG_ENABLE_MODULES = YES; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 502; + CURRENT_PROJECT_VERSION = 503; DEVELOPMENT_TEAM = UYU5FM4LQ4; LD_RUNPATH_SEARCH_PATHS = ( "$(inherited)", @@ -2640,7 +2640,7 @@ buildSettings = { CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 502; + CURRENT_PROJECT_VERSION = 503; DEVELOPMENT_TEAM = UYU5FM4LQ4; MARKETING_VERSION = 1.3; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK"; @@ -2665,7 +2665,7 @@ CODE_SIGN_ENTITLEMENTS = app/FileProvider/iSHFileProvider.entitlements; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 502; + CURRENT_PROJECT_VERSION = 503; DEVELOPMENT_TEAM = UYU5FM4LQ4; INFOPLIST_FILE = app/FileProvider/Info.plist; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK.FileProvider"; @@ -2776,7 +2776,7 @@ CODE_SIGN_ENTITLEMENTS = app/FileProvider/iSHFileProvider.entitlements; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 502; + CURRENT_PROJECT_VERSION = 503; DEVELOPMENT_TEAM = UYU5FM4LQ4; INFOPLIST_FILE = app/FileProvider/Info.plist; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK.FileProvider"; @@ -2794,7 +2794,7 @@ CODE_SIGN_ENTITLEMENTS = iSHFileProviderRelease.entitlements; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 502; + CURRENT_PROJECT_VERSION = 503; DEVELOPMENT_TEAM = UYU5FM4LQ4; INFOPLIST_FILE = app/FileProvider/Info.plist; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK.FileProvider"; @@ -2897,7 +2897,7 @@ buildSettings = { CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 502; + CURRENT_PROJECT_VERSION = 503; DEVELOPMENT_TEAM = UYU5FM4LQ4; MARKETING_VERSION = 1.3; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK"; @@ -2912,7 +2912,7 @@ buildSettings = { CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 502; + CURRENT_PROJECT_VERSION = 503; DEVELOPMENT_TEAM = UYU5FM4LQ4; MARKETING_VERSION = 1.3; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK"; @@ -2940,7 +2940,7 @@ CLANG_ENABLE_MODULES = YES; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 502; + CURRENT_PROJECT_VERSION = 503; DEVELOPMENT_TEAM = UYU5FM4LQ4; LD_RUNPATH_SEARCH_PATHS = ( "$(inherited)", @@ -2963,7 +2963,7 @@ buildSettings = { CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 502; + CURRENT_PROJECT_VERSION = 503; DEVELOPMENT_TEAM = UYU5FM4LQ4; MARKETING_VERSION = 1.3; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK"; @@ -2988,7 +2988,7 @@ CODE_SIGN_ENTITLEMENTS = app/FileProvider/iSHFileProvider.entitlements; 
CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 502; + CURRENT_PROJECT_VERSION = 503; DEVELOPMENT_TEAM = UYU5FM4LQ4; INFOPLIST_FILE = app/FileProvider/Info.plist; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK.FileProvider"; diff --git a/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme b/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme index 4d398b54bc..961f16429a 100644 --- a/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme +++ b/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme @@ -91,6 +91,7 @@ buildConfiguration = "Debug-ApplePleaseFixFB19282108" selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + enableAddressSanitizer = "YES" enableASanStackUseAfterReturn = "YES" enableUBSanitizer = "YES" launchStyle = "0" diff --git a/kernel/calls.c b/kernel/calls.c index 497067f525..a70f2b9207 100644 --- a/kernel/calls.c +++ b/kernel/calls.c @@ -357,7 +357,7 @@ void handle_page_fault_interrupt(struct cpu_state *cpu) { .code = mem_segv_reason(current->mem, cpu->segfault_addr), .fault.addr = cpu->segfault_addr, }; - current->zombie = true; + //current->zombie = true; dump_stack(8); deliver_signal(current, SIGSEGV_, info); } diff --git a/kernel/exec.c b/kernel/exec.c index 724108ad08..8d3268f0d6 100644 --- a/kernel/exec.c +++ b/kernel/exec.c @@ -477,7 +477,7 @@ static inline int user_memset(addr_t start, byte_t val, dword_t len) { } static int format_exec(struct fd *fd, const char *file, struct exec_args argv, struct exec_args envp) { - int err = elf_exec(fd, file, argv, envp); + int err = (int)elf_exec(fd, file, argv, envp); if (err != _ENOEXEC) return err; // other formats would go here @@ -557,7 +557,7 @@ static int shebang_exec(struct fd *fd, const char *file, struct exec_args argv, struct fd *interpreter_fd = generic_open(interpreter, O_RDONLY_, 0); if (IS_ERR(interpreter_fd)) - return PTR_ERR(interpreter_fd); + return (int)PTR_ERR(interpreter_fd); int err = format_exec(interpreter_fd, interpreter, new_argv, envp); fd_close(interpreter_fd); return err; @@ -566,7 +566,7 @@ static int shebang_exec(struct fd *fd, const char *file, struct exec_args argv, int __do_execve(const char *file, struct exec_args argv, struct exec_args envp) { struct fd *fd = generic_open(file, O_RDONLY, 0); if (IS_ERR(fd)) - return PTR_ERR(fd); + return (int)PTR_ERR(fd); struct statbuf stat; int err = fd->mount->fs->fstat(fd, &stat); diff --git a/kernel/exit.c b/kernel/exit.c index bce82a74ab..0b3ca8401c 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -60,6 +60,8 @@ noreturn void do_exit(int status) { current->exiting = true; + lock(¤t->general_lock, 0); + bool signal_pending = !!(current->pending & ~current->blocked); // has to happen before mm_release @@ -170,6 +172,7 @@ noreturn void do_exit(int status) { if (exit_tgroup(current)) { // notify parent that we died struct task *parent = leader->parent; + lock(&parent->general_lock, 0); if (parent == NULL) { // init died halt_system(); @@ -188,16 +191,23 @@ noreturn void do_exit(int status) { send_signal(parent, leader->exit_signal, info); } + if (exit_hook != NULL) exit_hook(current, status); + + unlock(&parent->general_lock); } modify_critical_region_counter(current, -1, __FILE__, __LINE__); vfork_notify(current); - if(current != leader) - task_destroy(current); + if(current != leader) { + task_destroy(current, 1); + } else { + unlock(¤t->general_lock); + } unlock(&pids_lock); + //atomic_l_unlockf(); 
EXIT:pthread_exit(NULL); @@ -223,14 +233,10 @@ noreturn void do_exit_group(int status) { modify_locks_held_count(current, tmpvar); // Reset to zero -mke } - //while((critical_region_count(current))) { // Wait for now, task is in one or more critical sections, and/or has locks - // nanosleep(&lock_pause, NULL); - // } modify_critical_region_counter(current, 1, __FILE__, __LINE__); list_for_each_entry(&group->threads, task, group_links) { task->exiting = true; deliver_signal(task, SIGKILL_, SIGINFO_NIL); - //printk("INFO: Killing %s(%d)\n", current->comm, current->pid); task->group->stopped = false; notify(&task->group->stopped_cond); } @@ -286,15 +292,7 @@ dword_t sys_exit_group(dword_t status) { static bool reap_if_zombie(struct task *task, struct siginfo_ *info_out, struct rusage_ *rusage_out, int options) { if (!task->zombie) return false; - bool signal_pending = !!(task->pending & ~task->blocked); - while(((signal_pending) || - (critical_region_count(task) > 1) || - (locks_held_count(task))) && - (task->pid > 10)) { - nanosleep(&lock_pause, NULL); - signal_pending = !!(task->pending & ~task->blocked); - } - complex_lockt(&task->group->lock, 0, __FILE__, __LINE__); + lock(&task->group->lock, 0); dword_t exit_code = task->exit_code; if (task->group->doing_group_exit) @@ -317,66 +315,16 @@ static bool reap_if_zombie(struct task *task, struct siginfo_ *info_out, struct return true; // tear down group - // lock(&pids_lock); //mkemkemke Doesn't work - //if(doEnableExtraLocking) //mke Doesn't work - // extra_lockf(task->pid); - - signal_pending = !!(task->pending & ~task->blocked); - while(((signal_pending) || - (critical_region_count(task) > 1) || - (locks_held_count(task))) && - (task->pid > 10)) { - nanosleep(&lock_pause, NULL); - signal_pending = !!(task->pending & ~task->blocked); - } cond_destroy(&task->group->child_exit); - - signal_pending = !!(task->pending & ~task->blocked); - while(((signal_pending) || - (critical_region_count(task) > 1) || - (locks_held_count(task))) && - (task->pid > 10)) { - nanosleep(&lock_pause, NULL); - signal_pending = !!(task->pending & ~task->blocked); - } task_leave_session(task); - - signal_pending = !!(task->pending & ~task->blocked); - while(((signal_pending) || - (critical_region_count(task) > 1) || - (locks_held_count(task))) && - (task->pid > 10)) { - nanosleep(&lock_pause, NULL); - signal_pending = !!(task->pending & ~task->blocked); - } list_remove(&task->group->pgroup); - - signal_pending = !!(task->pending & ~task->blocked); - while(((signal_pending) || - (critical_region_count(task) > 1) || - (locks_held_count(task))) && - (task->pid > 10)) { - nanosleep(&lock_pause, NULL); - signal_pending = !!(task->pending & ~task->blocked); - } free(task->group); - - signal_pending = !!(task->pending & ~task->blocked); - while(((signal_pending) || - (critical_region_count(task) > 1) || - (locks_held_count(task))) && - (task->pid > 10)) { - nanosleep(&lock_pause, NULL); - signal_pending = !!(task->pending & ~task->blocked); - } - // &pids_lock is locked already at this point - //complex_lockt(&pids_lock, 0, __FILE__, __LINE__); - task_destroy(task); - //unlock(&pids_lock); - + + task_destroy(task, 2); return true; } + static bool notify_if_stopped(struct task *task, struct siginfo_ *info_out) { complex_lockt(&task->group->lock, 0, __FILE__, __LINE__); bool stopped = task->group->stopped; @@ -391,13 +339,9 @@ static bool notify_if_stopped(struct task *task, struct siginfo_ *info_out) { static bool reap_if_needed(struct task *task, struct siginfo_ *info_out, 
struct rusage_ *rusage_out, int options) { assert(task_is_leader(task)); - //if(doEnableExtraLocking) - // pthread_mutex_lock(&extra_lock); if ((options & WUNTRACED_ && notify_if_stopped(task, info_out)) || (options & WEXITED_ && reap_if_zombie(task, info_out, rusage_out, options))) { info_out->sig = SIGCHLD_; - // if(doEnableExtraLocking) - // pthread_mutex_unlock(&extra_lock); return true; } lock(&task->ptrace.lock, 0); @@ -408,13 +352,9 @@ static bool reap_if_needed(struct task *task, struct siginfo_ *info_out, struct info_out->child.status = /* task->ptrace.trap_event << 16 |*/ task->ptrace.signal << 8 | 0x7f; task->ptrace.signal = 0; unlock(&task->ptrace.lock); - //if(doEnableExtraLocking) - // pthread_mutex_unlock(&extra_lock); return true; } unlock(&task->ptrace.lock); - //if(doEnableExtraLocking) - // pthread_mutex_unlock(&extra_lock); return false; } diff --git a/kernel/fork.c b/kernel/fork.c index bddf0e8617..c12a87ab60 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -72,7 +72,7 @@ static int copy_task(struct task *task, dword_t flags, addr_t stack, addr_t ptid } else { task->files = fdtable_copy(task->files); if (IS_ERR(task->files)) { - err = PTR_ERR(task->files); + err = (int)PTR_ERR(task->files); goto fail_free_mem; } } @@ -161,7 +161,7 @@ dword_t sys_clone(dword_t flags, addr_t stack, addr_t ptid, addr_t tls, addr_t c // FIXME: task_destroy doesn't free all aspects of the task, which // could cause leaks complex_lockt(&pids_lock, 0, __FILE__, __LINE__); - task_destroy(task); + task_destroy(task, 3); unlock(&pids_lock); return err; @@ -201,11 +201,11 @@ dword_t sys_clone(dword_t flags, addr_t stack, addr_t ptid, addr_t tls, addr_t c return pid; } -dword_t sys_fork() { +dword_t sys_fork(void) { return sys_clone(SIGCHLD_, 0, 0, 0, 0); } -dword_t sys_vfork() { +dword_t sys_vfork(void) { return sys_clone(CLONE_VFORK_ | CLONE_VM_ | SIGCHLD_, 0, 0, 0, 0); } diff --git a/kernel/log.c b/kernel/log.c index 9c730f21f2..f98c6b40a0 100644 --- a/kernel/log.c +++ b/kernel/log.c @@ -232,34 +232,34 @@ void die(const char *msg, ...) 
{ // fun little utility function int current_pid(void) { - modify_critical_region_counter(current, 1, __FILE__, __LINE__); + //modify_critical_region_counter(current, 1, __FILE__, __LINE__); if(current != NULL) { if (current->exiting != true) { - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //modify_critical_region_counter(current, -1, __FILE__, __LINE__); return current->pid; } else { - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //modify_critical_region_counter(current, -1, __FILE__, __LINE__); return -1; } } - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //modify_critical_region_counter(current, -1, __FILE__, __LINE__); return -1; } int current_uid(void) { - modify_critical_region_counter(current, 1, __FILE__, __LINE__); + //modify_critical_region_counter(current, 1, __FILE__, __LINE__); if(current != NULL) { if (current->exiting != true) { - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //modify_critical_region_counter(current, -1, __FILE__, __LINE__); return current->uid; } else { - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //modify_critical_region_counter(current, -1, __FILE__, __LINE__); return -1; } } - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //modify_critical_region_counter(current, -1, __FILE__, __LINE__); return -1; } diff --git a/kernel/signal.c b/kernel/signal.c index 15bd85b303..11452401bb 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -82,7 +82,7 @@ static void deliver_signal_unlocked(struct task *task, int sig, struct siginfo_ lock(&task->waiting_cond_lock, 0); if (task->waiting_cond != NULL) { bool mine = false; - if (trylock(task->waiting_lock) == EBUSY) { + if (trylock(task->waiting_lock) == _EBUSY) { if (pthread_equal(task->waiting_lock->owner, pthread_self())) mine = true; if (!mine) { diff --git a/kernel/task.c b/kernel/task.c index 8315ed10ef..09a2d76975 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -163,8 +163,13 @@ bool should_wait(struct task *t) { return critical_region_count(t) > 1 || locks_held_count(t) || !!(t->pending & ~t->blocked); } -void task_destroy(struct task *task) { - task->exiting = true; +void task_destroy(struct task *task, int caller) { + if(trylock(&task->general_lock) == (_EBUSY)) { // Get it if a lock does not exist + task->exiting = true; + lock(&task->general_lock, 0); + } + + printk("TD(%s:%d): Called by %d\n", task->comm, task->pid, caller); // We use a single loop to wait for the task to be ready to destroy. // This loop replaces all the similar while-loops in the original code. diff --git a/kernel/task.h b/kernel/task.h index faa95c214b..a4e8dc10fa 100644 --- a/kernel/task.h +++ b/kernel/task.h @@ -126,7 +126,7 @@ static inline void task_set_mm(struct task *task, struct mm *mm) { // Ends with an underscore because there's a mach function by the same name struct task *task_create_(struct task *parent); // Removes the process from the process table and frees it. Must be called with pids_lock. -void task_destroy(struct task *task); +void task_destroy(struct task *task, int caller); // misc void vfork_notify(struct task *task); diff --git a/util/sync.c b/util/sync.c index ee65aa9187..0510dfced5 100644 --- a/util/sync.c +++ b/util/sync.c @@ -35,27 +35,34 @@ static bool is_signal_pending(lock_t *lock) { void modify_critical_region_counter(struct task *task, int value, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // value Should only be -1 or 1. 
-mke - if(!doEnableExtraLocking) // If they want to fly by the seat of their pants... -mke + if(!doEnableExtraLocking) {// If they want to fly by the seat of their pants... -mke return; - + } + if(task == NULL) { if(current != NULL) { task = current; } else { return; } - } else if(task->exiting) { // Don't mess with tasks that are exiting. -mke - return; } - if(task->pid > 9) // Bad things happen if this is enabled for low number tasks. For reasons I do not understand. -mke - return; + bool ilocked = false; + + if (trylocknl(&task->general_lock) != _EBUSY) { + ilocked = true; // Make sure this is locked, and unlock it later if we had to lock it. + } + + // if(task->pid > 9) // Bad things happen if this is enabled for low number tasks. For reasons I do not understand. -mke + // return; pthread_mutex_lock(&task->critical_region.lock); if(((task->critical_region.count + value) < 0) && (task->pid > 9)) { // Prevent our unsigned value attempting to go negative. -mke //if(!task->critical_region.count && (value < 0)) { // Prevent our unsigned value attempting to go negative. -mke printk("ERROR: Attempt to decrement critical_region count to be negative, ignoring(%s:%d) (%d - %d) (%s:%d)\n", task->comm, task->pid, task->critical_region.count, value, file, line); + if(ilocked == true) + unlock(&task->general_lock); return; } @@ -69,6 +76,9 @@ void modify_critical_region_counter(struct task *task, int value, __attribute__( task->critical_region.count = task->critical_region.count + value; pthread_mutex_unlock(&task->critical_region.lock); + + if(ilocked == true) + unlock(&task->general_lock); } void modify_critical_region_counter_wrapper(int value, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // sync.h can't know about the definition of task struct due to recursive include files. -mke diff --git a/util/sync.h b/util/sync.h index 729fc8af41..63827da6b7 100644 --- a/util/sync.h +++ b/util/sync.h @@ -82,16 +82,43 @@ static inline void lock_init(lock_t *lock, char lname[16]) { #define LOCK_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, 0} #endif -static inline void atomic_l_lockf(char lname[16], const char *file, int line) { // Make all locks atomic by wrapping them. -mke +static inline void unlock(lock_t *lock) { + //pid_t pid = current_pid(); + + /* if ((pthread_mutex_trylock(&lock->m) == 0) && (pid > 0)) { // Sometimes pid = -1 when it shouldn't be, ignore + printk("WARNING: Process with PID %d trying to unlock an already unlocked lock\n", pid); + //pthread_mutex_unlock(&lock->m); // unlock it again + //return; + } + if ((lock->pid != pid) && (pid > 0) && (lock->pid > 0)) { // Sometimes pid = -1 when it shouldn't be, ignore + printk("WARNING: Process with PID %d trying to unlock a lock owned by PID %d\n", pid, lock->pid); + //return; // Return early or handle the discrepancy in another manner if required + } + */ + lock->owner = zero_init(pthread_t); + pthread_mutex_unlock(&lock->m); + lock->pid = -1; // + lock->comm[0] = 0; + modify_locks_held_count_wrapper(-1); + +#if LOCK_DEBUG + assert(lock->debug.initialized); + assert(lock->debug.file && "Attempting to unlock an unlocked lock"); + lock->debug = (struct lock_debug) { .initialized = true }; +#endif + return; +} + +static inline void atomic_l_lockf(char lname[16], int skiplog, const char *file, int line) { // Make all locks atomic by wrapping them. 
-mke if(!doEnableExtraLocking) return; int res = 0; - modify_critical_region_counter_wrapper(1, file, line); + // modify_critical_region_counter_wrapper(1, file, line); if(atomic_l_lock.pid > 0) { if(current_pid() != atomic_l_lock.pid) { // Potential deadlock situation. Also weird. --mke res = pthread_mutex_lock(&atomic_l_lock.m); atomic_l_lock.pid = current_pid(); - } else { + } else if(!skiplog) { printk("WARNING: Odd attempt by process (%s:%d) to attain same locking lock twice. Ignoring\n", current_comm(), current_pid()); res = 0; } @@ -100,20 +127,20 @@ static inline void atomic_l_lockf(char lname[16], const char *file, int line) { strncpy((char *)&atomic_l_lock.comm, current_comm(), 16); strncpy((char *)&atomic_l_lock.lname, lname, 16); modify_locks_held_count_wrapper(1); - } else { + } else if (!skiplog) { printk("Error on locking lock (%s) Called from %s:%d\n", lname, file, line); } //STRACE("atomic_l_lockf(%d)\n", count); // This is too verbose most of the time - modify_critical_region_counter_wrapper(-1, file, line); + // modify_critical_region_counter_wrapper(-1, file, line); } static inline void atomic_l_unlockf(void) { if(!doEnableExtraLocking) return; int res = 0; - modify_critical_region_counter_wrapper(1, __FILE__, __LINE__); + //modify_critical_region_counter_wrapper(1, __FILE__, __LINE__); strncpy((char *)&atomic_l_lock.lname,"\0", 1); res = pthread_mutex_unlock(&atomic_l_lock.m); if(res) { @@ -123,7 +150,7 @@ static inline void atomic_l_unlockf(void) { } modify_locks_held_count_wrapper(-1); - modify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); + //modify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); //STRACE("atomic_l_unlockf()\n"); // modify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); } @@ -162,60 +189,38 @@ static inline void complex_lockt(lock_t *lock, int log_lock, __attribute__((unus } lock->owner = pthread_self(); - //lock->pid = current_pid(); - //lock->uid = current_uid(); - //strncpy(lock->comm, current_comm(), sizeof(lock->comm) - 1); - //lock->comm[sizeof(lock->comm) - 1] = '\0'; // Null-terminate just in case + lock->pid = current_pid(); + lock->uid = current_uid(); + strncpy(lock->comm, current_comm(), sizeof(lock->comm) - 1); + lock->comm[sizeof(lock->comm) - 1] = '\0'; // Null-terminate just in case } static inline void __lock(lock_t *lock, int log_lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { + if(!strcmp(lock->lname, "task_creat_gen")) // kluge. 
This means the lock is new, and SHOULD be unlocked + unlock(lock); + if(!log_lock) { modify_critical_region_counter_wrapper(1,__FILE__, __LINE__); pthread_mutex_lock(&lock->m); modify_locks_held_count_wrapper(1); lock->owner = pthread_self(); - //lock->pid = current_pid(); - //lock->uid = current_uid(); - //strncpy(lock->comm, current_comm(), 16); + lock->pid = current_pid(); + lock->uid = current_uid(); + strncpy(lock->comm, current_comm(), 16); modify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); } else { pthread_mutex_lock(&lock->m); lock->owner = pthread_self(); - //lock->pid = current_pid(); - //lock->uid = current_uid(); - //strncpy(lock->comm, current_comm(), 16); + lock->pid = current_pid(); + lock->uid = current_uid(); + strncpy(lock->comm, current_comm(), 16); } return; } #define lock(lock, log_lock) __lock(lock, log_lock, __FILE__, __LINE__) -static inline void unlock(lock_t *lock) { - //pid_t pid = current_pid(); - - /* if ((pthread_mutex_trylock(&lock->m) == 0) && (pid > 0)) { // Sometimes pid = -1 when it shouldn't be, ignore - printk("WARNING: Process with PID %d trying to unlock an already unlocked lock\n", pid); - //pthread_mutex_unlock(&lock->m); // unlock it again - //return; - } - if ((lock->pid != pid) && (pid > 0) && (lock->pid > 0)) { // Sometimes pid = -1 when it shouldn't be, ignore - printk("WARNING: Process with PID %d trying to unlock a lock owned by PID %d\n", pid, lock->pid); - //return; // Return early or handle the discrepancy in another manner if required - } - */ - lock->owner = zero_init(pthread_t); - pthread_mutex_unlock(&lock->m); - lock->pid = -1; // - lock->comm[0] = 0; - modify_locks_held_count_wrapper(-1); - -#if LOCK_DEBUG - assert(lock->debug.initialized); - assert(lock->debug.file && "Attempting to unlock an unlocked lock"); - lock->debug = (struct lock_debug) { .initialized = true }; -#endif - return; -} + typedef struct { pthread_rwlock_t l; @@ -274,7 +279,7 @@ static inline void loop_lock_generic(wrlock_t *lock, const char *file, int line, } atomic_l_unlockf(); nanosleep(&lock_pause, NULL); - atomic_l_lockf(is_write ? "llw\0" : "ll_read\0", __FILE__, __LINE__); + atomic_l_lockf(is_write ? "llw\0" : "ll_read\0", 0, __FILE__, __LINE__); } modify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); @@ -303,7 +308,7 @@ static inline void _read_unlock(wrlock_t *lock, __attribute__((unused)) const ch static inline void read_unlock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { if(lock->pid != current_pid() && (lock->pid != -1)) { - atomic_l_lockf("r_unlock\0", __FILE__, __LINE__); + atomic_l_lockf("r_unlock\0", 0, __FILE__, __LINE__); _read_unlock(lock, file, line); } else { // We can unlock our own lock without additional locking. -mke _read_unlock(lock, file, line); @@ -329,7 +334,7 @@ static inline void _write_unlock(wrlock_t *lock, __attribute__((unused)) const c } static inline void write_unlock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // Wrap it. 
External calls lock, internal calls using _write_unlock() don't -mke - atomic_l_lockf("w_unlock\0", __FILE__, __LINE__); + atomic_l_lockf("w_unlock\0", 0, __FILE__, __LINE__); _write_unlock(lock, file, line); atomic_l_unlockf(); return; @@ -349,14 +354,14 @@ static inline void __write_lock(wrlock_t *lock, const char *file, int line) { // } static inline void _write_lock(wrlock_t *lock, const char *file, int line) { - atomic_l_lockf("_w_lock", __FILE__, __LINE__); + atomic_l_lockf("_w_lock", 0, __FILE__, __LINE__); __write_lock(lock, file, line); atomic_l_unlockf(); } static inline int trylockw(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { //modify_critical_region_counter_wrapper(1,__FILE__, __LINE__); - atomic_l_lockf("trylockw\0", __FILE__, __LINE__); + atomic_l_lockf("trylockw\0", 0, __FILE__, __LINE__); int status = pthread_rwlock_trywrlock(&lock->l); atomic_l_unlockf(); #if LOCK_DEBUG @@ -380,7 +385,7 @@ static inline int trylockw(wrlock_t *lock, __attribute__((unused)) const char *f static inline int trylock(lock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { //modify_critical_region_counter_wrapper(1,__FILE__, __LINE__); - atomic_l_lockf("trylock\0", __FILE__, __LINE__); + atomic_l_lockf("trylock\0", 0, __FILE__, __LINE__); int status = pthread_mutex_trylock(&lock->m); atomic_l_unlockf(); #if LOCK_DEBUG @@ -403,6 +408,31 @@ static inline int trylock(lock_t *lock, __attribute__((unused)) const char *file #define trylock(lock) trylock(lock, __FILE__, __LINE__) +static inline int trylocknl(lock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { + //Don't log, avoid recursion + atomic_l_lockf("trylock\0", 1, __FILE__, __LINE__); + int status = pthread_mutex_trylock(&lock->m); + atomic_l_unlockf(); +#if LOCK_DEBUG + if (!status) { + lock->debug.file = file; + lock->debug.line = line; + extern int current_pid(void); + lock->debug.pid = current_pid(); + } +#endif + if((!status) && (current_pid() > 10)) {// iSH-AOK crashes if low number processes are not excluded. Might be able to go lower then 10? -mke + modify_locks_held_count_wrapper(1); + + //STRACE("trylock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); + lock->pid = current_pid(); + strncpy(lock->comm, current_comm(), 16); + } + return status; +} + +#define trylocknl(lock) trylocknl(lock, __FILE__, __LINE__) + // conditions, implemented using pthread conditions but hacked so you can also // be woken by a signal @@ -471,7 +501,7 @@ static inline void lock_destroy(wrlock_t *lock) { nanosleep(&lock_pause, NULL); } - atomic_l_lockf("l_destroy\0", __FILE__, __LINE__); + atomic_l_lockf("l_destroy\0", 0, __FILE__, __LINE__); _lock_destroy(lock); atomic_l_unlockf(); } @@ -506,7 +536,7 @@ static inline void _read_lock(wrlock_t *lock, __attribute__((unused)) const char } static inline void read_lock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // Wrapper so that external calls lock, internal calls using _read_unlock() don't -mke - atomic_l_lockf("r_lock\0", __FILE__, __LINE__); + atomic_l_lockf("r_lock\0", 0, __FILE__, __LINE__); _read_lock(lock, file, line); atomic_l_unlockf(); } @@ -515,7 +545,7 @@ static inline void read_lock(wrlock_t *lock, __attribute__((unused)) const char static inline void read_to_write_lock(wrlock_t *lock) { // Try to atomically swap a RO lock to a Write lock. 
-mke modify_critical_region_counter_wrapper(1, __FILE__, __LINE__); - atomic_l_lockf("rtw_lock\0", __FILE__, __LINE__); + atomic_l_lockf("rtw_lock\0", 0, __FILE__, __LINE__); _read_unlock(lock, __FILE__, __LINE__); __write_lock(lock, __FILE__, __LINE__); atomic_l_unlockf(); @@ -524,7 +554,7 @@ static inline void read_to_write_lock(wrlock_t *lock) { // Try to atomically sw static inline void write_to_read_lock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // Try to atomically swap a Write lock to a RO lock. -mke modify_critical_region_counter_wrapper(1, __FILE__, __LINE__); - atomic_l_lockf("wtr_lock\0", __FILE__, __LINE__); + atomic_l_lockf("wtr_lock\0", 0, __FILE__, __LINE__); _write_unlock(lock, file, line); _read_lock(lock, file, line); atomic_l_unlockf(); @@ -533,7 +563,7 @@ static inline void write_to_read_lock(wrlock_t *lock, __attribute__((unused)) co static inline void write_unlock_and_destroy(wrlock_t *lock) { modify_critical_region_counter_wrapper(1, __FILE__, __LINE__); - atomic_l_lockf("wuad_lock\0", __FILE__, __LINE__); + atomic_l_lockf("wuad_lock\0", 0, __FILE__, __LINE__); _write_unlock(lock, __FILE__, __LINE__); _lock_destroy(lock); atomic_l_unlockf(); @@ -541,7 +571,7 @@ static inline void write_unlock_and_destroy(wrlock_t *lock) { } static inline void read_unlock_and_destroy(wrlock_t *lock) { - atomic_l_lockf("ruad_lock", __FILE__, __LINE__); + atomic_l_lockf("ruad_lock", 0, __FILE__, __LINE__); if(trylockw(lock)) // It should be locked, but just in case. Likely masking underlying issue. -mke _read_unlock(lock, __FILE__, __LINE__); _lock_destroy(lock); From e4fd38ebf4c88c828b15f71b81ccbdb452aca8c5 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Sun, 26 Nov 2023 12:39:02 -0800 Subject: [PATCH 03/23] o More stability fixes --- app/AppDelegate.m | 6 +----- kernel/log.c | 18 ++++++++++-------- kernel/task.c | 7 ++++++- util/sync.c | 7 +++++-- 4 files changed, 22 insertions(+), 16 deletions(-) diff --git a/app/AppDelegate.m b/app/AppDelegate.m index e76eac8214..b50d68b054 100644 --- a/app/AppDelegate.m +++ b/app/AppDelegate.m @@ -59,11 +59,7 @@ static void ios_handle_exit(struct task *task, int code) { } // pid should be saved now since task would be freed pid_t pid = task->pid; - // if(pids_lock.pid == pid) - // unlock(&pids_lock); -// while((critical_region_count(task)) || (locks_held_count(task))) { // Wait for now, task is in one or more critical sections, and/or has locks -// nanosleep(&lock_pause, NULL); -// } + unlock(&pids_lock); unlock(&task->general_lock); dispatch_async(dispatch_get_main_queue(), ^{ diff --git a/kernel/log.c b/kernel/log.c index f98c6b40a0..0b59397431 100644 --- a/kernel/log.c +++ b/kernel/log.c @@ -232,39 +232,40 @@ void die(const char *msg, ...) 
{ // fun little utility function int current_pid(void) { - //modify_critical_region_counter(current, 1, __FILE__, __LINE__); + modify_critical_region_counter(current, 1, __FILE__, __LINE__); if(current != NULL) { if (current->exiting != true) { - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_counter(current, -1, __FILE__, __LINE__); return current->pid; } else { - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_counter(current, -1, __FILE__, __LINE__); return -1; } } - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_counter(current, -1, __FILE__, __LINE__); return -1; } int current_uid(void) { - //modify_critical_region_counter(current, 1, __FILE__, __LINE__); + modify_critical_region_counter(current, 1, __FILE__, __LINE__); if(current != NULL) { if (current->exiting != true) { - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_counter(current, -1, __FILE__, __LINE__); return current->uid; } else { - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_counter(current, -1, __FILE__, __LINE__); return -1; } } - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_counter(current, -1, __FILE__, __LINE__); return -1; } char * current_comm(void) { static char comm[16]; + modify_critical_region_counter(current, 1, __FILE__, __LINE__); if(current != NULL) { if(strcmp(current->comm, "")) { strncpy(comm, current->comm, 16); @@ -277,6 +278,7 @@ char * current_comm(void) { return ""; } } + modify_critical_region_counter(current, -1, __FILE__, __LINE__); return ""; } diff --git a/kernel/task.c b/kernel/task.c index 09a2d76975..071cb2be2b 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -197,8 +197,13 @@ void task_destroy(struct task *task, int caller) { unlock(&pids_lock); } +retry: // Free the task's resources. - free(task); + if (!critical_region_count(task)) { + free(task); + } else { + goto retry; + } } void run_at_boot(void) { // Stuff we run only once, at boot time. 
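The retry loop added to task_destroy() above polls critical_region_count(task) and frees the task only once the count reaches zero; as committed, the goto spins without yielding. The same wait can be written as a loop with a short nanosleep back-off, in the spirit of the lock_pause waits used elsewhere in this series. A minimal sketch; the interval and the helper name are assumptions, not code from the tree:

    #include <stdlib.h>
    #include <time.h>

    struct task;                                     /* opaque here */
    unsigned critical_region_count(struct task *t);  /* provided by util/sync.c */

    static const struct timespec backoff = { 0, 100000 }; /* ~0.1 ms pause, assumed */

    static void free_when_quiescent(struct task *task)
    {
        /* same effect as the retry:/goto loop, but yields the CPU between polls */
        while (critical_region_count(task) != 0)
            nanosleep(&backoff, NULL);
        free(task);
    }

Either form still races with a thread that enters a critical region just after the final check; the loop narrows that window, it does not close it.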
diff --git a/util/sync.c b/util/sync.c index 0510dfced5..18ea97203e 100644 --- a/util/sync.c +++ b/util/sync.c @@ -63,6 +63,9 @@ void modify_critical_region_counter(struct task *task, int value, __attribute__( printk("ERROR: Attempt to decrement critical_region count to be negative, ignoring(%s:%d) (%d - %d) (%s:%d)\n", task->comm, task->pid, task->critical_region.count, value, file, line); if(ilocked == true) unlock(&task->general_lock); + + pthread_mutex_unlock(&task->critical_region.lock); + return; } @@ -209,11 +212,11 @@ void sigusr1_handler(void) { // Because sometimes we can't #include "kernel/task.h" -mke unsigned critical_region_count(struct task *task) { unsigned tmp = 0; -// pthread_mutex_lock(task->critical_region.lock); // This would make more + pthread_mutex_lock(&task->critical_region.lock); // This would make more tmp = task->critical_region.count; if(tmp > 1000) // Not likely tmp = 0; - // pthread_mutex_unlock(task->critical_region.lock); + pthread_mutex_unlock(&task->critical_region.lock); return tmp; } From 06f22905178dc4f27d2aa2dfa6e366271c3dc035 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Sun, 26 Nov 2023 16:56:16 -0800 Subject: [PATCH 04/23] o Stability fixes o Fixed bug that caused tasks to be wrongly marked as in a zombie state sometimes --- app/AppDelegate.m | 3 +- .../xcshareddata/xcschemes/iSH.xcscheme | 28 ++++++++++++++++++- kernel/exit.c | 2 +- kernel/task.c | 2 +- util/sync.c | 17 +++-------- 5 files changed, 35 insertions(+), 17 deletions(-) diff --git a/app/AppDelegate.m b/app/AppDelegate.m index b50d68b054..dbb7e91909 100644 --- a/app/AppDelegate.m +++ b/app/AppDelegate.m @@ -49,7 +49,8 @@ static void ios_handle_exit(struct task *task, int code) { if(task->pid > MAX_PID) {// Corruption printk("ERROR: Insane PID in ios_handle_exit(%d)\n", task->pid); unlock(&pids_lock); - unlock(&task->general_lock); + // No reason to unlock the task, it has already been freed. :-( + //unlock(&task->general_lock); return; } if (task->parent != NULL && task->parent->parent != NULL) { diff --git a/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme b/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme index 961f16429a..b7786dac42 100644 --- a/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme +++ b/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme @@ -91,7 +91,6 @@ buildConfiguration = "Debug-ApplePleaseFixFB19282108" selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" - enableAddressSanitizer = "YES" enableASanStackUseAfterReturn = "YES" enableUBSanitizer = "YES" launchStyle = "0" @@ -111,6 +110,33 @@ ReferencedContainer = "container:iSH-AOK.xcodeproj"> + + + + + + + + + + + + general_lock); } - modify_critical_region_counter(current, -1, __FILE__, __LINE__); vfork_notify(current); + modify_critical_region_counter(current, -1, __FILE__, __LINE__); if(current != leader) { task_destroy(current, 1); } else { diff --git a/kernel/task.c b/kernel/task.c index 071cb2be2b..749364b9f6 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -169,7 +169,7 @@ void task_destroy(struct task *task, int caller) { lock(&task->general_lock, 0); } - printk("TD(%s:%d): Called by %d\n", task->comm, task->pid, caller); + //printk("TD(%s:%d): Called by %d\n", task->comm, task->pid, caller); // We use a single loop to wait for the task to be ready to destroy. // This loop replaces all the similar while-loops in the original code. 
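The two util/sync.c hunks around this point work toward the same rule: the patch 03 hunk above starts taking critical_region.lock around the read in critical_region_count(), and the patch 04 hunk that follows keeps that locking (and the unlock on the early-return path) while dropping the old sanity clamp. A bare sketch of the reader/writer pairing; struct region_counter and the two helpers are generic illustrations, not iSH names:

    #include <pthread.h>

    struct region_counter {
        pthread_mutex_t lock;
        unsigned count;
    };

    /* writer: update only while holding the mutex,
       as modify_critical_region_counter() does */
    static void counter_add(struct region_counter *c, int value)
    {
        pthread_mutex_lock(&c->lock);
        c->count += value;
        pthread_mutex_unlock(&c->lock);
    }

    /* reader: take the same mutex so the snapshot is coherent,
       which is what critical_region_count() now does */
    static unsigned counter_get(struct region_counter *c)
    {
        pthread_mutex_lock(&c->lock);
        unsigned tmp = c->count;
        pthread_mutex_unlock(&c->lock);
        return tmp;
    }

An unlocked read of a mutex-guarded counter is a data race in C, so locking the read is a correctness fix rather than tidiness.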
diff --git a/util/sync.c b/util/sync.c index 18ea97203e..efbc11fffe 100644 --- a/util/sync.c +++ b/util/sync.c @@ -53,13 +53,9 @@ void modify_critical_region_counter(struct task *task, int value, __attribute__( ilocked = true; // Make sure this is locked, and unlock it later if we had to lock it. } - // if(task->pid > 9) // Bad things happen if this is enabled for low number tasks. For reasons I do not understand. -mke - // return; - pthread_mutex_lock(&task->critical_region.lock); if(((task->critical_region.count + value) < 0) && (task->pid > 9)) { // Prevent our unsigned value attempting to go negative. -mke - //if(!task->critical_region.count && (value < 0)) { // Prevent our unsigned value attempting to go negative. -mke printk("ERROR: Attempt to decrement critical_region count to be negative, ignoring(%s:%d) (%d - %d) (%s:%d)\n", task->comm, task->pid, task->critical_region.count, value, file, line); if(ilocked == true) unlock(&task->general_lock); @@ -70,12 +66,6 @@ void modify_critical_region_counter(struct task *task, int value, __attribute__( } - /* if((strcmp(task->comm, "easter_egg") == 0) && ( !noprintk)) { // Extra logging for the some command - noprintk = 1; // Avoid recursive logging -mke - printk("INFO: MCRC(%d(%s):%s:%d:%d:%d)\n", task->pid, task->comm, file, line, value, task->critical_region.count + value); - noprintk = 0; - } */ - task->critical_region.count = task->critical_region.count + value; pthread_mutex_unlock(&task->critical_region.lock); @@ -84,7 +74,8 @@ void modify_critical_region_counter(struct task *task, int value, __attribute__( unlock(&task->general_lock); } -void modify_critical_region_counter_wrapper(int value, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // sync.h can't know about the definition of task struct due to recursive include files. -mke +void modify_critical_region_counter_wrapper(int value, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { + // sync.h can't know about the definition of task struct due to recursive include files. 
-mke if((current != NULL) && (doEnableExtraLocking)) modify_critical_region_counter(current, value, file, line); @@ -214,8 +205,8 @@ unsigned critical_region_count(struct task *task) { unsigned tmp = 0; pthread_mutex_lock(&task->critical_region.lock); // This would make more tmp = task->critical_region.count; - if(tmp > 1000) // Not likely - tmp = 0; + // if(tmp > 1000) // Not likely + // tmp = 0; pthread_mutex_unlock(&task->critical_region.lock); return tmp; From 73e9d22ba6caf527527dd9a0d38657b342408265 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Sun, 26 Nov 2023 17:46:11 -0800 Subject: [PATCH 05/23] o Rename mofify_critical_region_counter() to mofify_critical_region_count(), same for wrapper o Broke critical_region_count() in current_comm(), fix --- app/iOSFS.m | 4 +-- emu/memory.c | 34 ++++++++++++------------ emu/tlb.c | 26 +++++++++---------- fs/proc/pid.c | 6 ++--- fs/proc/root.c | 6 ++--- jit/jit.c | 14 +++++----- kernel/exec.c | 4 +-- kernel/exit.c | 14 +++++----- kernel/futex.c | 4 +-- kernel/log.c | 27 +++++++++++--------- kernel/mmap.c | 8 +++--- kernel/resource_locking.h | 3 +-- kernel/signal.c | 8 +++--- kernel/task.c | 4 +-- kernel/task.h | 4 +-- kernel/time.c | 4 +-- kernel/user.c | 18 ++++++------- util/fifo.c | 8 +++--- util/sync.c | 8 +++--- util/sync.h | 54 +++++++++++++++++++-------------------- 20 files changed, 129 insertions(+), 129 deletions(-) diff --git a/app/iOSFS.m b/app/iOSFS.m index ec030eb3bb..71aedc982b 100644 --- a/app/iOSFS.m +++ b/app/iOSFS.m @@ -239,7 +239,7 @@ static int combine_error(NSError *coordinatorError, int err) { __block NSError *error = nil; __block struct fd *fd; __block dispatch_semaphore_t file_opened = dispatch_semaphore_create(0); - modify_critical_region_counter_wrapper(1, __FILE__, __LINE__); + modify_critical_region_count_wrapper(1, __FILE__, __LINE__); dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void){ void (^operation)(NSURL *url) = ^(NSURL *url) { fd = realfs_open(mount, path_for_url_in_mount(mount, url, path), flags, mode); @@ -265,7 +265,7 @@ static int combine_error(NSError *coordinatorError, int err) { } [coordinator coordinateReadingItemAtURL:url options:options error:&error byAccessor:operation]; }); - modify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); + modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); dispatch_semaphore_wait(file_opened, DISPATCH_TIME_FOREVER); diff --git a/emu/memory.c b/emu/memory.c index 7c4644df64..cdc0de3216 100644 --- a/emu/memory.c +++ b/emu/memory.c @@ -67,7 +67,7 @@ void mem_destroy(struct mem *mem) { free(mem->pgdir[i]); } - //modify_critical_region_counter(current, 1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); do { nanosleep(&lock_pause, NULL); @@ -79,7 +79,7 @@ void mem_destroy(struct mem *mem) { write_unlock_and_destroy(&mem->lock); - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } @@ -97,34 +97,34 @@ static struct pt_entry *mem_pt_new(struct mem *mem, page_t page) { struct pt_entry *mem_pt(struct mem *mem, page_t page) { - //modify_critical_region_counter(current, 1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); if (mem->pgdir[PGDIR_TOP(page)] != NULL) { // Check if defined. Likely still leaves a potential race condition as no locking currently. 
-MKE FIXME struct pt_entry *pgdir = mem->pgdir[PGDIR_TOP(page)]; if (pgdir == NULL) { - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return NULL; } struct pt_entry *entry = &pgdir[PGDIR_BOTTOM(page)]; if (entry->data == NULL) { - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return NULL; } - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return entry; } else { mem->pgdir[PGDIR_TOP(page)] = NULL; - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return NULL; } - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } static void mem_pt_del(struct mem *mem, page_t page) { - //modify_critical_region_counter(current, 1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); struct pt_entry *entry = mem_pt(mem, page); if (entry != NULL) { while(critical_region_count(current) > 4) { // mark @@ -132,17 +132,17 @@ static void mem_pt_del(struct mem *mem, page_t page) { } entry->data = NULL; } - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } void mem_next_page(struct mem *mem, page_t *page) { (*page)++; if (*page >= MEM_PAGES) return; - //modify_critical_region_counter(current, 1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); while (*page < MEM_PAGES && mem->pgdir[PGDIR_TOP(*page)] == NULL) *page = (*page - PGDIR_BOTTOM(*page)) + MEM_PGDIR_SIZE; - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } page_t pt_find_hole(struct mem *mem, pages_t size) { @@ -346,7 +346,7 @@ void *mem_ptr(struct mem *mem, addr_t addr, int type) { if (type != MEM_WRITE_PTRACE && !(entry->flags & P_WRITE)) return NULL; - ////modify_critical_region_counter(current, 1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); if (type == MEM_WRITE_PTRACE) { // TODO: Is P_WRITE really correct? The page shouldn't be writable without ptrace. @@ -361,17 +361,17 @@ void *mem_ptr(struct mem *mem, addr_t addr, int type) { if (entry->flags & P_COW) { lock(¤t->general_lock, 0); // prevent elf_exec from doing mm_release while we are in flight? -mke - //modify_critical_region_counter(current, 1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); read_to_write_lock(&mem->lock); void *copy = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); void *data = (char *) entry->data->data + entry->offset; - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); // copy/paste from above - modify_critical_region_counter(current, 1,__FILE__, __LINE__); + modify_critical_region_count(current, 1,__FILE__, __LINE__); //read_to_write_lock(&mem->lock); memcpy(copy, data, PAGE_SIZE); //mkemkemke Crashes here a lot when running both the go and parallel make test. 
01 June 2022 - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_count(current, -1, __FILE__, __LINE__); pt_map(mem, page, 1, copy, 0, entry->flags &~ P_COW); unlock(¤t->general_lock); write_to_read_lock(&mem->lock, __FILE__, __LINE__); diff --git a/emu/tlb.c b/emu/tlb.c index 43c3b20d2d..4af0128068 100644 --- a/emu/tlb.c +++ b/emu/tlb.c @@ -5,16 +5,16 @@ #include "kernel/resource_locking.h" void tlb_refresh(struct tlb *tlb, struct mmu *mmu) { - //modify_critical_region_counter(current, 1, __FILE__, __LINE__); // WORKING ON -mke + //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); // WORKING ON -mke if (tlb->mmu == mmu && tlb->mem_changes == mmu->changes) { - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return; } tlb->mmu = mmu; tlb->dirty_page = TLB_PAGE_EMPTY; tlb->mem_changes = mmu->changes; tlb_flush(tlb); - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } void tlb_flush(struct tlb *tlb) { @@ -24,48 +24,48 @@ void tlb_flush(struct tlb *tlb) { } void tlb_free(struct tlb *tlb) { - ////modify_critical_region_counter(current, 1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); free(tlb); - ////modify_critical_region_counter(current, -1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } bool __tlb_read_cross_page(struct tlb *tlb, addr_t addr, char *value, unsigned size) { - ////modify_critical_region_counter(current, 1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); char *ptr1 = __tlb_read_ptr(tlb, addr); if (ptr1 == NULL) { - ////modify_critical_region_counter(current, -1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return false; } char *ptr2 = __tlb_read_ptr(tlb, (PAGE(addr) + 1) << PAGE_BITS); if (ptr2 == NULL) { - ////modify_critical_region_counter(current, -1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return false; } size_t part1 = PAGE_SIZE - PGOFFSET(addr); assert(part1 < size); memcpy(value, ptr1, part1); memcpy(value + part1, ptr2, size - part1); - ////modify_critical_region_counter(current, -1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return true; } bool __tlb_write_cross_page(struct tlb *tlb, addr_t addr, const char *value, unsigned size) { - ////modify_critical_region_counter(current, 1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); char *ptr1 = __tlb_write_ptr(tlb, addr); if (ptr1 == NULL) { - ////modify_critical_region_counter(current, -1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return false; } char *ptr2 = __tlb_write_ptr(tlb, (PAGE(addr) + 1) << PAGE_BITS); if (ptr2 == NULL) { - ////modify_critical_region_counter(current, -1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return false; } size_t part1 = PAGE_SIZE - PGOFFSET(addr); assert(part1 < size); memcpy(ptr1, value, part1); memcpy(ptr2, value + part1, size - part1); - ////modify_critical_region_counter(current, -1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return true; } diff --git a/fs/proc/pid.c b/fs/proc/pid.c index 
76583565dd..184e152999 100644 --- a/fs/proc/pid.c +++ b/fs/proc/pid.c @@ -35,7 +35,7 @@ static int proc_pid_stat_show(struct proc_entry *entry, struct proc_data *buf) { if ((task == NULL) || (task->exiting == true)) return _ESRCH; - ////modify_critical_region_counter(task, 1, __FILE__, __LINE__); + ////mofify_critical_region_counter(task, 1, __FILE__, __LINE__); if(!strcmp(task->general_lock.lname, "task_creat_gen")) // Work around. Sometimes the general lock is locked when it shouldn't be unlock(&task->general_lock); lock(&task->general_lock, 0); @@ -117,7 +117,7 @@ static int proc_pid_stat_show(struct proc_entry *entry, struct proc_data *buf) { //unlock(&task->sighand->lock); unlock(&task->group->lock); unlock(&task->general_lock); - ////modify_critical_region_counter(task, -1, __FILE__, __LINE__); + ////mofify_critical_region_counter(task, -1, __FILE__, __LINE__); proc_put_task(task); return 0; } @@ -167,7 +167,7 @@ static int proc_pid_cmdline_show(struct proc_entry *entry, struct proc_data *buf if ((task == NULL) || (task->exiting == true)) return _ESRCH; - ////modify_critical_region_counter(task, 1, __FILE__, __LINE__); + ////mofify_critical_region_counter(task, 1, __FILE__, __LINE__); int err = 0; lock(&task->general_lock, 0); diff --git a/fs/proc/root.c b/fs/proc/root.c index 8e709d9103..d156dadd2e 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -328,19 +328,19 @@ static bool proc_root_readdir(struct proc_entry *UNUSED(entry), unsigned long *i pid_t_ pid = *index - PROC_ROOT_LEN; if (pid <= MAX_PID) { - modify_critical_region_counter(current, 1, __FILE__, __LINE__); + modify_critical_region_count(current, 1, __FILE__, __LINE__); //lock(&pids_lock, 0); do { pid++; } while (pid <= MAX_PID && pid_get_task(pid) == NULL); //unlock(&pids_lock); - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_count(current, -1, __FILE__, __LINE__); if (pid > MAX_PID) { return false; } *next_entry = (struct proc_entry) {&proc_pid, .pid = pid}; *index = pid + PROC_ROOT_LEN; - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return true; } diff --git a/jit/jit.c b/jit/jit.c index 6eb1416474..0d9ab61087 100644 --- a/jit/jit.c +++ b/jit/jit.c @@ -87,9 +87,9 @@ void jit_invalidate_page(struct jit *jit, page_t page) { while(critical_region_count(current) > 4) { // It's all a bit magic, but I think this is doing something useful. 
-mke nanosleep(&lock_pause, NULL); } - //modify_critical_region_counter(current, 1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); jit_invalidate_range(jit, page, page + 1); - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } void jit_invalidate_all(struct jit *jit) { @@ -227,7 +227,7 @@ static int cpu_step_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) { addr_t ip = frame->cpu.eip; size_t cache_index = jit_cache_hash(ip); struct jit_block *block = cache[cache_index]; - //////modify_critical_region_counter(current, 1, __FILE__, __LINE__); + //////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); if (block == NULL || block->addr != ip) { lock(&jit->lock, 0); block = jit_lookup(jit, ip); @@ -240,7 +240,7 @@ static int cpu_step_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) { cache[cache_index] = block; unlock(&jit->lock); } - //////modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); struct jit_block *last_block = frame->last_block; if (last_block != NULL && (last_block->jump_ip[0] != NULL || @@ -253,9 +253,9 @@ static int cpu_step_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) { if (last_block->jump_ip[i] != NULL && (*last_block->jump_ip[i] & 0xffffffff) == block->addr) { *last_block->jump_ip[i] = (unsigned long) block->code; - //modify_critical_region_counter(current, 1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); list_add(&block->jumps_from[i], &last_block->jumps_from_links[i]); - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } } } @@ -263,7 +263,7 @@ static int cpu_step_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) { unlock(&jit->lock); } - //////modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); frame->last_block = block; diff --git a/kernel/exec.c b/kernel/exec.c index 8d3268f0d6..b3448da4d1 100644 --- a/kernel/exec.c +++ b/kernel/exec.c @@ -109,12 +109,12 @@ static int load_entry(struct prg_header ph, addr_t bias, struct fd *fd) { if (tail_size != 0) { // Unlock and lock the mem because the user functions must be // called without locking mem. - //modify_critical_region_counter(current, 1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); if(trylockw(¤t->mem->lock)) // Test to see if it is actually locked. This is likely masking an underlying problem. 
-mke write_unlock(¤t->mem->lock, __FILE__, __LINE__); user_memset(file_end, 0, tail_size); write_lock(¤t->mem->lock); - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } if (tail_size > bss_size) tail_size = bss_size; diff --git a/kernel/exit.c b/kernel/exit.c index 278472f6c5..0494130ed1 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -130,7 +130,7 @@ noreturn void do_exit(int status) { unlock(¤t->group->lock); // the actual freeing needs pids_lock - modify_critical_region_counter(current, 1, __FILE__, __LINE__); + modify_critical_region_count(current, 1, __FILE__, __LINE__); complex_lockt(&pids_lock, 0, __FILE__, __LINE__); // release the sighand signal_pending = !!(current->pending & ~current->blocked); @@ -199,7 +199,7 @@ noreturn void do_exit(int status) { } vfork_notify(current); - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_count(current, -1, __FILE__, __LINE__); if(current != leader) { task_destroy(current, 1); } else { @@ -233,7 +233,7 @@ noreturn void do_exit_group(int status) { modify_locks_held_count(current, tmpvar); // Reset to zero -mke } - modify_critical_region_counter(current, 1, __FILE__, __LINE__); + modify_critical_region_count(current, 1, __FILE__, __LINE__); list_for_each_entry(&group->threads, task, group_links) { task->exiting = true; deliver_signal(task, SIGKILL_, SIGINFO_NIL); @@ -242,7 +242,7 @@ noreturn void do_exit_group(int status) { } unlock(&pids_lock); - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_count(current, -1, __FILE__, __LINE__); unlock(&group->lock); //if(current->pid <= MAX_PID) // abort if crazy. -mke do_exit(status); @@ -365,7 +365,7 @@ int do_wait(int idtype, pid_t_ id, struct siginfo_ *info, struct rusage_ *rusage return _EINVAL; complex_lockt(&pids_lock, 0, __FILE__, __LINE__); - modify_critical_region_counter(current, 1, __FILE__, __LINE__); + modify_critical_region_count(current, 1, __FILE__, __LINE__); int err; bool got_signal = false; @@ -425,12 +425,12 @@ int do_wait(int idtype, pid_t_ id, struct siginfo_ *info, struct rusage_ *rusage info->sig = SIGCHLD_; found_something: - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_count(current, -1, __FILE__, __LINE__); unlock(&pids_lock); return 0; error: - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_count(current, -1, __FILE__, __LINE__); unlock(&pids_lock); return err; } diff --git a/kernel/futex.c b/kernel/futex.c index f8d39ea06a..40924df0e9 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -301,10 +301,10 @@ dword_t sys_futex(addr_t uaddr, dword_t op, dword_t val, addr_t timeout_or_val2, switch (op & FUTEX_CMD_MASK_) { case FUTEX_WAIT_: STRACE("futex(FUTEX_WAIT, %#x, %d, 0x%x {%ds %dns}) = ...\n", uaddr, val, timeout_or_val2, timeout.tv_sec, timeout.tv_nsec); - modify_critical_region_counter(current, 1, __FILE__, __LINE__); + modify_critical_region_count(current, 1, __FILE__, __LINE__); dword_t return_val; return_val = futex_wait(uaddr, val, timeout_or_val2 ? 
&timeout : NULL); - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_count(current, -1, __FILE__, __LINE__); return return_val; case FUTEX_WAKE_: STRACE("futex(FUTEX_WAKE, %#x, %d)", uaddr, val); diff --git a/kernel/log.c b/kernel/log.c index 0b59397431..458f56ba45 100644 --- a/kernel/log.c +++ b/kernel/log.c @@ -117,11 +117,11 @@ static size_t do_syslog(int type, addr_t buf_addr, int_t len) { } } size_t sys_syslog(int_t type, addr_t buf_addr, int_t len) { - ////modify_critical_region_counter(current, 1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); lock(&log_lock, 0); size_t retval = do_syslog(type, buf_addr, len); unlock(&log_lock); - ////modify_critical_region_counter(current, -1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return retval; } @@ -232,53 +232,56 @@ void die(const char *msg, ...) { // fun little utility function int current_pid(void) { - modify_critical_region_counter(current, 1, __FILE__, __LINE__); + modify_critical_region_count(current, 1, __FILE__, __LINE__); if(current != NULL) { if (current->exiting != true) { - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_count(current, -1, __FILE__, __LINE__); return current->pid; } else { - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_count(current, -1, __FILE__, __LINE__); return -1; } } - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_count(current, -1, __FILE__, __LINE__); return -1; } int current_uid(void) { - modify_critical_region_counter(current, 1, __FILE__, __LINE__); + modify_critical_region_count(current, 1, __FILE__, __LINE__); if(current != NULL) { if (current->exiting != true) { - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_count(current, -1, __FILE__, __LINE__); return current->uid; } else { - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_count(current, -1, __FILE__, __LINE__); return -1; } } - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_count(current, -1, __FILE__, __LINE__); return -1; } char * current_comm(void) { static char comm[16]; - modify_critical_region_counter(current, 1, __FILE__, __LINE__); + modify_critical_region_count(current, 1, __FILE__, __LINE__); if(current != NULL) { if(strcmp(current->comm, "")) { strncpy(comm, current->comm, 16); } else { + modify_critical_region_count(current, -1, __FILE__, __LINE__); return ""; } if (current->exiting != true) { + modify_critical_region_count(current, -1, __FILE__, __LINE__); return comm; } else { + modify_critical_region_count(current, -1, __FILE__, __LINE__); return ""; } } - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_count(current, -1, __FILE__, __LINE__); return ""; } diff --git a/kernel/mmap.c b/kernel/mmap.c index 57ce9994bc..cdbd274f86 100644 --- a/kernel/mmap.c +++ b/kernel/mmap.c @@ -106,11 +106,11 @@ static addr_t mmap_common(addr_t addr, dword_t len, dword_t prot, dword_t flags, if ((flags & MMAP_PRIVATE) && (flags & MMAP_SHARED)) return _EINVAL; - //modify_critical_region_counter(current, 1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); write_lock(¤t->mem->lock); addr_t res = do_mmap(addr, len, prot, flags, fd_no, offset); write_unlock(¤t->mem->lock, __FILE__, 
__LINE__); - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return res; } @@ -156,11 +156,11 @@ int_t sys_munmap(addr_t addr, uint_t len) { if (len == 0) return _EINVAL; - //modify_critical_region_counter(current, 1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); write_lock(¤t->mem->lock); int err = pt_unmap_always(current->mem, PAGE(addr), PAGE_ROUND_UP(len)); write_unlock(¤t->mem->lock, __FILE__, __LINE__); - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); if (err < 0) return _EINVAL; diff --git a/kernel/resource_locking.h b/kernel/resource_locking.h index 4c067c2a2c..eeab0e3a6a 100644 --- a/kernel/resource_locking.h +++ b/kernel/resource_locking.h @@ -2,8 +2,7 @@ // Because sometimes we can't #include "kernel/task.h" -mke extern unsigned critical_region_count(struct task*); -//#define modify_critical_region_counter(task, int) __modify_critical_region_count(task, int, __FILE__, __LINE__) -extern void modify_critical_region_counter(struct task*, int, char*, int); +extern void modify_critical_region_count(struct task*, int, char*, int); extern unsigned locks_held_count(struct task*); extern void modify_locks_held_count(struct task*, int); extern bool current_is_valid(void); diff --git a/kernel/signal.c b/kernel/signal.c index 11452401bb..30fab10b14 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -100,11 +100,11 @@ static void deliver_signal_unlocked(struct task *task, int sig, struct siginfo_ } void deliver_signal(struct task *task, int sig, struct siginfo_ info) { - ////modify_critical_region_counter(task, 1, __FILE__, __LINE__); // Doesn't work. -mke + ////mofify_critical_region_counter(task, 1, __FILE__, __LINE__); // Doesn't work. 
-mke lock(&task->sighand->lock, 0); deliver_signal_unlocked(task, sig, info); unlock(&task->sighand->lock); - ////modify_critical_region_counter(task, -1, __FILE__, __LINE__); + ////mofify_critical_region_counter(task, -1, __FILE__, __LINE__); } void send_signal(struct task *task, int sig, struct siginfo_ info) { @@ -368,10 +368,10 @@ void receive_signals(void) { // Should this function have a check for critical_ int sig = sigqueue->info.sig; if (sigset_has(blocked, sig)) continue; - //modify_critical_region_counter(current, 1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); list_remove(&sigqueue->queue); sigset_del(&current->pending, sig); - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); if (current->ptrace.traced && sig != SIGKILL_) { // This notifies the parent, goes to sleep, and waits for the diff --git a/kernel/task.c b/kernel/task.c index 749364b9f6..eb5a9d4a22 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -69,7 +69,7 @@ struct pid *pid_get_last_allocated(void) { } dword_t get_count_of_blocked_tasks(void) { - modify_critical_region_counter(current, 1, __FILE__, __LINE__); + modify_critical_region_count(current, 1, __FILE__, __LINE__); dword_t res = 0; struct pid *pid_entry; complex_lockt(&pids_lock, 0, __FILE__, __LINE__); @@ -78,7 +78,7 @@ dword_t get_count_of_blocked_tasks(void) { res++; } } - modify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_count(current, -1, __FILE__, __LINE__); unlock(&pids_lock); return res; } diff --git a/kernel/task.h b/kernel/task.h index a4e8dc10fa..1c1821be65 100644 --- a/kernel/task.h +++ b/kernel/task.h @@ -226,14 +226,14 @@ void update_thread_name(void); // of functions which can block the task, we mark our task as blocked and // unblock it after the function is executed. 
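[Editor's note] The comment above describes the pattern these wrappers implement: rather than restructuring hundreds of call sites, a potentially blocking host call is simply bracketed so other threads can tell the task is parked in I/O. A minimal usage sketch, assuming only the inline wrappers defined just below in this hunk; host_read_example() is a hypothetical caller, not part of the patch:

    #include <unistd.h>    // read()

    // Bracket a host call that may sleep: count into the critical region
    // and raise io_block first, then undo both once the call returns.
    static ssize_t host_read_example(int fd, void *buf, size_t size) {
        task_may_block_start();             // marks current as blocked in I/O
        ssize_t n = read(fd, buf, size);    // may sleep in the host kernel
        task_may_block_end();               // clears io_block, drops the count
        return n;
    }
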
__attribute__((always_inline)) inline int task_may_block_start(void) { - modify_critical_region_counter_wrapper(1, __FILE__, __LINE__); + modify_critical_region_count_wrapper(1, __FILE__, __LINE__); current->io_block = 1; return 0; } __attribute__((always_inline)) inline int task_may_block_end(void) { current->io_block = 0; - modify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); + modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); return 0; } diff --git a/kernel/time.c b/kernel/time.c index f78af239e2..635eaec719 100644 --- a/kernel/time.c +++ b/kernel/time.c @@ -182,9 +182,9 @@ static void itimer_notify(struct task *task) { struct siginfo_ info = { .code = SI_TIMER_, }; - ////modify_critical_region_counter(task, 1, __FILE__, __LINE__); + ////mofify_critical_region_counter(task, 1, __FILE__, __LINE__); send_signal(task, SIGALRM_, info); - ////modify_critical_region_counter(task, -1, __FILE__, __LINE__); + ////mofify_critical_region_counter(task, -1, __FILE__, __LINE__); } static long itimer_set(struct tgroup *group, int which, struct timer_spec spec, struct timer_spec *old_spec) { diff --git a/kernel/user.c b/kernel/user.c index 6413ca7325..e9fb018855 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -8,20 +8,20 @@ extern pthread_mutex_t extra_lock; static int __user_read_task(struct task *task, addr_t addr, void *buf, size_t count) { char *cbuf = (char *) buf; addr_t p = addr; - ////modify_critical_region_counter(task, 1, __FILE__, __LINE__); // Everyone who calls this function sets alrady + ////mofify_critical_region_counter(task, 1, __FILE__, __LINE__); // Everyone who calls this function sets alrady while (p < addr + count) { addr_t chunk_end = (PAGE(p) + 1) << PAGE_BITS; if (chunk_end > addr + count) chunk_end = addr + count; const char *ptr = mem_ptr(task->mem, p, MEM_READ); if (ptr == NULL) { - // //modify_critical_region_counter(task, -1, __FILE__, __LINE__); + // //mofify_critical_region_counter(task, -1, __FILE__, __LINE__); return 1; } memcpy(&cbuf[p - addr], ptr, chunk_end - p); p = chunk_end; } - ////modify_critical_region_counter(task, -1, __FILE__, __LINE__); + ////mofify_critical_region_counter(task, -1, __FILE__, __LINE__); return 0; } @@ -53,9 +53,9 @@ static int __user_write_task(struct task *task, addr_t addr, const void *buf, si int user_read_task(struct task *task, addr_t addr, void *buf, size_t count) { read_lock(&task->mem->lock, __FILE__, __LINE__); - //modify_critical_region_counter(task, 1, __FILE__, __LINE__); + //mofify_critical_region_counter(task, 1, __FILE__, __LINE__); int res = __user_read_task(task, addr, buf, count); - //modify_critical_region_counter(task, -1, __FILE__, __LINE__); + //mofify_critical_region_counter(task, -1, __FILE__, __LINE__); read_unlock(&task->mem->lock, __FILE__, __LINE__); return res; @@ -103,9 +103,9 @@ int user_read_string(addr_t addr, char *buf, size_t max) { } int user_write_string(addr_t addr, const char *buf) { - ////modify_critical_region_counter(current, 1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); if (addr == 0) { - ////modify_critical_region_counter(current, -1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return 1; } read_lock(¤t->mem->lock, __FILE__, __LINE__); @@ -115,10 +115,10 @@ int user_write_string(addr_t addr, const char *buf) { read_unlock(¤t->mem->lock, __FILE__, __LINE__); return 1; } - //modify_critical_region_counter(current, -1, __FILE__, __LINE__); + 
//mofify_critical_region_counter(current, -1, __FILE__, __LINE__); i++; } while (buf[i - 1] != '\0'); read_unlock(¤t->mem->lock, __FILE__, __LINE__); - ////modify_critical_region_counter(current, -1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return 0; } diff --git a/util/fifo.c b/util/fifo.c index 0cebc0e5a4..f201def24f 100644 --- a/util/fifo.c +++ b/util/fifo.c @@ -11,9 +11,9 @@ void fifo_init(struct fifo *fifo, size_t capacity) { } void fifo_destroy(struct fifo *fifo) { - ////modify_critical_region_counter(current, 1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); free(fifo->buf); - ////modify_critical_region_counter(current, -1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } size_t fifo_capacity(struct fifo *fifo) { @@ -67,7 +67,7 @@ int fifo_read(struct fifo *fifo, void *buf, size_t size, int flags) { } void fifo_flush(struct fifo *fifo) { - ////modify_critical_region_counter(current, 1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); fifo->size = 0; - ////modify_critical_region_counter(current, -1, __FILE__, __LINE__); + ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } diff --git a/util/sync.c b/util/sync.c index efbc11fffe..b18aea7785 100644 --- a/util/sync.c +++ b/util/sync.c @@ -33,7 +33,7 @@ static bool is_signal_pending(lock_t *lock) { return pending; } -void modify_critical_region_counter(struct task *task, int value, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // value Should only be -1 or 1. -mke +void modify_critical_region_count(struct task *task, int value, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // value Should only be -1 or 1. -mke if(!doEnableExtraLocking) {// If they want to fly by the seat of their pants... -mke return; @@ -49,7 +49,7 @@ void modify_critical_region_counter(struct task *task, int value, __attribute__( bool ilocked = false; - if (trylocknl(&task->general_lock) != _EBUSY) { + if (trylocknl(&task->general_lock, task->comm, task->pid) != _EBUSY) { ilocked = true; // Make sure this is locked, and unlock it later if we had to lock it. } @@ -74,10 +74,10 @@ void modify_critical_region_counter(struct task *task, int value, __attribute__( unlock(&task->general_lock); } -void modify_critical_region_counter_wrapper(int value, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { +void modify_critical_region_count_wrapper(int value, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // sync.h can't know about the definition of task struct due to recursive include files. 
-mke if((current != NULL) && (doEnableExtraLocking)) - modify_critical_region_counter(current, value, file, line); + modify_critical_region_count(current, value, file, line); return; } diff --git a/util/sync.h b/util/sync.h index 63827da6b7..d21e54e15b 100644 --- a/util/sync.h +++ b/util/sync.h @@ -20,7 +20,7 @@ extern int current_pid(void); extern int current_uid(void); extern char* current_comm(void); extern unsigned critical_region_count_wrapper(void); -extern void modify_critical_region_counter_wrapper(int, const char*, int); +extern void modify_critical_region_count_wrapper(int, const char*, int); extern unsigned locks_held_count_wrapper(void); extern void modify_locks_held_count_wrapper(int); extern struct pid *pid_get(dword_t id); @@ -113,7 +113,7 @@ static inline void atomic_l_lockf(char lname[16], int skiplog, const char *file, if(!doEnableExtraLocking) return; int res = 0; - // modify_critical_region_counter_wrapper(1, file, line); + // mofify_critical_region_counter_wrapper(1, file, line); if(atomic_l_lock.pid > 0) { if(current_pid() != atomic_l_lock.pid) { // Potential deadlock situation. Also weird. --mke res = pthread_mutex_lock(&atomic_l_lock.m); @@ -133,14 +133,14 @@ static inline void atomic_l_lockf(char lname[16], int skiplog, const char *file, //STRACE("atomic_l_lockf(%d)\n", count); // This is too verbose most of the time - // modify_critical_region_counter_wrapper(-1, file, line); + // mofify_critical_region_counter_wrapper(-1, file, line); } static inline void atomic_l_unlockf(void) { if(!doEnableExtraLocking) return; int res = 0; - //modify_critical_region_counter_wrapper(1, __FILE__, __LINE__); + //mofify_critical_region_counter_wrapper(1, __FILE__, __LINE__); strncpy((char *)&atomic_l_lock.lname,"\0", 1); res = pthread_mutex_unlock(&atomic_l_lock.m); if(res) { @@ -150,9 +150,9 @@ static inline void atomic_l_unlockf(void) { } modify_locks_held_count_wrapper(-1); - //modify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); + //mofify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); //STRACE("atomic_l_unlockf()\n"); - // modify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); + // mofify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); } static inline void complex_lockt(lock_t *lock, int log_lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { @@ -200,14 +200,14 @@ static inline void __lock(lock_t *lock, int log_lock, __attribute__((unused)) co unlock(lock); if(!log_lock) { - modify_critical_region_counter_wrapper(1,__FILE__, __LINE__); + modify_critical_region_count_wrapper(1,__FILE__, __LINE__); pthread_mutex_lock(&lock->m); modify_locks_held_count_wrapper(1); lock->owner = pthread_self(); lock->pid = current_pid(); lock->uid = current_uid(); strncpy(lock->comm, current_comm(), 16); - modify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); + modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); } else { pthread_mutex_lock(&lock->m); lock->owner = pthread_self(); @@ -262,7 +262,7 @@ static inline void handle_lock_error(wrlock_t *lock, const char *file, int line, } static inline void loop_lock_generic(wrlock_t *lock, const char *file, int line, int is_write) { - modify_critical_region_counter_wrapper(1, __FILE__, __LINE__); + modify_critical_region_count_wrapper(1, __FILE__, __LINE__); modify_locks_held_count_wrapper(1); unsigned count = 0; @@ -282,7 +282,7 @@ static inline void loop_lock_generic(wrlock_t *lock, const char *file, int line, atomic_l_lockf(is_write ? 
"llw\0" : "ll_read\0", 0, __FILE__, __LINE__); } - modify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); + modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); } #define loop_lock_read(lock, file, line) loop_lock_generic(lock, file, line, 0) @@ -360,7 +360,7 @@ static inline void _write_lock(wrlock_t *lock, const char *file, int line) { } static inline int trylockw(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { - //modify_critical_region_counter_wrapper(1,__FILE__, __LINE__); + //mofify_critical_region_counter_wrapper(1,__FILE__, __LINE__); atomic_l_lockf("trylockw\0", 0, __FILE__, __LINE__); int status = pthread_rwlock_trywrlock(&lock->l); atomic_l_unlockf(); @@ -384,7 +384,7 @@ static inline int trylockw(wrlock_t *lock, __attribute__((unused)) const char *f #define trylockw(lock) trylockw(lock, __FILE__, __LINE__) static inline int trylock(lock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { - //modify_critical_region_counter_wrapper(1,__FILE__, __LINE__); + //mofify_critical_region_counter_wrapper(1,__FILE__, __LINE__); atomic_l_lockf("trylock\0", 0, __FILE__, __LINE__); int status = pthread_mutex_trylock(&lock->m); atomic_l_unlockf(); @@ -408,11 +408,9 @@ static inline int trylock(lock_t *lock, __attribute__((unused)) const char *file #define trylock(lock) trylock(lock, __FILE__, __LINE__) -static inline int trylocknl(lock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { +static inline int trylocknl(lock_t *lock, char *comm, int pid, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { //Don't log, avoid recursion - atomic_l_lockf("trylock\0", 1, __FILE__, __LINE__); int status = pthread_mutex_trylock(&lock->m); - atomic_l_unlockf(); #if LOCK_DEBUG if (!status) { lock->debug.file = file; @@ -421,17 +419,17 @@ static inline int trylocknl(lock_t *lock, __attribute__((unused)) const char *fi lock->debug.pid = current_pid(); } #endif - if((!status) && (current_pid() > 10)) {// iSH-AOK crashes if low number processes are not excluded. Might be able to go lower then 10? -mke + if(!status) {// iSH-AOK crashes if low number processes are not excluded. Might be able to go lower then 10? -mke modify_locks_held_count_wrapper(1); //STRACE("trylock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); - lock->pid = current_pid(); - strncpy(lock->comm, current_comm(), 16); + lock->pid = pid; + strncpy(lock->comm, comm, 16); } return status; } -#define trylocknl(lock) trylocknl(lock, __FILE__, __LINE__) +#define trylocknl(lock, comm, pid) trylocknl(lock, comm, pid, __FILE__, __LINE__) // conditions, implemented using pthread conditions but hacked so you can also // be woken by a signal @@ -508,7 +506,7 @@ static inline void lock_destroy(wrlock_t *lock) { static inline void _read_lock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { loop_lock_read(lock, file, line); - modify_critical_region_counter_wrapper(1, __FILE__, __LINE__); + modify_critical_region_count_wrapper(1, __FILE__, __LINE__); //pthread_rwlock_rdlock(&lock->l); // assert(lock->val >= 0); // If it isn't >= zero we have a problem since that means there is a write lock somehow. -mke if(lock->val) { @@ -523,7 +521,7 @@ static inline void _read_lock(wrlock_t *lock, __attribute__((unused)) const char if(lock->val > 1000) { // We likely have a problem. printk("WARNING: _read_lock(%x) has 1000+ pending read locks. 
(File: %s, Line: %d) Breaking likely deadlock/process corruption(PID: %d Process: %s.\n", lock, lock->file, lock->line,lock->pid, lock->comm); read_unlock_and_destroy(lock); - modify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); + modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); return; } @@ -531,7 +529,7 @@ static inline void _read_lock(wrlock_t *lock, __attribute__((unused)) const char lock->pid = current_pid(); if(lock->pid > 9) strncpy((char *)lock->comm, current_comm(), 16); - modify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); + modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); } @@ -544,30 +542,30 @@ static inline void read_lock(wrlock_t *lock, __attribute__((unused)) const char #define write_lock(lock) _write_lock(lock, __FILE__, __LINE__) static inline void read_to_write_lock(wrlock_t *lock) { // Try to atomically swap a RO lock to a Write lock. -mke - modify_critical_region_counter_wrapper(1, __FILE__, __LINE__); + modify_critical_region_count_wrapper(1, __FILE__, __LINE__); atomic_l_lockf("rtw_lock\0", 0, __FILE__, __LINE__); _read_unlock(lock, __FILE__, __LINE__); __write_lock(lock, __FILE__, __LINE__); atomic_l_unlockf(); - modify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); + modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); } static inline void write_to_read_lock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // Try to atomically swap a Write lock to a RO lock. -mke - modify_critical_region_counter_wrapper(1, __FILE__, __LINE__); + modify_critical_region_count_wrapper(1, __FILE__, __LINE__); atomic_l_lockf("wtr_lock\0", 0, __FILE__, __LINE__); _write_unlock(lock, file, line); _read_lock(lock, file, line); atomic_l_unlockf(); - modify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); + modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); } static inline void write_unlock_and_destroy(wrlock_t *lock) { - modify_critical_region_counter_wrapper(1, __FILE__, __LINE__); + modify_critical_region_count_wrapper(1, __FILE__, __LINE__); atomic_l_lockf("wuad_lock\0", 0, __FILE__, __LINE__); _write_unlock(lock, __FILE__, __LINE__); _lock_destroy(lock); atomic_l_unlockf(); - modify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); + modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); } static inline void read_unlock_and_destroy(wrlock_t *lock) { From c7334750ac1e24f398bc941185e8be41a342b437 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Tue, 28 Nov 2023 16:53:17 -0800 Subject: [PATCH 06/23] o Code cleanup and fix to mitigate ongoing brain damage --- emu/memory.c | 19 ------------------- emu/tlb.c | 13 ------------- fs/proc/pid.c | 4 ---- fs/proc/root.c | 4 ++-- jit/jit.c | 19 ++----------------- kernel/calls.c | 1 - kernel/exec.c | 2 -- kernel/futex.c | 4 ++-- kernel/log.c | 4 +--- kernel/mmap.c | 4 ---- kernel/signal.c | 23 ++++------------------- kernel/time.c | 2 -- kernel/user.c | 9 --------- util/fifo.c | 4 ---- util/sync.c | 7 +++++-- util/sync.h | 8 -------- 16 files changed, 16 insertions(+), 111 deletions(-) diff --git a/emu/memory.c b/emu/memory.c index cdc0de3216..9a0ca47ac1 100644 --- a/emu/memory.c +++ b/emu/memory.c @@ -67,8 +67,6 @@ void mem_destroy(struct mem *mem) { free(mem->pgdir[i]); } - //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); 
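[Editor's note] The mem_destroy() hunk here deletes the commented-out counter calls but keeps the nanosleep back-off loop between them. A condensed sketch of that drain pattern, under stated assumptions: struct task stays opaque as in kernel/resource_locking.h (so the pid is passed separately), the helper name wait_for_critical_regions() is hypothetical, and the lock_pause duration is assumed:

    #include <time.h>    // nanosleep(), struct timespec

    struct task;    // opaque forward declaration, mirroring kernel/resource_locking.h
    extern unsigned critical_region_count(struct task *);

    static struct timespec lock_pause = { .tv_sec = 0, .tv_nsec = 20000 };  // duration assumed

    // Park the caller until the task leaves its critical sections; the
    // "> 1" threshold and the low-pid guard mirror the loop in this hunk.
    static void wait_for_critical_regions(struct task *task, int pid) {
        while (critical_region_count(task) > 1 && pid > 1)
            nanosleep(&lock_pause, NULL);   // brief back-off, then re-check
    }
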
- do { nanosleep(&lock_pause, NULL); } while((critical_region_count(current) > 1) && (current->pid > 1) ); // Wait for now, task is in one or more critical sections @@ -79,8 +77,6 @@ void mem_destroy(struct mem *mem) { write_unlock_and_destroy(&mem->lock); - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); - } #define PGDIR_TOP(page) ((page) >> 10) @@ -97,34 +93,26 @@ static struct pt_entry *mem_pt_new(struct mem *mem, page_t page) { struct pt_entry *mem_pt(struct mem *mem, page_t page) { - //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); - if (mem->pgdir[PGDIR_TOP(page)] != NULL) { // Check if defined. Likely still leaves a potential race condition as no locking currently. -MKE FIXME struct pt_entry *pgdir = mem->pgdir[PGDIR_TOP(page)]; if (pgdir == NULL) { - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return NULL; } struct pt_entry *entry = &pgdir[PGDIR_BOTTOM(page)]; if (entry->data == NULL) { - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return NULL; } - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return entry; } else { mem->pgdir[PGDIR_TOP(page)] = NULL; - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return NULL; } - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } static void mem_pt_del(struct mem *mem, page_t page) { - //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); struct pt_entry *entry = mem_pt(mem, page); if (entry != NULL) { while(critical_region_count(current) > 4) { // mark @@ -132,17 +120,14 @@ static void mem_pt_del(struct mem *mem, page_t page) { } entry->data = NULL; } - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } void mem_next_page(struct mem *mem, page_t *page) { (*page)++; if (*page >= MEM_PAGES) return; - //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); while (*page < MEM_PAGES && mem->pgdir[PGDIR_TOP(*page)] == NULL) *page = (*page - PGDIR_BOTTOM(*page)) + MEM_PGDIR_SIZE; - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } page_t pt_find_hole(struct mem *mem, pages_t size) { @@ -346,8 +331,6 @@ void *mem_ptr(struct mem *mem, addr_t addr, int type) { if (type != MEM_WRITE_PTRACE && !(entry->flags & P_WRITE)) return NULL; - ////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); - if (type == MEM_WRITE_PTRACE) { // TODO: Is P_WRITE really correct? The page shouldn't be writable without ptrace. entry->flags |= P_WRITE | P_COW; @@ -361,11 +344,9 @@ void *mem_ptr(struct mem *mem, addr_t addr, int type) { if (entry->flags & P_COW) { lock(¤t->general_lock, 0); // prevent elf_exec from doing mm_release while we are in flight? 
-mke - //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); read_to_write_lock(&mem->lock); void *copy = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); void *data = (char *) entry->data->data + entry->offset; - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); // copy/paste from above modify_critical_region_count(current, 1,__FILE__, __LINE__); diff --git a/emu/tlb.c b/emu/tlb.c index 4af0128068..8b8d96be47 100644 --- a/emu/tlb.c +++ b/emu/tlb.c @@ -5,16 +5,13 @@ #include "kernel/resource_locking.h" void tlb_refresh(struct tlb *tlb, struct mmu *mmu) { - //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); // WORKING ON -mke if (tlb->mmu == mmu && tlb->mem_changes == mmu->changes) { - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return; } tlb->mmu = mmu; tlb->dirty_page = TLB_PAGE_EMPTY; tlb->mem_changes = mmu->changes; tlb_flush(tlb); - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } void tlb_flush(struct tlb *tlb) { @@ -24,48 +21,38 @@ void tlb_flush(struct tlb *tlb) { } void tlb_free(struct tlb *tlb) { - ////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); free(tlb); - ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } bool __tlb_read_cross_page(struct tlb *tlb, addr_t addr, char *value, unsigned size) { - ////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); char *ptr1 = __tlb_read_ptr(tlb, addr); if (ptr1 == NULL) { - ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return false; } char *ptr2 = __tlb_read_ptr(tlb, (PAGE(addr) + 1) << PAGE_BITS); if (ptr2 == NULL) { - ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return false; } size_t part1 = PAGE_SIZE - PGOFFSET(addr); assert(part1 < size); memcpy(value, ptr1, part1); memcpy(value + part1, ptr2, size - part1); - ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return true; } bool __tlb_write_cross_page(struct tlb *tlb, addr_t addr, const char *value, unsigned size) { - ////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); char *ptr1 = __tlb_write_ptr(tlb, addr); if (ptr1 == NULL) { - ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return false; } char *ptr2 = __tlb_write_ptr(tlb, (PAGE(addr) + 1) << PAGE_BITS); if (ptr2 == NULL) { - ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return false; } size_t part1 = PAGE_SIZE - PGOFFSET(addr); assert(part1 < size); memcpy(ptr1, value, part1); memcpy(ptr2, value + part1, size - part1); - ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return true; } diff --git a/fs/proc/pid.c b/fs/proc/pid.c index 184e152999..2f1c4e1874 100644 --- a/fs/proc/pid.c +++ b/fs/proc/pid.c @@ -35,7 +35,6 @@ static int proc_pid_stat_show(struct proc_entry *entry, struct proc_data *buf) { if ((task == NULL) || (task->exiting == true)) return _ESRCH; - ////mofify_critical_region_counter(task, 1, __FILE__, __LINE__); if(!strcmp(task->general_lock.lname, "task_creat_gen")) // Work around. 
Sometimes the general lock is locked when it shouldn't be unlock(&task->general_lock); lock(&task->general_lock, 0); @@ -117,7 +116,6 @@ static int proc_pid_stat_show(struct proc_entry *entry, struct proc_data *buf) { //unlock(&task->sighand->lock); unlock(&task->group->lock); unlock(&task->general_lock); - ////mofify_critical_region_counter(task, -1, __FILE__, __LINE__); proc_put_task(task); return 0; } @@ -167,8 +165,6 @@ static int proc_pid_cmdline_show(struct proc_entry *entry, struct proc_data *buf if ((task == NULL) || (task->exiting == true)) return _ESRCH; - ////mofify_critical_region_counter(task, 1, __FILE__, __LINE__); - int err = 0; lock(&task->general_lock, 0); diff --git a/fs/proc/root.c b/fs/proc/root.c index d156dadd2e..1843724b0d 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -334,13 +334,13 @@ static bool proc_root_readdir(struct proc_entry *UNUSED(entry), unsigned long *i pid++; } while (pid <= MAX_PID && pid_get_task(pid) == NULL); //unlock(&pids_lock); - modify_critical_region_count(current, -1, __FILE__, __LINE__); if (pid > MAX_PID) { + modify_critical_region_count(current, -1, __FILE__, __LINE__); return false; } *next_entry = (struct proc_entry) {&proc_pid, .pid = pid}; *index = pid + PROC_ROOT_LEN; - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); + modify_critical_region_count(current, -1, __FILE__, __LINE__); return true; } diff --git a/jit/jit.c b/jit/jit.c index 0d9ab61087..9caed322e5 100644 --- a/jit/jit.c +++ b/jit/jit.c @@ -87,9 +87,9 @@ void jit_invalidate_page(struct jit *jit, page_t page) { while(critical_region_count(current) > 4) { // It's all a bit magic, but I think this is doing something useful. -mke nanosleep(&lock_pause, NULL); } - //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); + // mofify_critical_region_count(current, 1, __FILE__, __LINE__); jit_invalidate_range(jit, page, page + 1); - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); + // mofify_critical_region_count(current, -1, __FILE__, __LINE__); } void jit_invalidate_all(struct jit *jit) { @@ -185,20 +185,11 @@ static void jit_block_disconnect(struct jit *jit, struct jit_block *block) { } static void jit_block_free(struct jit *jit, struct jit_block *block) { - // critical_region_count_increase(current); jit_block_disconnect(jit, block); free(block); - //critical_region_count_decrease(current); } static void jit_free_jetsam(struct jit *jit) { - /* if(!strcmp(current->comm, "go")) { - // Sleep for a bit if this is go. Kludge alert. -mke - struct timespec wait; - wait.tv_sec = 3; // Be anal and set both to zero. 
-mke - wait.tv_nsec = 0; - nanosleep(&wait, NULL); - } */ struct jit_block *block, *tmp; list_for_each_entry_safe(&jit->jetsam, block, tmp, jetsam) { list_remove(&block->jetsam); @@ -227,7 +218,6 @@ static int cpu_step_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) { addr_t ip = frame->cpu.eip; size_t cache_index = jit_cache_hash(ip); struct jit_block *block = cache[cache_index]; - //////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); if (block == NULL || block->addr != ip) { lock(&jit->lock, 0); block = jit_lookup(jit, ip); @@ -240,7 +230,6 @@ static int cpu_step_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) { cache[cache_index] = block; unlock(&jit->lock); } - //////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); struct jit_block *last_block = frame->last_block; if (last_block != NULL && (last_block->jump_ip[0] != NULL || @@ -253,9 +242,7 @@ static int cpu_step_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) { if (last_block->jump_ip[i] != NULL && (*last_block->jump_ip[i] & 0xffffffff) == block->addr) { *last_block->jump_ip[i] = (unsigned long) block->code; - //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); list_add(&block->jumps_from[i], &last_block->jumps_from_links[i]); - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } } } @@ -263,8 +250,6 @@ static int cpu_step_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) { unlock(&jit->lock); } - //////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); - frame->last_block = block; // block may be jetsam, but that's ok, because it can't be freed until diff --git a/kernel/calls.c b/kernel/calls.c index a70f2b9207..c51fd77467 100644 --- a/kernel/calls.c +++ b/kernel/calls.c @@ -357,7 +357,6 @@ void handle_page_fault_interrupt(struct cpu_state *cpu) { .code = mem_segv_reason(current->mem, cpu->segfault_addr), .fault.addr = cpu->segfault_addr, }; - //current->zombie = true; dump_stack(8); deliver_signal(current, SIGSEGV_, info); } diff --git a/kernel/exec.c b/kernel/exec.c index b3448da4d1..c2a987b6f3 100644 --- a/kernel/exec.c +++ b/kernel/exec.c @@ -109,12 +109,10 @@ static int load_entry(struct prg_header ph, addr_t bias, struct fd *fd) { if (tail_size != 0) { // Unlock and lock the mem because the user functions must be // called without locking mem. - //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); if(trylockw(¤t->mem->lock)) // Test to see if it is actually locked. This is likely masking an underlying problem. -mke write_unlock(¤t->mem->lock, __FILE__, __LINE__); user_memset(file_end, 0, tail_size); write_lock(¤t->mem->lock); - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } if (tail_size > bss_size) tail_size = bss_size; diff --git a/kernel/futex.c b/kernel/futex.c index 40924df0e9..3b3bc7fb72 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -301,10 +301,10 @@ dword_t sys_futex(addr_t uaddr, dword_t op, dword_t val, addr_t timeout_or_val2, switch (op & FUTEX_CMD_MASK_) { case FUTEX_WAIT_: STRACE("futex(FUTEX_WAIT, %#x, %d, 0x%x {%ds %dns}) = ...\n", uaddr, val, timeout_or_val2, timeout.tv_sec, timeout.tv_nsec); - modify_critical_region_count(current, 1, __FILE__, __LINE__); + //modify_critical_region_count(current, 1, __FILE__, __LINE__); dword_t return_val; return_val = futex_wait(uaddr, val, timeout_or_val2 ? 
&timeout : NULL); - modify_critical_region_count(current, -1, __FILE__, __LINE__); + //modify_critical_region_count(current, -1, __FILE__, __LINE__); return return_val; case FUTEX_WAKE_: STRACE("futex(FUTEX_WAKE, %#x, %d)", uaddr, val); diff --git a/kernel/log.c b/kernel/log.c index 458f56ba45..0c7a0fdffe 100644 --- a/kernel/log.c +++ b/kernel/log.c @@ -117,11 +117,9 @@ static size_t do_syslog(int type, addr_t buf_addr, int_t len) { } } size_t sys_syslog(int_t type, addr_t buf_addr, int_t len) { - ////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); lock(&log_lock, 0); size_t retval = do_syslog(type, buf_addr, len); unlock(&log_lock); - ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return retval; } @@ -264,8 +262,8 @@ int current_uid(void) { } char * current_comm(void) { - static char comm[16]; modify_critical_region_count(current, 1, __FILE__, __LINE__); + static char comm[16]; if(current != NULL) { if(strcmp(current->comm, "")) { strncpy(comm, current->comm, 16); diff --git a/kernel/mmap.c b/kernel/mmap.c index cdbd274f86..ee7b7ae448 100644 --- a/kernel/mmap.c +++ b/kernel/mmap.c @@ -106,11 +106,9 @@ static addr_t mmap_common(addr_t addr, dword_t len, dword_t prot, dword_t flags, if ((flags & MMAP_PRIVATE) && (flags & MMAP_SHARED)) return _EINVAL; - //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); write_lock(¤t->mem->lock); addr_t res = do_mmap(addr, len, prot, flags, fd_no, offset); write_unlock(¤t->mem->lock, __FILE__, __LINE__); - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return res; } @@ -156,11 +154,9 @@ int_t sys_munmap(addr_t addr, uint_t len) { if (len == 0) return _EINVAL; - //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); write_lock(¤t->mem->lock); int err = pt_unmap_always(current->mem, PAGE(addr), PAGE_ROUND_UP(len)); write_unlock(¤t->mem->lock, __FILE__, __LINE__); - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); if (err < 0) return _EINVAL; diff --git a/kernel/signal.c b/kernel/signal.c index 30fab10b14..652538157b 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -100,25 +100,18 @@ static void deliver_signal_unlocked(struct task *task, int sig, struct siginfo_ } void deliver_signal(struct task *task, int sig, struct siginfo_ info) { - ////mofify_critical_region_counter(task, 1, __FILE__, __LINE__); // Doesn't work. -mke lock(&task->sighand->lock, 0); deliver_signal_unlocked(task, sig, info); unlock(&task->sighand->lock); - ////mofify_critical_region_counter(task, -1, __FILE__, __LINE__); } void send_signal(struct task *task, int sig, struct siginfo_ info) { // signal zero is for testing whether a process exists - if(task->exiting) - return; // I'm not sure this is correct. -mke - - if(sig == 0) + if (sig == 0) return; - if(task->zombie) + if (task->zombie || task->exiting) return; - - //critical_region_count_increase(task); struct sighand *sighand = task->sighand; lock(&sighand->lock, 0); if ((signal_action(sighand, sig) != SIGNAL_IGNORE) && (task->pid <= MAX_PID)) { // Deal with normal and crazy. 
-mke @@ -132,8 +125,6 @@ void send_signal(struct task *task, int sig, struct siginfo_ info) { notify(&task->group->stopped_cond); unlock(&task->group->lock); } - - //critical_region_count_decrease(task); } bool try_self_signal(int sig) { @@ -368,10 +359,8 @@ void receive_signals(void) { // Should this function have a check for critical_ int sig = sigqueue->info.sig; if (sigset_has(blocked, sig)) continue; - //mofify_critical_region_counter(current, 1, __FILE__, __LINE__); list_remove(&sigqueue->queue); sigset_del(¤t->pending, sig); - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); if (current->ptrace.traced && sig != SIGKILL_) { // This notifies the parent, goes to sleep, and waits for the @@ -725,9 +714,7 @@ int_t sys_rt_sigtimedwait(addr_t set_addr, addr_t info_addr, addr_t timeout_addr } static int kill_task(struct task *task, dword_t sig) { - //while((critical_region_count(task) >1) || (locks_held_count(task))) { // Wait for now, task is in one or more critical sections, and/or has locks - // nanosleep(&lock_pause, NULL); - //} + // FIXME: Need to check references to kernel here to be sure they are zero if (!superuser() && current->uid != task->uid && current->uid != task->suid && @@ -739,9 +726,7 @@ static int kill_task(struct task *task, dword_t sig) { .kill.pid = current->pid, .kill.uid = current->uid, }; - //while((critical_region_count(task)) || (locks_held_count(task))) { // Wait for now, task is in one or more critical sections, and/or has locks - // nanosleep(&lock_pause, NULL); - //} + send_signal(task, sig, info); return 0; } diff --git a/kernel/time.c b/kernel/time.c index 635eaec719..6ef8fc283a 100644 --- a/kernel/time.c +++ b/kernel/time.c @@ -182,9 +182,7 @@ static void itimer_notify(struct task *task) { struct siginfo_ info = { .code = SI_TIMER_, }; - ////mofify_critical_region_counter(task, 1, __FILE__, __LINE__); send_signal(task, SIGALRM_, info); - ////mofify_critical_region_counter(task, -1, __FILE__, __LINE__); } static long itimer_set(struct tgroup *group, int which, struct timer_spec spec, struct timer_spec *old_spec) { diff --git a/kernel/user.c b/kernel/user.c index e9fb018855..21a9f8a357 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -8,20 +8,17 @@ extern pthread_mutex_t extra_lock; static int __user_read_task(struct task *task, addr_t addr, void *buf, size_t count) { char *cbuf = (char *) buf; addr_t p = addr; - ////mofify_critical_region_counter(task, 1, __FILE__, __LINE__); // Everyone who calls this function sets alrady while (p < addr + count) { addr_t chunk_end = (PAGE(p) + 1) << PAGE_BITS; if (chunk_end > addr + count) chunk_end = addr + count; const char *ptr = mem_ptr(task->mem, p, MEM_READ); if (ptr == NULL) { - // //mofify_critical_region_counter(task, -1, __FILE__, __LINE__); return 1; } memcpy(&cbuf[p - addr], ptr, chunk_end - p); p = chunk_end; } - ////mofify_critical_region_counter(task, -1, __FILE__, __LINE__); return 0; } @@ -53,9 +50,7 @@ static int __user_write_task(struct task *task, addr_t addr, const void *buf, si int user_read_task(struct task *task, addr_t addr, void *buf, size_t count) { read_lock(&task->mem->lock, __FILE__, __LINE__); - //mofify_critical_region_counter(task, 1, __FILE__, __LINE__); int res = __user_read_task(task, addr, buf, count); - //mofify_critical_region_counter(task, -1, __FILE__, __LINE__); read_unlock(&task->mem->lock, __FILE__, __LINE__); return res; @@ -103,9 +98,7 @@ int user_read_string(addr_t addr, char *buf, size_t max) { } int user_write_string(addr_t addr, const char *buf) { - 
////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); if (addr == 0) { - ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return 1; } read_lock(¤t->mem->lock, __FILE__, __LINE__); @@ -115,10 +108,8 @@ int user_write_string(addr_t addr, const char *buf) { read_unlock(¤t->mem->lock, __FILE__, __LINE__); return 1; } - //mofify_critical_region_counter(current, -1, __FILE__, __LINE__); i++; } while (buf[i - 1] != '\0'); read_unlock(¤t->mem->lock, __FILE__, __LINE__); - ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); return 0; } diff --git a/util/fifo.c b/util/fifo.c index f201def24f..a2fa83c8ea 100644 --- a/util/fifo.c +++ b/util/fifo.c @@ -11,9 +11,7 @@ void fifo_init(struct fifo *fifo, size_t capacity) { } void fifo_destroy(struct fifo *fifo) { - ////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); free(fifo->buf); - ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } size_t fifo_capacity(struct fifo *fifo) { @@ -67,7 +65,5 @@ int fifo_read(struct fifo *fifo, void *buf, size_t size, int flags) { } void fifo_flush(struct fifo *fifo) { - ////mofify_critical_region_counter(current, 1, __FILE__, __LINE__); fifo->size = 0; - ////mofify_critical_region_counter(current, -1, __FILE__, __LINE__); } diff --git a/util/sync.c b/util/sync.c index b18aea7785..8d3452e4d2 100644 --- a/util/sync.c +++ b/util/sync.c @@ -34,6 +34,7 @@ static bool is_signal_pending(lock_t *lock) { } void modify_critical_region_count(struct task *task, int value, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // value Should only be -1 or 1. -mke + // return; if(!doEnableExtraLocking) {// If they want to fly by the seat of their pants... -mke return; @@ -202,11 +203,13 @@ void sigusr1_handler(void) { // Because sometimes we can't #include "kernel/task.h" -mke unsigned critical_region_count(struct task *task) { + // return 0; unsigned tmp = 0; pthread_mutex_lock(&task->critical_region.lock); // This would make more tmp = task->critical_region.count; - // if(tmp > 1000) // Not likely - // tmp = 0; + //printk("%d:%d\n", task, tmp); + if(tmp > 1000) // Not likely + tmp = 0; pthread_mutex_unlock(&task->critical_region.lock); return tmp; diff --git a/util/sync.h b/util/sync.h index d21e54e15b..03403fd3e9 100644 --- a/util/sync.h +++ b/util/sync.h @@ -113,7 +113,6 @@ static inline void atomic_l_lockf(char lname[16], int skiplog, const char *file, if(!doEnableExtraLocking) return; int res = 0; - // mofify_critical_region_counter_wrapper(1, file, line); if(atomic_l_lock.pid > 0) { if(current_pid() != atomic_l_lock.pid) { // Potential deadlock situation. Also weird. 
--mke res = pthread_mutex_lock(&atomic_l_lock.m); @@ -132,15 +131,12 @@ static inline void atomic_l_lockf(char lname[16], int skiplog, const char *file, } //STRACE("atomic_l_lockf(%d)\n", count); // This is too verbose most of the time - - // mofify_critical_region_counter_wrapper(-1, file, line); } static inline void atomic_l_unlockf(void) { if(!doEnableExtraLocking) return; int res = 0; - //mofify_critical_region_counter_wrapper(1, __FILE__, __LINE__); strncpy((char *)&atomic_l_lock.lname,"\0", 1); res = pthread_mutex_unlock(&atomic_l_lock.m); if(res) { @@ -150,9 +146,7 @@ static inline void atomic_l_unlockf(void) { } modify_locks_held_count_wrapper(-1); - //mofify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); //STRACE("atomic_l_unlockf()\n"); - // mofify_critical_region_counter_wrapper(-1, __FILE__, __LINE__); } static inline void complex_lockt(lock_t *lock, int log_lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { @@ -360,7 +354,6 @@ static inline void _write_lock(wrlock_t *lock, const char *file, int line) { } static inline int trylockw(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { - //mofify_critical_region_counter_wrapper(1,__FILE__, __LINE__); atomic_l_lockf("trylockw\0", 0, __FILE__, __LINE__); int status = pthread_rwlock_trywrlock(&lock->l); atomic_l_unlockf(); @@ -384,7 +377,6 @@ static inline int trylockw(wrlock_t *lock, __attribute__((unused)) const char *f #define trylockw(lock) trylockw(lock, __FILE__, __LINE__) static inline int trylock(lock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { - //mofify_critical_region_counter_wrapper(1,__FILE__, __LINE__); atomic_l_lockf("trylock\0", 0, __FILE__, __LINE__); int status = pthread_mutex_trylock(&lock->m); atomic_l_unlockf(); From 1e7afc07d7eef38ac749ab77469346c62637ace6 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Tue, 28 Nov 2023 17:10:33 -0800 Subject: [PATCH 07/23] o First phase of adding reference counts to tasks and memory --- app/iOSFS.m | 4 ++-- emu/memory.c | 22 +++++++++--------- emu/memory.h | 5 +++++ fs/poll.c | 6 ++--- fs/proc/pid.c | 4 ++-- fs/proc/root.c | 6 ++--- iSH-AOK.xcodeproj/project.pbxproj | 24 ++++++++++---------- jit/jit.c | 6 ++--- kernel/calls.c | 2 +- kernel/exit.c | 37 +++++++++++++------------------ kernel/fork.c | 2 +- kernel/futex.c | 2 -- kernel/log.c | 26 +++++++++++----------- kernel/mmap.c | 7 +++--- kernel/poll.c | 2 +- kernel/resource_locking.h | 4 ++-- kernel/signal.c | 4 ++-- kernel/task.c | 15 ++++++------- kernel/task.h | 11 +++++---- util/sync.c | 37 ++++++++++++++----------------- util/sync.h | 36 +++++++++++++++--------------- 21 files changed, 127 insertions(+), 135 deletions(-) diff --git a/app/iOSFS.m b/app/iOSFS.m index 71aedc982b..ae44304848 100644 --- a/app/iOSFS.m +++ b/app/iOSFS.m @@ -239,7 +239,7 @@ static int combine_error(NSError *coordinatorError, int err) { __block NSError *error = nil; __block struct fd *fd; __block dispatch_semaphore_t file_opened = dispatch_semaphore_create(0); - modify_critical_region_count_wrapper(1, __FILE__, __LINE__); + task_ref_count_wrapper(1, __FILE__, __LINE__); dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void){ void (^operation)(NSURL *url) = ^(NSURL *url) { fd = realfs_open(mount, path_for_url_in_mount(mount, url, path), flags, mode); @@ -265,7 +265,7 @@ static int combine_error(NSError *coordinatorError, int err) { } [coordinator coordinateReadingItemAtURL:url 
options:options error:&error byAccessor:operation]; }); - modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); + task_ref_count_wrapper(-1, __FILE__, __LINE__); dispatch_semaphore_wait(file_opened, DISPATCH_TIME_FOREVER); diff --git a/emu/memory.c b/emu/memory.c index 9a0ca47ac1..a777a49a19 100644 --- a/emu/memory.c +++ b/emu/memory.c @@ -45,12 +45,12 @@ void mem_init(struct mem *mem) { void mem_destroy(struct mem *mem) { write_lock(&mem->lock); - while((critical_region_count(current) > 1) && (current->pid > 1) ){ // Wait for now, task is in one or more critical sections, and/or has locks + while((task_reference_count(current)) && (current->pid > 1) ){ // Wait for now, task is in one or more critical sections, and/or has locks nanosleep(&lock_pause, NULL); } pt_unmap_always(mem, 0, MEM_PAGES); #if ENGINE_JIT - while((critical_region_count(current) > 1) && (current->pid > 1) ){ // Wait for now, task is in one or more critical sections, and/or has locks + while((task_reference_count(current)) && (current->pid > 1) ){ // Wait for now, task is in one or more critical sections, and/or has locks nanosleep(&lock_pause, NULL); } jit_free(mem->mmu.jit); @@ -60,7 +60,7 @@ void mem_destroy(struct mem *mem) { do { count++; nanosleep(&lock_pause, NULL); - } while((critical_region_count(current) > 1) && (current->pid > 1) && (count < 5000000)); // Wait for now, task is in one or more critical sections + } while((task_reference_count(current)) && (current->pid > 1) && (count < 5000000)); // Wait for now, task is in one or more critical sections if (mem->pgdir[i] != NULL) @@ -69,7 +69,7 @@ void mem_destroy(struct mem *mem) { do { nanosleep(&lock_pause, NULL); - } while((critical_region_count(current) > 1) && (current->pid > 1) ); // Wait for now, task is in one or more critical sections + } while((task_reference_count(current) > 1) && (current->pid > 1) ); // Wait for now, task is in one or more critical sections free(mem->pgdir); @@ -115,7 +115,7 @@ struct pt_entry *mem_pt(struct mem *mem, page_t page) { static void mem_pt_del(struct mem *mem, page_t page) { struct pt_entry *entry = mem_pt(mem, page); if (entry != NULL) { - while(critical_region_count(current) > 4) { // mark + while(task_reference_count(current) > 4) { // mark nanosleep(&lock_pause, NULL); } entry->data = NULL; @@ -196,7 +196,7 @@ int pt_unmap(struct mem *mem, page_t start, pages_t pages) { int pt_unmap_always(struct mem *mem, page_t start, pages_t pages) { for (page_t page = start; page < start + pages; mem_next_page(mem, &page)) { - while(critical_region_count(current) >3) { + while(task_reference_count(current) >3) { nanosleep(&lock_pause, NULL); } struct pt_entry *pt = mem_pt(mem, page); @@ -210,7 +210,7 @@ int pt_unmap_always(struct mem *mem, page_t start, pages_t pages) { if (--data->refcount == 0) { // vdso wasn't allocated with mmap, it's just in our data segment if (data->data != vdso_data) { - while(critical_region_count(current) > 3) { + while(task_reference_count(current) > 3) { nanosleep(&lock_pause, NULL); } int err = munmap(data->data, data->size); @@ -258,7 +258,7 @@ int pt_set_flags(struct mem *mem, page_t start, pages_t pages, int flags) { } int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t pages) { - while(critical_region_count(current)) { // Wait for now, task is in one or more critical sections + while(task_reference_count(current)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } for (page_t page = start; page < start + pages; 
mem_next_page(src, &page)) { @@ -275,7 +275,7 @@ int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t page dst_entry->offset = entry->offset; dst_entry->flags = entry->flags; } - while(critical_region_count(current)) { // Wait for now, task is in one or more critical sections + while(task_reference_count(current)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } mem_changed(src); @@ -349,10 +349,10 @@ void *mem_ptr(struct mem *mem, addr_t addr, int type) { void *data = (char *) entry->data->data + entry->offset; // copy/paste from above - modify_critical_region_count(current, 1,__FILE__, __LINE__); + task_ref_count(current, 1,__FILE__, __LINE__); //read_to_write_lock(&mem->lock); memcpy(copy, data, PAGE_SIZE); //mkemkemke Crashes here a lot when running both the go and parallel make test. 01 June 2022 - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); pt_map(mem, page, 1, copy, 0, entry->flags &~ P_COW); unlock(&current->general_lock); write_to_read_lock(&mem->lock, __FILE__, __LINE__); diff --git a/emu/memory.h b/emu/memory.h index 1f44c096af..e458987de2 100644 --- a/emu/memory.h +++ b/emu/memory.h @@ -20,6 +20,11 @@ struct mem { struct jit *jit; #endif struct mmu mmu; + struct { + pthread_mutex_t lock; + int count; // If positive, don't delete yet, wait_to_delete + bool ready_to_be_freed; // Should be false initially + } reference; wrlock_t lock; }; diff --git a/fs/poll.c b/fs/poll.c index d9c46bf24b..473c5990cb 100644 --- a/fs/poll.c +++ b/fs/poll.c @@ -331,7 +331,7 @@ void poll_destroy(struct poll *poll) { struct poll_fd *poll_fd; struct poll_fd *tmp; - while(critical_region_count(current)) { + while(task_reference_count(current)) { nanosleep(&lock_pause, NULL); } list_for_each_entry_safe(&poll->poll_fds, poll_fd, tmp, fds) { @@ -342,12 +342,12 @@ void poll_destroy(struct poll *poll) { free(poll_fd); } - while(critical_region_count(current)) { + while(task_reference_count(current)) { nanosleep(&lock_pause, NULL); } list_for_each_entry_safe(&poll->pollfd_freelist, poll_fd, tmp, fds) { - while(critical_region_count(current)) { + while(task_reference_count(current)) { nanosleep(&lock_pause, NULL); } list_remove(&poll_fd->fds); diff --git a/fs/proc/pid.c b/fs/proc/pid.c index 2f1c4e1874..d2ea0785c4 100644 --- a/fs/proc/pid.c +++ b/fs/proc/pid.c @@ -135,7 +135,7 @@ static int proc_pid_auxv_show(struct proc_entry *entry, struct proc_data *buf) { struct task *task = proc_get_task(entry); if ((task == NULL) || (task->exiting == true)) return _ESRCH; - task->process_info_being_read = true; + // FIXME: Increment task->reference.count int err = 0; lock(&task->general_lock, 0); if (task->mm == NULL) @@ -153,8 +153,8 @@ static int proc_pid_auxv_show(struct proc_entry *entry, struct proc_data *buf) { out_free_task: unlock(&task->general_lock); - task->process_info_being_read = false; proc_put_task(task); + // FIXME: Decrement task->reference.count return err; } diff --git a/fs/proc/root.c b/fs/proc/root.c index 1843724b0d..25a8b12762 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -328,19 +328,19 @@ static bool proc_root_readdir(struct proc_entry *UNUSED(entry), unsigned long *i pid_t_ pid = *index - PROC_ROOT_LEN; if (pid <= MAX_PID) { - modify_critical_region_count(current, 1, __FILE__, __LINE__); + task_ref_count(current, 1, __FILE__, __LINE__); //lock(&pids_lock, 0); do { pid++; } while (pid <= MAX_PID && pid_get_task(pid) == NULL); //unlock(&pids_lock); if (pid >
MAX_PID) { - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); return false; } *next_entry = (struct proc_entry) {&proc_pid, .pid = pid}; *index = pid + PROC_ROOT_LEN; - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); return true; } diff --git a/iSH-AOK.xcodeproj/project.pbxproj b/iSH-AOK.xcodeproj/project.pbxproj index 5e5b12b36e..7d46e3072f 100644 --- a/iSH-AOK.xcodeproj/project.pbxproj +++ b/iSH-AOK.xcodeproj/project.pbxproj @@ -2555,7 +2555,7 @@ CLANG_ENABLE_MODULES = YES; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 503; + CURRENT_PROJECT_VERSION = 505; DEVELOPMENT_TEAM = UYU5FM4LQ4; LD_RUNPATH_SEARCH_PATHS = ( "$(inherited)", @@ -2581,7 +2581,7 @@ CLANG_ENABLE_MODULES = YES; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 503; + CURRENT_PROJECT_VERSION = 505; DEVELOPMENT_TEAM = UYU5FM4LQ4; LD_RUNPATH_SEARCH_PATHS = ( "$(inherited)", @@ -2616,7 +2616,7 @@ CLANG_ENABLE_MODULES = YES; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 503; + CURRENT_PROJECT_VERSION = 505; DEVELOPMENT_TEAM = UYU5FM4LQ4; LD_RUNPATH_SEARCH_PATHS = ( "$(inherited)", @@ -2640,7 +2640,7 @@ buildSettings = { CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 503; + CURRENT_PROJECT_VERSION = 505; DEVELOPMENT_TEAM = UYU5FM4LQ4; MARKETING_VERSION = 1.3; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK"; @@ -2665,7 +2665,7 @@ CODE_SIGN_ENTITLEMENTS = app/FileProvider/iSHFileProvider.entitlements; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 503; + CURRENT_PROJECT_VERSION = 505; DEVELOPMENT_TEAM = UYU5FM4LQ4; INFOPLIST_FILE = app/FileProvider/Info.plist; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK.FileProvider"; @@ -2776,7 +2776,7 @@ CODE_SIGN_ENTITLEMENTS = app/FileProvider/iSHFileProvider.entitlements; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 503; + CURRENT_PROJECT_VERSION = 505; DEVELOPMENT_TEAM = UYU5FM4LQ4; INFOPLIST_FILE = app/FileProvider/Info.plist; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK.FileProvider"; @@ -2794,7 +2794,7 @@ CODE_SIGN_ENTITLEMENTS = iSHFileProviderRelease.entitlements; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 503; + CURRENT_PROJECT_VERSION = 505; DEVELOPMENT_TEAM = UYU5FM4LQ4; INFOPLIST_FILE = app/FileProvider/Info.plist; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK.FileProvider"; @@ -2897,7 +2897,7 @@ buildSettings = { CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 503; + CURRENT_PROJECT_VERSION = 505; DEVELOPMENT_TEAM = UYU5FM4LQ4; MARKETING_VERSION = 1.3; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK"; @@ -2912,7 +2912,7 @@ buildSettings = { CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 503; + CURRENT_PROJECT_VERSION = 505; DEVELOPMENT_TEAM = UYU5FM4LQ4; MARKETING_VERSION = 1.3; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK"; @@ -2940,7 +2940,7 @@ CLANG_ENABLE_MODULES = YES; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 503; + CURRENT_PROJECT_VERSION = 505; DEVELOPMENT_TEAM = UYU5FM4LQ4; LD_RUNPATH_SEARCH_PATHS = ( "$(inherited)", @@ -2963,7 
+2963,7 @@ buildSettings = { CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 503; + CURRENT_PROJECT_VERSION = 505; DEVELOPMENT_TEAM = UYU5FM4LQ4; MARKETING_VERSION = 1.3; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK"; @@ -2988,7 +2988,7 @@ CODE_SIGN_ENTITLEMENTS = app/FileProvider/iSHFileProvider.entitlements; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 503; + CURRENT_PROJECT_VERSION = 505; DEVELOPMENT_TEAM = UYU5FM4LQ4; INFOPLIST_FILE = app/FileProvider/Info.plist; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK.FileProvider"; diff --git a/jit/jit.c b/jit/jit.c index 9caed322e5..a493ccb5ea 100644 --- a/jit/jit.c +++ b/jit/jit.c @@ -39,7 +39,7 @@ void jit_free(struct jit *jit) { if (!jit) return; bool signal_pending = !!(current->pending & ~current->blocked); - while((critical_region_count(current) > 2) || (locks_held_count(current)) || (current->process_info_being_read) || (signal_pending)) { // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight + while((task_reference_count(current) > 2) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); signal_pending = !!(current->pending & ~current->blocked); } @@ -84,7 +84,7 @@ void jit_invalidate_range(struct jit *jit, page_t start, page_t end) { } void jit_invalidate_page(struct jit *jit, page_t page) { - while(critical_region_count(current) > 4) { // It's all a bit magic, but I think this is doing something useful. -mke + while(task_reference_count(current) > 4) { // It's all a bit magic, but I think this is doing something useful. -mke nanosleep(&lock_pause, NULL); } // mofify_critical_region_count(current, 1, __FILE__, __LINE__); @@ -305,7 +305,7 @@ int cpu_run_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) { unlock(&jit->lock); write_lock(&jit->jetsam_lock); lock(&jit->lock, 0); - while(critical_region_count(current) > 3) {// Yes, this is weird. It might not work, but I'm trying. -mke + while(task_reference_count(current) > 3) {// Yes, this is weird. It might not work, but I'm trying. -mke nanosleep(&lock_pause, NULL); // Yes, this has triggered at least once. Is it doing any good though? 
-mke } jit_free_jetsam(jit); diff --git a/kernel/calls.c b/kernel/calls.c index c51fd77467..064b7118da 100644 --- a/kernel/calls.c +++ b/kernel/calls.c @@ -340,7 +340,7 @@ void handle_syscall_interrupt(struct cpu_state *cpu) { return; } - STRACE("%d(%s) %d:%d call %-3d ", current->pid, current->comm, current->critical_region.count, current->locks_held.count, syscall_num); + STRACE("%d(%s) %d:%d call %-3d ", current->pid, current->comm, current->reference.count, current->locks_held.count, syscall_num); int result = syscall_table[syscall_num](cpu->ebx, cpu->ecx, cpu->edx, cpu->esi, cpu->edi, cpu->ebp); STRACE(" = 0x%x\n", result); cpu->eax = result; diff --git a/kernel/exit.c b/kernel/exit.c index 0494130ed1..8b0f6d2be8 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -17,7 +17,7 @@ extern const char extra_lock_comm; static void halt_system(void); static bool exit_tgroup(struct task *task) { - while((critical_region_count(task) > 2) || (locks_held_count(task))) { // Wait for now, task is in one or more critical sections, and/or has locks + while((task_reference_count(task) > 2) || (locks_held_count(task))) { // Wait for now, task is in one or more critical sections, and/or has locks nanosleep(&lock_pause, NULL); } struct tgroup *group = task->group; @@ -65,9 +65,8 @@ noreturn void do_exit(int status) { bool signal_pending = !!(current->pending & ~current->blocked); // has to happen before mm_release - while((critical_region_count(current) > 1) || + while((task_reference_count(current) > 1) || (locks_held_count(current)) || - (current->process_info_being_read) || (signal_pending)) { // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); signal_pending = !!(current->pending & ~current->blocked); @@ -83,17 +82,15 @@ noreturn void do_exit(int status) { do { nanosleep(&lock_pause, NULL); signal_pending = !!(current->pending & ~current->blocked); - } while((critical_region_count(current) > 1) || + } while((task_reference_count(current) > 1) || (locks_held_count(current)) || - (current->process_info_being_read) || (signal_pending)); // Wait for now, task is in one or more critical mm_release(current->mm); current->mm = NULL; signal_pending = !!(current->pending & ~current->blocked); - while((critical_region_count(current) > 1) || + while((task_reference_count(current) > 1) || (locks_held_count(current)) || - (current->process_info_being_read) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); signal_pending = !!(current->pending & ~current->blocked); @@ -101,9 +98,8 @@ noreturn void do_exit(int status) { fdtable_release(current->files); current->files = NULL; - while((critical_region_count(current) > 1) || + while((task_reference_count(current) > 1) || (locks_held_count(current)) || - (current->process_info_being_read) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); signal_pending = !!(current->pending & ~current->blocked); @@ -114,9 +110,8 @@ noreturn void do_exit(int status) { // sighand must be released below so it can be protected by pids_lock // since it can be accessed by other threads - while((critical_region_count(current) > 1) || + while((task_reference_count(current) > 1) || (locks_held_count(current)) || - 
(current->process_info_being_read) || (signal_pending)) { // Wait for now, task is in one or more critical// Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); signal_pending = !!(current->pending & ~current->blocked); @@ -130,13 +125,12 @@ noreturn void do_exit(int status) { unlock(&current->group->lock); // the actual freeing needs pids_lock - modify_critical_region_count(current, 1, __FILE__, __LINE__); + task_ref_count(current, 1, __FILE__, __LINE__); complex_lockt(&pids_lock, 0, __FILE__, __LINE__); // release the sighand signal_pending = !!(current->pending & ~current->blocked); - while((critical_region_count(current) > 2) || + while((task_reference_count(current) > 2) || (locks_held_count(current)) || - (current->process_info_being_read) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); signal_pending = !!(current->pending & ~current->blocked); @@ -161,9 +155,8 @@ noreturn void do_exit(int status) { signal_pending = !!(current->pending & ~current->blocked); - while((critical_region_count(current) > 2) || + while((task_reference_count(current) > 2) || (locks_held_count(current)) || - (current->process_info_being_read) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); signal_pending = !!(current->pending & ~current->blocked); @@ -199,7 +192,7 @@ noreturn void do_exit(int status) { } vfork_notify(current); - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); if(current != leader) { task_destroy(current, 1); } else { @@ -233,7 +226,7 @@ noreturn void do_exit_group(int status) { modify_locks_held_count(current, tmpvar); // Reset to zero -mke } - modify_critical_region_count(current, 1, __FILE__, __LINE__); + task_ref_count(current, 1, __FILE__, __LINE__); list_for_each_entry(&group->threads, task, group_links) { task->exiting = true; deliver_signal(task, SIGKILL_, SIGINFO_NIL); @@ -242,7 +235,7 @@ } unlock(&pids_lock); - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); unlock(&group->lock); //if(current->pid <= MAX_PID) // abort if crazy.
-mke do_exit(status); @@ -365,7 +358,7 @@ int do_wait(int idtype, pid_t_ id, struct siginfo_ *info, struct rusage_ *rusage return _EINVAL; complex_lockt(&pids_lock, 0, __FILE__, __LINE__); - modify_critical_region_count(current, 1, __FILE__, __LINE__); + task_ref_count(current, 1, __FILE__, __LINE__); int err; bool got_signal = false; @@ -425,12 +418,12 @@ int do_wait(int idtype, pid_t_ id, struct siginfo_ *info, struct rusage_ *rusage info->sig = SIGCHLD_; found_something: - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); unlock(&pids_lock); return 0; error: - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); unlock(&pids_lock); return err; } diff --git a/kernel/fork.c b/kernel/fork.c index c12a87ab60..e7dcdbbe59 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -127,7 +127,7 @@ static int copy_task(struct task *task, dword_t flags, addr_t stack, addr_t ptid return 0; fail_free_sighand: - while(critical_region_count(task)) { // Wait for now, task is in one or more critical sections + while(task_reference_count(task)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } sighand_release(task->sighand); diff --git a/kernel/futex.c b/kernel/futex.c index 3b3bc7fb72..8ad27f7264 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -301,10 +301,8 @@ dword_t sys_futex(addr_t uaddr, dword_t op, dword_t val, addr_t timeout_or_val2, switch (op & FUTEX_CMD_MASK_) { case FUTEX_WAIT_: STRACE("futex(FUTEX_WAIT, %#x, %d, 0x%x {%ds %dns}) = ...\n", uaddr, val, timeout_or_val2, timeout.tv_sec, timeout.tv_nsec); - //modify_critical_region_count(current, 1, __FILE__, __LINE__); dword_t return_val; return_val = futex_wait(uaddr, val, timeout_or_val2 ? &timeout : NULL); - //modify_critical_region_count(current, -1, __FILE__, __LINE__); return return_val; case FUTEX_WAKE_: STRACE("futex(FUTEX_WAKE, %#x, %d)", uaddr, val); diff --git a/kernel/log.c b/kernel/log.c index 0c7a0fdffe..15cac4888e 100644 --- a/kernel/log.c +++ b/kernel/log.c @@ -230,56 +230,56 @@ void die(const char *msg, ...) 
{ // fun little utility function int current_pid(void) { - modify_critical_region_count(current, 1, __FILE__, __LINE__); + task_ref_count(current, 1, __FILE__, __LINE__); if(current != NULL) { if (current->exiting != true) { - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); return current->pid; } else { - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); return -1; } } - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); return -1; } int current_uid(void) { - modify_critical_region_count(current, 1, __FILE__, __LINE__); + task_ref_count(current, 1, __FILE__, __LINE__); if(current != NULL) { if (current->exiting != true) { - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); return current->uid; } else { - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); return -1; } } - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); return -1; } char * current_comm(void) { - modify_critical_region_count(current, 1, __FILE__, __LINE__); static char comm[16]; + task_ref_count(current, 1, __FILE__, __LINE__); if(current != NULL) { if(strcmp(current->comm, "")) { strncpy(comm, current->comm, 16); } else { - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); return ""; } if (current->exiting != true) { - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); return comm; } else { - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); return ""; } } - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); return ""; } diff --git a/kernel/mmap.c b/kernel/mmap.c index ee7b7ae448..227a577a25 100644 --- a/kernel/mmap.c +++ b/kernel/mmap.c @@ -45,12 +45,13 @@ void mm_release(struct mm *mm) { if (--mm->refcount == 0) { if (mm->exefile != NULL) fd_close(mm->exefile); - while((critical_region_count(current) > 1) || (current->process_info_being_read)) { // Wait for now, task is in one or more critical sections + while(task_reference_count(current)) { // FIXME: Should be locking current->reference.lock and updating + // current->reference.count before mem_destroy nanosleep(&lock_pause, NULL); } mem_destroy(&mm->mem); - while((critical_region_count(current) > 1) || (current->process_info_being_read)) { // Wait for now, task is in one or more critical sections + while(task_reference_count(current)) { //FIXME: Should now unlock after mem_destroy nanosleep(&lock_pause, NULL); } free(mm); @@ -181,7 +182,7 @@ int_t sys_mremap(addr_t addr, dword_t old_len, dword_t new_len, dword_t flags) { // shrinking always works if (new_pages <= old_pages) { - while(critical_region_count(current)) { + while(task_reference_count(current)) { nanosleep(&lock_pause, NULL); } int err = pt_unmap(current->mem, PAGE(addr) + new_pages, old_pages - new_pages); diff --git a/kernel/poll.c b/kernel/poll.c index ba01f70b2a..ac8e232c20 100644 --- a/kernel/poll.c +++ b/kernel/poll.c @@ -199,7 +199,7 @@ dword_t sys_poll(addr_t fds, dword_t nfds, int_t timeout) { TASK_MAY_BLOCK { res = poll_wait(poll, poll_event_callback, &context, timeout < 
0 ? NULL : &timeout_ts); } - while(critical_region_count(current)) { // Wait for now, task is in one or more critical sections + while(task_reference_count(current)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } poll_destroy(poll); diff --git a/kernel/resource_locking.h b/kernel/resource_locking.h index eeab0e3a6a..7d1e5ef62a 100644 --- a/kernel/resource_locking.h +++ b/kernel/resource_locking.h @@ -1,8 +1,8 @@ //#include "util/sync.h" // Because sometimes we can't #include "kernel/task.h" -mke -extern unsigned critical_region_count(struct task*); -extern void modify_critical_region_count(struct task*, int, char*, int); +extern unsigned task_reference_count(struct task*); +extern void task_ref_count(struct task*, int, char*, int); extern unsigned locks_held_count(struct task*); extern void modify_locks_held_count(struct task*, int); extern bool current_is_valid(void); diff --git a/kernel/signal.c b/kernel/signal.c index 652538157b..ee5468c81d 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -466,7 +466,7 @@ struct sighand *sighand_copy(struct sighand *sighand) { } void sighand_release(struct sighand *sighand) { - while(critical_region_count(current) > 2) { // Wait for now, task is in one or more critical sections + while(task_reference_count(current) > 2) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } if (--sighand->refcount == 0) { @@ -739,7 +739,7 @@ static int kill_group(pid_t_ pgid, dword_t sig) { } struct tgroup *tgroup; int err = _EPERM; - while((critical_region_count(current)) || (locks_held_count(current))) { // Wait for now, task is in one or more critical sections, and/or has locks + while((task_reference_count(current)) || (locks_held_count(current))) { // Wait for now, task is in one or more critical sections, and/or has locks nanosleep(&lock_pause, NULL); } list_for_each_entry(&pid->pgroup, tgroup, pgroup) { diff --git a/kernel/task.c b/kernel/task.c index eb5a9d4a22..b39715cab4 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -69,7 +69,7 @@ struct pid *pid_get_last_allocated(void) { } dword_t get_count_of_blocked_tasks(void) { - modify_critical_region_count(current, 1, __FILE__, __LINE__); + task_ref_count(current, 1, __FILE__, __LINE__); dword_t res = 0; struct pid *pid_entry; complex_lockt(&pids_lock, 0, __FILE__, __LINE__); @@ -78,7 +78,7 @@ dword_t get_count_of_blocked_tasks(void) { res++; } } - modify_critical_region_count(current, -1, __FILE__, __LINE__); + task_ref_count(current, -1, __FILE__, __LINE__); unlock(&pids_lock); return res; } @@ -86,7 +86,7 @@ dword_t get_count_of_blocked_tasks(void) { void zero_critical_regions_count(void) { // If doEnableExtraLocking is changed to false, we need to zero out critical_region.count for active processes struct pid *pid_entry; list_for_each_entry(&alive_pids_list, pid_entry, alive) { - pid_entry->task->critical_region.count = 0; // Bad things happen if this isn't done. -mke + pid_entry->task->reference.count = 0; // Bad things happen if this isn't done. -mke } } @@ -153,14 +153,15 @@ struct task *task_create_(struct task *parent) { cond_init(&task->ptrace.cond); task->locks_held.count = 0; // counter used to keep track of pending locks associated with task. Do not delete when locks are present. -mke - task->critical_region.count = 0; // counter used to delay task deletion if positive. --mke + task->reference.count = 0; // counter used to delay task deletion if positive. 
--mke + task->reference.ready_to_be_freed = false; return task; } // We consolidate the check for whether the task is in a critical section, // holds locks, or has pending signals into a single function. bool should_wait(struct task *t) { - return critical_region_count(t) > 1 || locks_held_count(t) || !!(t->pending & ~t->blocked); + return task_reference_count(t) > 1 || locks_held_count(t) || !!(t->pending & ~t->blocked); } void task_destroy(struct task *task, int caller) { @@ -199,7 +200,7 @@ void task_destroy(struct task *task, int caller) { retry: // Free the task's resources. - if (!critical_region_count(task)) { + if (!task_reference_count(task)) { free(task); } else { goto retry; @@ -256,8 +257,6 @@ static void *task_thread(void *task) { current = task; - current->critical_region.count = 0; // Is this needed? -mke - update_thread_name(); task_run_current(); diff --git a/kernel/task.h b/kernel/task.h index 1c1821be65..f33961768a 100644 --- a/kernel/task.h +++ b/kernel/task.h @@ -19,14 +19,13 @@ struct task { pthread_t thread; uint64_t threadid; - bool process_info_being_read; // Set when something like ps, top, etc wants to access task info. -mke - pthread_mutex_t death_lock; // Set when process is about to be reaped. Immediately cease all activity on this task. -mke struct { pthread_mutex_t lock; - int count; // If positive, don't delete yet, wait_to_delete -mke - } critical_region; + int count; // If positive, don't delete yet, wait_to_delete + bool ready_to_be_freed; // Should be false initially + } reference; struct { pthread_mutex_t lock; @@ -226,14 +225,14 @@ void update_thread_name(void); // of functions which can block the task, we mark our task as blocked and // unblock it after the function is executed. __attribute__((always_inline)) inline int task_may_block_start(void) { - modify_critical_region_count_wrapper(1, __FILE__, __LINE__); + task_ref_count_wrapper(1, __FILE__, __LINE__); current->io_block = 1; return 0; } __attribute__((always_inline)) inline int task_may_block_end(void) { current->io_block = 0; - modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); + task_ref_count_wrapper(-1, __FILE__, __LINE__); return 0; } diff --git a/util/sync.c b/util/sync.c index 8d3452e4d2..d024ce2e49 100644 --- a/util/sync.c +++ b/util/sync.c @@ -33,9 +33,8 @@ static bool is_signal_pending(lock_t *lock) { return pending; } -void modify_critical_region_count(struct task *task, int value, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // value Should only be -1 or 1. -mke - // return; - +void task_ref_count(struct task *task, int value, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // value Should only be -1 or 1. -mke + // Keep track of how many threads are referencing this task if(!doEnableExtraLocking) {// If they want to fly by the seat of their pants... -mke return; } @@ -54,31 +53,31 @@ void modify_critical_region_count(struct task *task, int value, __attribute__((u ilocked = true; // Make sure this is locked, and unlock it later if we had to lock it. } - pthread_mutex_lock(&task->critical_region.lock); + pthread_mutex_lock(&task->reference.lock); - if(((task->critical_region.count + value) < 0) && (task->pid > 9)) { // Prevent our unsigned value attempting to go negative. 
-mke - printk("ERROR: Attempt to decrement critical_region count to be negative, ignoring(%s:%d) (%d - %d) (%s:%d)\n", task->comm, task->pid, task->critical_region.count, value, file, line); + if(((task->reference.count + value) < 0) && (task->pid > 9)) { // Prevent our unsigned value attempting to go negative. -mke + printk("ERROR: Attempt to decrement task reference count to be negative, ignoring(%s:%d) (%d - %d) (%s:%d)\n", task->comm, task->pid, task->reference.count, value, file, line); if(ilocked == true) unlock(&task->general_lock); - pthread_mutex_unlock(&task->critical_region.lock); + pthread_mutex_unlock(&task->reference.lock); return; } - task->critical_region.count = task->critical_region.count + value; + task->reference.count = task->reference.count + value; - pthread_mutex_unlock(&task->critical_region.lock); + pthread_mutex_unlock(&task->reference.lock); if(ilocked == true) unlock(&task->general_lock); } -void modify_critical_region_count_wrapper(int value, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { +void task_ref_count_wrapper(int value, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // sync.h can't know about the definition of task struct due to recursive include files. -mke if((current != NULL) && (doEnableExtraLocking)) - modify_critical_region_count(current, value, file, line); + task_ref_count(current, value, file, line); return; } @@ -202,21 +201,19 @@ void sigusr1_handler(void) { } // Because sometimes we can't #include "kernel/task.h" -mke -unsigned critical_region_count(struct task *task) { - // return 0; +unsigned task_reference_count(struct task *task) { unsigned tmp = 0; - pthread_mutex_lock(&task->critical_region.lock); // This would make more - tmp = task->critical_region.count; - //printk("%d:%d\n", task, tmp); - if(tmp > 1000) // Not likely + pthread_mutex_lock(&task->reference.lock); // This would make more + tmp = task->reference.count; + if(tmp > 1000) // Work around brain damage. Remove when said brain damage is fixed tmp = 0; - pthread_mutex_unlock(&task->critical_region.lock); + pthread_mutex_unlock(&task->reference.lock); return tmp; } -unsigned critical_region_count_wrapper(void) { // sync.h can't know about the definition of struct due to recursive include files. -mke - return(critical_region_count(current)); +unsigned task_reference_count_wrapper(void) { // sync.h can't know about the definition of struct due to recursive include files. 
-mke + return(task_reference_count(current)); } bool current_is_valid(void) { diff --git a/util/sync.h b/util/sync.h index 03403fd3e9..180a36c8e0 100644 --- a/util/sync.h +++ b/util/sync.h @@ -19,8 +19,8 @@ extern int current_pid(void); extern int current_uid(void); extern char* current_comm(void); -extern unsigned critical_region_count_wrapper(void); -extern void modify_critical_region_count_wrapper(int, const char*, int); +extern unsigned task_reference_count_wrapper(void); +extern void task_ref_count_wrapper(int, const char*, int); extern unsigned locks_held_count_wrapper(void); extern void modify_locks_held_count_wrapper(int); extern struct pid *pid_get(dword_t id); @@ -194,14 +194,14 @@ static inline void __lock(lock_t *lock, int log_lock, __attribute__((unused)) co unlock(lock); if(!log_lock) { - modify_critical_region_count_wrapper(1,__FILE__, __LINE__); + task_ref_count_wrapper(1,__FILE__, __LINE__); pthread_mutex_lock(&lock->m); modify_locks_held_count_wrapper(1); lock->owner = pthread_self(); lock->pid = current_pid(); lock->uid = current_uid(); strncpy(lock->comm, current_comm(), 16); - modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); + task_ref_count_wrapper(-1, __FILE__, __LINE__); } else { pthread_mutex_lock(&lock->m); lock->owner = pthread_self(); @@ -256,7 +256,7 @@ static inline void handle_lock_error(wrlock_t *lock, const char *file, int line, } static inline void loop_lock_generic(wrlock_t *lock, const char *file, int line, int is_write) { - modify_critical_region_count_wrapper(1, __FILE__, __LINE__); + task_ref_count_wrapper(1, __FILE__, __LINE__); modify_locks_held_count_wrapper(1); unsigned count = 0; @@ -276,7 +276,7 @@ static inline void loop_lock_generic(wrlock_t *lock, const char *file, int line, atomic_l_lockf(is_write ? "llw\0" : "ll_read\0", 0, __FILE__, __LINE__); } - modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); + task_ref_count_wrapper(-1, __FILE__, __LINE__); } #define loop_lock_read(lock, file, line) loop_lock_generic(lock, file, line, 0) @@ -474,12 +474,12 @@ static inline void wrlock_init(wrlock_t *lock) { } static inline void _lock_destroy(wrlock_t *lock) { - while((critical_region_count_wrapper() > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections + while((task_reference_count_wrapper() > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } #ifdef JUSTLOG if (pthread_rwlock_destroy(&lock->l) != 0) { - printk("URGENT: lock_destroy(%x) on active lock. (PID: %d Process: %s Critical Region Count: %d)\n",&lock->l, current_pid(), current_comm(),critical_region_count_wrapper()); + printk("URGENT: lock_destroy(%x) on active lock. 
(PID: %d Process: %s Task Reference Count: %d)\n",&lock->l, current_pid(), current_comm(),task_reference_count_wrapper()); } #else if (pthread_rwlock_destroy(&lock->l) != 0) __builtin_trap(); @@ -487,7 +487,7 @@ static inline void _lock_destroy(wrlock_t *lock) { } static inline void lock_destroy(wrlock_t *lock) { - while((critical_region_count_wrapper() > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections + while((task_reference_count_wrapper() > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } @@ -498,7 +498,7 @@ static inline void _read_lock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { loop_lock_read(lock, file, line); - modify_critical_region_count_wrapper(1, __FILE__, __LINE__); + task_ref_count_wrapper(1, __FILE__, __LINE__); //pthread_rwlock_rdlock(&lock->l); // assert(lock->val >= 0); // If it isn't >= zero we have a problem since that means there is a write lock somehow. -mke if(lock->val) { @@ -513,7 +513,7 @@ static inline void _read_lock(wrlock_t *lock, __attribute__((unused)) const char if(lock->val > 1000) { // We likely have a problem. printk("WARNING: _read_lock(%x) has 1000+ pending read locks. (File: %s, Line: %d) Breaking likely deadlock/process corruption(PID: %d Process: %s.\n", lock, lock->file, lock->line,lock->pid, lock->comm); read_unlock_and_destroy(lock); - modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); + task_ref_count_wrapper(-1, __FILE__, __LINE__); //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); return; } @@ -521,7 +521,7 @@ static inline void _read_lock(wrlock_t *lock, __attribute__((unused)) const char lock->pid = current_pid(); if(lock->pid > 9) strncpy((char *)lock->comm, current_comm(), 16); - modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); + task_ref_count_wrapper(-1, __FILE__, __LINE__); //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); } @@ -534,30 +534,30 @@ static inline void read_lock(wrlock_t *lock, __attribute__((unused)) const char #define write_lock(lock) _write_lock(lock, __FILE__, __LINE__) static inline void read_to_write_lock(wrlock_t *lock) { // Try to atomically swap a RO lock to a Write lock. -mke - modify_critical_region_count_wrapper(1, __FILE__, __LINE__); + task_ref_count_wrapper(1, __FILE__, __LINE__); atomic_l_lockf("rtw_lock\0", 0, __FILE__, __LINE__); _read_unlock(lock, __FILE__, __LINE__); __write_lock(lock, __FILE__, __LINE__); atomic_l_unlockf(); - modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); + task_ref_count_wrapper(-1, __FILE__, __LINE__); } static inline void write_to_read_lock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // Try to atomically swap a Write lock to a RO lock.
-mke - modify_critical_region_count_wrapper(1, __FILE__, __LINE__); + task_ref_count_wrapper(1, __FILE__, __LINE__); atomic_l_lockf("wtr_lock\0", 0, __FILE__, __LINE__); _write_unlock(lock, file, line); _read_lock(lock, file, line); atomic_l_unlockf(); - modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); + task_ref_count_wrapper(-1, __FILE__, __LINE__); } static inline void write_unlock_and_destroy(wrlock_t *lock) { - modify_critical_region_count_wrapper(1, __FILE__, __LINE__); + task_ref_count_wrapper(1, __FILE__, __LINE__); atomic_l_lockf("wuad_lock\0", 0, __FILE__, __LINE__); _write_unlock(lock, __FILE__, __LINE__); _lock_destroy(lock); atomic_l_unlockf(); - modify_critical_region_count_wrapper(-1, __FILE__, __LINE__); + task_ref_count_wrapper(-1, __FILE__, __LINE__); } static inline void read_unlock_and_destroy(wrlock_t *lock) { From 3dc764a770056862ad445793242fea8f08cfb39e Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Wed, 29 Nov 2023 13:36:11 -0800 Subject: [PATCH 08/23] o Very much a WIP, and very broken. --- app/UserPreferences.m | 6 ++-- app/iOSFS.m | 4 +-- emu/memory.c | 24 +++++-------- fs/poll.c | 6 ++-- fs/proc/root.c | 6 ++-- iSH-AOK.xcodeproj/project.pbxproj | 16 +++++++++ jit/jit.c | 6 ++-- kernel/exit.c | 30 ++++++++-------- kernel/fork.c | 2 +- kernel/log.c | 26 +++++++------- kernel/mmap.c | 6 ++-- kernel/poll.c | 2 +- kernel/resource_locking.h | 8 +++-- kernel/signal.c | 4 +-- kernel/task.c | 15 +++----- kernel/task.h | 6 ++-- util/ro_locks.c | 8 +++++ util/ro_locks.h | 44 ++++++++++++++++++++++++ util/rw_locks.h | 42 +++++++++++++++++++++++ util/sync.c | 57 +++++++++++++++++++++++++++---- util/sync.h | 34 +++++++++--------- 21 files changed, 247 insertions(+), 105 deletions(-) create mode 100644 util/ro_locks.c create mode 100644 util/ro_locks.h create mode 100644 util/rw_locks.h diff --git a/app/UserPreferences.m b/app/UserPreferences.m index ff4a55b033..9b38f7e2d3 100644 --- a/app/UserPreferences.m +++ b/app/UserPreferences.m @@ -455,9 +455,9 @@ - (void)setShouldEnableExtraLocking:(BOOL)dim { - (BOOL)validateShouldEnableExtraLocking:(id *)value error:(NSError **)error { // Should set task->critical_region.count to 0 for all active processes when this is set to false. Otherwise stuff blows up. -mke if(doEnableExtraLocking == true) { // This needs to be the opposite of what you would expect because of reasons. 
-mke - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); - zero_critical_regions_count(); - unlock(&pids_lock); +// complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + // zero_critical_regions_count(); + // unlock(&pids_lock); } return [*value isKindOfClass:NSNumber.class]; } diff --git a/app/iOSFS.m b/app/iOSFS.m index ae44304848..bc819204df 100644 --- a/app/iOSFS.m +++ b/app/iOSFS.m @@ -239,7 +239,7 @@ static int combine_error(NSError *coordinatorError, int err) { __block NSError *error = nil; __block struct fd *fd; __block dispatch_semaphore_t file_opened = dispatch_semaphore_create(0); - task_ref_count_wrapper(1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(1, __FILE__, __LINE__); dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void){ void (^operation)(NSURL *url) = ^(NSURL *url) { fd = realfs_open(mount, path_for_url_in_mount(mount, url, path), flags, mode); @@ -265,7 +265,7 @@ static int combine_error(NSError *coordinatorError, int err) { } [coordinator coordinateReadingItemAtURL:url options:options error:&error byAccessor:operation]; }); - task_ref_count_wrapper(-1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(-1, __FILE__, __LINE__); dispatch_semaphore_wait(file_opened, DISPATCH_TIME_FOREVER); diff --git a/emu/memory.c b/emu/memory.c index a777a49a19..b0884454ee 100644 --- a/emu/memory.c +++ b/emu/memory.c @@ -45,7 +45,7 @@ void mem_init(struct mem *mem) { void mem_destroy(struct mem *mem) { write_lock(&mem->lock); - while((task_reference_count(current)) && (current->pid > 1) ){ // Wait for now, task is in one or more critical sections, and/or has locks + while((mem_ref_cnt_val(mem)) && (current->pid > 1) ){ nanosleep(&lock_pause, NULL); } pt_unmap_always(mem, 0, MEM_PAGES); @@ -55,22 +55,16 @@ void mem_destroy(struct mem *mem) { } jit_free(mem->mmu.jit); #endif - int count = 0; for (int i = 0; i < MEM_PGDIR_SIZE; i++) { do { - count++; nanosleep(&lock_pause, NULL); - } while((task_reference_count(current)) && (current->pid > 1) && (count < 5000000)); // Wait for now, task is in one or more critical sections + } while(mem_ref_cnt_val(mem)); if (mem->pgdir[i] != NULL) free(mem->pgdir[i]); } - do { - nanosleep(&lock_pause, NULL); - } while((task_reference_count(current) > 1) && (current->pid > 1) ); // Wait for now, task is in one or more critical sections - free(mem->pgdir); mem->pgdir = NULL; //mkemkemke Trying something here @@ -115,7 +109,7 @@ struct pt_entry *mem_pt(struct mem *mem, page_t page) { static void mem_pt_del(struct mem *mem, page_t page) { struct pt_entry *entry = mem_pt(mem, page); if (entry != NULL) { - while(task_reference_count(current) > 4) { // mark + while(task_ref_cnt_val(current) > 4) { // mark nanosleep(&lock_pause, NULL); } entry->data = NULL; @@ -196,7 +190,7 @@ int pt_unmap(struct mem *mem, page_t start, pages_t pages) { int pt_unmap_always(struct mem *mem, page_t start, pages_t pages) { for (page_t page = start; page < start + pages; mem_next_page(mem, &page)) { - while(task_reference_count(current) >3) { + while(task_ref_cnt_val(current) >3) { nanosleep(&lock_pause, NULL); } struct pt_entry *pt = mem_pt(mem, page); @@ -210,7 +204,7 @@ int pt_unmap_always(struct mem *mem, page_t start, pages_t pages) { if (--data->refcount == 0) { // vdso wasn't allocated with mmap, it's just in our data segment if (data->data != vdso_data) { - while(task_reference_count(current) > 3) { + while(task_ref_cnt_val(current) > 3) { nanosleep(&lock_pause, NULL); } int err = munmap(data->data, data->size); @@ -258,7 +252,7 @@ int 
pt_set_flags(struct mem *mem, page_t start, pages_t pages, int flags) { } int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t pages) { - while(task_reference_count(current)) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_val(current)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } for (page_t page = start; page < start + pages; mem_next_page(src, &page)) { @@ -275,7 +269,7 @@ int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t page dst_entry->offset = entry->offset; dst_entry->flags = entry->flags; } - while(task_reference_count(current)) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_val(current)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } mem_changed(src); @@ -349,10 +343,10 @@ void *mem_ptr(struct mem *mem, addr_t addr, int type) { void *data = (char *) entry->data->data + entry->offset; // copy/paste from above - task_ref_count(current, 1,__FILE__, __LINE__); + mem_ref_cnt_mod(mem, 1,__FILE__, __LINE__); //read_to_write_lock(&mem->lock); memcpy(copy, data, PAGE_SIZE); //mkemkemke Crashes here a lot when running both the go and parallel make test. 01 June 2022 - task_ref_count(current, -1, __FILE__, __LINE__); + mem_ref_cnt_mod(mem, -1, __FILE__, __LINE__); pt_map(mem, page, 1, copy, 0, entry->flags &~ P_COW); unlock(&current->general_lock); write_to_read_lock(&mem->lock, __FILE__, __LINE__); diff --git a/fs/poll.c b/fs/poll.c index 473c5990cb..31a0c9be49 100644 --- a/fs/poll.c +++ b/fs/poll.c @@ -331,7 +331,7 @@ void poll_destroy(struct poll *poll) { struct poll_fd *poll_fd; struct poll_fd *tmp; - while(task_reference_count(current)) { + while(task_ref_cnt_val(current)) { nanosleep(&lock_pause, NULL); } list_for_each_entry_safe(&poll->poll_fds, poll_fd, tmp, fds) { @@ -342,12 +342,12 @@ void poll_destroy(struct poll *poll) { free(poll_fd); } - while(task_reference_count(current)) { + while(task_ref_cnt_val(current)) { nanosleep(&lock_pause, NULL); } list_for_each_entry_safe(&poll->pollfd_freelist, poll_fd, tmp, fds) { - while(task_reference_count(current)) { + while(task_ref_cnt_val(current)) { nanosleep(&lock_pause, NULL); } list_remove(&poll_fd->fds); diff --git a/fs/proc/root.c b/fs/proc/root.c index 25a8b12762..da527df118 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -328,19 +328,19 @@ static bool proc_root_readdir(struct proc_entry *UNUSED(entry), unsigned long *i pid_t_ pid = *index - PROC_ROOT_LEN; if (pid <= MAX_PID) { - task_ref_count(current, 1, __FILE__, __LINE__); + task_ref_cnt_mod(current, 1); //lock(&pids_lock, 0); do { pid++; } while (pid <= MAX_PID && pid_get_task(pid) == NULL); //unlock(&pids_lock); if (pid > MAX_PID) { - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current, -1); return false; } *next_entry = (struct proc_entry) {&proc_pid, .pid = pid}; *index = pid + PROC_ROOT_LEN; - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current, -1); return true; } diff --git a/iSH-AOK.xcodeproj/project.pbxproj b/iSH-AOK.xcodeproj/project.pbxproj index 7d46e3072f..c977926370 100644 --- a/iSH-AOK.xcodeproj/project.pbxproj +++ b/iSH-AOK.xcodeproj/project.pbxproj @@ -104,6 +104,11 @@ 497F6D3D254E5EA600C82F46 /* main.c in Sources */ = {isa = PBXBuildFile; fileRef = BB7D93822087C2890008DA78 /* main.c */; }; 497F6D5C254E609700C82F46 /* main.c in Sources */ = {isa = PBXBuildFile; fileRef = BB7D93822087C2890008DA78 /* main.c */; };
497F6D87254E62E100C82F46 /* libish.a in Frameworks */ = {isa = PBXBuildFile; fileRef = BB13F7DC200AD81D003D1C4D /* libish.a */; }; + 5D59DADC2B179A2300FA995C /* ro_locks.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D59DADB2B179A2300FA995C /* ro_locks.c */; }; + 5D59DADD2B179A2300FA995C /* ro_locks.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D59DADB2B179A2300FA995C /* ro_locks.c */; }; + 5D59DADE2B179A2300FA995C /* ro_locks.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D59DADB2B179A2300FA995C /* ro_locks.c */; }; + 5D59DADF2B179A2300FA995C /* ro_locks.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D59DADB2B179A2300FA995C /* ro_locks.c */; }; + 5D59DAE02B179A2300FA995C /* ro_locks.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D59DADB2B179A2300FA995C /* ro_locks.c */; }; 5D8ACEFA284BF122003C50D3 /* net.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D8ACEF9284BF122003C50D3 /* net.c */; }; 5D8ACEFD284CE096003C50D3 /* sys.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D8ACEFC284CE096003C50D3 /* sys.c */; }; 5D8ACEFE284CE096003C50D3 /* sys.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D8ACEFC284CE096003C50D3 /* sys.c */; }; @@ -587,6 +592,9 @@ 497F6CE4254E5E4C00C82F46 /* MakeXcodeAutoCompleteWork */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = MakeXcodeAutoCompleteWork; sourceTree = BUILT_PRODUCTS_DIR; }; 497F6D47254E605F00C82F46 /* ish-AOK */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; name = "ish-AOK"; path = ish; sourceTree = BUILT_PRODUCTS_DIR; }; 5D272493277C20DF0005F2A8 /* iSHFileProviderRelease.entitlements */ = {isa = PBXFileReference; lastKnownFileType = text.plist.entitlements; path = iSHFileProviderRelease.entitlements; sourceTree = ""; }; + 5D59DAD92B17996100FA995C /* ro_locks.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ro_locks.h; sourceTree = ""; }; + 5D59DADA2B17999500FA995C /* rw_locks.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = rw_locks.h; sourceTree = ""; }; + 5D59DADB2B179A2300FA995C /* ro_locks.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = ro_locks.c; sourceTree = ""; }; 5D8ACEF9284BF122003C50D3 /* net.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = net.c; sourceTree = ""; }; 5D8ACEFB284CE096003C50D3 /* sys.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sys.h; sourceTree = ""; }; 5D8ACEFC284CE096003C50D3 /* sys.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = sys.c; sourceTree = ""; }; @@ -1126,6 +1134,9 @@ 497F6CCC254E5CC800C82F46 /* sync.h */, 497F6CCD254E5CC800C82F46 /* timer.c */, 497F6CD1254E5CC800C82F46 /* timer.h */, + 5D59DAD92B17996100FA995C /* ro_locks.h */, + 5D59DADA2B17999500FA995C /* rw_locks.h */, + 5D59DADB2B179A2300FA995C /* ro_locks.c */, ); path = util; sourceTree = ""; @@ -2121,6 +2132,7 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 5D59DADE2B179A2300FA995C /* ro_locks.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2135,6 +2147,7 @@ 5D8ACEFD284CE096003C50D3 /* sys.c in Sources */, BB28C7BA268975AA00BDC834 /* iOSFS.m in Sources */, BB28C7BC268975B000BDC834 /* PasteboardDevice.m in Sources */, + 5D59DADC2B179A2300FA995C /* ro_locks.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2156,6 +2169,7 @@ BBBFE94921C5CFF100509DD5 /* 
NSError+ISHErrno.m in Sources */, BB9C7B87240A2B1E00F5D4F0 /* AppGroup.m in Sources */, 5D8CFA872B1198B300D50E57 /* RTCDevice.m in Sources */, + 5D59DAE02B179A2300FA995C /* ro_locks.c in Sources */, BB88F4942154760800A341FD /* FileProviderExtension.m in Sources */, BB88F4972154760800A341FD /* FileProviderItem.m in Sources */, ); @@ -2169,6 +2183,7 @@ BBBF9CBD27C217B6002A30F7 /* PasteboardDeviceLinux.c in Sources */, BB8C3AFF26B7B8CF00E38DDC /* fakefs.c in Sources */, BBEEA9E8277D25090069495B /* LinuxRoot.c in Sources */, + 5D59DADF2B179A2300FA995C /* ro_locks.c in Sources */, BB123ACC26C9EFD900419CDA /* LinuxTTY.c in Sources */, BBEEA9EA277DAB400069495B /* LinuxPTY.c in Sources */, ); @@ -2199,6 +2214,7 @@ BB28C79B26896B1F00BDC834 /* FontPickerViewController.m in Sources */, BBC3863E276817A900CC8C2E /* UpgradeRootViewController.m in Sources */, BB28C79D26896B1F00BDC834 /* SceneDelegate.m in Sources */, + 5D59DADD2B179A2300FA995C /* ro_locks.c in Sources */, BB28C79E26896B1F00BDC834 /* AboutExternalKeyboardViewController.m in Sources */, BB28C79F26896B1F00BDC834 /* PassthroughView.m in Sources */, BB28C7A026896B1F00BDC834 /* UserPreferences.m in Sources */, diff --git a/jit/jit.c b/jit/jit.c index a493ccb5ea..8d0bb7a5e7 100644 --- a/jit/jit.c +++ b/jit/jit.c @@ -39,7 +39,7 @@ void jit_free(struct jit *jit) { if (!jit) return; bool signal_pending = !!(current->pending & ~current->blocked); - while((task_reference_count(current) > 2) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight + while((task_ref_cnt_val(current) > 2) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); signal_pending = !!(current->pending & ~current->blocked); } @@ -84,7 +84,7 @@ void jit_invalidate_range(struct jit *jit, page_t start, page_t end) { } void jit_invalidate_page(struct jit *jit, page_t page) { - while(task_reference_count(current) > 4) { // It's all a bit magic, but I think this is doing something useful. -mke + while(task_ref_cnt_val(current) > 4) { // It's all a bit magic, but I think this is doing something useful. -mke nanosleep(&lock_pause, NULL); } // mofify_critical_region_count(current, 1, __FILE__, __LINE__); @@ -305,7 +305,7 @@ int cpu_run_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) { unlock(&jit->lock); write_lock(&jit->jetsam_lock); lock(&jit->lock, 0); - while(task_reference_count(current) > 3) {// Yes, this is weird. It might not work, but I'm trying. -mke + while(task_ref_cnt_val(current) > 3) {// Yes, this is weird. It might not work, but I'm trying. -mke nanosleep(&lock_pause, NULL); // Yes, this has triggered at least once. Is it doing any good though?
-mke } jit_free_jetsam(jit); diff --git a/kernel/exit.c b/kernel/exit.c index 8b0f6d2be8..fae1fa349b 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -17,7 +17,7 @@ extern const char extra_lock_comm; static void halt_system(void); static bool exit_tgroup(struct task *task) { - while((task_reference_count(task) > 2) || (locks_held_count(task))) { // Wait for now, task is in one or more critical sections, and/or has locks + while((task_ref_cnt_val(task) > 2) || (locks_held_count(task))) { // Wait for now, task is in one or more critical sections, and/or has locks nanosleep(&lock_pause, NULL); } struct tgroup *group = task->group; @@ -65,7 +65,7 @@ noreturn void do_exit(int status) { bool signal_pending = !!(current->pending & ~current->blocked); // has to happen before mm_release - while((task_reference_count(current) > 1) || + while((task_ref_cnt_val(current) > 1) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); @@ -82,14 +82,14 @@ noreturn void do_exit(int status) { do { nanosleep(&lock_pause, NULL); signal_pending = !!(current->pending & ~current->blocked); - } while((task_reference_count(current) > 1) || + } while((task_ref_cnt_val(current) > 1) || (locks_held_count(current)) || (signal_pending)); // Wait for now, task is in one or more critical mm_release(current->mm); current->mm = NULL; signal_pending = !!(current->pending & ~current->blocked); - while((task_reference_count(current) > 1) || + while((task_ref_cnt_val(current) > 1) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); @@ -98,7 +98,7 @@ noreturn void do_exit(int status) { fdtable_release(current->files); current->files = NULL; - while((task_reference_count(current) > 1) || + while((task_ref_cnt_val(current) > 1) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); @@ -110,7 +110,7 @@ noreturn void do_exit(int status) { // sighand must be released below so it can be protected by pids_lock // since it can be accessed by other threads - while((task_reference_count(current) > 1) || + while((task_ref_cnt_val(current) > 1) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical// Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); @@ -125,11 +125,11 @@ noreturn void do_exit(int status) { unlock(&current->group->lock); // the actual freeing needs pids_lock - task_ref_count(current, 1, __FILE__, __LINE__); + task_ref_cnt_mod(current, 1); complex_lockt(&pids_lock, 0, __FILE__, __LINE__); // release the sighand signal_pending = !!(current->pending & ~current->blocked); - while((task_reference_count(current) > 2) || + while((task_ref_cnt_val(current) > 2) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); signal_pending = !!(current->pending & ~current->blocked); @@ -155,7 +155,7 @@ noreturn void do_exit(int status) { signal_pending = !!(current->pending & ~current->blocked); - while((task_reference_count(current) > 2) ||
+ while((task_ref_cnt_val(current) > 2) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); @@ -192,7 +192,7 @@ noreturn void do_exit(int status) { } vfork_notify(current); - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current, -1); if(current != leader) { task_destroy(current, 1); } else { @@ -226,7 +226,7 @@ noreturn void do_exit_group(int status) { modify_locks_held_count(current, tmpvar); // Reset to zero -mke } - task_ref_count(current, 1, __FILE__, __LINE__); + task_ref_cnt_mod(current, 1); list_for_each_entry(&group->threads, task, group_links) { task->exiting = true; deliver_signal(task, SIGKILL_, SIGINFO_NIL); @@ -235,7 +235,7 @@ noreturn void do_exit_group(int status) { } unlock(&pids_lock); - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current, -1) unlock(&group->lock); //if(current->pid <= MAX_PID) // abort if crazy. -mke do_exit(status); @@ -358,7 +358,7 @@ int do_wait(int idtype, pid_t_ id, struct siginfo_ *info, struct rusage_ *rusage return _EINVAL; complex_lockt(&pids_lock, 0, __FILE__, __LINE__); - task_ref_count(current, 1, __FILE__, __LINE__); + task_ref_cnt_mod(current, 1); int err; bool got_signal = false; @@ -418,12 +418,12 @@ int do_wait(int idtype, pid_t_ id, struct siginfo_ *info, struct rusage_ *rusage info->sig = SIGCHLD_; found_something: - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current, -1); unlock(&pids_lock); return 0; error: - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current); unlock(&pids_lock); return err; } diff --git a/kernel/fork.c b/kernel/fork.c index e7dcdbbe59..3cec83a199 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -127,7 +127,7 @@ static int copy_task(struct task *task, dword_t flags, addr_t stack, addr_t ptid return 0; fail_free_sighand: - while(task_reference_count(task)) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_val(task)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } sighand_release(task->sighand); diff --git a/kernel/log.c b/kernel/log.c index 15cac4888e..8718b03966 100644 --- a/kernel/log.c +++ b/kernel/log.c @@ -230,56 +230,56 @@ void die(const char *msg, ...) 
{ // fun little utility function int current_pid(void) { - task_ref_count(current, 1, __FILE__, __LINE__); + task_ref_cnt_mod(current, 1); if(current != NULL) { if (current->exiting != true) { - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current, -1); return current->pid; } else { - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current, -1); return -1; } } - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current, -1); return -1; } int current_uid(void) { - task_ref_count(current, 1, __FILE__, __LINE__); + task_ref_cnt_mod(current, 1); if(current != NULL) { if (current->exiting != true) { - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current, -1); return current->uid; } else { - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current, -1); return -1; } } - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current, -1); return -1; } char * current_comm(void) { static char comm[16]; - task_ref_count(current, 1, __FILE__, __LINE__); + task_ref_cnt_mod(current, 1); if(current != NULL) { if(strcmp(current->comm, "")) { strncpy(comm, current->comm, 16); } else { - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current, -1); return ""; } if (current->exiting != true) { - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current, -1); return comm; } else { - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current, -1); return ""; } } - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current, -1); return ""; } diff --git a/kernel/mmap.c b/kernel/mmap.c index 227a577a25..9278df8e53 100644 --- a/kernel/mmap.c +++ b/kernel/mmap.c @@ -45,13 +45,13 @@ void mm_release(struct mm *mm) { if (--mm->refcount == 0) { if (mm->exefile != NULL) fd_close(mm->exefile); - while(task_reference_count(current)) { // FIXME: Should be locking current->reference.lock and updating + while(task_ref_cnt_val(current)) { // FIXME: Should be locking current->reference.lock and updating // current->reference.count before mem_destroy nanosleep(&lock_pause, NULL); } mem_destroy(&mm->mem); - while(task_reference_count(current)) { //FIXME: Should now unlock after mem_destroy + while(task_ref_cnt_val(current)) { //FIXME: Should now unlock after mem_destroy nanosleep(&lock_pause, NULL); } free(mm); @@ -182,7 +182,7 @@ int_t sys_mremap(addr_t addr, dword_t old_len, dword_t new_len, dword_t flags) { // shrinking always works if (new_pages <= old_pages) { - while(task_reference_count(current)) { + while(task_ref_cnt_val(current)) { nanosleep(&lock_pause, NULL); } int err = pt_unmap(current->mem, PAGE(addr) + new_pages, old_pages - new_pages); diff --git a/kernel/poll.c b/kernel/poll.c index ac8e232c20..ef58b5f89b 100644 --- a/kernel/poll.c +++ b/kernel/poll.c @@ -199,7 +199,7 @@ dword_t sys_poll(addr_t fds, dword_t nfds, int_t timeout) { TASK_MAY_BLOCK { res = poll_wait(poll, poll_event_callback, &context, timeout < 0 ? 
NULL : &timeout_ts); } - while(task_reference_count(current)) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_val(current)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } poll_destroy(poll); diff --git a/kernel/resource_locking.h b/kernel/resource_locking.h index 7d1e5ef62a..6f2dfcfe32 100644 --- a/kernel/resource_locking.h +++ b/kernel/resource_locking.h @@ -1,8 +1,12 @@ //#include "util/sync.h" // Because sometimes we can't #include "kernel/task.h" -mke -extern unsigned task_reference_count(struct task*); -extern void task_ref_count(struct task*, int, char*, int); +extern void task_ref_cnt_mod(struct task *task, int value); +extern void task_ref_cnt_mod_wrapper(int, const char*, int); + +extern int task_ref_cnt_val(struct task *task); +extern void mem_ref_cnt_mod(struct mem*, int, char*, int); +extern int mem_ref_cnt_val(struct mem *mem); extern unsigned locks_held_count(struct task*); extern void modify_locks_held_count(struct task*, int); extern bool current_is_valid(void); diff --git a/kernel/signal.c b/kernel/signal.c index ee5468c81d..f09d44aa5e 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -466,7 +466,7 @@ struct sighand *sighand_copy(struct sighand *sighand) { } void sighand_release(struct sighand *sighand) { - while(task_reference_count(current) > 2) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_val(current) > 2) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } if (--sighand->refcount == 0) { @@ -739,7 +739,7 @@ static int kill_group(pid_t_ pgid, dword_t sig) { } struct tgroup *tgroup; int err = _EPERM; - while((task_reference_count(current)) || (locks_held_count(current))) { // Wait for now, task is in one or more critical sections, and/or has locks + while((task_ref_cnt_val(current)) || (locks_held_count(current))) { // Wait for now, task is in one or more critical sections, and/or has locks nanosleep(&lock_pause, NULL); } list_for_each_entry(&pid->pgroup, tgroup, pgroup) { diff --git a/kernel/task.c b/kernel/task.c index b39715cab4..3779c374e7 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -69,7 +69,7 @@ struct pid *pid_get_last_allocated(void) { } dword_t get_count_of_blocked_tasks(void) { - task_ref_count(current, 1, __FILE__, __LINE__); + task_ref_cnt_mod(current, 1); dword_t res = 0; struct pid *pid_entry; complex_lockt(&pids_lock, 0, __FILE__, __LINE__); @@ -78,18 +78,11 @@ dword_t get_count_of_blocked_tasks(void) { res++; } } - task_ref_count(current, -1, __FILE__, __LINE__); + task_ref_cnt_mod(current, -1); unlock(&pids_lock); return res; } -void zero_critical_regions_count(void) { // If doEnableExtraLocking is changed to false, we need to zero out critical_region.count for active processes - struct pid *pid_entry; - list_for_each_entry(&alive_pids_list, pid_entry, alive) { - pid_entry->task->reference.count = 0; // Bad things happen if this isn't done. -mke - } -} - dword_t get_count_of_alive_tasks(void) { complex_lockt(&pids_lock, 0, __FILE__, __LINE__); dword_t res = 0; @@ -161,7 +154,7 @@ struct task *task_create_(struct task *parent) { // We consolidate the check for whether the task is in a critical section, // holds locks, or has pending signals into a single function. 
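// As a rough usage sketch (illustrative only, not part of this patch):
// teardown paths are expected to poll this predicate, along the lines of
//
//     while (should_wait(task))
//         nanosleep(&lock_pause, NULL);
//
// i.e. spin until no other thread holds a reference to the task, the task
// holds no locks, and no unblocked signals are still in flight.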
bool should_wait(struct task *t) { - return task_reference_count(t) > 1 || locks_held_count(t) || !!(t->pending & ~t->blocked); + return task_ref_cnt_val(t) > 1 || locks_held_count(t) || !!(t->pending & ~t->blocked); } void task_destroy(struct task *task, int caller) { @@ -200,7 +193,7 @@ void task_destroy(struct task *task, int caller) { retry: // Free the task's resources. - if (!task_reference_count(task)) { + if (!task_ref_cnt_val(task)) { free(task); } else { goto retry; } diff --git a/kernel/task.h b/kernel/task.h index f33961768a..09fe4e8fde 100644 --- a/kernel/task.h +++ b/kernel/task.h @@ -11,6 +11,7 @@ #include "util/list.h" #include "util/timer.h" #include "util/sync.h" +#include "kernel/resource_locking.h" struct task { struct cpu_state cpu; @@ -204,7 +205,6 @@ struct task *pid_get_task_zombie(dword_t id); // don't return null if the task e dword_t get_count_of_blocked_tasks(void); dword_t get_count_of_alive_tasks(void); -void zero_critical_regions_count(void); #define MAX_PID (1 << 15) // oughta be enough @@ -225,14 +225,14 @@ void update_thread_name(void); // of functions which can block the task, we mark our task as blocked and // unblock it after the function is executed. __attribute__((always_inline)) inline int task_may_block_start(void) { - task_ref_count_wrapper(1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(1); current->io_block = 1; return 0; } __attribute__((always_inline)) inline int task_may_block_end(void) { current->io_block = 0; - task_ref_count_wrapper(-1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(-1); return 0; } diff --git a/util/ro_locks.c b/util/ro_locks.c new file mode 100644 index 0000000000..b6b74c4021 --- /dev/null +++ b/util/ro_locks.c @@ -0,0 +1,8 @@ +// +// ro_locks.c +// iSH-AOK +// +// Created by Michael Miller on 11/29/23. +// + +#include diff --git a/util/ro_locks.h b/util/ro_locks.h new file mode 100644 index 0000000000..9269eee83a --- /dev/null +++ b/util/ro_locks.h @@ -0,0 +1,44 @@ +// +// ro_locks.h +// iSH-AOK +// +// Created by Michael Miller on 11/29/23. +// + +#ifndef ro_locks_h +#define ro_locks_h + + +#endif /* ro_locks_h */ + +#ifndef RO_LOCK_H +#define RO_LOCK_H + +#include <pthread.h> +#include <stdbool.h> + +typedef struct { + pthread_mutex_t m; + pthread_t owner; + int pid; + int uid; + char comm[16]; + char lname[16]; + bool wait4; +#if LOCK_DEBUG + struct lock_debug { + const char *file; + int line; + int pid; + bool initialized; + } debug; +#endif +} lock_t; + +void lock_init(lock_t *lock, char lname[16]); +void unlock(lock_t *lock); +void atomic_l_lockf(char lname[16], int skiplog, const char *file, int line); +void atomic_l_unlockf(void); +void complex_lockt(lock_t *lock, int log_lock, const char *file, int line); +int trylock(lock_t *lock, const char *file, int line); +int trylocknl(lock_t *lock, char *comm, int pid, const char *file, int line); + +#endif // RO_LOCK_H + diff --git a/util/rw_locks.h b/util/rw_locks.h new file mode 100644 index 0000000000..06b6f93314 --- /dev/null +++ b/util/rw_locks.h @@ -0,0 +1,42 @@ +// +// rw_locks.h +// iSH-AOK +// +// Created by Michael Miller on 11/29/23. +// + +#ifndef rw_locks_h +#define rw_locks_h + + +#endif /* rw_locks_h */ + +#ifndef RW_LOCK_H +#define RW_LOCK_H + +#include <pthread.h> +#include <stdatomic.h> + +typedef struct { + pthread_rwlock_t l; + atomic_int val; + int favor_read; + const char *file; + int line; + int pid; + char comm[16]; + char lname[16]; +} wrlock_t; + +void wrlock_init(wrlock_t *lock); +void read_lock(wrlock_t *lock, const char *file, int line); +void write_lock(wrlock_t *lock, const char *file, int line); +void read_unlock(wrlock_t *lock, const char *file, int line); +void write_unlock(wrlock_t *lock, const char *file, int line); +void read_to_write_lock(wrlock_t *lock); +void write_to_read_lock(wrlock_t *lock, const char *file, int line); +void write_unlock_and_destroy(wrlock_t *lock); +void read_unlock_and_destroy(wrlock_t *lock); +void lock_destroy(wrlock_t *lock); + +#endif // RW_LOCK_H diff --git a/util/sync.c b/util/sync.c index d024ce2e49..7d4c9710b1 100644 --- a/util/sync.c +++ b/util/sync.c @@ -33,7 +33,7 @@ static bool is_signal_pending(lock_t *lock) { return pending; } -void task_ref_count(struct task *task, int value, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // value should only be -1 or 1. -mke +void task_ref_cnt_mod(struct task *task, int value) { // value should only be -1 or 1. -mke // Keep track of how many threads are referencing this task if(!doEnableExtraLocking) {// If they want to fly by the seat of their pants... -mke return; } @@ -56,7 +56,7 @@ void task_ref_count(struct task *task, int value, __attribute__((unused)) const pthread_mutex_lock(&task->reference.lock); if(((task->reference.count + value) < 0) && (task->pid > 9)) { // Prevent our unsigned value attempting to go negative. -mke - printk("ERROR: Attempt to decrement task reference count to be negative, ignoring(%s:%d) (%d - %d) (%s:%d)\n", task->comm, task->pid, task->reference.count, value, file, line); + printk("ERROR: Attempt to decrement task reference count to be negative, ignoring(%s:%d) (%d - %d)\n", task->comm, task->pid, task->reference.count, value); if(ilocked == true) unlock(&task->general_lock); @@ -74,14 +74,57 @@ void task_ref_count(struct task *task, int value, __attribute__((unused)) const unlock(&task->general_lock); } -void task_ref_count_wrapper(int value, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { +void task_ref_cnt_mod_wrapper(int value) { // sync.h can't know about the definition of task struct due to recursive include files. -mke if((current != NULL) && (doEnableExtraLocking)) - task_ref_count(current, value, file, line); + task_ref_cnt_mod(current, value); return; } +int task_ref_cnt_val(struct task *task) { + pthread_mutex_lock(&task->reference.lock); + int cnt = task->reference.count; + pthread_mutex_unlock(&task->reference.lock); + return cnt; +} + +int task_ref_cnt_val_wrapper(void) { + return(task_ref_cnt_val(current)); +} + +void mem_ref_cnt_mod(struct mem *mem, int value) { // value should only be -1 or 1. -mke + // Keep track of how many threads are referencing this mem + if(!doEnableExtraLocking) {// If they want to fly by the seat of their pants... -mke + return; + } + + if(mem == NULL) { + return; + } + + pthread_mutex_lock(&mem->reference.lock); + + if(((mem->reference.count + value) < 0)) { // Prevent our unsigned value attempting to go negative.
-mke + printk("ERROR: Attempt to decrement mem reference count to be negative, ignoring(%d:%d)\n", mem->reference.count, value); + pthread_mutex_unlock(&mem->reference.lock); + + return; + } + + + mem->reference.count = mem->reference.count + value; + + pthread_mutex_unlock(&mem->reference.lock); +} + +int mem_ref_cnt_val(struct mem *mem) { + pthread_mutex_lock(&mem->reference.lock); + int cnt = mem->reference.count; + pthread_mutex_unlock(&mem->reference.lock); + return cnt; +} + void modify_locks_held_count(struct task *task, int value) { // value Should only be -1 or 1. -mke if((task == NULL) && (current != NULL)) { task = current; @@ -201,7 +244,7 @@ void sigusr1_handler(void) { } // Because sometimes we can't #include "kernel/task.h" -mke -unsigned task_reference_count(struct task *task) { +unsigned task_ref_cnt_mod(struct task *task) { unsigned tmp = 0; pthread_mutex_lock(&task->reference.lock); // This would make more tmp = task->reference.count; @@ -212,8 +255,8 @@ unsigned task_reference_count(struct task *task) { return tmp; } -unsigned task_reference_count_wrapper(void) { // sync.h can't know about the definition of struct due to recursive include files. -mke - return(task_reference_count(current)); +void task_ref_cnt_mod_wrapper(int, const char*, int) { // sync.h can't know about the definition of struct due to recursive include files. -mke + return(task_ref_count(current, )); } bool current_is_valid(void) { diff --git a/util/sync.h b/util/sync.h index 180a36c8e0..fefd48d264 100644 --- a/util/sync.h +++ b/util/sync.h @@ -11,7 +11,6 @@ #include "debug.h" #include - // locks, implemented using pthread #define LOCK_DEBUG 0 @@ -19,8 +18,7 @@ extern int current_pid(void); extern int current_uid(void); extern char* current_comm(void); -extern unsigned task_reference_count_wrapper(void); -extern void task_ref_count_wrapper(int, const char*, int); + extern unsigned locks_held_count_wrapper(void); extern void modify_locks_held_count_wrapper(int); extern struct pid *pid_get(dword_t id); @@ -194,14 +192,14 @@ static inline void __lock(lock_t *lock, int log_lock, __attribute__((unused)) co unlock(lock); if(!log_lock) { - task_ref_count_wrapper(1,__FILE__, __LINE__); + task_ref_cnt_mod_wrapper(1,__FILE__, __LINE__); pthread_mutex_lock(&lock->m); modify_locks_held_count_wrapper(1); lock->owner = pthread_self(); lock->pid = current_pid(); lock->uid = current_uid(); strncpy(lock->comm, current_comm(), 16); - task_ref_count_wrapper(-1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(-1, __FILE__, __LINE__); } else { pthread_mutex_lock(&lock->m); lock->owner = pthread_self(); @@ -256,7 +254,7 @@ static inline void handle_lock_error(wrlock_t *lock, const char *file, int line, } static inline void loop_lock_generic(wrlock_t *lock, const char *file, int line, int is_write) { - task_ref_count_wrapper(1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(1, __FILE__, __LINE__); modify_locks_held_count_wrapper(1); unsigned count = 0; @@ -276,7 +274,7 @@ static inline void loop_lock_generic(wrlock_t *lock, const char *file, int line, atomic_l_lockf(is_write ? 
"llw\0" : "ll_read\0", 0, __FILE__, __LINE__); } - task_ref_count_wrapper(-1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(-1, __FILE__, __LINE__); } #define loop_lock_read(lock, file, line) loop_lock_generic(lock, file, line, 0) @@ -474,7 +472,7 @@ static inline void wrlock_init(wrlock_t *lock) { } static inline void _lock_destroy(wrlock_t *lock) { - while((task_reference_count_wrapper() > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections + while((task_ref_cnt_val_wrapper() > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } #ifdef JUSTLOG @@ -487,7 +485,7 @@ static inline void _lock_destroy(wrlock_t *lock) { } static inline void lock_destroy(wrlock_t *lock) { - while((task_reference_count_wrapper() > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections + while((task_ref_cnt_val() > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } @@ -498,7 +496,7 @@ static inline void lock_destroy(wrlock_t *lock) { static inline void _read_lock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { loop_lock_read(lock, file, line); - task_ref_count_wrapper(1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(1, __FILE__, __LINE__); //pthread_rwlock_rdlock(&lock->l); // assert(lock->val >= 0); // If it isn't >= zero we have a problem since that means there is a write lock somehow. -mke if(lock->val) { @@ -513,7 +511,7 @@ static inline void _read_lock(wrlock_t *lock, __attribute__((unused)) const char if(lock->val > 1000) { // We likely have a problem. printk("WARNING: _read_lock(%x) has 1000+ pending read locks. (File: %s, Line: %d) Breaking likely deadlock/process corruption(PID: %d Process: %s.\n", lock, lock->file, lock->line,lock->pid, lock->comm); read_unlock_and_destroy(lock); - task_ref_count_wrapper(-1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(-1, __FILE__, __LINE__); //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); return; } @@ -521,7 +519,7 @@ static inline void _read_lock(wrlock_t *lock, __attribute__((unused)) const char lock->pid = current_pid(); if(lock->pid > 9) strncpy((char *)lock->comm, current_comm(), 16); - task_ref_count_wrapper(-1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(-1, __FILE__, __LINE__); //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); } @@ -534,30 +532,30 @@ static inline void read_lock(wrlock_t *lock, __attribute__((unused)) const char #define write_lock(lock) _write_lock(lock, __FILE__, __LINE__) static inline void read_to_write_lock(wrlock_t *lock) { // Try to atomically swap a RO lock to a Write lock. -mke - task_ref_count_wrapper(1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(1, __FILE__, __LINE__); atomic_l_lockf("rtw_lock\0", 0, __FILE__, __LINE__); _read_unlock(lock, __FILE__, __LINE__); __write_lock(lock, __FILE__, __LINE__); atomic_l_unlockf(); - task_ref_count_wrapper(-1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(-1, __FILE__, __LINE__); } static inline void write_to_read_lock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // Try to atomically swap a Write lock to a RO lock. 
-mke - task_ref_count_wrapper(1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(1, __FILE__, __LINE__); atomic_l_lockf("wtr_lock\0", 0, __FILE__, __LINE__); _write_unlock(lock, file, line); _read_lock(lock, file, line); atomic_l_unlockf(); - task_ref_count_wrapper(-1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(-1, __FILE__, __LINE__); } static inline void write_unlock_and_destroy(wrlock_t *lock) { - task_ref_count_wrapper(1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(1, __FILE__, __LINE__); atomic_l_lockf("wuad_lock\0", 0, __FILE__, __LINE__); _write_unlock(lock, __FILE__, __LINE__); _lock_destroy(lock); atomic_l_unlockf(); - task_ref_count_wrapper(-1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(-1, __FILE__, __LINE__); } static inline void read_unlock_and_destroy(wrlock_t *lock) { From c40c878f886f7b263242f6abc23107debb553d98 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Wed, 6 Dec 2023 11:22:00 -0800 Subject: [PATCH 09/23] o Refactor code in preparation for implementing reference tracking in task and memory allocations, maybe other stuff as well. Very broken, but compiles and links (for me anyway) --- app/AppDelegate.m | 22 +- app/Terminal.m | 1 + app/UpgradeRootViewController.m | 5 +- app/UserPreferences.m | 1 - app/iOSFS.m | 5 +- emu/memory.c | 57 +++- emu/memory.h | 4 + emu/tlb.c | 1 - fs/fd.h | 1 + fs/inode.h | 1 + fs/lock.c | 4 +- fs/poll.c | 7 +- fs/proc/pid.c | 9 +- fs/proc/root.c | 1 - fs/tty.c | 9 +- iSH-AOK.xcodeproj/project.pbxproj | 22 +- jit/jit.c | 16 +- kernel/BatteryStatus.m | 11 +- kernel/calls.c | 10 +- kernel/exec.c | 10 +- kernel/exit.c | 31 +- kernel/fork.c | 10 +- kernel/fs.c | 1 - kernel/futex.c | 8 +- kernel/getset.c | 25 +- kernel/group.c | 17 +- kernel/log.c | 39 +-- kernel/mmap.c | 20 +- kernel/poll.c | 3 +- kernel/resource.c | 4 +- kernel/resource_locking.h | 17 +- kernel/signal.c | 12 +- kernel/signal.h | 1 + kernel/task.c | 126 ++++++- kernel/task.h | 13 +- kernel/time.c | 1 - kernel/user.c | 25 +- main.c | 2 +- util/fifo.c | 1 - util/ro_locks.c | 203 ++++++++++- util/ro_locks.h | 24 +- util/rw_locks.c | 273 +++++++++++++++ util/rw_locks.h | 25 +- util/sync.c | 165 +-------- util/sync.h | 539 ++---------------------------- 45 files changed, 877 insertions(+), 905 deletions(-) create mode 100644 util/rw_locks.c diff --git a/app/AppDelegate.m b/app/AppDelegate.m index dbb7e91909..bada4d63be 100644 --- a/app/AppDelegate.m +++ b/app/AppDelegate.m @@ -28,6 +28,7 @@ #include "fs/devices.h" #include "fs/path.h" #include "app/RTCDevice.h" +#include "util/sync.h" #if ISH_LINUX #import "LinuxInterop.h" @@ -45,7 +46,7 @@ static void ios_handle_exit(struct task *task, int code) { // we are interested in init and in children of init // this is called with pids_lock as an implementation side effect, please do not cite as an example of good API design lock(&task->general_lock, 0); - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); if(task->pid > MAX_PID) {// Corruption printk("ERROR: Insane PID in ios_handle_exit(%d)\n", task->pid); unlock(&pids_lock); @@ -71,16 +72,13 @@ static void ios_handle_exit(struct task *task, int code) { }); } -const char* getCurrentTimestamp(void); - -const char* getCurrentTimestamp(void) { +const char* getRenameRunDirString(void) { NSDate *currentDate = [NSDate date]; NSDateFormatter *dateFormatter = [[NSDateFormatter alloc] init]; - [dateFormatter setDateFormat:@"yyyy-MM-dd_HH:mm:ss"]; + [dateFormatter setDateFormat:@"yyyy-MM-dd_HH-mm-ss"]; NSString *timestamp = [dateFormatter 
stringFromDate:currentDate]; - // Prepending "/tmp/" to the timestamp - NSString *prefixedTimestamp = [NSString stringWithFormat:@"/tmp/%@", timestamp]; + NSString *prefixedTimestamp = [NSString stringWithFormat:@"/tmp/old-run/%@", timestamp]; // Convert to const char* and return return [prefixedTimestamp UTF8String]; } @@ -149,11 +147,13 @@ - (intptr_t)boot { // Permissions on / have been broken for a while, let's fix them generic_setattrat(AT_PWD, "/", (struct attr) {.type = attr_mode, .mode = 0755}, false); - // Create a unique directory in /tmp and link to /var/run - const char *timestamp = getCurrentTimestamp(); - generic_mkdirat(AT_PWD, timestamp, 0755); + // mv current /run to /tmp/old-run/[timestamp], create new /run and link to /var/run + generic_mkdirat(AT_PWD, "/tmp/old-run", 0755); + const char *rename = getRenameRunDirString(); + generic_renameat(AT_PWD, "/run", AT_PWD, rename); + generic_mkdirat(AT_PWD, "/run", 0755); generic_unlinkat(AT_PWD, "/var/run"); - generic_symlinkat(timestamp, AT_PWD, "/var/run"); + generic_symlinkat("/run", AT_PWD, "/var/run"); // Create directories/links to simulate /sys stuff for battery monitoring generic_mkdirat(AT_PWD, "/sys/class", 0755); diff --git a/app/Terminal.m b/app/Terminal.m index e91cef7876..aa896b3ae5 100644 --- a/app/Terminal.m +++ b/app/Terminal.m @@ -12,6 +12,7 @@ #include "fs/devices.h" #include "fs/tty.h" #include "fs/devices.h" +#include "util/ro_locks.h" extern struct tty_driver ios_pty_driver; diff --git a/app/UpgradeRootViewController.m b/app/UpgradeRootViewController.m index 1d54edcd2e..b01e38ad78 100644 --- a/app/UpgradeRootViewController.m +++ b/app/UpgradeRootViewController.m @@ -12,6 +12,7 @@ #include "kernel/calls.h" #include "kernel/init.h" #include "fs/devices.h" +#include "util/sync.h" @interface UpgradeRootViewController () @@ -30,7 +31,7 @@ - (void)viewDidLoad { #if !ISH_LINUX [NSNotificationCenter.defaultCenter addObserver:self selector:@selector(processExited:) name:ProcessExitedNotification object:nil]; - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); current = pid_get_task(1); // pray unlock(&pids_lock); self.terminal = [Terminal createPseudoTerminal:&self->_tty]; @@ -74,7 +75,7 @@ - (void)processExited:(NSNotification *)notif { if (code != 0) { [self showAlertWithTitle:@"Upgrade failed" message:@"exit status %d", code]; } else { - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); current = pid_get_task(1); // pray unlock(&pids_lock); FsUpdateRepositories(); diff --git a/app/UserPreferences.m b/app/UserPreferences.m index 9b38f7e2d3..a2a4790b48 100644 --- a/app/UserPreferences.m +++ b/app/UserPreferences.m @@ -7,7 +7,6 @@ #import "UserPreferences.h" #import "fs/proc/ish.h" -#include "sync.h" #include "task.h" // Stuff to allow for cleaning up when doEnableExtraLocking is disabled. 
-mke diff --git a/app/iOSFS.m b/app/iOSFS.m index bc819204df..d9ea705944 100644 --- a/app/iOSFS.m +++ b/app/iOSFS.m @@ -12,6 +12,7 @@ #include "iOSFS.h" #include "kernel/fs.h" #include "kernel/errno.h" +#include "kernel/task.h" #include "fs/path.h" #include "fs/real.h" @@ -239,7 +240,7 @@ static int combine_error(NSError *coordinatorError, int err) { __block NSError *error = nil; __block struct fd *fd; __block dispatch_semaphore_t file_opened = dispatch_semaphore_create(0); - task_ref_cnt_mod_wrapper(1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(1); dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void){ void (^operation)(NSURL *url) = ^(NSURL *url) { fd = realfs_open(mount, path_for_url_in_mount(mount, url, path), flags, mode); @@ -265,7 +266,7 @@ static int combine_error(NSError *coordinatorError, int err) { } [coordinator coordinateReadingItemAtURL:url options:options error:&error byAccessor:operation]; }); - task_ref_cnt_mod_wrapper(-1, __FILE__, __LINE__); + task_ref_cnt_mod_wrapper(-1); dispatch_semaphore_wait(file_opened, DISPATCH_TIME_FOREVER); diff --git a/emu/memory.c b/emu/memory.c index b0884454ee..3591f9c66b 100644 --- a/emu/memory.c +++ b/emu/memory.c @@ -14,7 +14,6 @@ #include "jit/jit.h" #include "kernel/vdso.h" #include "kernel/task.h" -#include "kernel/resource_locking.h" #include "fs/fd.h" #include "util/sync.h" @@ -45,12 +44,12 @@ void mem_init(struct mem *mem) { void mem_destroy(struct mem *mem) { write_lock(&mem->lock); - while((mem_ref_cnt_val(mem)) && (current->pid > 1) ){ + while((mem_ref_cnt_val_get(mem)) && (current->pid > 1) ){ nanosleep(&lock_pause, NULL); } pt_unmap_always(mem, 0, MEM_PAGES); #if ENGINE_JIT - while((task_reference_count(current)) && (current->pid > 1) ){ // Wait for now, task is in one or more critical sections, and/or has locks + while((task_ref_cnt_get(current)) && (current->pid > 1) ){ // Wait for now, task is in one or more critical sections, and/or has locks nanosleep(&lock_pause, NULL); } jit_free(mem->mmu.jit); @@ -58,7 +57,7 @@ void mem_destroy(struct mem *mem) { for (int i = 0; i < MEM_PGDIR_SIZE; i++) { do { nanosleep(&lock_pause, NULL); - } while(mem_ref_cnt_val(mem)); + } while(mem_ref_cnt_val_get(mem)); if (mem->pgdir[i] != NULL) @@ -109,7 +108,7 @@ struct pt_entry *mem_pt(struct mem *mem, page_t page) { static void mem_pt_del(struct mem *mem, page_t page) { struct pt_entry *entry = mem_pt(mem, page); if (entry != NULL) { - while(task_ref_cnt_val(current) > 4) { // mark + while(task_ref_cnt_get(current) > 4) { // mark nanosleep(&lock_pause, NULL); } entry->data = NULL; @@ -190,7 +189,7 @@ int pt_unmap(struct mem *mem, page_t start, pages_t pages) { int pt_unmap_always(struct mem *mem, page_t start, pages_t pages) { for (page_t page = start; page < start + pages; mem_next_page(mem, &page)) { - while(task_ref_cnt_val(current) >3) { + while(task_ref_cnt_get(current) >3) { nanosleep(&lock_pause, NULL); } struct pt_entry *pt = mem_pt(mem, page); @@ -204,7 +203,7 @@ int pt_unmap_always(struct mem *mem, page_t start, pages_t pages) { if (--data->refcount == 0) { // vdso wasn't allocated with mmap, it's just in our data segment if (data->data != vdso_data) { - while(task_ref_cnt_val(current) > 3) { + while(task_ref_cnt_get(current) > 3) { nanosleep(&lock_pause, NULL); } int err = munmap(data->data, data->size); @@ -252,7 +251,7 @@ int pt_set_flags(struct mem *mem, page_t start, pages_t pages, int flags) { } int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t pages) { - 
while(task_ref_cnt_val(current)) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_get(current)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } for (page_t page = start; page < start + pages; mem_next_page(src, &page)) { @@ -269,7 +268,7 @@ int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t page dst_entry->offset = entry->offset; dst_entry->flags = entry->flags; } - while(task_ref_cnt_val(current)) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_get(current)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } mem_changed(src); @@ -315,7 +314,7 @@ void *mem_ptr(struct mem *mem, addr_t addr, int type) { // which changes memory maps. read_to_write_lock(&mem->lock); pt_map_nothing(mem, page, 1, P_WRITE | P_GROWSDOWN); - write_to_read_lock(&mem->lock, __FILE__, __LINE__); + write_to_read_lock(&mem->lock); entry = mem_pt(mem, page); } @@ -343,13 +342,13 @@ void *mem_ptr(struct mem *mem, addr_t addr, int type) { void *data = (char *) entry->data->data + entry->offset; // copy/paste from above - mem_ref_cnt_mod(mem, 1,__FILE__, __LINE__); + mem_ref_cnt_mod(mem, 1); //read_to_write_lock(&mem->lock); memcpy(copy, data, PAGE_SIZE); //mkemkemke Crashes here a lot when running both the go and parallel make test. 01 June 2022 task_ref_cnt_mod(current, -1); pt_map(mem, page, 1, copy, 0, entry->flags &~ P_COW); unlock(&current->general_lock); - write_to_read_lock(&mem->lock, __FILE__, __LINE__); + write_to_read_lock(&mem->lock); } @@ -409,3 +408,37 @@ void mem_coredump(struct mem *mem, const char *file) { printk("WARNING: dumped %d pages\n", pages); close(fd); } + +void mem_ref_cnt_mod(struct mem *mem, int value) { // value should only be -1 or 1. -mke + // Keep track of how many threads are referencing this mem + if(!doEnableExtraLocking) {// If they want to fly by the seat of their pants... -mke + return; + } + + if(mem == NULL) { + return; + } + + pthread_mutex_lock(&mem->reference.lock); + + if(((mem->reference.count + value) < 0)) { // Prevent our unsigned value attempting to go negative.
-mke + printk("ERROR: Attempt to decrement mem reference count to be negative, ignoring(%d:%d)\n", mem->reference.count, value); + pthread_mutex_unlock(&mem->reference.lock); + + return; + } + + + mem->reference.count = mem->reference.count + value; + + pthread_mutex_unlock(&mem->reference.lock); +} + +int mem_ref_cnt_val_get(struct mem *mem) { + pthread_mutex_lock(&mem->reference.lock); + int cnt = mem->reference.count; + pthread_mutex_unlock(&mem->reference.lock); + if((cnt < 0) || ( cnt > 1000)) // Stupid kluge while I fix this brain damage + cnt = 0; + return cnt; +} diff --git a/emu/memory.h b/emu/memory.h index e458987de2..67759598de 100644 --- a/emu/memory.h +++ b/emu/memory.h @@ -106,6 +106,10 @@ int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t page void *mem_ptr(struct mem *mem, addr_t addr, int type); int mem_segv_reason(struct mem *mem, addr_t addr); +// Reference counting is important +void mem_ref_cnt_mod(struct mem *mem, int value); +int mem_ref_cnt_val_get(struct mem *mem); + extern size_t real_page_size; #endif diff --git a/emu/tlb.c b/emu/tlb.c index 8b8d96be47..46bf4be2bf 100644 --- a/emu/tlb.c +++ b/emu/tlb.c @@ -2,7 +2,6 @@ #include "emu/tlb.h" #include "kernel/signal.h" #include "kernel/task.h" -#include "kernel/resource_locking.h" void tlb_refresh(struct tlb *tlb, struct mmu *mmu) { if (tlb->mmu == mmu && tlb->mem_changes == mmu->changes) { diff --git a/fs/fd.h b/fs/fd.h index 7336417215..8608f41890 100644 --- a/fs/fd.h +++ b/fs/fd.h @@ -3,6 +3,7 @@ #include #include "emu/memory.h" #include "util/list.h" +#include "util/ro_locks.h" #include "util/sync.h" #include "util/bits.h" #include "fs/stat.h" diff --git a/fs/inode.h b/fs/inode.h index 802f1b8d03..cbd4173d03 100644 --- a/fs/inode.h +++ b/fs/inode.h @@ -4,6 +4,7 @@ #include "misc.h" #include "util/list.h" #include "util/sync.h" + struct mount; struct fd; diff --git a/fs/lock.c b/fs/lock.c index f8f09e64cd..9e9ea7ff29 100644 --- a/fs/lock.c +++ b/fs/lock.c @@ -146,11 +146,11 @@ static int file_lock_from_flock(struct fd *fd, struct flock_ *flock, struct file offset = 0; break; case LSEEK_CUR: - lock(&fd->lock, 0); + mylock(&fd->lock, 0); offset = fd->ops->lseek(fd, 0, LSEEK_CUR); unlock(&fd->lock); if (offset < 0) - return offset; + return (int)offset; break; case LSEEK_END: { struct statbuf stat; diff --git a/fs/poll.c b/fs/poll.c index 31a0c9be49..60212c3ad2 100644 --- a/fs/poll.c +++ b/fs/poll.c @@ -7,7 +7,6 @@ #include "util/list.h" #include "kernel/errno.h" #include "kernel/fs.h" -#include "kernel/resource_locking.h" #include "fs/fd.h" #include "fs/poll.h" #include "fs/real.h" @@ -331,7 +330,7 @@ void poll_destroy(struct poll *poll) { struct poll_fd *poll_fd; struct poll_fd *tmp; - while(task_ref_cnt_val(current)) { + while(task_ref_cnt_get(current)) { nanosleep(&lock_pause, NULL); } list_for_each_entry_safe(&poll->poll_fds, poll_fd, tmp, fds) { @@ -342,12 +341,12 @@ void poll_destroy(struct poll *poll) { free(poll_fd); } - while(task_ref_cnt_val(current)) { + while(task_ref_cnt_get(current)) { nanosleep(&lock_pause, NULL); } list_for_each_entry_safe(&poll->pollfd_freelist, poll_fd, tmp, fds) { - while(task_ref_cnt_val(current)) { + while(task_ref_cnt_get(current)) { nanosleep(&lock_pause, NULL); } list_remove(&poll_fd->fds); diff --git a/fs/proc/pid.c b/fs/proc/pid.c index d2ea0785c4..9d02d72050 100644 --- a/fs/proc/pid.c +++ b/fs/proc/pid.c @@ -7,7 +7,6 @@ #include "fs/tty.h" #include "kernel/fs.h" #include "kernel/vdso.h" -#include "kernel/resource_locking.h" #include "util/sync.h" 
extern pthread_mutex_t extra_lock; @@ -20,7 +19,7 @@ static void proc_pid_getname(struct proc_entry *entry, char *buf) { } static struct task *proc_get_task(struct proc_entry *entry) { - complex_lockt(&pids_lock, 1, __FILE__, __LINE__); + complex_lockt(&pids_lock, 1); struct task *task = pid_get_task(entry->pid); if (task == NULL) unlock(&pids_lock); @@ -193,7 +192,7 @@ void proc_maps_dump(struct task *task, struct proc_data *buf) { if (mem == NULL) return; - read_lock(&mem->lock, __FILE__, __LINE__); + read_lock(&mem->lock); page_t page = 0; while (page < MEM_PAGES) { // find a region @@ -240,7 +239,7 @@ void proc_maps_dump(struct task *task, struct proc_data *buf) { 0, // inode path); } - read_unlock(&mem->lock, __FILE__, __LINE__); + read_unlock(&mem->lock); } static int proc_pid_maps_show(struct proc_entry *entry, struct proc_data *buf) { @@ -338,7 +337,7 @@ static int proc_pid_cwd_readlink(struct proc_entry *entry, char *buf) { struct task *task = proc_get_task(entry); if (task == NULL) return _ESRCH; - complex_lockt(&task->fs->lock, 0, __FILE__, __LINE__); + complex_lockt(&task->fs->lock, 0); int err = generic_getpath(task->fs->pwd, buf); unlock(&task->fs->lock); diff --git a/fs/proc/root.c b/fs/proc/root.c index da527df118..a89f2305cf 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -3,7 +3,6 @@ #include #include "kernel/calls.h" #include "kernel/task.h" -#include "kernel/resource_locking.h" #include "fs/proc.h" #include "fs/proc/net.h" #include "platform/platform.h" diff --git a/fs/tty.c b/fs/tty.c index 8be108115a..3207e88fde 100644 --- a/fs/tty.c +++ b/fs/tty.c @@ -5,6 +5,7 @@ #include "fs/poll.h" #include "fs/tty.h" #include "fs/devices.h" +#include "util/sync.h" extern struct tty_driver pty_master; extern struct tty_driver pty_slave; @@ -150,7 +151,7 @@ int tty_open(struct tty *tty, struct fd *fd) { // Make this our controlling terminal if: // - the terminal doesn't already have a session // - we're a session leader - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); lock(&tty->lock, 0); if (tty->session == 0 && current->group->sid == current->pid) tty_set_controlling(current->group, tty); @@ -448,7 +449,7 @@ static ssize_t tty_read(struct fd *fd, void *buf, size_t bufsize) { int err = 0; struct tty *tty = fd->tty; - complex_lockt(&pids_lock, 1, __FILE__, __LINE__); // MKEMKE + complex_lockt(&pids_lock, 1); // MKEMKE lock(&tty->lock, 0); if (tty->hung_up) { unlock(&pids_lock); @@ -629,7 +630,7 @@ static int tiocsctty(struct tty *tty, int force) { unlock(&tty->lock); //aaaaaaaa // it's safe because literally nothing happens between that unlock and the last lock, and repulsive for the same reason // locking is ***hard** - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); lock(&tty->lock, 0); // do nothing if this is already our controlling tty if (current->group->sid == current->pid && current->group->sid == tty->session) @@ -765,7 +766,7 @@ static int tty_ioctl(struct fd *fd, int cmd, void *arg) { case TIOCSPGRP_: // see "aaaaaaaa" comment above unlock(&tty->lock); - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); lock(&tty->lock, 0); pid_t_ sid = current->group->sid; unlock(&pids_lock); diff --git a/iSH-AOK.xcodeproj/project.pbxproj b/iSH-AOK.xcodeproj/project.pbxproj index c977926370..b62368d8da 100644 --- a/iSH-AOK.xcodeproj/project.pbxproj +++ b/iSH-AOK.xcodeproj/project.pbxproj @@ -104,11 +104,10 @@ 497F6D3D254E5EA600C82F46 /* main.c in Sources */ = {isa = PBXBuildFile; 
fileRef = BB7D93822087C2890008DA78 /* main.c */; }; 497F6D5C254E609700C82F46 /* main.c in Sources */ = {isa = PBXBuildFile; fileRef = BB7D93822087C2890008DA78 /* main.c */; }; 497F6D87254E62E100C82F46 /* libish.a in Frameworks */ = {isa = PBXBuildFile; fileRef = BB13F7DC200AD81D003D1C4D /* libish.a */; }; - 5D59DADC2B179A2300FA995C /* ro_locks.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D59DADB2B179A2300FA995C /* ro_locks.c */; }; - 5D59DADD2B179A2300FA995C /* ro_locks.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D59DADB2B179A2300FA995C /* ro_locks.c */; }; - 5D59DADE2B179A2300FA995C /* ro_locks.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D59DADB2B179A2300FA995C /* ro_locks.c */; }; - 5D59DADF2B179A2300FA995C /* ro_locks.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D59DADB2B179A2300FA995C /* ro_locks.c */; }; - 5D59DAE02B179A2300FA995C /* ro_locks.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D59DADB2B179A2300FA995C /* ro_locks.c */; }; + 5D4E7DF82B1E68EB006BA7D4 /* ro_locks.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D59DADB2B179A2300FA995C /* ro_locks.c */; }; + 5D4E7DFA2B1E68F6006BA7D4 /* rw_locks.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D59DAE32B17EB1600FA995C /* rw_locks.c */; }; + 5D7E7E152B1FD4890087F2E0 /* ro_locks.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D59DADB2B179A2300FA995C /* ro_locks.c */; }; + 5D7E7E162B1FD4970087F2E0 /* rw_locks.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D59DAE32B17EB1600FA995C /* rw_locks.c */; }; 5D8ACEFA284BF122003C50D3 /* net.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D8ACEF9284BF122003C50D3 /* net.c */; }; 5D8ACEFD284CE096003C50D3 /* sys.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D8ACEFC284CE096003C50D3 /* sys.c */; }; 5D8ACEFE284CE096003C50D3 /* sys.c in Sources */ = {isa = PBXBuildFile; fileRef = 5D8ACEFC284CE096003C50D3 /* sys.c */; }; @@ -593,8 +592,9 @@ 497F6D47254E605F00C82F46 /* ish-AOK */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; name = "ish-AOK"; path = ish; sourceTree = BUILT_PRODUCTS_DIR; }; 5D272493277C20DF0005F2A8 /* iSHFileProviderRelease.entitlements */ = {isa = PBXFileReference; lastKnownFileType = text.plist.entitlements; path = iSHFileProviderRelease.entitlements; sourceTree = ""; }; 5D59DAD92B17996100FA995C /* ro_locks.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ro_locks.h; sourceTree = ""; }; - 5D59DADA2B17999500FA995C /* rw_locks.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = rw_locks.h; sourceTree = ""; }; + 5D59DADA2B17999500FA995C /* rw_locks.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rw_locks.h; sourceTree = ""; }; 5D59DADB2B179A2300FA995C /* ro_locks.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = ro_locks.c; sourceTree = ""; }; + 5D59DAE32B17EB1600FA995C /* rw_locks.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = rw_locks.c; sourceTree = ""; }; 5D8ACEF9284BF122003C50D3 /* net.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = net.c; sourceTree = ""; }; 5D8ACEFB284CE096003C50D3 /* sys.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sys.h; sourceTree = ""; }; 5D8ACEFC284CE096003C50D3 /* sys.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = sys.c; sourceTree = ""; }; @@ -1137,6 +1137,7 @@ 
5D59DAD92B17996100FA995C /* ro_locks.h */, 5D59DADA2B17999500FA995C /* rw_locks.h */, 5D59DADB2B179A2300FA995C /* ro_locks.c */, + 5D59DAE32B17EB1600FA995C /* rw_locks.c */, ); path = util; sourceTree = ""; @@ -2071,7 +2072,9 @@ 497F6D10254E5EA600C82F46 /* pty.c in Sources */, 497F6D11254E5EA600C82F46 /* real.c in Sources */, 497F6D12254E5EA600C82F46 /* sock.c in Sources */, + 5D4E7DF82B1E68EB006BA7D4 /* ro_locks.c in Sources */, 497F6D13254E5EA600C82F46 /* sockrestart.c in Sources */, + 5D4E7DFA2B1E68F6006BA7D4 /* rw_locks.c in Sources */, 497F6D14254E5EA600C82F46 /* stat.c in Sources */, 497F6D15254E5EA600C82F46 /* tmp.c in Sources */, 497F6D16254E5EA600C82F46 /* tty-real.c in Sources */, @@ -2132,7 +2135,6 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( - 5D59DADE2B179A2300FA995C /* ro_locks.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2147,7 +2149,6 @@ 5D8ACEFD284CE096003C50D3 /* sys.c in Sources */, BB28C7BA268975AA00BDC834 /* iOSFS.m in Sources */, BB28C7BC268975B000BDC834 /* PasteboardDevice.m in Sources */, - 5D59DADC2B179A2300FA995C /* ro_locks.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2169,7 +2170,6 @@ BBBFE94921C5CFF100509DD5 /* NSError+ISHErrno.m in Sources */, BB9C7B87240A2B1E00F5D4F0 /* AppGroup.m in Sources */, 5D8CFA872B1198B300D50E57 /* RTCDevice.m in Sources */, - 5D59DAE02B179A2300FA995C /* ro_locks.c in Sources */, BB88F4942154760800A341FD /* FileProviderExtension.m in Sources */, BB88F4972154760800A341FD /* FileProviderItem.m in Sources */, ); @@ -2183,7 +2183,6 @@ BBBF9CBD27C217B6002A30F7 /* PasteboardDeviceLinux.c in Sources */, BB8C3AFF26B7B8CF00E38DDC /* fakefs.c in Sources */, BBEEA9E8277D25090069495B /* LinuxRoot.c in Sources */, - 5D59DADF2B179A2300FA995C /* ro_locks.c in Sources */, BB123ACC26C9EFD900419CDA /* LinuxTTY.c in Sources */, BBEEA9EA277DAB400069495B /* LinuxPTY.c in Sources */, ); @@ -2194,6 +2193,7 @@ buildActionMask = 2147483647; files = ( BB28C79226896B1F00BDC834 /* AboutAppearanceViewController.m in Sources */, + 5D7E7E162B1FD4970087F2E0 /* rw_locks.c in Sources */, BB28C79326896B1F00BDC834 /* AltIconViewController.m in Sources */, 491B31E62883BF22008EEFB0 /* ThemeViewController.m in Sources */, 5D8CFA862B1198B300D50E57 /* RTCDevice.m in Sources */, @@ -2214,7 +2214,7 @@ BB28C79B26896B1F00BDC834 /* FontPickerViewController.m in Sources */, BBC3863E276817A900CC8C2E /* UpgradeRootViewController.m in Sources */, BB28C79D26896B1F00BDC834 /* SceneDelegate.m in Sources */, - 5D59DADD2B179A2300FA995C /* ro_locks.c in Sources */, + 5D7E7E152B1FD4890087F2E0 /* ro_locks.c in Sources */, BB28C79E26896B1F00BDC834 /* AboutExternalKeyboardViewController.m in Sources */, BB28C79F26896B1F00BDC834 /* PassthroughView.m in Sources */, BB28C7A026896B1F00BDC834 /* UserPreferences.m in Sources */, diff --git a/jit/jit.c b/jit/jit.c index 8d0bb7a5e7..d75aa20d5a 100644 --- a/jit/jit.c +++ b/jit/jit.c @@ -6,9 +6,9 @@ #include "emu/cpu.h" #include "emu/memory.h" #include "emu/interrupt.h" -#include "util/list.h" #include "kernel/task.h" -#include "kernel/resource_locking.h" +#include "util/list.h" +#include "util/sync.h" extern int current_pid(void); @@ -39,7 +39,7 @@ void jit_free(struct jit *jit) { if (!jit) return; bool signal_pending = !!(current->pending & ~current->blocked); - while((task_ref_cnt_val(current) > 2) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight + while((task_ref_cnt_get(current) 
> 2) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); signal_pending = !!(current->pending & ~current->blocked); } @@ -84,7 +84,7 @@ void jit_invalidate_range(struct jit *jit, page_t start, page_t end) { } void jit_invalidate_page(struct jit *jit, page_t page) { - while(task_ref_cnt_val(current) > 4) { // It's all a bit magic, but I think this is doing something useful. -mke + while(task_ref_cnt_get(current) > 4) { // It's all a bit magic, but I think this is doing something useful. -mke nanosleep(&lock_pause, NULL); } // mofify_critical_region_count(current, 1, __FILE__, __LINE__); @@ -205,7 +205,7 @@ static inline size_t jit_cache_hash(addr_t ip) { static int cpu_step_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) { struct jit *jit = cpu->mmu->jit; - read_lock(&jit->jetsam_lock, __FILE__, __LINE__); + read_lock(&jit->jetsam_lock); struct jit_block **cache = calloc(JIT_CACHE_SIZE, sizeof(*cache)); struct jit_frame *frame = malloc(sizeof(struct jit_frame)); @@ -267,7 +267,7 @@ static int cpu_step_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) { free(frame); free(cache); - read_unlock(&jit->jetsam_lock, __FILE__, __LINE__); + read_unlock(&jit->jetsam_lock); return interrupt; } @@ -305,11 +305,11 @@ int cpu_run_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) { unlock(&jit->lock); write_lock(&jit->jetsam_lock); lock(&jit->lock, 0); - while(task_ref_cnt_mod(current) > 3) {// Yes, this is weird. It might not work, but I'm trying. -mke + while(task_ref_cnt_get(current) > 3) {// Yes, this is weird. It might not work, but I'm trying. -mke nanosleep(&lock_pause, NULL); // Yes, this has triggered at least once. Is it doing any good though? -mke } jit_free_jetsam(jit); - write_unlock(&jit->jetsam_lock, __FILE__, __LINE__); + write_unlock(&jit->jetsam_lock); unlock(&jit->lock); return interrupt; } diff --git a/kernel/BatteryStatus.m b/kernel/BatteryStatus.m index 78998c9a58..e636d9a6cb 100644 --- a/kernel/BatteryStatus.m +++ b/kernel/BatteryStatus.m @@ -18,12 +18,13 @@ BOOL lowPowerModeEnabled = [[NSProcessInfo processInfo] isLowPowerModeEnabled]; NSString *stateString = @""; + // Charging, Discharging, Full, Unknown, or Not charging. switch (batteryState) { case UIDeviceBatteryStateUnknown: stateString = @"Unknown"; break; case UIDeviceBatteryStateUnplugged: - stateString = @"Unplugged"; + stateString = @"Discharging"; break; case UIDeviceBatteryStateCharging: stateString = @"Charging"; @@ -31,15 +32,17 @@ case UIDeviceBatteryStateFull: stateString = @"Full"; break; + default: + stateString = @"Not Available"; // Handle any unexpected cases + break; } if(type == 3) { NSString *formattedOutput = [NSString stringWithFormat: @"battery_level: %.2f\n" "battery_state: %@\n" - "low_power_mode: %@\n" - "battery_health: %.2f\n", - batteryLevel * 100, stateString, lowPowerModeEnabled ? @"Enabled" : @"Disabled", batteryLevel * 100]; + "low_power_mode: %@\n", + batteryLevel * 100, stateString, lowPowerModeEnabled ? 
@"Enabled" : @"Disabled"]; return (char *)[formattedOutput UTF8String]; } else if(type == 2) { // Capacity NSString *formattedOutput = [NSString stringWithFormat: diff --git a/kernel/calls.c b/kernel/calls.c index 064b7118da..d615e81db1 100644 --- a/kernel/calls.c +++ b/kernel/calls.c @@ -5,7 +5,7 @@ #include "emu/memory.h" #include "kernel/signal.h" #include "kernel/task.h" -#include "kernel/resource_locking.h" +#include "util/sync.h" extern bool isGlibC; @@ -347,9 +347,9 @@ void handle_syscall_interrupt(struct cpu_state *cpu) { } void handle_page_fault_interrupt(struct cpu_state *cpu) { - read_lock(¤t->mem->lock, __FILE__, __LINE__); + read_lock(¤t->mem->lock); void *ptr = mem_ptr(current->mem, cpu->segfault_addr, cpu->segfault_was_write ? MEM_WRITE : MEM_READ); - read_unlock(¤t->mem->lock, __FILE__, __LINE__); + read_unlock(¤t->mem->lock); if (ptr == NULL) { printk("ERROR: %d(%s) page fault on 0x%x at 0x%x\n", current->pid, current->comm, cpu->segfault_addr, cpu->eip); @@ -398,7 +398,7 @@ void handle_interrupt(int interrupt) { handle_illegal_instruction_interrupt(cpu); break; case INT_BREAKPOINT: - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); send_signal(current, SIGTRAP_, (struct siginfo_) { .sig = SIGTRAP_, .code = SI_KERNEL_, @@ -406,7 +406,7 @@ void handle_interrupt(int interrupt) { unlock(&pids_lock); break; case INT_DEBUG: - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); send_signal(current, SIGTRAP_, (struct siginfo_) { .sig = SIGTRAP_, .code = TRAP_TRACE_, diff --git a/kernel/exec.c b/kernel/exec.c index c2a987b6f3..f7c847ca3b 100644 --- a/kernel/exec.c +++ b/kernel/exec.c @@ -16,8 +16,8 @@ #include "fs/fd.h" #include "kernel/elf.h" #include "kernel/vdso.h" -#include "kernel/resource_locking.h" #include "tools/ptraceomatic-config.h" +#include "util/sync.h" #define ARGV_MAX 32 * PAGE_SIZE @@ -110,7 +110,7 @@ static int load_entry(struct prg_header ph, addr_t bias, struct fd *fd) { // Unlock and lock the mem because the user functions must be // called without locking mem. if(trylockw(¤t->mem->lock)) // Test to see if it is actually locked. This is likely masking an underlying problem. -mke - write_unlock(¤t->mem->lock, __FILE__, __LINE__); + write_unlock(¤t->mem->lock); user_memset(file_end, 0, tail_size); write_lock(¤t->mem->lock); } @@ -290,7 +290,7 @@ static intptr_t elf_exec(struct fd *fd, const char *file, struct exec_args argv, if ((err = pt_map_nothing(current->mem, 0xffffd, 1, P_WRITE | P_GROWSDOWN)) < 0) goto beyond_hope; // that was the last memory mapping - write_unlock(¤t->mem->lock, __FILE__, __LINE__); + write_unlock(¤t->mem->lock); dword_t sp = 0xffffe000; // on 32-bit linux, there's 4 empty bytes at the very bottom of the stack. // on 64-bit linux, there's 8. make ptraceomatic happy. 
(a major theme in this file) @@ -422,7 +422,7 @@ static intptr_t elf_exec(struct fd *fd, const char *file, struct exec_args argv, beyond_hope: // TODO force sigsegv - write_unlock(¤t->mem->lock, __FILE__, __LINE__); + write_unlock(¤t->mem->lock); goto out_free_interp; } @@ -626,7 +626,7 @@ int __do_execve(const char *file, struct exec_args argv, struct exec_args envp) vfork_notify(current); if (current->ptrace.traced) { - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); send_signal(current, SIGTRAP_, (struct siginfo_) { .code = SI_USER_, .kill.pid = current->pid, diff --git a/kernel/exit.c b/kernel/exit.c index fae1fa349b..87159074bd 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -5,7 +5,8 @@ #include "kernel/mm.h" #include "kernel/futex.h" #include "kernel/ptrace.h" -#include "kernel/resource_locking.h" +#include "kernel/task.h" +#include "util/sync.h" #include "fs/fd.h" #include "fs/tty.h" @@ -17,7 +18,7 @@ extern const char extra_lock_comm; static void halt_system(void); static bool exit_tgroup(struct task *task) { - while((task_ref_cnt_val(task) > 2) || (locks_held_count(task))) { // Wait for now, task is in one or more critical sections, and/or has locks + while((task_ref_cnt_get(task) > 2) || (locks_held_count(task))) { // Wait for now, task is in one or more critical sections, and/or has locks nanosleep(&lock_pause, NULL); } struct tgroup *group = task->group; @@ -65,7 +66,7 @@ noreturn void do_exit(int status) { bool signal_pending = !!(current->pending & ~current->blocked); // has to happen before mm_release - while((task_ref_cnt_val(current) > 1) || + while((task_ref_cnt_get(current) > 1) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); @@ -82,14 +83,14 @@ noreturn void do_exit(int status) { do { nanosleep(&lock_pause, NULL); signal_pending = !!(current->pending & ~current->blocked); - } while((task_ref_cnt_val(current) > 1) || + } while((task_ref_cnt_get(current) > 1) || (locks_held_count(current)) || (signal_pending)); // Wait for now, task is in one or more critical mm_release(current->mm); current->mm = NULL; signal_pending = !!(current->pending & ~current->blocked); - while((task_ref_cnt_val(current) > 1) || + while((task_ref_cnt_get(current) > 1) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); @@ -98,7 +99,7 @@ noreturn void do_exit(int status) { fdtable_release(current->files); current->files = NULL; - while((task_ref_cnt_val(current) > 1) || + while((task_ref_cnt_get(current) > 1) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); @@ -110,7 +111,7 @@ noreturn void do_exit(int status) { // sighand must be released below so it can be protected by pids_lock // since it can be accessed by other threads - while((task_ref_cnt_val(current) > 1) || + while((task_ref_cnt_get(current) > 1) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical// Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); @@ -126,10 +127,10 @@ noreturn void 
do_exit(int status) { // the actual freeing needs pids_lock task_ref_cnt_mod(current, 1); - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); // release the sighand signal_pending = !!(current->pending & ~current->blocked); - while((task_ref_cnt_val(current) > 2) || + while((task_ref_cnt_get(current) > 2) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); @@ -155,7 +156,7 @@ noreturn void do_exit(int status) { signal_pending = !!(current->pending & ~current->blocked); - while((task_ref_cnt_val(current) > 2) || + while((task_ref_cnt_get(current) > 2) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); @@ -208,7 +209,7 @@ EXIT:pthread_exit(NULL); noreturn void do_exit_group(int status) { struct tgroup *group = current->group; - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); lock(&group->lock, 0); if (!group->doing_group_exit) { group->doing_group_exit = true; @@ -235,7 +236,7 @@ noreturn void do_exit_group(int status) { } unlock(&pids_lock); - task_ref_cnt_mod(current, -1) + task_ref_cnt_mod(current, -1); unlock(&group->lock); //if(current->pid <= MAX_PID) // abort if crazy. -mke do_exit(status); @@ -319,7 +320,7 @@ static bool reap_if_zombie(struct task *task, struct siginfo_ *info_out, struct static bool notify_if_stopped(struct task *task, struct siginfo_ *info_out) { - complex_lockt(&task->group->lock, 0, __FILE__, __LINE__); + complex_lockt(&task->group->lock, 0); bool stopped = task->group->stopped; unlock(&task->group->lock); if (!stopped || task->group->group_exit_code == 0) @@ -357,7 +358,7 @@ int do_wait(int idtype, pid_t_ id, struct siginfo_ *info, struct rusage_ *rusage if (options & ~(WNOHANG_|WUNTRACED_|WEXITED_|WCONTINUED_|WNOWAIT_|__WALL_)) return _EINVAL; - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); task_ref_cnt_mod(current, 1); int err; bool got_signal = false; @@ -423,7 +424,7 @@ int do_wait(int idtype, pid_t_ id, struct siginfo_ *info, struct rusage_ *rusage return 0; error: - task_ref_cnt_mod(current); + task_ref_cnt_mod(current, -1); unlock(&pids_lock); return err; } diff --git a/kernel/fork.c b/kernel/fork.c index 3cec83a199..a1915952ed 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1,11 +1,11 @@ #include "debug.h" #include "kernel/task.h" #include "fs/fd.h" -#include "kernel/calls.h" #include "fs/tty.h" +#include "kernel/calls.h" #include "kernel/mm.h" #include "kernel/ptrace.h" -#include "kernel/resource_locking.h" +#include "util/sync.h" #define CSIGNAL_ 0x000000ff #define CLONE_VM_ 0x00000100 @@ -95,7 +95,7 @@ static int copy_task(struct task *task, dword_t flags, addr_t stack, addr_t ptid } struct tgroup *old_group = task->group; - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); lock(&old_group->lock, 0); if (!(flags & CLONE_THREAD_)) { task->group = tgroup_copy(old_group); @@ -127,7 +127,7 @@ static int copy_task(struct task *task, dword_t flags, addr_t stack, addr_t ptid return 0; fail_free_sighand: - while(task_ref_cnt_val(task)) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_get(task)) { // Wait for now, task is in one or more 
critical sections nanosleep(&lock_pause, NULL); } sighand_release(task->sighand); @@ -160,7 +160,7 @@ dword_t sys_clone(dword_t flags, addr_t stack, addr_t ptid, addr_t tls, addr_t c // some other thread could get a pointer to the task. // FIXME: task_destroy doesn't free all aspects of the task, which // could cause leaks - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); task_destroy(task, 3); unlock(&pids_lock); diff --git a/kernel/fs.c index f89b5ac885..dae3581694 100644 --- a/kernel/fs.c +++ b/kernel/fs.c @@ -5,7 +5,6 @@ #include "kernel/errno.h" #include "kernel/task.h" #include "kernel/fs.h" -#include "kernel/resource_locking.h" #include "fs/fd.h" #include "fs/path.h" #include "fs/dev.h" diff --git a/kernel/futex.c index 8ad27f7264..5a0f6a1e56 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1,7 +1,7 @@ #include "kernel/calls.h" -#include "kernel/resource_locking.h" #include #include "futex.h" +#include "util/sync.h" // Apple doesn't implement futex, so we have to fake it #define FUTEX_WAIT_ 0 #define FUTEX_WAKE_ 1 @@ -115,9 +115,9 @@ static void futex_put(struct futex *futex) { static int futex_load(struct futex *futex, dword_t *out) { assert(futex->mem == current->mem); - read_lock(&current->mem->lock, __FILE__, __LINE__); + read_lock(&current->mem->lock); dword_t *ptr = mem_ptr(current->mem, futex->addr, MEM_READ); - read_unlock(&current->mem->lock, __FILE__, __LINE__); + read_unlock(&current->mem->lock); if (ptr == NULL) return 1; *out = *ptr; @@ -371,7 +371,7 @@ int_t sys_set_robust_list(addr_t robust_list, dword_t len) { int_t sys_get_robust_list(pid_t_ pid, addr_t robust_list_ptr, addr_t len_ptr) { STRACE("get_robust_list(%d, %#x, %#x)", pid, robust_list_ptr, len_ptr); - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock,0); struct task *task = pid_get_task(pid); unlock(&pids_lock); if (task != current) diff --git a/kernel/getset.c index af6c8568c3..2845e6fca0 100644 --- a/kernel/getset.c +++ b/kernel/getset.c @@ -1,19 +1,20 @@ #include "kernel/calls.h" #include "kernel/task.h" #include "kernel/personality.h" +#include "util/sync.h" -pid_t_ sys_getpid() { +pid_t_ sys_getpid(void) { STRACE("getpid()"); return current->tgid; } -pid_t_ sys_gettid() { +pid_t_ sys_gettid(void) { STRACE("gettid()"); return current->pid; } -pid_t_ sys_getppid() { +pid_t_ sys_getppid(void) { STRACE("getppid()"); pid_t_ ppid; - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); if (current->parent != NULL) ppid = current->parent->pid; else @@ -22,20 +23,20 @@ return ppid; } -dword_t sys_getuid32() { +dword_t sys_getuid32(void) { STRACE("getuid32()"); return current->uid; } -dword_t sys_getuid() { +dword_t sys_getuid(void) { STRACE("getuid()"); return current->uid & 0xffff; } -dword_t sys_geteuid32() { +dword_t sys_geteuid32(void) { STRACE("geteuid32()"); return current->euid; } -dword_t sys_geteuid() { +dword_t sys_geteuid(void) { STRACE("geteuid()"); return current->euid & 0xffff; } @@ -87,20 +88,20 @@ int_t sys_setreuid(uid_t_ ruid, uid_t_ euid) { return sys_setresuid(ruid, euid, -1); } -dword_t sys_getgid32() { +dword_t sys_getgid32(void) { STRACE("getgid32()"); return current->gid; } -dword_t sys_getgid() { +dword_t sys_getgid(void) { STRACE("getgid()"); return current->gid & 0xffff; } -dword_t sys_getegid32() { +dword_t sys_getegid32(void) { STRACE("getegid32()"); return current->egid; } -dword_t sys_getegid() { +dword_t sys_getegid(void) {
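The futex.c hunk above touches the file whose comment notes that Apple doesn't implement futex, so it has to be faked. For orientation, a hedged sketch of the usual emulation technique, a condition variable guarding the watched word; fake_futex_* are made-up names illustrating the general idea, not iSH's actual futex table:

    #include <pthread.h>
    #include <stdatomic.h>

    // Illustrative fake futex: sleep only if *uaddr still holds the value the
    // caller saw (the futex contract); wake by broadcasting. Real code hashes
    // the address to one of many such structs.
    struct fake_futex {
        pthread_mutex_t m;
        pthread_cond_t c;
    };

    static int fake_futex_wait(struct fake_futex *f, atomic_int *uaddr, int expected) {
        pthread_mutex_lock(&f->m);
        if (atomic_load(uaddr) != expected) {  // value already changed: real futex returns EAGAIN
            pthread_mutex_unlock(&f->m);
            return 1;
        }
        pthread_cond_wait(&f->c, &f->m);       // spurious wakeups are allowed by futex semantics
        pthread_mutex_unlock(&f->m);
        return 0;
    }

    static void fake_futex_wake(struct fake_futex *f) {
        pthread_mutex_lock(&f->m);
        pthread_cond_broadcast(&f->c);         // like FUTEX_WAKE with INT_MAX waiters
        pthread_mutex_unlock(&f->m);
    }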
STRACE("getegid()"); return current->egid & 0xffff; } diff --git a/kernel/group.c b/kernel/group.c index 2768be3df4..efe9175db1 100644 --- a/kernel/group.c +++ b/kernel/group.c @@ -1,4 +1,5 @@ #include "util/list.h" +#include "util/sync.h" #include "kernel/calls.h" #include "kernel/task.h" #include "fs/tty.h" @@ -10,7 +11,7 @@ dword_t sys_setpgid(pid_t_ id, pid_t_ pgid) { id = current->pid; if (pgid == 0) pgid = id; - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); struct pid *pid = pid_get(id); err = _ESRCH; if (pid == NULL) @@ -55,13 +56,13 @@ dword_t sys_setpgid(pid_t_ id, pid_t_ pgid) { return err; } -dword_t sys_setpgrp() { +dword_t sys_setpgrp(void) { return sys_setpgid(0, 0); } pid_t_ sys_getpgid(pid_t_ pid) { STRACE("getpgid(%d)", pid); - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); struct task *task = current; if (pid != 0) task = pid_get_task(pid); @@ -73,7 +74,7 @@ pid_t_ sys_getpgid(pid_t_ pid) { unlock(&pids_lock); return pid; } -pid_t_ sys_getpgrp() { +pid_t_ sys_getpgrp(void) { return sys_getpgid(0); } @@ -95,7 +96,7 @@ void task_leave_session(struct task *task) { } pid_t_ task_setsid(struct task *task) { - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); struct tgroup *group = task->group; pid_t_ new_sid = group->leader->pid; if (group->pgid == new_sid || group->sid == new_sid) { @@ -116,14 +117,14 @@ pid_t_ task_setsid(struct task *task) { return new_sid; } -dword_t sys_setsid() { +dword_t sys_setsid(void) { STRACE("setsid()"); return task_setsid(current); } -dword_t sys_getsid() { +dword_t sys_getsid(void) { STRACE("getsid()"); - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock,0); pid_t_ sid = current->group->sid; unlock(&pids_lock); return sid; diff --git a/kernel/log.c b/kernel/log.c index 8718b03966..5b41d97a74 100644 --- a/kernel/log.c +++ b/kernel/log.c @@ -10,7 +10,6 @@ #include "util/sync.h" #include "util/fifo.h" #include "kernel/task.h" -#include "kernel/resource_locking.h" #include "misc.h" #define LOG_BUF_SHIFT 20 @@ -132,24 +131,6 @@ static void log_buf_append(const char *msg) { static void log_line(const char *line); -/* static void output_line(const char *line) { - time_t t=time(NULL); - char* c_time_string; - c_time_string = ctime(&t); - c_time_string[strcspn(c_time_string, "\n")] = 0; // Remove trailing newline - //double tstamp = difftime(t, (time_t) 0); - int mybuff_size = 512; - char tmpbuff[mybuff_size]; - //sprintf(tmpbuff, "[ %f] %s", tstamp, line); - sprintf(tmpbuff, "[ %s] %s", c_time_string, line); - // send it to stdout or wherever - if(strcmp(tmpbuff, "") != 0) { // Don't log empty string - log_line(tmpbuff); - // add it to the circular buffer - log_buf_append(tmpbuff); - log_buf_append("\n"); - } -} */ static void output_line(const char *line) { time_t t = time(NULL); char* c_time_string = ctime(&t); @@ -178,7 +159,7 @@ void ish_vprintk(const char *msg, va_list args) { buf_size += vsprintf(buf + buf_size, msg, args); // output up to the last newline, leave the rest in the buffer - complex_lockt(&log_lock, 1, __FILE__, __LINE__); + complex_lockt(&log_lock, 1); char *b = buf; char *p; while ((p = strchr(b, '\n')) != NULL) { @@ -229,11 +210,11 @@ void die(const char *msg, ...) 
{ } // fun little utility function -int current_pid(void) { +inline int current_pid(void) { task_ref_cnt_mod(current, 1); if(current != NULL) { if (current->exiting != true) { - task_ref_cnt_mod(current, -1) + task_ref_cnt_mod(current, -1); return current->pid; } else { task_ref_cnt_mod(current, -1); @@ -245,14 +226,14 @@ int current_pid(void) { return -1; } -int current_uid(void) { +inline int current_uid(void) { task_ref_cnt_mod(current, 1); if(current != NULL) { if (current->exiting != true) { - task_ref_cnt_mod(current); + task_ref_cnt_mod(current, -1); return current->uid; } else { - task_ref_cnt_mod(current); + task_ref_cnt_mod(current, -1); return -1; } } @@ -261,21 +242,21 @@ return -1; } -char * current_comm(void) { +inline char * current_comm(void) { static char comm[16]; task_ref_cnt_mod(current, 1); if(current != NULL) { if(strcmp(current->comm, "")) { strncpy(comm, current->comm, 16); } else { - task_ref_cnt_mod(current); + task_ref_cnt_mod(current, -1); return ""; } if (current->exiting != true) { - task_ref_cnt_mod(current); + task_ref_cnt_mod(current, -1); return comm; } else { - task_ref_cnt_mod(current); + task_ref_cnt_mod(current, -1); return ""; } } diff --git a/kernel/mmap.c index 9278df8e53..8dcd8c653f 100644 --- a/kernel/mmap.c +++ b/kernel/mmap.c @@ -6,11 +6,11 @@ #include "fs/fd.h" #include "emu/memory.h" #include "kernel/mm.h" -#include "kernel/resource_locking.h" +#include "util/sync.h" extern bool doEnableExtraLocking; -struct mm *mm_new() { +struct mm *mm_new(void) { struct mm *mm = malloc(sizeof(struct mm)); if (mm == NULL) return NULL; @@ -33,7 +33,7 @@ struct mm *mm_copy(struct mm *mm) { fd_retain(new_mm->exefile); write_lock(&mm->mem.lock); pt_copy_on_write(&mm->mem, &new_mm->mem, 0, MEM_PAGES); - write_unlock(&mm->mem.lock, __FILE__, __LINE__); + write_unlock(&mm->mem.lock); return new_mm; } @@ -45,13 +45,13 @@ void mm_release(struct mm *mm) { if (--mm->refcount == 0) { if (mm->exefile != NULL) fd_close(mm->exefile); - while(task_ref_cnt_val(current)) { // FIXME: Should be locking current->reference.lock and updating + while(task_ref_cnt_get(current)) { // FIXME: Should be locking current->reference.lock and updating // current->reference.count before mem_destroy nanosleep(&lock_pause, NULL); } mem_destroy(&mm->mem); - while(task_ref_cnt_val(current)) { //FIXME: Should now unlock after mem_destroy + while(task_ref_cnt_get(current)) { //FIXME: Should now unlock after mem_destroy nanosleep(&lock_pause, NULL); } free(mm); @@ -109,7 +109,7 @@ static addr_t mmap_common(addr_t addr, dword_t len, dword_t prot, dword_t flags, write_lock(&current->mem->lock); addr_t res = do_mmap(addr, len, prot, flags, fd_no, offset); - write_unlock(&current->mem->lock, __FILE__, __LINE__); + write_unlock(&current->mem->lock); return res; } @@ -157,7 +157,7 @@ int_t sys_munmap(addr_t addr, uint_t len) { write_lock(&current->mem->lock); int err = pt_unmap_always(current->mem, PAGE(addr), PAGE_ROUND_UP(len)); - write_unlock(&current->mem->lock, __FILE__, __LINE__); + write_unlock(&current->mem->lock); if (err < 0) return _EINVAL; @@ -182,7 +182,7 @@ int_t sys_mremap(addr_t addr, dword_t old_len, dword_t new_len, dword_t flags) { // shrinking always works if (new_pages <= old_pages) { - while(task_ref_cnt_val(current)) { + while(task_ref_cnt_get(current)) { nanosleep(&lock_pause, NULL); } int err = pt_unmap(current->mem, PAGE(addr) + new_pages, old_pages - new_pages); @@ -223,7 +223,7 @@ int_t sys_mprotect(addr_t addr, uint_t len, int_t prot) { pages_t pages = PAGE_ROUND_UP(len);
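mmap.c brackets every page-table operation in write_lock()/write_unlock() by hand. A hedged alternative sketch in the spirit of the TASK_MAY_BLOCK for-loop macro in task.h; WITH_MEM_WRITE_LOCK is a hypothetical name, and it only suits straight-line bodies, since an early return inside the block would skip the unlock:

    // Hypothetical convenience macro: brackets a statement with the write lock.
    #define WITH_MEM_WRITE_LOCK(mem) \
        for (int once_ = (write_lock(&(mem)->lock), 0); once_ < 1; \
             write_unlock(&(mem)->lock), once_++)

    // usage sketch:
    //   WITH_MEM_WRITE_LOCK(current->mem) {
    //       err = pt_set_flags(current->mem, PAGE(addr), pages, prot);
    //   }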
write_lock(¤t->mem->lock); int err = pt_set_flags(current->mem, PAGE(addr), pages, prot); - write_unlock(¤t->mem->lock, __FILE__, __LINE__); + write_unlock(¤t->mem->lock); return err; } @@ -287,6 +287,6 @@ addr_t sys_brk(addr_t new_brk) { mm->brk = new_brk; out:; addr_t brk = mm->brk; - write_unlock(&mm->mem.lock, __FILE__, __LINE__); + write_unlock(&mm->mem.lock); return brk; } diff --git a/kernel/poll.c b/kernel/poll.c index ef58b5f89b..06bdbe33f7 100644 --- a/kernel/poll.c +++ b/kernel/poll.c @@ -4,7 +4,6 @@ #include "fs/fd.h" #include "fs/poll.h" #include "kernel/calls.h" -#include "kernel/resource_locking.h" static int user_read_or_zero(addr_t addr, void *data, size_t size) { if (addr == 0) @@ -199,7 +198,7 @@ dword_t sys_poll(addr_t fds, dword_t nfds, int_t timeout) { TASK_MAY_BLOCK { res = poll_wait(poll, poll_event_callback, &context, timeout < 0 ? NULL : &timeout_ts); } - while(task_ref_cnt_val(current)) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_get(current)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } poll_destroy(poll); diff --git a/kernel/resource.c b/kernel/resource.c index e94230a35f..4785f94869 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -12,7 +12,7 @@ #include #include #include "kernel/calls.h" -#include "kernel/resource_locking.h" +#include "util/sync.h" static bool resource_valid(int resource) { return resource >= 0 && resource < RLIMIT_NLIMITS_; @@ -207,7 +207,7 @@ int_t sys_sched_getaffinity(pid_t_ pid, dword_t cpusetsize, addr_t cpuset_addr) // Handle pid check separately for clarity if (pid != 0) { - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); struct task *task = pid_get_task(pid); unlock(&pids_lock); if (task == NULL) diff --git a/kernel/resource_locking.h b/kernel/resource_locking.h index 6f2dfcfe32..86ea394fc8 100644 --- a/kernel/resource_locking.h +++ b/kernel/resource_locking.h @@ -1,13 +1,14 @@ //#include "util/sync.h" // Because sometimes we can't #include "kernel/task.h" -mke -extern void task_ref_cnt_mod(struct task *task, int value); -extern void task_ref_cnt_mod_wrapper(int, const char*, int); +// Deprecated -extern int task_ref_cnt_val(struct task *task); -extern void mem_ref_cnt_mod(struct mem*, int, char*, int); -extern int mem_ref_cnt_val(struct mem *mem); -extern unsigned locks_held_count(struct task*); -extern void modify_locks_held_count(struct task*, int); -extern bool current_is_valid(void); +/* void task_ref_cnt_mod(struct task *task, int value); +void task_ref_cnt_mod_wrapper(int); +int task_ref_cnt_get(struct task *task); +void mem_ref_cnt_mod(struct mem*, int, char*, int); +int mem_ref_cnt_val_get(struct mem *mem); +unsigned locks_held_count(struct task*); +void modify_locks_held_count(struct task*, int); +bool current_is_valid(void); */ diff --git a/kernel/signal.c b/kernel/signal.c index f09d44aa5e..b1298be576 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -5,8 +5,8 @@ #include "kernel/signal.h" #include "kernel/task.h" #include "kernel/vdso.h" -#include "kernel/resource_locking.h" #include "emu/interrupt.h" +#include "util/sync.h" #if is_gcc(9) #pragma GCC diagnostic ignored "-Waddress-of-packed-member" @@ -141,7 +141,7 @@ bool try_self_signal(int sig) { } int send_group_signal(dword_t pgid, int sig, struct siginfo_ info) { - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); struct pid *pid = pid_get(pgid); if (pid == NULL) { unlock(&pids_lock); @@ -382,7 +382,7 @@ 
void receive_signals(void) { // Should this function have a check for critical_ bool now_stopped = current->group->stopped; unlock(&current->group->lock); if (now_stopped) { - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); notify(&current->parent->group->child_exit); // TODO add siginfo send_signal(current->parent, current->group->leader->exit_signal, SIGINFO_NIL); @@ -466,7 +466,7 @@ struct sighand *sighand_copy(struct sighand *sighand) { } void sighand_release(struct sighand *sighand) { - while(task_ref_cnt_val(current) > 2) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_get(current) > 2) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } if (--sighand->refcount == 0) { @@ -739,7 +739,7 @@ static int kill_group(pid_t_ pgid, dword_t sig) { } struct tgroup *tgroup; int err = _EPERM; - while((task_ref_cnt_val(current)) || (locks_held_count(current))) { // Wait for now, task is in one or more critical sections, and/or has locks + while((task_ref_cnt_get(current)) || (locks_held_count(current))) { // Wait for now, task is in one or more critical sections, and/or has locks nanosleep(&lock_pause, NULL); } list_for_each_entry(&pid->pgroup, tgroup, pgroup) { @@ -772,7 +772,7 @@ static int do_kill(pid_t_ pid, dword_t sig, pid_t_ tgid) { pid = -current->group->pgid; int err; - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); if (pid == -1) { err = kill_everything(sig); diff --git a/kernel/signal.h index 7bb31e1843..20204cbbb1 100644 --- a/kernel/signal.h +++ b/kernel/signal.h @@ -4,6 +4,7 @@ #include "misc.h" #include "util/list.h" #include "util/sync.h" +#include struct task; typedef qword_t sigset_t_; diff --git a/kernel/task.c index 3779c374e7..52d567f414 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -4,7 +4,6 @@ #include #include "kernel/calls.h" #include "kernel/task.h" -#include "kernel/resource_locking.h" #include "emu/memory.h" #include "emu/tlb.h" #include "platform/platform.h" @@ -15,7 +14,7 @@ pthread_mutex_t multicore_lock = PTHREAD_MUTEX_INITIALIZER; pthread_mutex_t extra_lock = PTHREAD_MUTEX_INITIALIZER; pthread_mutex_t delay_lock = PTHREAD_MUTEX_INITIALIZER; -lock_t atomic_l_lock; +extern lock_t atomic_l_lock; pthread_mutex_t wait_for_lock = PTHREAD_MUTEX_INITIALIZER; time_t boot_time; // Store the boot time. -mke @@ -72,7 +71,7 @@ dword_t get_count_of_blocked_tasks(void) { task_ref_cnt_mod(current, 1); dword_t res = 0; struct pid *pid_entry; - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); list_for_each_entry(&alive_pids_list, pid_entry, alive) { if (pid_entry->task->io_block) { res++; @@ -84,7 +83,7 @@ } dword_t get_count_of_alive_tasks(void) { - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); dword_t res = 0; struct list *item; list_for_each(&alive_pids_list, item) { @@ -95,7 +94,7 @@ } struct task *task_create_(struct task *parent) { - complex_lockt(&pids_lock, 0, __FILE__, __LINE__); + complex_lockt(&pids_lock, 0); do { last_allocated_pid++; if (last_allocated_pid > MAX_PID) last_allocated_pid = 1; @@ -154,7 +153,7 @@ struct task *task_create_(struct task *parent) { // We consolidate the check for whether the task is in a critical section, // holds locks, or has pending signals into a single function.
bool should_wait(struct task *t) { - return task_ref_cnt_val(t) > 1 || locks_held_count(t) || !!(t->pending & ~t->blocked); + return task_ref_cnt_get(t) > 1 || locks_held_count(t) || !!(t->pending & ~t->blocked); } void task_destroy(struct task *task, int caller) { @@ -193,7 +192,7 @@ void task_destroy(struct task *task, int caller) { retry: // Free the task's resources. - if (!task_ref_cnt_val(task)) { + if (!task_ref_cnt_get(task)) { free(task); } else { goto retry; } @@ -222,7 +221,7 @@ void task_run_current(void) { tlb_refresh(&tlb, &current->mem->mmu); while (true) { - read_lock(&current->mem->lock, __FILE__, __LINE__); + read_lock(&current->mem->lock); if(!doEnableMulticore) { pthread_mutex_lock(&multicore_lock); } int interrupt = cpu_run_to_interrupt(cpu, &tlb); - read_unlock(&current->mem->lock, __FILE__, __LINE__); + read_unlock(&current->mem->lock); if(!doEnableMulticore) pthread_mutex_unlock(&multicore_lock); @@ -297,3 +296,112 @@ void update_thread_name(void) { pthread_setname_np(pthread_self(), name); #endif } + +void task_ref_cnt_mod(struct task *task, int value) { // value Should only be -1 or 1. -mke + // Keep track of how many threads are referencing this task + if(!doEnableExtraLocking) {// If they want to fly by the seat of their pants... -mke + return; + } + + if(task == NULL) { + if(current != NULL) { + task = current; + } else { + return; + } + } + + bool ilocked = false; + + if (trylocknl(&task->general_lock, task->comm, task->pid) != _EBUSY) { + ilocked = true; // Make sure this is locked, and unlock it later if we had to lock it. + } + + pthread_mutex_lock(&task->reference.lock); + + if(((task->reference.count + value) < 0) && (task->pid > 9)) { // Prevent our unsigned value attempting to go negative. -mke + printk("ERROR: Attempt to decrement task reference count to be negative, ignoring(%s:%d) (%d - %d)\n", task->comm, task->pid, task->reference.count, value); + if(ilocked == true) + unlock(&task->general_lock); + + pthread_mutex_unlock(&task->reference.lock); + + return; + } + + + task->reference.count = task->reference.count + value; + + pthread_mutex_unlock(&task->reference.lock); + + if(ilocked == true) + unlock(&task->general_lock); +} + +void task_ref_cnt_mod_wrapper(int value) { + // sync.h can't know about the definition of task struct due to recursive include files. -mke + if((current != NULL) && (doEnableExtraLocking)) + task_ref_cnt_mod(current, value); + + return; +} + +void modify_locks_held_count(struct task *task, int value) { // value Should only be -1 or 1. -mke + if((task == NULL) && (current != NULL)) { + task = current; + } else { + return; + } + + pthread_mutex_lock(&task->locks_held.lock); + if((task->locks_held.count + value < 0) && task->pid > 9) { + // if((task->pid > 2) && (!strcmp(task->comm, "init"))) // Why ask why? -mke + printk("ERROR: Attempt to decrement locks_held count below zero, ignoring\n"); + return; + } + task->locks_held.count = task->locks_held.count + value; + pthread_mutex_unlock(&task->locks_held.lock); +} + +unsigned task_ref_cnt_get(struct task *task) { + unsigned tmp = 0; + pthread_mutex_lock(&task->reference.lock); // This would make more + tmp = task->reference.count; + if(tmp > 1000) // Work around brain damage.
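task_ref_cnt_get() here clamps any count over 1000 back to zero to paper over a corrupted counter. A hedged sketch of one alternative that avoids both the mutex and the corruption window by making the counter a C11 atomic; the standalone ref_count variable is an assumption for illustration, not iSH-AOK's actual struct layout:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int ref_count;  // assumed field; the real code guards a plain int with reference.lock

    static void ref_mod(int value) {
        int old = atomic_fetch_add(&ref_count, value);
        if (old + value < 0) {               // underflow: report and undo
            fprintf(stderr, "refcount underflow (%d %+d)\n", old, value);
            atomic_fetch_sub(&ref_count, value);
        }
    }

    static int ref_get(void) {
        return atomic_load(&ref_count);      // always a coherent value, no 1000+ garbage reads
    }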
Remove when said brain damage is fixed + tmp = 0; + pthread_mutex_unlock(&task->reference.lock); + + return tmp; +} + +unsigned task_ref_cnt_get_wrapper(void) { + return(task_ref_cnt_get(current)); +} + +bool current_is_valid(void) { + if(current != NULL) + return true; + + return false; +} + +unsigned locks_held_count(struct task *task) { + // return 0; // Short circuit for now + if(task->pid < 10) // Here be monsters. -mke + return 0; + if(task->locks_held.count > 0) { + return(task->locks_held.count -1); + } + unsigned tmp = 0; + pthread_mutex_lock(&task->locks_held.lock); + tmp = task->locks_held.count; + pthread_mutex_unlock(&task->locks_held.lock); + + return tmp; +} + +void modify_locks_held_count_wrapper(int value) { // sync.h can't know about the definition of struct due to recursive include files. -mke + if(current != NULL) + modify_locks_held_count(current, value); + return; +} diff --git a/kernel/task.h b/kernel/task.h index 09fe4e8fde..ab936835d4 100644 --- a/kernel/task.h +++ b/kernel/task.h @@ -9,9 +9,10 @@ #include "kernel/resource.h" #include "fs/sockrestart.h" #include "util/list.h" -#include "util/timer.h" #include "util/sync.h" -#include "kernel/resource_locking.h" +#include "util/timer.h" + +extern void task_ref_cnt_mod_wrapper(int value); struct task { struct cpu_state cpu; @@ -238,4 +239,12 @@ __attribute__((always_inline)) inline int task_may_block_end(void) { #define TASK_MAY_BLOCK for (int i = task_may_block_start(); i < 1; task_may_block_end(), i++) +void task_ref_cnt_mod(struct task *task, int value); +void task_ref_cnt_mod_wrapper(int value); +unsigned task_ref_cnt_get(struct task *task); +unsigned task_ref_cnt_get_wrapper(void); +void modify_locks_held_count(struct task *task, int value); +void modify_locks_held_count_wrapper(int value); +unsigned locks_held_count(struct task *task); + #endif diff --git a/kernel/time.c b/kernel/time.c index 6ef8fc283a..285a3fbaa5 100644 --- a/kernel/time.c +++ b/kernel/time.c @@ -10,7 +10,6 @@ #include "kernel/errno.h" #include "kernel/resource.h" #include "kernel/time.h" -#include "kernel/resource_locking.h" #include "fs/poll.h" #include diff --git a/kernel/user.c b/kernel/user.c index 21a9f8a357..d0c81a6a92 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -1,6 +1,5 @@ #include #include "kernel/calls.h" -#include "kernel/resource_locking.h" extern bool doEnableExtraLocking; extern pthread_mutex_t extra_lock; @@ -48,11 +47,11 @@ static int __user_write_task(struct task *task, addr_t addr, const void *buf, si } int user_read_task(struct task *task, addr_t addr, void *buf, size_t count) { - read_lock(&task->mem->lock, __FILE__, __LINE__); + read_lock(&task->mem->lock); int res = __user_read_task(task, addr, buf, count); - read_unlock(&task->mem->lock, __FILE__, __LINE__); + read_unlock(&task->mem->lock); return res; } @@ -61,16 +60,16 @@ int user_read(addr_t addr, void *buf, size_t count) { } int user_write_task(struct task *task, addr_t addr, const void *buf, size_t count) { - read_lock(&task->mem->lock, __FILE__, __LINE__); + read_lock(&task->mem->lock); int res = __user_write_task(task, addr, buf, count, false); - read_unlock(&task->mem->lock, __FILE__, __LINE__); + read_unlock(&task->mem->lock); return res; } int user_write_task_ptrace(struct task *task, addr_t addr, const void *buf, size_t count) { - read_lock(&task->mem->lock, __FILE__, __LINE__); + read_lock(&task->mem->lock); int res = __user_write_task(task, addr, buf, count, true); - read_unlock(&task->mem->lock, __FILE__, __LINE__); + read_unlock(&task->mem->lock); 
return res; } @@ -82,18 +81,18 @@ int user_read_string(addr_t addr, char *buf, size_t max) { if (addr == 0) { return 1; } - read_lock(&current->mem->lock, __FILE__, __LINE__); + read_lock(&current->mem->lock); size_t i = 0; while (i < max) { if (__user_read_task(current, addr + i, &buf[i], sizeof(buf[i])), false) { - read_unlock(&current->mem->lock, __FILE__, __LINE__); + read_unlock(&current->mem->lock); return 1; } if (buf[i] == '\0') break; i++; } - read_unlock(&current->mem->lock, __FILE__, __LINE__); + read_unlock(&current->mem->lock); return 0; } @@ -101,15 +100,15 @@ int user_write_string(addr_t addr, const char *buf) { if (addr == 0) { return 1; } - read_lock(&current->mem->lock, __FILE__, __LINE__); + read_lock(&current->mem->lock); size_t i = 0; do { if (__user_write_task(current, addr + i, &buf[i], sizeof(buf[i]), false)) { - read_unlock(&current->mem->lock, __FILE__, __LINE__); + read_unlock(&current->mem->lock); return 1; } i++; } while (buf[i - 1] != '\0'); - read_unlock(&current->mem->lock, __FILE__, __LINE__); + read_unlock(&current->mem->lock); return 0; } diff --git a/main.c index 86d6d31213..1e703409e9 100644 --- a/main.c +++ b/main.c @@ -18,7 +18,7 @@ static void handler(int signo, siginfo_t *sigaction, void *context) { signal(signo, SIG_DFL); } -static void gen_exception() { +static void gen_exception(void) { printk("WARNING: gen_exception in.\n"); *(int *)0 = 0; printk("WARNING: gen_exception out.\n"); diff --git a/util/fifo.c index a2fa83c8ea..022f2670b1 100644 --- a/util/fifo.c +++ b/util/fifo.c @@ -2,7 +2,6 @@ #include #include "util/fifo.h" #include "kernel/task.h" -#include "kernel/resource_locking.h" void fifo_init(struct fifo *fifo, size_t capacity) { fifo->buf = malloc(capacity); diff --git a/util/ro_locks.c index b6b74c4021..b3249b1140 100644 --- a/util/ro_locks.c +++ b/util/ro_locks.c @@ -5,4 +5,205 @@ // Created by Michael Miller on 11/29/23. // -#include +#include +#include "misc.h" +#include "debug.h" +#include "kernel/errno.h" +#include "kernel/task.h" +#include "util/sync.h" + +// The following are in log.c. There should probably be in a log.h that gets included instead. +extern int current_pid(void); +extern int current_uid(void); +extern char* current_comm(void); +bool current_is_valid(void); + +// Lock to lock locks. Used to assure transition between RO<->RW is atomic for RW locks +lock_t atomic_l_lock; + +// Function signatures and placeholders for implementation + +void lock_init(lock_t *lock, char lname[16]) { + int ret = pthread_mutex_init(&lock->m, NULL); + if (ret != 0) { + // Handle the error according to your application's needs + printk("ERROR: Failed to initialize mutex: %s:(%s)\n", lname, strerror(ret)); + // Depending on how critical this failure is, you might choose to exit, return, or take other actions.
+ return; + } + + if(lname != NULL) { + strncpy(lock->lname, lname, 16); + } else { + strncpy(lock->lname, "WTF", 16); + } + lock->wait4 = false; +#if LOCK_DEBUG + lock->debug = (struct lock_debug) { + .initialized = true, + }; +#endif + lock->comm[0] = 0; + lock->uid = -1; +} + +void unlock(lock_t *lock) { + //pid_t pid = current_pid(); + + lock->owner = zero_init(pthread_t); + pthread_mutex_unlock(&lock->m); + lock->pid = -1; // + lock->comm[0] = 0; + modify_locks_held_count_wrapper(-1); + +#if LOCK_DEBUG + assert(lock->debug.initialized); + assert(lock->debug.file && "Attempting to unlock an unlocked lock"); + lock->debug = (struct lock_debug) { .initialized = true }; +#endif + return; +} + +void atomic_l_lockf(char lname[16], int skiplog) { + if(!doEnableExtraLocking) + return; + int res = 0; + if(atomic_l_lock.pid > 0) { + if(current_pid() != atomic_l_lock.pid) { // Potential deadlock situation. Also weird. --mke + res = pthread_mutex_lock(&atomic_l_lock.m); + atomic_l_lock.pid = current_pid(); + } else if(!skiplog) { + printk("WARNING: Odd attempt by process (%s:%d) to attain same locking lock twice. Ignoring\n", current_comm(), current_pid()); + res = 0; + } + } + if(!res) { + strlcpy((char *)&atomic_l_lock.comm, current_comm(), 16); + strlcpy((char *)&atomic_l_lock.lname, lname, 16); + modify_locks_held_count_wrapper(1); + } else if (!skiplog) { + printk("Error on locking lock (%s)\n", lname); + } +} + +void mylock(lock_t *lock, int log_lock) { + if(!strcmp(lock->lname, "task_creat_gen")) // kluge. This means the lock is new, and SHOULD be unlocked + unlock(lock); + + if(!log_lock) { + task_ref_cnt_mod_wrapper(1); + pthread_mutex_lock(&lock->m); + modify_locks_held_count_wrapper(1); + lock->owner = pthread_self(); + lock->pid = current_pid(); + lock->uid = current_uid(); + strlcpy(lock->comm, current_comm(), 16); + task_ref_cnt_mod_wrapper(-1); + } else { + pthread_mutex_lock(&lock->m); + lock->owner = pthread_self(); + lock->pid = current_pid(); + lock->uid = current_uid(); + strncpy(lock->comm, current_comm(), 16); + } + return; +} + +void atomic_l_unlockf(void) { + if(!doEnableExtraLocking) + return; + int res = 0; + strncpy((char *)&atomic_l_lock.lname,"\0", 1); + res = pthread_mutex_unlock(&atomic_l_lock.m); + if(res) { + printk("ERROR: unlocking locking lock\n"); + } else { + atomic_l_lock.pid = -1; // Reset + } + + modify_locks_held_count_wrapper(-1); +} + +void complex_lockt(lock_t *lock, int log_lock) { + if (lock->pid == current_pid()) + return; + + unsigned int count = 0; + int random_wait = WAIT_SLEEP + rand() % WAIT_SLEEP; + struct timespec lock_pause = {0, random_wait}; + long count_max = (WAIT_MAX_UPPER - random_wait); + + while (pthread_mutex_trylock(&lock->m)) { + count++; + if (nanosleep(&lock_pause, NULL) == -1) { + // Handle error + } + if (count > count_max) { + if (!log_lock) { + printk("ERROR: Possible deadlock, aborted lock attempt(PID: %d Process: %s) (Previously Owned:%s:%d)\n", + current_pid(), current_comm(), lock->comm, lock->pid); + pthread_mutex_unlock(&lock->m); + modify_locks_held_count_wrapper(-1); + } + return; + } + } + + modify_locks_held_count_wrapper(1); + + if (count > count_max * 0.90) { + if (!log_lock) + printk("Warning: large lock attempt count (%d), aborted lock attempt(PID: %d Process: %s) (Previously Owned:%s:%d) \n", + count, current_pid(), current_comm(), lock->comm, lock->pid); + } + + lock->owner = pthread_self(); + lock->pid = current_pid(); + lock->uid = current_uid(); + strncpy(lock->comm, current_comm(),
sizeof(lock->comm) - 1); + lock->comm[sizeof(lock->comm) - 1] = '\0'; // Null-terminate just in case +} + +int trylock(lock_t *lock) { + atomic_l_lockf("trylock\0", 0); + int status = pthread_mutex_trylock(&lock->m); + atomic_l_unlockf(); +#if LOCK_DEBUG + if (!status) { + lock->debug.file = file; + lock->debug.line = line; + extern int current_pid(void); + lock->debug.pid = current_pid(); + } +#endif + if((!status) && (current_pid() > 10)) {// iSH-AOK crashes if low number processes are not excluded. Might be able to go lower then 10? -mke + modify_locks_held_count_wrapper(1); + + //STRACE("trylock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); + lock->pid = current_pid(); + strncpy(lock->comm, current_comm(), 16); + } + return status; +} + +int trylocknl(lock_t *lock, char *comm, int pid) { + //Don't log, avoid recursion + int status = pthread_mutex_trylock(&lock->m); +#if LOCK_DEBUG + if (!status) { + lock->debug.file = file; + lock->debug.line = line; + extern int current_pid(void); + lock->debug.pid = current_pid(); + } +#endif + if(!status) {// iSH-AOK crashes if low number processes are not excluded. Might be able to go lower then 10? -mke + modify_locks_held_count_wrapper(1); + + //STRACE("trylock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); + lock->pid = pid; + strncpy(lock->comm, comm, 16); + } + return status; +} + diff --git a/util/ro_locks.h b/util/ro_locks.h index 9269eee83a..119aef93f0 100644 --- a/util/ro_locks.h +++ b/util/ro_locks.h @@ -4,15 +4,12 @@ // // Created by Michael Miller on 11/29/23. // - -#ifndef ro_locks_h -#define ro_locks_h - - -#endif /* ro_locks_h */ +#ifndef RO_LOCKS_H +#define RO_LOCKS_H #include #include +#include typedef struct { pthread_mutex_t m; @@ -32,13 +29,18 @@ typedef struct { #endif } lock_t; + void lock_init(lock_t *lock, char lname[16]); void unlock(lock_t *lock); -void atomic_l_lockf(char lname[16], int skiplog, const char *file, int line); +void atomic_l_lockf(char lname[16], int skiplog); +void mylock(lock_t *lock, int log_lock); void atomic_l_unlockf(void); -void complex_lockt(lock_t *lock, int log_lock, const char *file, int line); -int trylock(lock_t *lock, const char *file, int line); -int trylocknl(lock_t *lock, char *comm, int pid, const char *file, int line); +void complex_lockt(lock_t *lock, int log_lock); +int trylock(lock_t *lock); +int trylocknl(lock_t *lock, char *comm, int pid); -#endif // RO_LOCK_H +#define lock(lock, log_lock) mylock(lock, log_lock) +//#define trylock(lock) trylock(lock, __FILE__, __LINE__) +//#define trylocknl(lock, comm, pid) trylocknl(lock, comm, pid, __FILE__, __LINE__) +#endif diff --git a/util/rw_locks.c b/util/rw_locks.c new file mode 100644 index 0000000000..780842e59c --- /dev/null +++ b/util/rw_locks.c @@ -0,0 +1,273 @@ +// +// rw_locks.c +// iSH-AOK +// +// Created by Michael Miller on 11/29/23. +// + +#include "kernel/task.h" +#include "util/sync.h" + +// The following are in log.c. There should probably be in a log.h that gets included instead. +extern int current_pid(void); +extern int current_uid(void); +extern char* current_comm(void); +bool current_is_valid(void); + +// this is a read-write lock that prefers writers, i.e. if there are any +// writers waiting a read lock will block. +// on darwin pthread_rwlock_t is already like this, on linux you can configure +// it to prefer writers. not worrying about anything else right now. 
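That comment is the key portability point in rw_locks.c: Darwin's pthread_rwlock_t already queues new readers behind a waiting writer, while glibc needs an explicit attribute. A standalone sketch of that initialization, the same technique wrlock_init() uses further down; the wrapper name is hypothetical:

    #include <pthread.h>

    // Sketch: initialize a writer-preferring rwlock portably. On glibc the
    // kind must be set explicitly; on Darwin the default already prefers
    // writers, so the attribute block compiles away.
    static int init_writer_preferring_rwlock(pthread_rwlock_t *l) {
        pthread_rwlockattr_t attr, *pattr = NULL;
    #if defined(__GLIBC__)
        pthread_rwlockattr_init(&attr);
        pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
        pattr = &attr;
    #endif
        return pthread_rwlock_init(l, pattr);
    }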
+ +void loop_lock_generic(wrlock_t *lock, int is_write) { + task_ref_cnt_mod_wrapper(1); + modify_locks_held_count_wrapper(1); + + unsigned count = 0; + int random_wait = is_write ? WAIT_SLEEP + rand() % 100 : WAIT_SLEEP + rand() % WAIT_SLEEP/4; + struct timespec lock_pause = {0, random_wait}; + long count_max = (WAIT_MAX_UPPER - random_wait); + count_max = (is_write && count_max < 25000) ? 25000 : count_max; + + while((is_write ? pthread_rwlock_trywrlock(&lock->l) : pthread_rwlock_tryrdlock(&lock->l))) { + count++; + if(count > count_max) { + handle_lock_error(lock, is_write ? "loop_lock_write" : "loop_lock_read"); + count = 0; + } + atomic_l_unlockf(); + nanosleep(&lock_pause, NULL); + atomic_l_lockf(is_write ? "llw\0" : "ll_read\0", 0); + } + + task_ref_cnt_mod_wrapper(-1); +} + + + +void _read_lock(wrlock_t *lock) { + loop_lock_read(lock); + task_ref_cnt_mod_wrapper(1); + //pthread_rwlock_rdlock(&lock->l); + // assert(lock->val >= 0); // If it isn't >= zero we have a problem since that means there is a write lock somehow. -mke + if(lock->val) { + lock->val++; + } else if (lock->val > -1){ // Deal with insanity. -mke + lock->val++; + } else { + printk("ERROR: _read_lock() val is %d\n", lock->val); + lock->val++; + } + + if(lock->val > 1000) { // We likely have a problem. + printk("WARNING: _read_lock(%x) has 1000+ pending read locks. (File: %s, Line: %d) Breaking likely deadlock/process corruption(PID: %d Process: %s.\n", lock, lock->file, lock->line,lock->pid, lock->comm); + read_unlock_and_destroy(lock); + task_ref_cnt_mod_wrapper(-1); + //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); + return; + } + + lock->pid = current_pid(); + if(lock->pid > 9) + strncpy((char *)lock->comm, current_comm(), 16); + task_ref_cnt_mod_wrapper(-1); + //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); +} + +void read_lock(wrlock_t *lock) { // Wrapper so that external calls lock, internal calls using _read_unlock() don't -mke + atomic_l_lockf("r_lock\0", 0); + _read_lock(lock); + atomic_l_unlockf(); +} + +void _read_unlock(wrlock_t *lock) { + if(lock->val <= 0) { + printk("ERROR: read_unlock(%x) error(PID: %d Process: %s count %d)\n", lock, current_pid(), current_comm(), lock->val); + lock->val = 0; + lock->pid = -1; + lock->comm[0] = 0; + modify_locks_held_count_wrapper(-1); + //STRACE("read_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); + return; + } + assert(lock->val > 0); + if (pthread_rwlock_unlock(&lock->l) != 0) + printk("URGENT: read_unlock(%x) error(PID: %d Process: %s)\n", lock, current_pid(), current_comm()); + lock->val--; + modify_locks_held_count_wrapper(-1); + //STRACE("read_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); +} + +void read_unlock(wrlock_t *lock) { + if(lock->pid != current_pid() && (lock->pid != -1)) { + atomic_l_lockf("r_unlock\0", 0); + _read_unlock(lock); + } else { // We can unlock our own lock without additional locking.
-mke + _read_unlock(lock); + return; + } + if(lock->pid != current_pid() && (lock->pid != -1)) + atomic_l_unlockf(); +} + +void _write_unlock(wrlock_t *lock) { + if(pthread_rwlock_unlock(&lock->l) != 0) + printk("URGENT: write_unlock(%x:%d) error(PID: %d Process: %s) \n", lock, lock->val, current_pid(), current_comm()); + if(lock->val != -1) { + printk("ERROR: write_unlock(%x) on lock with val of %d (PID: %d Process: %s )\n", lock, lock->val, current_pid(), current_comm()); + } + //assert(lock->val == -1); + lock->val = lock->line = lock->pid = 0; + lock->pid = -1; + lock->comm[0] = 0; + //STRACE("write_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); + lock->file = NULL; + modify_locks_held_count_wrapper(-1); +} + +void write_unlock(wrlock_t *lock) { // Wrap it. External calls lock, internal calls using _write_unlock() don't -mke + atomic_l_lockf("w_unlock\0", 0); + _write_unlock(lock); + atomic_l_unlockf(); + return; +} + +void wrlock_init(wrlock_t *lock) { + pthread_rwlockattr_t *pattr = NULL; +#if defined(__GLIBC__) + pthread_rwlockattr_t attr; + pattr = &attr; + pthread_rwlockattr_init(pattr); + pthread_rwlockattr_setkind_np(pattr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP); +#endif +#ifdef JUSTLOG + if (pthread_rwlock_init(&lock->l, pattr)) + printk("URGENT: wrlock_init() error(PID: %d Process: %s)\n",current_pid(), current_comm()); +#else + if (pthread_rwlock_init(&lock->l, pattr)) __builtin_trap(); +#endif + lock->val = lock->line = lock->pid = 0; + lock->file = NULL; +} + +void _lock_destroy(wrlock_t *lock) { + while((task_ref_cnt_get(current) > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections + nanosleep(&lock_pause, NULL); + } +#ifdef JUSTLOG + if (pthread_rwlock_destroy(&lock->l) != 0) { + printk("URGENT: lock_destroy(%x) on active lock. (PID: %d Process: %s Critical Region Count: %d)\n",&lock->l, current_pid(), current_comm(),task_ref_cnt_get(current)); + } +#else + if (pthread_rwlock_destroy(&lock->l) != 0) __builtin_trap(); +#endif +} + +void lock_destroy(wrlock_t *lock) { + while((task_ref_cnt_get(current) > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections + nanosleep(&lock_pause, NULL); + } + + atomic_l_lockf("l_destroy\0", 0); + _lock_destroy(lock); + atomic_l_unlockf(); +} + +void _write_lock(wrlock_t *lock) { // Write lock + loop_lock_write(lock); + + // assert(lock->val == 0); + lock->val = -1; + // lock->file = file; + // lock->line = line; + lock->pid = current_pid(); + if(lock->pid > 9) + strncpy((char *)lock->comm, current_comm(), 16); + //STRACE("write_lock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); +} + +void write_lock(wrlock_t *lock) { + atomic_l_lockf("_w_lock", 0); + _write_lock(lock); + atomic_l_unlockf(); +} + +void handle_lock_error(wrlock_t *lock, const char *func) { + printk("ERROR: %s(%x) tries exceeded %d, dealing with likely deadlock. 
(Lock held by PID: %d Process: %s) \n", + func, lock, WAIT_MAX_UPPER, lock->pid, lock->comm); + + if(pid_get((dword_t)lock->pid) == NULL) { + printk("ERROR: %s(%x) locking PID(%d) is gone for task %s\n", func, lock, lock->pid, lock->comm); + pthread_rwlock_unlock(&lock->l); + } else { + printk("ERROR: %s(%x) locking PID(%d), %s is apparently wedged\n", func, lock, lock->pid, lock->comm); + pthread_rwlock_unlock(&lock->l); + } + + if(lock->val > 1) { + lock->val--; + } else if(lock->val == 1) { + _read_unlock(lock); + } else if(lock->val < 0) { + _write_unlock(lock); + } +} + +void read_to_write_lock(wrlock_t *lock) { // Try to atomically swap a RO lock to a Write lock. -mke + task_ref_cnt_mod_wrapper(1); + atomic_l_lockf("rtw_lock\0", 0); + _read_unlock(lock); + _write_lock(lock); + atomic_l_unlockf(); + task_ref_cnt_mod_wrapper(-1); +} + +void write_to_read_lock(wrlock_t *lock) { // Try to atomically swap a Write lock to a RO lock. -mke + task_ref_cnt_mod_wrapper(1); + atomic_l_lockf("wtr_lock\0", 0); + _write_unlock(lock); + _read_lock(lock); + atomic_l_unlockf(); + task_ref_cnt_mod_wrapper(-1); +} + +void write_unlock_and_destroy(wrlock_t *lock) { + task_ref_cnt_mod_wrapper(1); + atomic_l_lockf("wuad_lock\0", 0); + _write_unlock(lock); + _lock_destroy(lock); + atomic_l_unlockf(); + task_ref_cnt_mod_wrapper(-1); +} + +void read_unlock_and_destroy(wrlock_t *lock) { + atomic_l_lockf("ruad_lock", 0); + if(trylockw(lock)) // It should be locked, but just in case. Likely masking underlying issue. -mke + _read_unlock(lock); + _lock_destroy(lock); + atomic_l_unlockf(); +} + +int trylockw(wrlock_t *lock) { + atomic_l_lockf("trylockw\0", 0); + int status = pthread_rwlock_trywrlock(&lock->l); + atomic_l_unlockf(); +#if LOCK_DEBUG + if (!status) { + lock->debug.file = file; + lock->debug.line = line; + extern int current_pid(void); + lock->debug.pid = current_pid(); + } +#endif + if(status == 0) { + modify_locks_held_count_wrapper(1); + //STRACE("trylockw(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); + lock->pid = current_pid(); + strncpy(lock->comm, current_comm(), 16); + } + return status; +} + +//#define trylockw(lock) trylockw(lock, __FILE__, __LINE__) + diff --git a/util/rw_locks.h b/util/rw_locks.h index 06b6f93314..ca632bbd4b 100644 --- a/util/rw_locks.h +++ b/util/rw_locks.h @@ -5,12 +5,6 @@ // Created by Michael Miller on 11/29/23. 
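With the rw_locks.c implementation complete at this point, a short hypothetical usage sketch of the exported wrlock_t API, matching the declarations in rw_locks.h below; map_lock and example() are made-up names:

    #include "util/sync.h"  // pulls in rw_locks.h per this patch

    static wrlock_t map_lock;

    static void example(void) {
        wrlock_init(&map_lock);
        read_lock(&map_lock);            // shared: many readers allowed
        /* ... inspect shared state ... */
        read_to_write_lock(&map_lock);   // upgrade, serialized by atomic_l_lock
        /* ... mutate state ... */
        write_to_read_lock(&map_lock);   // downgrade, readers may proceed again
        read_unlock(&map_lock);
    }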
// -#ifndef rw_locks_h -#define rw_locks_h - - -#endif /* rw_locks_h */ - #ifndef RW_LOCK_H #define RW_LOCK_H @@ -29,14 +23,21 @@ typedef struct { } wrlock_t; void wrlock_init(wrlock_t *lock); -void read_lock(wrlock_t *lock, const char *file, int line); -void write_lock(wrlock_t *lock, const char *file, int line); -void read_unlock(wrlock_t *lock, const char *file, int line); -void write_unlock(wrlock_t *lock, const char *file, int line); +void read_lock(wrlock_t *lock); +void write_lock(wrlock_t *lock); +void read_unlock(wrlock_t *lock); +void write_unlock(wrlock_t *lock); void read_to_write_lock(wrlock_t *lock); -void write_to_read_lock(wrlock_t *lock, const char *file, int line); +void write_to_read_lock(wrlock_t *lock); void write_unlock_and_destroy(wrlock_t *lock); void read_unlock_and_destroy(wrlock_t *lock); -void lock_destroy(wrlock_t *lock); +void read_to_write_lock(wrlock_t *lock); +void read_unlock_and_destroy(wrlock_t *lock); +// void lock_destroy(wrlock_t *lock); // Not used outside of rw_locks.c, no need to be exposed +void handle_lock_error(wrlock_t *lock, const char *func); +int trylockw(wrlock_t *lock); + +#define loop_lock_read(lock) loop_lock_generic(lock, 0) +#define loop_lock_write(lock) loop_lock_generic(lock, 1) #endif // RW_LOCK_H diff --git a/util/sync.c b/util/sync.c index 7d4c9710b1..d9e51f0390 100644 --- a/util/sync.c +++ b/util/sync.c @@ -33,120 +33,6 @@ static bool is_signal_pending(lock_t *lock) { return pending; } -void task_ref_cnt_mod(struct task *task, int value) { // value Should only be -1 or 1. -mke - // Keep track of how many threads are referencing this task - if(!doEnableExtraLocking) {// If they want to fly by the seat of their pants... -mke - return; - } - - if(task == NULL) { - if(current != NULL) { - task = current; - } else { - return; - } - } - - bool ilocked = false; - - if (trylocknl(&task->general_lock, task->comm, task->pid) != _EBUSY) { - ilocked = true; // Make sure this is locked, and unlock it later if we had to lock it. - } - - pthread_mutex_lock(&task->reference.lock); - - if(((task->reference.count + value) < 0) && (task->pid > 9)) { // Prevent our unsigned value attempting to go negative. -mke - printk("ERROR: Attempt to decrement task reference count to be negative, ignoring(%s:%d) (%d - %d)\n", task->comm, task->pid, task->reference.count, value); - if(ilocked == true) - unlock(&task->general_lock); - - pthread_mutex_unlock(&task->reference.lock); - - return; - } - - - task->reference.count = task->reference.count + value; - - pthread_mutex_unlock(&task->reference.lock); - - if(ilocked == true) - unlock(&task->general_lock); -} - -void task_ref_cnt_mod_wrapper(int value) { - // sync.h can't know about the definition of task struct due to recursive include files. -mke - if((current != NULL) && (doEnableExtraLocking)) - task_ref_cnt_mod(current, value); - - return; -} - -int task_ref_cnt_val(struct task *task) { - pthread_mutex_lock(&task->reference.lock); - int cnt = task->reference.count; - pthread_mutex_unlock(&task->reference.lock); - return cnt; -} - -int task_ref_cnt_val_wrapper(void) { - return(task_ref_cnt_val(current)); -} - -void mem_ref_cnt_mod(struct mem *mem, int value) { // value Should only be -1 or 1. -mke - // Keep track of how many threads are referencing this task - if(!doEnableExtraLocking) {// If they want to fly by the seat of their pants... 
-mke - return; - } - - if(mem == NULL) { - return; - } - - pthread_mutex_lock(&mem->reference.lock); - - if(((mem->reference.count + value) < 0)) { // Prevent our unsigned value attempting to go negative. -mke - printk("ERROR: Attempt to decrement mem reference count to be negative, ignoring(%d:%d)\n", mem->reference.count, value); - pthread_mutex_unlock(&mem->reference.lock); - - return; - } - - - mem->reference.count = mem->reference.count + value; - - pthread_mutex_unlock(&mem->reference.lock); -} - -int mem_ref_cnt_val(struct mem *mem) { - pthread_mutex_lock(&mem->reference.lock); - int cnt = mem->reference.count; - pthread_mutex_unlock(&mem->reference.lock); - return cnt; -} - -void modify_locks_held_count(struct task *task, int value) { // value Should only be -1 or 1. -mke - if((task == NULL) && (current != NULL)) { - task = current; - } else { - return; - } - - pthread_mutex_lock(&task->locks_held.lock); - if((task->locks_held.count + value < 0) && task->pid > 9) { - // if((task->pid > 2) && (!strcmp(task->comm, "init"))) // Why ask why? -mke - printk("ERROR: Attempt to decrement locks_held count below zero, ignoring\n"); - return; - } - task->locks_held.count = task->locks_held.count + value; - pthread_mutex_unlock(&task->locks_held.lock); -} - -void modify_locks_held_count_wrapper(int value) { // sync.h can't know about the definition of struct due to recursive include files. -mke - if(current != NULL) - modify_locks_held_count(current, value); - return; -} int wait_for(cond_t *cond, lock_t *lock, struct timespec *timeout) { if (is_signal_pending(lock)) @@ -236,57 +122,18 @@ void notify_once(cond_t *cond) { __thread sigjmp_buf unwind_buf; __thread bool should_unwind = false; -void sigusr1_handler(void) { - if (should_unwind) { - should_unwind = false; - siglongjmp(unwind_buf, 1); - } -} - -// Because sometimes we can't #include "kernel/task.h" -mke -unsigned task_ref_cnt_mod(struct task *task) { - unsigned tmp = 0; - pthread_mutex_lock(&task->reference.lock); // This would make more - tmp = task->reference.count; - if(tmp > 1000) // Work around brain damage. Remove when said brain damage is fixed - tmp = 0; - pthread_mutex_unlock(&task->reference.lock); - - return tmp; -} - -void task_ref_cnt_mod_wrapper(int, const char*, int) { // sync.h can't know about the definition of struct due to recursive include files. -mke - return(task_ref_count(current, )); -} - -bool current_is_valid(void) { - if(current != NULL) - return true; - - return false; -} - -unsigned locks_held_count(struct task *task) { - // return 0; // Short circuit for now - if(task->pid < 10) // Here be monsters. -mke - return 0; - if(task->locks_held.count > 0) { - return(task->locks_held.count -1); - } - unsigned tmp = 0; - pthread_mutex_lock(&task->locks_held.lock); - tmp = task->locks_held.count; - pthread_mutex_unlock(&task->locks_held.lock); - - return tmp; -} - unsigned locks_held_count_wrapper(void) { // sync.h can't know about the definition of struct due to recursive include files. -mke if(current != NULL) return(locks_held_count(current)); return 0; } +void sigusr1_handler(int sig) { + if (should_unwind) { + should_unwind = false; + siglongjmp(unwind_buf, 1); + } +} // This is how you would mitigate the unlock/wait race if the wait // is async signal safe. 
wait_for *should* be safe from this race diff --git a/util/sync.h b/util/sync.h index fefd48d264..4141c0da71 100644 --- a/util/sync.h +++ b/util/sync.h @@ -2,431 +2,50 @@ #define UTIL_SYNC_H #define JUSTLOG 1 -#include -#include -#include +#include +#include +#include "util/ro_locks.h" +#include "util/rw_locks.h" +#include "debug.h" +#include "kernel/errno.h" +#include #include -#include #include "misc.h" -#include "debug.h" -#include // locks, implemented using pthread #define LOCK_DEBUG 0 +// The following are in log.c. There should probably be in a log.h that gets included instead. extern int current_pid(void); extern int current_uid(void); extern char* current_comm(void); +bool current_is_valid(void); + +unsigned locks_held_count_wrapper(void); -extern unsigned locks_held_count_wrapper(void); -extern void modify_locks_held_count_wrapper(int); +// The following is in task.c extern struct pid *pid_get(dword_t id); -extern bool current_is_valid(void); extern bool doEnableExtraLocking; extern struct timespec lock_pause; - -typedef struct { - pthread_mutex_t m; - pthread_t owner; - //const char *comm; - int pid; - int uid; - char comm[16]; - char lname[16]; // The name of the lock. -mke - bool wait4; // Is this lock in use by wait4 -#if LOCK_DEBUG - struct lock_debug { - const char *file; // doubles as locked - int line; - int pid; - bool initialized; - } debug; -#endif -} lock_t; - extern lock_t atomic_l_lock; // Used to make all lock operations atomic, even read->write and right->read -mke -static inline void lock_init(lock_t *lock, char lname[16]) { - int ret = pthread_mutex_init(&lock->m, NULL); - if (ret != 0) { - // Handle the error according to your application's needs - printk("ERROR: Failed to initialize mutex: %s:(%s)\n", lname, strerror(ret)); - // Depending on how critical this failure is, you might choose to exit, return, or take other actions. - return; - } - - if(lname != NULL) { - strncpy(lock->lname, lname, 16); - } else { - strncpy(lock->lname, "WTF", 16); - } - lock->wait4 = false; -#if LOCK_DEBUG - lock->debug = (struct lock_debug) { - .initialized = true, - }; -#endif - lock->comm[0] = 0; - lock->uid = -1; -} - #if LOCK_DEBUG #define LOCK_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, 0, { .initialized = true }} #else #define LOCK_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, 0} #endif -static inline void unlock(lock_t *lock) { - //pid_t pid = current_pid(); - - /* if ((pthread_mutex_trylock(&lock->m) == 0) && (pid > 0)) { // Sometimes pid = -1 when it shouldn't be, ignore - printk("WARNING: Process with PID %d trying to unlock an already unlocked lock\n", pid); - //pthread_mutex_unlock(&lock->m); // unlock it again - //return; - } - if ((lock->pid != pid) && (pid > 0) && (lock->pid > 0)) { // Sometimes pid = -1 when it shouldn't be, ignore - printk("WARNING: Process with PID %d trying to unlock a lock owned by PID %d\n", pid, lock->pid); - //return; // Return early or handle the discrepancy in another manner if required - } - */ - lock->owner = zero_init(pthread_t); - pthread_mutex_unlock(&lock->m); - lock->pid = -1; // - lock->comm[0] = 0; - modify_locks_held_count_wrapper(-1); - -#if LOCK_DEBUG - assert(lock->debug.initialized); - assert(lock->debug.file && "Attempting to unlock an unlocked lock"); - lock->debug = (struct lock_debug) { .initialized = true }; -#endif - return; -} - -static inline void atomic_l_lockf(char lname[16], int skiplog, const char *file, int line) { // Make all locks atomic by wrapping them. 
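The atomic_l_lock comment here ("even read->write") names the invariant the wrapper exists for: a lock upgrade is unlock-then-relock, and without an outer guard two readers can race through that gap. A minimal sketch of the pattern with plain pthreads and hypothetical names; atomic_l_lockf()/atomic_l_unlockf() play the role of guard in this codebase:

    #include <pthread.h>

    static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER; // stands in for atomic_l_lock
    static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;

    static void upgrade(void) {
        pthread_mutex_lock(&guard);   // serialize every unlock+relock pair
        pthread_rwlock_unlock(&rw);   // drop the read lock...
        pthread_rwlock_wrlock(&rw);   // ...and reacquire as writer with no visible gap
        pthread_mutex_unlock(&guard);
        // caveat: a reader that held rw while blocking on guard would deadlock
        // here, which is why read_unlock() above skips the guard for a thread's
        // own lock
    }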
-mke - if(!doEnableExtraLocking) - return; - int res = 0; - if(atomic_l_lock.pid > 0) { - if(current_pid() != atomic_l_lock.pid) { // Potential deadlock situation. Also weird. --mke - res = pthread_mutex_lock(&atomic_l_lock.m); - atomic_l_lock.pid = current_pid(); - } else if(!skiplog) { - printk("WARNING: Odd attempt by process (%s:%d) to attain same locking lock twice. Ignoring\n", current_comm(), current_pid()); - res = 0; - } - } - if(!res) { - strncpy((char *)&atomic_l_lock.comm, current_comm(), 16); - strncpy((char *)&atomic_l_lock.lname, lname, 16); - modify_locks_held_count_wrapper(1); - } else if (!skiplog) { - printk("Error on locking lock (%s) Called from %s:%d\n", lname, file, line); - } - - //STRACE("atomic_l_lockf(%d)\n", count); // This is too verbose most of the time -} - -static inline void atomic_l_unlockf(void) { - if(!doEnableExtraLocking) - return; - int res = 0; - strncpy((char *)&atomic_l_lock.lname,"\0", 1); - res = pthread_mutex_unlock(&atomic_l_lock.m); - if(res) { - printk("ERROR: unlocking locking lock\n"); - } else { - atomic_l_lock.pid = -1; // Reset - } - - modify_locks_held_count_wrapper(-1); - //STRACE("atomic_l_unlockf()\n"); -} - -static inline void complex_lockt(lock_t *lock, int log_lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { - if (lock->pid == current_pid()) - return; - - unsigned int count = 0; - int random_wait = WAIT_SLEEP + rand() % WAIT_SLEEP; - struct timespec lock_pause = {0, random_wait}; - long count_max = (WAIT_MAX_UPPER - random_wait); - - while (pthread_mutex_trylock(&lock->m)) { - count++; - if (nanosleep(&lock_pause, NULL) == -1) { - // Handle error - } - if (count > count_max) { - if (!log_lock) { - printk("ERROR: Possible deadlock, aborted lock attempt(PID: %d Process: %s) (Previously Owned:%s:%d) (Called By:%s:%d)\n", - current_pid(), current_comm(), lock->comm, lock->pid, file, line); - pthread_mutex_unlock(&lock->m); - modify_locks_held_count_wrapper(-1); - } - return; - } - } - - modify_locks_held_count_wrapper(1); - - if (count > count_max * 0.90) { - if (!log_lock) - printk("Warning: large lock attempt count (%d), aborted lock attempt(PID: %d Process: %s) (Previously Owned:%s:%d) (Called By:%s:%d)\n", - count, current_pid(), current_comm(), lock->comm, lock->pid, file, line); - } - - lock->owner = pthread_self(); - lock->pid = current_pid(); - lock->uid = current_uid(); - strncpy(lock->comm, current_comm(), sizeof(lock->comm) - 1); - lock->comm[sizeof(lock->comm) - 1] = '\0'; // Null-terminate just in case -} - -static inline void __lock(lock_t *lock, int log_lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { - if(!strcmp(lock->lname, "task_creat_gen")) // kluge. 
This means the lock is new, and SHOULD be unlocked - unlock(lock); - - if(!log_lock) { - task_ref_cnt_mod_wrapper(1,__FILE__, __LINE__); - pthread_mutex_lock(&lock->m); - modify_locks_held_count_wrapper(1); - lock->owner = pthread_self(); - lock->pid = current_pid(); - lock->uid = current_uid(); - strncpy(lock->comm, current_comm(), 16); - task_ref_cnt_mod_wrapper(-1, __FILE__, __LINE__); - } else { - pthread_mutex_lock(&lock->m); - lock->owner = pthread_self(); - lock->pid = current_pid(); - lock->uid = current_uid(); - strncpy(lock->comm, current_comm(), 16); - } - return; -} - -#define lock(lock, log_lock) __lock(lock, log_lock, __FILE__, __LINE__) - - - -typedef struct { - pthread_rwlock_t l; - // 0: unlocked - // -1: write-locked - // >0: read-locked with this many readers - atomic_int val; - int favor_read; // Increment this up each time a write lock is gained, down when a read lock is gained - const char *file; - int line; - int pid; - char comm[16]; - char lname[16]; -} wrlock_t; - -static inline void _read_unlock(wrlock_t *lock, const char*, int); -static inline void _write_unlock(wrlock_t *lock, const char*, int); -static inline void write_unlock_and_destroy(wrlock_t *lock); - -static inline void handle_lock_error(wrlock_t *lock, const char *file, int line, const char *func) { - printk("ERROR: %s(%x) tries exceeded %d, dealing with likely deadlock. (Lock held by PID: %d Process: %s) (%s:%d)\n", - func, lock, WAIT_MAX_UPPER, lock->pid, lock->comm, file, line); - - if(pid_get((dword_t)lock->pid) == NULL) { - printk("ERROR: %s(%x) locking PID(%d) is gone for task %s\n", func, lock, lock->pid, lock->comm); - pthread_rwlock_unlock(&lock->l); - } else { - printk("ERROR: %s(%x) locking PID(%d), %s is apparently wedged\n", func, lock, lock->pid, lock->comm); - pthread_rwlock_unlock(&lock->l); - } - - if(lock->val > 1) { - lock->val--; - } else if(lock->val == 1) { - _read_unlock(lock, __FILE__, __LINE__); - } else if(lock->val < 0) { - _write_unlock(lock, __FILE__, __LINE__); - } -} - -static inline void loop_lock_generic(wrlock_t *lock, const char *file, int line, int is_write) { - task_ref_cnt_mod_wrapper(1, __FILE__, __LINE__); - modify_locks_held_count_wrapper(1); - - unsigned count = 0; - int random_wait = is_write ? WAIT_SLEEP + rand() % 100 : WAIT_SLEEP + rand() % WAIT_SLEEP/4; - struct timespec lock_pause = {0, random_wait}; - long count_max = (WAIT_MAX_UPPER - random_wait); - count_max = (is_write && count_max < 25000) ? 25000 : count_max; - - while((is_write ? pthread_rwlock_trywrlock(&lock->l) : pthread_rwlock_tryrdlock(&lock->l))) { - count++; - if(count > count_max) { - handle_lock_error(lock, file, line, is_write ? "loop_lock_write" : "loop_lock_read"); - count = 0; - } - atomic_l_unlockf(); - nanosleep(&lock_pause, NULL); - atomic_l_lockf(is_write ? 
"llw\0" : "ll_read\0", 0, __FILE__, __LINE__); - } - - task_ref_cnt_mod_wrapper(-1, __FILE__, __LINE__); -} - -#define loop_lock_read(lock, file, line) loop_lock_generic(lock, file, line, 0) -#define loop_lock_write(lock, file, line) loop_lock_generic(lock, file, line, 1) - -static inline void _read_unlock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { - if(lock->val <= 0) { - printk("ERROR: read_unlock(%x) error(PID: %d Process: %s count %d) (%s:%d)\n",lock, current_pid(), current_comm(), lock->val, file, line); - lock->val = 0; - lock->pid = -1; - lock->comm[0] = 0; - modify_locks_held_count_wrapper(-1); - //STRACE("read_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); - return; - } - assert(lock->val > 0); - if (pthread_rwlock_unlock(&lock->l) != 0) - printk("URGENT: read_unlock(%x) error(PID: %d Process: %s) (%s:%d)\n", lock, current_pid(), current_comm(), file, line); - lock->val--; - modify_locks_held_count_wrapper(-1); - //STRACE("read_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); -} - -static inline void read_unlock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { - if(lock->pid != current_pid() && (lock->pid != -1)) { - atomic_l_lockf("r_unlock\0", 0, __FILE__, __LINE__); - _read_unlock(lock, file, line); - } else { // We can unlock our own lock without additional locking. -mke - _read_unlock(lock, file, line); - return; - } - if(lock->pid != current_pid() && (lock->pid != -1)) - atomic_l_unlockf(); -} - -static inline void _write_unlock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { - if(pthread_rwlock_unlock(&lock->l) != 0) - printk("URGENT: write_unlock(%x:%d) error(PID: %d Process: %s) (%s:%d)\n", lock, lock->val, current_pid(), current_comm(), file, line); - if(lock->val != -1) { - printk("ERROR: write_unlock(%x) on lock with val of %d (PID: %d Process: %s (%s:%d))\n", lock, lock->val, current_pid(), current_comm(), file, line); - } - //assert(lock->val == -1); - lock->val = lock->line = lock->pid = 0; - lock->pid = -1; - lock->comm[0] = 0; - //STRACE("write_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); - lock->file = NULL; - modify_locks_held_count_wrapper(-1); -} - -static inline void write_unlock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // Wrap it. 
External calls lock, internal calls using _write_unlock() don't -mke - atomic_l_lockf("w_unlock\0", 0, __FILE__, __LINE__); - _write_unlock(lock, file, line); - atomic_l_unlockf(); - return; -} - -static inline void __write_lock(wrlock_t *lock, const char *file, int line) { // Write lock - loop_lock_write(lock, file, line); - - // assert(lock->val == 0); - lock->val = -1; - lock->file = file; - lock->line = line; - lock->pid = current_pid(); - if(lock->pid > 9) - strncpy((char *)lock->comm, current_comm(), 16); - //STRACE("write_lock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); -} - -static inline void _write_lock(wrlock_t *lock, const char *file, int line) { - atomic_l_lockf("_w_lock", 0, __FILE__, __LINE__); - __write_lock(lock, file, line); - atomic_l_unlockf(); -} - -static inline int trylockw(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { - atomic_l_lockf("trylockw\0", 0, __FILE__, __LINE__); - int status = pthread_rwlock_trywrlock(&lock->l); - atomic_l_unlockf(); -#if LOCK_DEBUG - if (!status) { - lock->debug.file = file; - lock->debug.line = line; - extern int current_pid(void); - lock->debug.pid = current_pid(); - } -#endif - if(status == 0) { - modify_locks_held_count_wrapper(1); - //STRACE("trylockw(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); - lock->pid = current_pid(); - strncpy(lock->comm, current_comm(), 16); - } - return status; -} - -#define trylockw(lock) trylockw(lock, __FILE__, __LINE__) - -static inline int trylock(lock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { - atomic_l_lockf("trylock\0", 0, __FILE__, __LINE__); - int status = pthread_mutex_trylock(&lock->m); - atomic_l_unlockf(); -#if LOCK_DEBUG - if (!status) { - lock->debug.file = file; - lock->debug.line = line; - extern int current_pid(void); - lock->debug.pid = current_pid(); - } -#endif - if((!status) && (current_pid() > 10)) {// iSH-AOK crashes if low number processes are not excluded. Might be able to go lower then 10? -mke - modify_locks_held_count_wrapper(1); - - //STRACE("trylock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); - lock->pid = current_pid(); - strncpy(lock->comm, current_comm(), 16); - } - return status; -} - -#define trylock(lock) trylock(lock, __FILE__, __LINE__) - -static inline int trylocknl(lock_t *lock, char *comm, int pid, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { - //Don't log, avoid recursion - int status = pthread_mutex_trylock(&lock->m); -#if LOCK_DEBUG - if (!status) { - lock->debug.file = file; - lock->debug.line = line; - extern int current_pid(void); - lock->debug.pid = current_pid(); - } -#endif - if(!status) {// iSH-AOK crashes if low number processes are not excluded. Might be able to go lower then 10? -mke - modify_locks_held_count_wrapper(1); - - //STRACE("trylock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); - lock->pid = pid; - strncpy(lock->comm, comm, 16); - } - return status; -} - -#define trylocknl(lock, comm, pid) trylocknl(lock, comm, pid, __FILE__, __LINE__) - // conditions, implemented using pthread conditions but hacked so you can also // be woken by a signal typedef struct { pthread_cond_t cond; } cond_t; + #define COND_INITIALIZER ((cond_t) {PTHREAD_COND_INITIALIZER}) // Must call before using the condition @@ -445,127 +64,6 @@ void notify(cond_t *cond); // Wake up one waiter. 
void notify_once(cond_t *cond); -static inline void read_to_write_lock(wrlock_t *lock); -static inline void read_unlock_and_destroy(wrlock_t *lock); - -// this is a read-write lock that prefers writers, i.e. if there are any -// writers waiting a read lock will block. -// on darwin pthread_rwlock_t is already like this, on linux you can configure -// it to prefer writers. not worrying about anything else right now. - -static inline void wrlock_init(wrlock_t *lock) { - pthread_rwlockattr_t *pattr = NULL; -#if defined(__GLIBC__) - pthread_rwlockattr_t attr; - pattr = &attr; - pthread_rwlockattr_init(pattr); - pthread_rwlockattr_setkind_np(pattr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP); -#endif -#ifdef JUSTLOG - if (pthread_rwlock_init(&lock->l, pattr)) - printk("URGENT: wrlock_init() error(PID: %d Process: %s)\n",current_pid(), current_comm()); -#else - if (pthread_rwlock_init(&lock->l, pattr)) __builtin_trap(); -#endif - lock->val = lock->line = lock->pid = 0; - lock->file = NULL; -} - -static inline void _lock_destroy(wrlock_t *lock) { - while((task_ref_cnt_val_wrapper() > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections - nanosleep(&lock_pause, NULL); - } -#ifdef JUSTLOG - if (pthread_rwlock_destroy(&lock->l) != 0) { - printk("URGENT: lock_destroy(%x) on active lock. (PID: %d Process: %s Critical Region Count: %d)\n",&lock->l, current_pid(), current_comm(),task_reference_count_wrapper()); - } -#else - if (pthread_rwlock_destroy(&lock->l) != 0) __builtin_trap(); -#endif -} - -static inline void lock_destroy(wrlock_t *lock) { - while((task_ref_cnt_val() > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections - nanosleep(&lock_pause, NULL); - } - - atomic_l_lockf("l_destroy\0", 0, __FILE__, __LINE__); - _lock_destroy(lock); - atomic_l_unlockf(); -} - -static inline void _read_lock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { - loop_lock_read(lock, file, line); - task_ref_cnt_mod_wrapper(1, __FILE__, __LINE__); - //pthread_rwlock_rdlock(&lock->l); - // assert(lock->val >= 0); // If it isn't >= zero we have a problem since that means there is a write lock somehow. -mke - if(lock->val) { - lock->val++; - } else if (lock->val > -1){ // Deal with insanity. -mke - lock->val++; - } else { - printk("ERROR: _read_lock() val is %d\n", lock->val); - lock->val++; - } - - if(lock->val > 1000) { // We likely have a problem. - printk("WARNING: _read_lock(%x) has 1000+ pending read locks. 
(File: %s, Line: %d) Breaking likely deadlock/process corruption(PID: %d Process: %s.\n", lock, lock->file, lock->line,lock->pid, lock->comm); - read_unlock_and_destroy(lock); - task_ref_cnt_mod_wrapper(-1, __FILE__, __LINE__); - //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); - return; - } - - lock->pid = current_pid(); - if(lock->pid > 9) - strncpy((char *)lock->comm, current_comm(), 16); - task_ref_cnt_mod_wrapper(-1, __FILE__, __LINE__); - //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); -} - -static inline void read_lock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // Wrapper so that external calls lock, internal calls using _read_unlock() don't -mke - atomic_l_lockf("r_lock\0", 0, __FILE__, __LINE__); - _read_lock(lock, file, line); - atomic_l_unlockf(); -} - -#define write_lock(lock) _write_lock(lock, __FILE__, __LINE__) - -static inline void read_to_write_lock(wrlock_t *lock) { // Try to atomically swap a RO lock to a Write lock. -mke - task_ref_cnt_mod_wrapper(1, __FILE__, __LINE__); - atomic_l_lockf("rtw_lock\0", 0, __FILE__, __LINE__); - _read_unlock(lock, __FILE__, __LINE__); - __write_lock(lock, __FILE__, __LINE__); - atomic_l_unlockf(); - task_ref_cnt_mod_wrapper(-1, __FILE__, __LINE__); -} - -static inline void write_to_read_lock(wrlock_t *lock, __attribute__((unused)) const char *file, __attribute__((unused)) int line) { // Try to atomically swap a Write lock to a RO lock. -mke - task_ref_cnt_mod_wrapper(1, __FILE__, __LINE__); - atomic_l_lockf("wtr_lock\0", 0, __FILE__, __LINE__); - _write_unlock(lock, file, line); - _read_lock(lock, file, line); - atomic_l_unlockf(); - task_ref_cnt_mod_wrapper(-1, __FILE__, __LINE__); -} - -static inline void write_unlock_and_destroy(wrlock_t *lock) { - task_ref_cnt_mod_wrapper(1, __FILE__, __LINE__); - atomic_l_lockf("wuad_lock\0", 0, __FILE__, __LINE__); - _write_unlock(lock, __FILE__, __LINE__); - _lock_destroy(lock); - atomic_l_unlockf(); - task_ref_cnt_mod_wrapper(-1, __FILE__, __LINE__); -} - -static inline void read_unlock_and_destroy(wrlock_t *lock) { - atomic_l_lockf("ruad_lock", 0, __FILE__, __LINE__); - if(trylockw(lock)) // It should be locked, but just in case. Likely masking underlying issue. 
-mke - _read_unlock(lock, __FILE__, __LINE__); - _lock_destroy(lock); - atomic_l_unlockf(); -} - extern __thread sigjmp_buf unwind_buf; extern __thread bool should_unwind; static inline int sigunwind_start(void) { @@ -582,4 +80,15 @@ static inline void sigunwind_end(void) { should_unwind = false; } +void cond_init(cond_t *cond); +void cond_destroy(cond_t *cond); +//static bool is_signal_pending(lock_t *lock); // Not used externally to sync.c, doesn't need to be exposed +unsigned locks_held_count_wrapper(void); +int wait_for(cond_t *cond, lock_t *lock, struct timespec *timeout); +int wait_for_ignore_signals(cond_t *cond, lock_t *lock, struct timespec *timeout); +void notify(cond_t *cond); +void notify_once(cond_t *cond); +void sigusr1_handler(int sig); +bool current_is_valid(void); + #endif From f1111b147bfddda060c50761e0af2f0532b027e4 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Wed, 6 Dec 2023 17:25:45 -0800 Subject: [PATCH 10/23] o Tweaks to Real Time Clock o Continuing work on implementing task reference counts --- app/RTCDevice.m | 51 ++++++++++++------------------------------------- emu/memory.c | 18 ++++++++++------- emu/memory.h | 2 +- fs/poll.c | 6 +++--- jit/jit.c | 6 +++--- jit/jit.h | 1 + kernel/exit.c | 16 ++++++++--------- kernel/fork.c | 2 +- kernel/mmap.c | 6 +++--- kernel/poll.c | 2 +- kernel/signal.c | 4 ++-- kernel/task.c | 11 ++++------- kernel/task.h | 3 +-- util/rw_locks.c | 6 +++--- 14 files changed, 54 insertions(+), 80 deletions(-) diff --git a/app/RTCDevice.m b/app/RTCDevice.m index a38fb5ca56..7e380a0bc9 100644 --- a/app/RTCDevice.m +++ b/app/RTCDevice.m @@ -1,11 +1,4 @@ -// -// RTCDevice.m -// iSH-AOK -// -// Created by Michael Miller on 11/24/23. -// - -#import +#include #include "fs/poll.h" #include "fs/dyndev.h" #include "kernel/errno.h" @@ -26,35 +19,23 @@ } rtc_time; // Get the time, put it in the appropriate structure -static rtc_time *get_current_time(rtc_fd *fd, size_t *len) { +static rtc_time get_current_time(rtc_fd *fd) { // Obtain the current date NSDate *currentDate = [NSDate date]; NSCalendar *calendar = [NSCalendar currentCalendar]; // Define the desired date components - NSDateComponents *components = [calendar components:(NSCalendarUnitYear | NSCalendarUnitMonth | NSCalendarUnitDay | NSCalendarUnitHour | NSCalendarUnitMinute | NSCalendarUnitSecond) - fromDate:currentDate]; - - // Allocate and populate the rtc_time structure - rtc_time *timeStruct = malloc(sizeof(rtc_time)); - if (!timeStruct) { - // Handle memory allocation failure - *len = 0; - return NULL; - } + NSDateComponents *components = [calendar components:(NSCalendarUnitYear | NSCalendarUnitMonth | NSCalendarUnitDay | NSCalendarUnitHour | NSCalendarUnitMinute | NSCalendarUnitSecond) fromDate:currentDate]; // Populate the structure // Note: tm_mon is 0-based (0 for January) and tm_year is years since 1900 - timeStruct->tm_sec = (int)[components second]; - timeStruct->tm_min = (int)[components minute]; - timeStruct->tm_hour = (int)[components hour]; - timeStruct->tm_mday = (int)[components day]; - timeStruct->tm_mon = (int)[components month] - 1; // Adjust for tm_mon - timeStruct->tm_year = (int)[components year] - 1900; // Adjust for tm_year - - // Update the size - *len = sizeof(rtc_time); - + rtc_time timeStruct; + timeStruct.tm_sec = (int)[components second]; + timeStruct.tm_min = (int)[components minute]; + timeStruct.tm_hour = (int)[components hour]; + timeStruct.tm_mday = (int)[components day]; + timeStruct.tm_mon = (int)[components month] - 1; // Adjust for tm_mon + 
timeStruct.tm_year = (int)[components year] - 1900; // Adjust for tm_year return timeStruct; } @@ -74,7 +55,7 @@ static int rtc_close(rtc_fd *fd) { return 0; } -#define RTC_RD_TIME 0x80247009 // Example definition, adjust as necessary +#define RTC_RD_TIME 0x80247009 static ssize_t rtc_ioctl_size(int cmd) { switch (cmd) { @@ -89,15 +70,7 @@ static int rtc_ioctl(struct fd *fd, int cmd, void *arg) { @autoreleasepool { switch (cmd) { case RTC_RD_TIME: { // On a real Linux, there are a number of other possible ioctl()'s. We don't really need them - size_t length = 0; - rtc_time *data = get_current_time(fd, &length); - - if (arg == NULL) { - return _EFAULT; // Null pointer argument - } - - *(rtc_time *) arg = *data; // This is the magic that gets the value back to the "kernel" - + *(rtc_time *) arg = get_current_time(fd); // This is the magic that gets the value back to the "kernel" return 0; // Success } default: diff --git a/emu/memory.c b/emu/memory.c index 3591f9c66b..95ce17f040 100644 --- a/emu/memory.c +++ b/emu/memory.c @@ -40,6 +40,9 @@ void mem_init(struct mem *mem) { #endif mem->mmu.changes = 0; wrlock_init(&mem->lock); + mem->reference.count = 0; + mem->reference.ready_to_be_freed = false; + wrlock_init(&mem->reference.lock); } void mem_destroy(struct mem *mem) { @@ -48,8 +51,9 @@ void mem_destroy(struct mem *mem) { nanosleep(&lock_pause, NULL); } pt_unmap_always(mem, 0, MEM_PAGES); + #if ENGINE_JIT - while((task_ref_cnt_get(current)) && (current->pid > 1) ){ // Wait for now, task is in one or more critical sections, and/or has locks + while((task_ref_cnt_get(current, 1)) && (current->pid > 1) ){ // Wait for now, task is in one or more critical sections, and/or has locks nanosleep(&lock_pause, NULL); } jit_free(mem->mmu.jit); @@ -108,7 +112,7 @@ struct pt_entry *mem_pt(struct mem *mem, page_t page) { static void mem_pt_del(struct mem *mem, page_t page) { struct pt_entry *entry = mem_pt(mem, page); if (entry != NULL) { - while(task_ref_cnt_get(current) > 4) { // mark + while(task_ref_cnt_get(current, 0) > 4) { // mark nanosleep(&lock_pause, NULL); } entry->data = NULL; @@ -189,7 +193,7 @@ int pt_unmap(struct mem *mem, page_t start, pages_t pages) { int pt_unmap_always(struct mem *mem, page_t start, pages_t pages) { for (page_t page = start; page < start + pages; mem_next_page(mem, &page)) { - while(task_ref_cnt_get(current) >3) { + while(task_ref_cnt_get(current, 0) >3) { nanosleep(&lock_pause, NULL); } struct pt_entry *pt = mem_pt(mem, page); @@ -203,7 +207,7 @@ int pt_unmap_always(struct mem *mem, page_t start, pages_t pages) { if (--data->refcount == 0) { // vdso wasn't allocated with mmap, it's just in our data segment if (data->data != vdso_data) { - while(task_ref_cnt_get(current) > 3) { + while(task_ref_cnt_get(current, 0) > 3) { nanosleep(&lock_pause, NULL); } int err = munmap(data->data, data->size); @@ -251,7 +255,7 @@ int pt_set_flags(struct mem *mem, page_t start, pages_t pages, int flags) { } int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t pages) { - while(task_ref_cnt_get(current)) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_get(current, 0)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } for (page_t page = start; page < start + pages; mem_next_page(src, &page)) { @@ -268,7 +272,7 @@ int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t page dst_entry->offset = entry->offset; dst_entry->flags = entry->flags; } - 
while(task_ref_cnt_get(current)) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_get(current, 0)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } mem_changed(src); @@ -345,7 +349,7 @@ void *mem_ptr(struct mem *mem, addr_t addr, int type) { mem_ref_cnt_mod(mem, 1); //read_to_write_lock(&mem->lock); memcpy(copy, data, PAGE_SIZE); //mkemkemke Crashes here a lot when running both the go and parallel make test. 01 June 2022 - task_ref_cnt_mod(current, -1); + mem_ref_cnt_mod(mem, -1); pt_map(mem, page, 1, copy, 0, entry->flags &~ P_COW); unlock(¤t->general_lock); write_to_read_lock(&mem->lock); diff --git a/emu/memory.h b/emu/memory.h index 67759598de..82d4753363 100644 --- a/emu/memory.h +++ b/emu/memory.h @@ -21,7 +21,7 @@ struct mem { #endif struct mmu mmu; struct { - pthread_mutex_t lock; + wrlock_t lock; int count; // If positive, don't delete yet, wait_to_delete bool ready_to_be_freed; // Should be false initially } reference; diff --git a/fs/poll.c b/fs/poll.c index 60212c3ad2..970fefecc4 100644 --- a/fs/poll.c +++ b/fs/poll.c @@ -330,7 +330,7 @@ void poll_destroy(struct poll *poll) { struct poll_fd *poll_fd; struct poll_fd *tmp; - while(task_ref_cnt_get(current)) { + while(task_ref_cnt_get(current, 0)) { nanosleep(&lock_pause, NULL); } list_for_each_entry_safe(&poll->poll_fds, poll_fd, tmp, fds) { @@ -341,12 +341,12 @@ void poll_destroy(struct poll *poll) { free(poll_fd); } - while(task_ref_cnt_get(current)) { + while(task_ref_cnt_get(current, 0)) { nanosleep(&lock_pause, NULL); } list_for_each_entry_safe(&poll->pollfd_freelist, poll_fd, tmp, fds) { - while(task_ref_cnt_get(current)) { + while(task_ref_cnt_get(current, 0)) { nanosleep(&lock_pause, NULL); } list_remove(&poll_fd->fds); diff --git a/jit/jit.c b/jit/jit.c index d75aa20d5a..24c8c5993a 100644 --- a/jit/jit.c +++ b/jit/jit.c @@ -39,7 +39,7 @@ void jit_free(struct jit *jit) { if (!jit) return; bool signal_pending = !!(current->pending & ~current->blocked); - while((task_ref_cnt_get(current) > 2) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight + while((task_ref_cnt_get(current, 0) > 2) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); signal_pending = !!(current->pending & ~current->blocked); } @@ -84,7 +84,7 @@ void jit_invalidate_range(struct jit *jit, page_t start, page_t end) { } void jit_invalidate_page(struct jit *jit, page_t page) { - while(task_ref_cnt_get(current) > 4) { // It's all a bit magic, but I think this is doing something useful. -mke + while(task_ref_cnt_get(current, 0) > 4) { // It's all a bit magic, but I think this is doing something useful. -mke nanosleep(&lock_pause, NULL); } // mofify_critical_region_count(current, 1, __FILE__, __LINE__); @@ -305,7 +305,7 @@ int cpu_run_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) { unlock(&jit->lock); write_lock(&jit->jetsam_lock); lock(&jit->lock, 0); - while(task_ref_cnt_get(current) > 3) {// Yes, this is weird. It might not work, but I'm trying. -mke + while(task_ref_cnt_get(current, 0) > 3) {// Yes, this is weird. It might not work, but I'm trying. -mke nanosleep(&lock_pause, NULL); // Yes, this has triggered at least once. Is it doing any good though? 
-mke } jit_free_jetsam(jit); diff --git a/jit/jit.h b/jit/jit.h index 7ee8041250..392447d021 100644 --- a/jit/jit.h +++ b/jit/jit.h @@ -1,5 +1,6 @@ #ifndef JIT_H #define JIT_H +#define ENGINE_JIT 1 #include "misc.h" #include "emu/mmu.h" #include "util/list.h" diff --git a/kernel/exit.c b/kernel/exit.c index 87159074bd..86ebfc72d7 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -18,7 +18,7 @@ extern const char extra_lock_comm; static void halt_system(void); static bool exit_tgroup(struct task *task) { - while((task_ref_cnt_get(task) > 2) || (locks_held_count(task))) { // Wait for now, task is in one or more critical sections, and/or has locks + while((task_ref_cnt_get(task, 0) > 2) || (locks_held_count(task))) { // Wait for now, task is in one or more critical sections, and/or has locks nanosleep(&lock_pause, NULL); } struct tgroup *group = task->group; @@ -66,7 +66,7 @@ noreturn void do_exit(int status) { bool signal_pending = !!(current->pending & ~current->blocked); // has to happen before mm_release - while((task_ref_cnt_get(current) > 1) || + while((task_ref_cnt_get(current, 0) > 1) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); @@ -83,14 +83,14 @@ noreturn void do_exit(int status) { do { nanosleep(&lock_pause, NULL); signal_pending = !!(current->pending & ~current->blocked); - } while((task_ref_cnt_get(current) > 1) || + } while((task_ref_cnt_get(current, 0) > 1) || (locks_held_count(current)) || (signal_pending)); // Wait for now, task is in one or more critical mm_release(current->mm); current->mm = NULL; signal_pending = !!(current->pending & ~current->blocked); - while((task_ref_cnt_get(current) > 1) || + while((task_ref_cnt_get(current, 0) > 1) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); @@ -99,7 +99,7 @@ noreturn void do_exit(int status) { fdtable_release(current->files); current->files = NULL; - while((task_ref_cnt_get(current) > 1) || + while((task_ref_cnt_get(current, 0) > 1) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); @@ -111,7 +111,7 @@ noreturn void do_exit(int status) { // sighand must be released below so it can be protected by pids_lock // since it can be accessed by other threads - while((task_ref_cnt_get(current) > 1) || + while((task_ref_cnt_get(current, 0) > 1) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical// Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); @@ -130,7 +130,7 @@ noreturn void do_exit(int status) { complex_lockt(&pids_lock, 0); // release the sighand signal_pending = !!(current->pending & ~current->blocked); - while((task_ref_cnt_get(current) > 2) || + while((task_ref_cnt_get(current, 0) > 2) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); @@ -156,7 +156,7 @@ noreturn void do_exit(int status) { signal_pending = !!(current->pending & 
~current->blocked); - while((task_ref_cnt_get(current) > 2) || + while((task_ref_cnt_get(current, 0) > 2) || (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); diff --git a/kernel/fork.c b/kernel/fork.c index a1915952ed..46e6c27cc6 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -127,7 +127,7 @@ static int copy_task(struct task *task, dword_t flags, addr_t stack, addr_t ptid return 0; fail_free_sighand: - while(task_ref_cnt_get(task)) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_get(task, 0)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } sighand_release(task->sighand); diff --git a/kernel/mmap.c b/kernel/mmap.c index 8dcd8c653f..b1cace3867 100644 --- a/kernel/mmap.c +++ b/kernel/mmap.c @@ -45,13 +45,13 @@ void mm_release(struct mm *mm) { if (--mm->refcount == 0) { if (mm->exefile != NULL) fd_close(mm->exefile); - while(task_ref_cnt_get(current)) { // FIXME: Should be locking current->reference.lock and updating + while(task_ref_cnt_get(current, 1)) { // FIXME: Should be locking current->reference.lock and updating // current->reference.count before mem_destroy nanosleep(&lock_pause, NULL); } mem_destroy(&mm->mem); - while(task_ref_cnt_get(current)) { //FIXME: Should now unlock after mem_destroy + while(task_ref_cnt_get(current, 1)) { //FIXME: Should now unlock after mem_destroy nanosleep(&lock_pause, NULL); } free(mm); @@ -182,7 +182,7 @@ int_t sys_mremap(addr_t addr, dword_t old_len, dword_t new_len, dword_t flags) { // shrinking always works if (new_pages <= old_pages) { - while(task_ref_cnt_get(current)) { + while(task_ref_cnt_get(current, 0)) { nanosleep(&lock_pause, NULL); } int err = pt_unmap(current->mem, PAGE(addr) + new_pages, old_pages - new_pages); diff --git a/kernel/poll.c b/kernel/poll.c index 06bdbe33f7..3bd67becb7 100644 --- a/kernel/poll.c +++ b/kernel/poll.c @@ -198,7 +198,7 @@ dword_t sys_poll(addr_t fds, dword_t nfds, int_t timeout) { TASK_MAY_BLOCK { res = poll_wait(poll, poll_event_callback, &context, timeout < 0 ? 
NULL : &timeout_ts); } - while(task_ref_cnt_get(current)) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_get(current, 0)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } poll_destroy(poll); diff --git a/kernel/signal.c b/kernel/signal.c index b1298be576..0e1acb96d3 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -466,7 +466,7 @@ struct sighand *sighand_copy(struct sighand *sighand) { } void sighand_release(struct sighand *sighand) { - while(task_ref_cnt_get(current) > 2) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_get(current, 0) > 2) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } if (--sighand->refcount == 0) { @@ -739,7 +739,7 @@ static int kill_group(pid_t_ pgid, dword_t sig) { } struct tgroup *tgroup; int err = _EPERM; - while((task_ref_cnt_get(current)) || (locks_held_count(current))) { // Wait for now, task is in one or more critical sections, and/or has locks + while((task_ref_cnt_get(current, 0)) || (locks_held_count(current))) { // Wait for now, task is in one or more critical sections, and/or has locks nanosleep(&lock_pause, NULL); } list_for_each_entry(&pid->pgroup, tgroup, pgroup) { diff --git a/kernel/task.c b/kernel/task.c index 52d567f414..1d2c84e30c 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -153,7 +153,7 @@ struct task *task_create_(struct task *parent) { // We consolidate the check for whether the task is in a critical section, // holds locks, or has pending signals into a single function. bool should_wait(struct task *t) { - return task_ref_cnt_get(t) > 1 || locks_held_count(t) || !!(t->pending & ~t->blocked); + return task_ref_cnt_get(t, 0) > 1 || locks_held_count(t) || !!(t->pending & ~t->blocked); } void task_destroy(struct task *task, int caller) { @@ -192,7 +192,7 @@ void task_destroy(struct task *task, int caller) { retry: // Free the task's resources. 
- if (!task_ref_cnt_get(task)) { + if (!task_ref_cnt_get(task, 0)) { free(task); } else { goto retry; @@ -363,7 +363,8 @@ void modify_locks_held_count(struct task *task, int value) { // value Should onl pthread_mutex_unlock(&task->locks_held.lock); } -unsigned task_ref_cnt_get(struct task *task) { +// +unsigned task_ref_cnt_get(struct task *task, unsigned lock_if_zero) { unsigned tmp = 0; pthread_mutex_lock(&task->reference.lock); // This would make more tmp = task->reference.count; @@ -374,10 +375,6 @@ unsigned task_ref_cnt_get(struct task *task) { return tmp; } -unsigned task_ref_cnt_get_wrapper(void) { - return(task_ref_cnt_get(current)); -} - bool current_is_valid(void) { if(current != NULL) return true; diff --git a/kernel/task.h b/kernel/task.h index ab936835d4..5c96bb5bbe 100644 --- a/kernel/task.h +++ b/kernel/task.h @@ -241,8 +241,7 @@ __attribute__((always_inline)) inline int task_may_block_end(void) { void task_ref_cnt_mod(struct task *task, int value); void task_ref_cnt_mod_wrapper(int value); -unsigned task_ref_cnt_get(struct task *task); -unsigned task_ref_cnt_get_wrapper(void); +unsigned task_ref_cnt_get(struct task *task, unsigned lock_if_zero); void modify_locks_held_count(struct task *task, int value); void modify_locks_held_count_wrapper(int value); unsigned locks_held_count(struct task *task); diff --git a/util/rw_locks.c b/util/rw_locks.c index 780842e59c..8896418719 100644 --- a/util/rw_locks.c +++ b/util/rw_locks.c @@ -151,12 +151,12 @@ void wrlock_init(wrlock_t *lock) { } void _lock_destroy(wrlock_t *lock) { - while((task_ref_cnt_get(current) > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections + while((task_ref_cnt_get(current, 0) > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } #ifdef JUSTLOG if (pthread_rwlock_destroy(&lock->l) != 0) { - printk("URGENT: lock_destroy(%x) on active lock. (PID: %d Process: %s Critical Region Count: %d)\n",&lock->l, current_pid(), current_comm(),task_ref_cnt_get(current)); + printk("URGENT: lock_destroy(%x) on active lock. (PID: %d Process: %s Critical Region Count: %d)\n",&lock->l, current_pid(), current_comm(),task_ref_cnt_get(current, 0)); } #else if (pthread_rwlock_destroy(&lock->l) != 0) __builtin_trap(); @@ -164,7 +164,7 @@ void _lock_destroy(wrlock_t *lock) { } void lock_destroy(wrlock_t *lock) { - while((task_ref_cnt_get(current) > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections + while((task_ref_cnt_get(current, 0) > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } From 6726072e5fc9801f18c65f3e95fafe8e5df02685 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Fri, 8 Dec 2023 07:47:15 -0800 Subject: [PATCH 11/23] o tasks that have pending operations are now put in a queue for later deletion. 
Still testing --- kernel/exit.c | 16 ++++++++++++---- kernel/init.c | 2 ++ kernel/task.c | 49 +++++++++++++++++++++++++++++++++++++++++++------ kernel/task.h | 15 +++++++++++++-- main.c | 3 ++- util/list.h | 27 ++++++++++++++++++++++++++- 6 files changed, 98 insertions(+), 14 deletions(-) diff --git a/kernel/exit.c b/kernel/exit.c index 86ebfc72d7..1945916cfc 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -17,6 +17,8 @@ extern const char extra_lock_comm; static void halt_system(void); +// Checks if a task's thread group can be exited by waiting for the task's reference count +// to drop and checking if it holds any locks. static bool exit_tgroup(struct task *task) { while((task_ref_cnt_get(task, 0) > 2) || (locks_held_count(task))) { // Wait for now, task is in one or more critical sections, and/or has locks nanosleep(&lock_pause, NULL); @@ -39,8 +41,11 @@ static bool exit_tgroup(struct task *task) { return group_dead; } +// A function pointer that can be assigned to a cleanup function to be called upon task exit. void (*exit_hook)(struct task *task, int code) = NULL; +// Finds a new parent for the children of a task that is exiting. If no suitable parent +// is found within the task's group, it returns the 'init' task. static struct task *find_new_parent(struct task *task) { struct task *new_parent; list_for_each_entry(&task->group->threads, new_parent, group_links) { @@ -50,10 +55,11 @@ static struct task *find_new_parent(struct task *task) { return pid_get_task(1); } +// Handles the termination of the current task. It releases resources, notifies the parent, +// and re-parents any children. It ensures the task is not in a critical section and that +// all locks are released before proceeding. At least in theory noreturn void do_exit(int status) { - //atomic_l_lockf(0,__FILE__, __LINE__); - // pthread_mutex_lock(¤t->death_lock); - if(!pthread_mutex_trylock(¤t->death_lock)) { + if(current->reference.ready_to_be_freed) { goto EXIT; } else { //nanosleep(&lock_pause, NULL); // Stupid place holder @@ -207,6 +213,8 @@ noreturn void do_exit(int status) { EXIT:pthread_exit(NULL); } +// Exits all tasks in the current task's thread group and then calls do_exit to terminate +// the current task itself. noreturn void do_exit_group(int status) { struct tgroup *group = current->group; complex_lockt(&pids_lock, 0); @@ -242,7 +250,7 @@ noreturn void do_exit_group(int status) { do_exit(status); } -// always called from init process +// always called from init process. Intended to be called when the init process exits. static void halt_system(void) { // brutally murder everything // which will leave everything in an inconsistent state. I will solve this problem later. 
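The exit path above leans on one idiom that recurs throughout this series: spin until no other thread holds a reference to the task (and no locks or pending signals remain), sleeping briefly between polls. A minimal sketch of that idiom in isolation, assuming the series' task_ref_cnt_get() and lock_pause symbols; quiesce_task() is a hypothetical name, not a function in these patches:

#include <time.h>   // nanosleep

// Hypothetical helper sketching the quiesce idiom used across this series.
// task_ref_cnt_get(task, 0) and lock_pause are the real symbols from the
// patches; call sites above compare against 1 or 2 instead of 0 to account
// for references the caller itself holds.
static void quiesce_task(struct task *task) {
    while (task_ref_cnt_get(task, 0) > 0)
        nanosleep(&lock_pause, NULL); // back off briefly and re-poll
}

The tradeoff is a busy-wait: nothing wakes the sleeper explicitly, so latency is bounded by the lock_pause interval rather than by a condition-variable signal.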
diff --git a/kernel/init.c b/kernel/init.c index 5ddef8b7c6..f731d6b4b7 100644 --- a/kernel/init.c +++ b/kernel/init.c @@ -120,6 +120,8 @@ intptr_t become_first_process(void) { establish_signal_handlers(); list_init(&alive_pids_list); + init_pending_queues(); // Initialize pending queues + struct task *task = construct_task(NULL); if (IS_ERR(task)) return PTR_ERR(task); diff --git a/kernel/task.c b/kernel/task.c index 1d2c84e30c..bedb88fdfb 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -11,6 +11,8 @@ #include #include +#define GRACE_PERIOD 2 // How long to wait before deallocating tasks that have exited + pthread_mutex_t multicore_lock = PTHREAD_MUTEX_INITIALIZER; pthread_mutex_t extra_lock = PTHREAD_MUTEX_INITIALIZER; pthread_mutex_t delay_lock = PTHREAD_MUTEX_INITIALIZER; @@ -18,6 +20,9 @@ extern lock_t atomic_l_lock; pthread_mutex_t wait_for_lock = PTHREAD_MUTEX_INITIALIZER; time_t boot_time; // Store the boot time. -mke +struct list tasks_pending_deletion_queue; +pthread_mutex_t tasks_pending_deletion_lock = PTHREAD_MUTEX_INITIALIZER; + int iOSMajorRelease; bool doEnableMulticore; // Enable multicore if toggled, should default to false @@ -32,6 +37,12 @@ lock_t pids_lock; lock_t block_lock; struct list alive_pids_list; +void init_pending_queues(void) { +// Initialize the pending deletion queues. Tasks, memory and file descriptors (eventually) + list_init(&tasks_pending_deletion_queue); + +} + static bool pid_empty(struct pid *pid) { return pid->task == NULL && list_empty(&pid->session) && list_empty(&pid->pgroup); } @@ -189,14 +200,40 @@ void task_destroy(struct task *task, int caller) { if (locked_pids_lock) { unlock(&pids_lock); } - -retry: - // Free the task's resources. - if (!task_ref_cnt_get(task, 0)) { - free(task); + + if (task_ref_cnt_get(task, 1)) { + struct task_pending_deletion *pd = malloc(sizeof(struct task_pending_deletion)); + if (pd) { + task->reference.ready_to_be_freed = true; + pd->task = task; + pd->added_time = time(NULL); + list_init(&pd->list); + pthread_mutex_lock(&tasks_pending_deletion_lock); + list_add(&tasks_pending_deletion_queue, &pd->list); + pthread_mutex_unlock(&tasks_pending_deletion_lock); + } + // Let's clean up any pending deletions here for now + cleanup_pending_deletions(); + return; } else { - goto retry; + free(task); + } +} + +// Cleanup function to delete tasks after the grace period +void cleanup_pending_deletions(void) { + pthread_mutex_lock(&tasks_pending_deletion_lock); + struct task_pending_deletion *pd, *tmp; + list_for_each_entry_safe(&tasks_pending_deletion_queue, pd, tmp, list) { + if (difftime(time(NULL), pd->added_time) >= GRACE_PERIOD) { // Delete reaped tasks older than the grace period + if (task_ref_cnt_get(pd->task, 0) == 0) { + free(pd->task); + list_remove(&pd->list); + free(pd); + } + } } + pthread_mutex_unlock(&tasks_pending_deletion_lock); } void run_at_boot(void) { // Stuff we run only once, at boot time.
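Together, task_destroy() and cleanup_pending_deletions() above implement grace-period reclamation: a still-referenced task is parked on a queue with a timestamp, and freed only once both GRACE_PERIOD seconds have elapsed and its reference count has reached zero. A compact sketch of the same pattern detached from struct task (all names here are illustrative, not from the patch):

#include <stdlib.h>
#include <time.h>
#include <pthread.h>

// Illustrative deferred-free queue: objects that may still be referenced are
// parked with a timestamp, then freed once the grace period has elapsed AND
// the reference count has dropped to zero.
struct deferred_free {
    void *obj;                  // storage being reclaimed
    int *refcount;              // still-live reference count for obj
    time_t added;               // when the object was parked
    struct deferred_free *next;
};

static struct deferred_free *parked = NULL;
static pthread_mutex_t parked_lock = PTHREAD_MUTEX_INITIALIZER;

static void reap_parked(double grace_seconds) {
    pthread_mutex_lock(&parked_lock);
    for (struct deferred_free **pp = &parked; *pp; ) {
        struct deferred_free *p = *pp;
        if (difftime(time(NULL), p->added) >= grace_seconds && *p->refcount == 0) {
            *pp = p->next;      // unlink first, mirroring list_remove() above
            free(p->obj);
            free(p);
        } else {
            pp = &p->next;
        }
    }
    pthread_mutex_unlock(&parked_lock);
}

One caveat the patch shares with this sketch: if allocating the queue node fails, the object is neither freed nor queued, so its storage leaks; handling that branch is left for a later revision.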
diff --git a/kernel/task.h b/kernel/task.h index 5c96bb5bbe..3b8cbae443 100644 --- a/kernel/task.h +++ b/kernel/task.h @@ -14,6 +14,17 @@ extern void task_ref_cnt_mod_wrapper(int value); +// Define a structure for the pending deletion queue +struct task_pending_deletion { + struct task *task; + time_t added_time; // Timestamp when the task was added to the queue + struct list list; // For linking in the pending deletion list +}; + +// Global list of tasks pending deletion +extern struct list tasks_pending_deletion_queue; +extern pthread_mutex_t tasks_pending_deletion_lock; + struct task { struct cpu_state cpu; struct mm *mm; // locked by general_lock @@ -21,8 +32,6 @@ struct task { pthread_t thread; uint64_t threadid; - pthread_mutex_t death_lock; // Set when process is about to be reaped. Immediately cease all activity on this task. -mke - struct { pthread_mutex_t lock; int count; // If positive, don't delete yet, wait_to_delete @@ -245,5 +254,7 @@ unsigned task_ref_cnt_get(struct task *task, unsigned lock_if_zero); void modify_locks_held_count(struct task *task, int value); void modify_locks_held_count_wrapper(int value); unsigned locks_held_count(struct task *task); +void init_pending_queues(void); +void cleanup_pending_deletions(void); #endif diff --git a/main.c b/main.c index 1e703409e9..f0145a449b 100644 --- a/main.c +++ b/main.c @@ -8,6 +8,7 @@ #include #include #include +#include "util/list.h" #include #include @@ -62,6 +63,6 @@ int main(int argc, char *const argv[]) { pthread_t id; pthread_create(&id, NULL, gen_exception_thread, NULL); pthread_join(id, NULL); - + task_run_current(); } diff --git a/util/list.h b/util/list.h index 89e95b8977..1e9f437635 100644 --- a/util/list.h +++ b/util/list.h @@ -3,6 +3,7 @@ #include #include +#include struct list { struct list *next, *prev; @@ -10,22 +11,32 @@ struct list { #ifndef __KERNEL__ +// Initializes a list node by setting its next and prev pointers to point to itself, effectively creating +// a new circular list with one element. static inline void list_init(struct list *list) { list->next = list; list->prev = list; } +// This is a macro that can be used to statically initialize a list node. It sets both the next and prev pointers of the +// list node to point to the node itself, similar to list_init. #define LIST_INITIALIZER(x) {.prev = &x, .next = &x} +// Checks if both the next and prev pointers of a list node are NULL. This is typically used to check if the list node +// has not been added to any list and is stand-alone. static inline bool list_null(struct list *list) { return list->next == NULL && list->prev == NULL; } +// A list is considered empty if its next pointer points to itself or if both next and prev pointers are NULL. This +// function is useful to check before performing operations that require the list to have elements. 
static inline bool list_empty(struct list *list) { return list->next == list || list_null(list); } static inline void _list_add_between(struct list *prev, struct list *next, struct list *item) { + assert(prev != NULL && next != NULL); + prev->next = item; item->prev = prev; item->next = next; @@ -37,6 +48,8 @@ static inline void list_add_tail(struct list *list, struct list *item) { } static inline void list_add(struct list *list, struct list *item) { + assert(list != NULL && !list_null(list)); + _list_add_between(list, list->next, item); } @@ -66,29 +79,41 @@ static inline void list_remove_safe(struct list *item) { list_remove(item); } +// Given a pointer to a list node (item), the containing structure's type, and the name of the list member within +// the containing structure, this macro calculates the starting address of the containing structure. #define list_entry(item, type, member) \ container_of(item, type, member) + +// Retrieves the first entry from the list, which is the entry following the head of the list. #define list_first_entry(list, type, member) \ list_entry((list)->next, type, member) + #define list_next_entry(item, member) \ list_entry((item)->member.next, typeof(*(item)), member) +// Iterates over each element of the list. The item variable is the loop cursor, and list is the pointer to the list head. #define list_for_each(list, item) \ for (item = (list)->next; item != (list); item = item->next) + +// Similar to list_for_each but safe against removal of the list entry during iteration. This is achieved by storing the next +// node in tmp before processing the current node. #define list_for_each_safe(list, item, tmp) \ for (item = (list)->next, tmp = item->next; item != (list); \ item = tmp, tmp = item->next) - +// Iterates over the list of given type, retrieving each entry. Used for iterating over a list of structures. #define list_for_each_entry(list, item, member) \ for (item = list_entry((list)->next, typeof(*item), member); \ &item->member != (list); \ item = list_entry(item->member.next, typeof(*item), member)) + +// Do the same as above, but in a way that allows the current entry to be removed during iteration #define list_for_each_entry_safe(list, item, tmp, member) \ for (item = list_first_entry(list, typeof(*(item)), member), \ tmp = list_next_entry(item, member); \ &item->member != (list); \ item = tmp, tmp = list_next_entry(item, member)) +// Iterate and count static inline unsigned long list_size(struct list *list) { unsigned long count = 0; struct list *item; From 0f8681f2f927d428ea4f321e12c694ea99fd6829 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Fri, 8 Dec 2023 15:49:56 -0800 Subject: [PATCH 12/23] o Mostly a WIP, refined the task ref count stuff a bit, added some more to the memory ref count.
Runs, but not for long on complex workloads --- emu/memory.c | 15 +++++---- emu/memory.h | 2 +- .../xcshareddata/xcschemes/iSH.xcscheme | 31 ++----------------- kernel/exit.c | 21 ++++++++----- kernel/log.c | 7 +++-- kernel/mmap.c | 5 +-- kernel/poll.c | 2 +- kernel/signal.c | 6 +++--- kernel/task.c | 15 +++++---- util/ro_locks.c | 4 +-- util/ro_locks.h | 5 +++ util/rw_locks.c | 26 +++++++--------- 11 files changed, 60 insertions(+), 73 deletions(-) diff --git a/emu/memory.c b/emu/memory.c index 95ce17f040..ce44c774fa 100644 --- a/emu/memory.c +++ b/emu/memory.c @@ -47,13 +47,13 @@ void mem_init(struct mem *mem) { void mem_destroy(struct mem *mem) { write_lock(&mem->lock); - while((mem_ref_cnt_val_get(mem)) && (current->pid > 1) ){ + while((mem_ref_cnt_get(mem)) && (current->pid > 1) ){ nanosleep(&lock_pause, NULL); } pt_unmap_always(mem, 0, MEM_PAGES); #if ENGINE_JIT - while((task_ref_cnt_get(current, 1)) && (current->pid > 1) ){ // Wait for now, task is in one or more critical sections, and/or has locks + while((mem_ref_cnt_get(mem)) && (current->pid > 1) ){ // Wait for now, task is in one or more critical sections, and/or has locks nanosleep(&lock_pause, NULL); } jit_free(mem->mmu.jit); @@ -61,7 +61,7 @@ void mem_destroy(struct mem *mem) { for (int i = 0; i < MEM_PGDIR_SIZE; i++) { do { nanosleep(&lock_pause, NULL); - } while(mem_ref_cnt_val_get(mem)); + } while(mem_ref_cnt_get(mem)); if (mem->pgdir[i] != NULL) @@ -112,7 +112,7 @@ struct pt_entry *mem_pt(struct mem *mem, page_t page) { static void mem_pt_del(struct mem *mem, page_t page) { struct pt_entry *entry = mem_pt(mem, page); if (entry != NULL) { - while(task_ref_cnt_get(current, 0) > 4) { // mark + while(mem_ref_cnt_get(mem)) { // Don't delete if memory is in use nanosleep(&lock_pause, NULL); } entry->data = NULL; @@ -255,7 +255,7 @@ int pt_set_flags(struct mem *mem, page_t start, pages_t pages, int flags) { } int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t pages) { - while(task_ref_cnt_get(current, 0)) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_get(current, 0) > 1) { // Will be at least 1, anything higher means another thread is accessing nanosleep(&lock_pause, NULL); } for (page_t page = start; page < start + pages; mem_next_page(src, &page)) { @@ -272,7 +272,7 @@ int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t page dst_entry->offset = entry->offset; dst_entry->flags = entry->flags; } - while(task_ref_cnt_get(current, 0)) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_get(current, 0) > 1) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } mem_changed(src); @@ -347,7 +347,6 @@ void *mem_ptr(struct mem *mem, addr_t addr, int type) { // copy/paste from above mem_ref_cnt_mod(mem, 1); - //read_to_write_lock(&mem->lock); memcpy(copy, data, PAGE_SIZE); //mkemkemke Crashes here a lot when running both the go and parallel make test.
01 June 2022 mem_ref_cnt_mod(mem, -1); pt_map(mem, page, 1, copy, 0, entry->flags &~ P_COW); @@ -438,7 +437,7 @@ void mem_ref_cnt_mod(struct mem *mem, int value) { // value Should only be -1 or pthread_mutex_unlock(&mem->reference.lock); } -int mem_ref_cnt_val_get(struct mem *mem) { +int mem_ref_cnt_get(struct mem *mem) { pthread_mutex_lock(&mem->reference.lock); int cnt = mem->reference.count; pthread_mutex_unlock(&mem->reference.lock); diff --git a/emu/memory.h b/emu/memory.h index 82d4753363..43d1b6c8f0 100644 --- a/emu/memory.h +++ b/emu/memory.h @@ -108,7 +108,7 @@ int mem_segv_reason(struct mem *mem, addr_t addr); // Reference counting is important void mem_ref_cnt_mod(struct mem *mem, int value); -int mem_ref_cnt_val_get(struct mem *mem); +int mem_ref_cnt_get(struct mem *mem); extern size_t real_page_size; diff --git a/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme b/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme index b7786dac42..40b29a55da 100644 --- a/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme +++ b/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme @@ -91,8 +91,10 @@ buildConfiguration = "Debug-ApplePleaseFixFB19282108" selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" + enableAddressSanitizer = "YES" enableASanStackUseAfterReturn = "YES" - enableUBSanitizer = "YES" + disableMainThreadChecker = "YES" + disablePerformanceAntipatternChecker = "YES" launchStyle = "0" useCustomWorkingDirectory = "NO" ignoresPersistentStateOnLaunch = "NO" @@ -110,33 +112,6 @@ ReferencedContainer = "container:iSH-AOK.xcodeproj"> - - - - - - - - - - - - pending & ~current->blocked); - while((task_ref_cnt_get(current, 0) > 2) || + while((task_ref_cnt_get(current, 0) > 2) || // We added one to the task reference count above, thus the check is 2, in case any other thread is accessing. (locks_held_count(current)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); signal_pending = !!(current->pending & ~current->blocked); } + sighand_release(current->sighand); current->sighand = NULL; struct sigqueue *sigqueue, *sigqueue_tmp; @@ -149,11 +150,13 @@ noreturn void do_exit(int status) { list_remove(&sigqueue->queue); free(sigqueue); } + struct task *leader = current->group->leader; // reparent children struct task *new_parent = find_new_parent(current); struct task *child, *tmp; + list_for_each_entry_safe(¤t->children, child, tmp, siblings) { child->parent = new_parent; list_remove(&child->siblings); @@ -179,7 +182,7 @@ noreturn void do_exit(int status) { } else { leader->zombie = true; notify(&parent->group->child_exit); - struct siginfo_ info = { + struct siginfo_ info = { //mkemkemke This is interesting. Need to think about possibilities. 
TODO .code = SI_KERNEL_, .child.pid = current->pid, .child.uid = current->uid, @@ -191,7 +194,6 @@ noreturn void do_exit(int status) { send_signal(parent, leader->exit_signal, info); } - if (exit_hook != NULL) exit_hook(current, status); @@ -199,17 +201,17 @@ noreturn void do_exit(int status) { } vfork_notify(current); - task_ref_cnt_mod(current, -1); + if(current != leader) { + task_ref_cnt_mod(current, -1); task_destroy(current, 1); } else { unlock(¤t->general_lock); + task_ref_cnt_mod(current, -1); } unlock(&pids_lock); - //atomic_l_unlockf(); - EXIT:pthread_exit(NULL); } @@ -244,10 +246,13 @@ noreturn void do_exit_group(int status) { } unlock(&pids_lock); - task_ref_cnt_mod(current, -1); unlock(&group->lock); - //if(current->pid <= MAX_PID) // abort if crazy. -mke + task_ref_cnt_mod(current, -1); + if(current->pid <= MAX_PID) // abort if crazy. -mke do_exit(status); + + unlock(&pids_lock); // Shouldn't get here + pthread_exit(NULL); } // always called from init process. Intended to be called when the init process exits. diff --git a/kernel/log.c b/kernel/log.c index 5b41d97a74..01b6b938a0 100644 --- a/kernel/log.c +++ b/kernel/log.c @@ -221,7 +221,7 @@ inline int current_pid(void) { return -1; } } - + // This should never happen task_ref_cnt_mod(current, -1); return -1; } @@ -237,7 +237,7 @@ inline int current_uid(void) { return -1; } } - + // This should never happen task_ref_cnt_mod(current, -1); return -1; } @@ -252,6 +252,7 @@ inline char * current_comm(void) { task_ref_cnt_mod(current, -1); return ""; } + if (current->exiting != true) { task_ref_cnt_mod(current, -1); return comm; @@ -260,8 +261,8 @@ inline char * current_comm(void) { return ""; } } - task_ref_cnt_mod(current, -1); + task_ref_cnt_mod(current, -1); return ""; } diff --git a/kernel/mmap.c b/kernel/mmap.c index b1cace3867..b0791a3d61 100644 --- a/kernel/mmap.c +++ b/kernel/mmap.c @@ -45,15 +45,16 @@ void mm_release(struct mm *mm) { if (--mm->refcount == 0) { if (mm->exefile != NULL) fd_close(mm->exefile); - while(task_ref_cnt_get(current, 1)) { // FIXME: Should be locking current->reference.lock and updating + while(mem_ref_cnt_get(&mm->mem) ) { // FIXME: Should be locking current->reference.lock and updating // current->reference.count before mem_destroy nanosleep(&lock_pause, NULL); } mem_destroy(&mm->mem); - while(task_ref_cnt_get(current, 1)) { //FIXME: Should now unlock after mem_destroy + while(task_ref_cnt_get(current, 1) > 1) { //FIXME: Should now unlock after mem_destroy nanosleep(&lock_pause, NULL); } + free(mm); } } diff --git a/kernel/poll.c b/kernel/poll.c index 3bd67becb7..54c6fb7701 100644 --- a/kernel/poll.c +++ b/kernel/poll.c @@ -198,7 +198,7 @@ dword_t sys_poll(addr_t fds, dword_t nfds, int_t timeout) { TASK_MAY_BLOCK { res = poll_wait(poll, poll_event_callback, &context, timeout < 0 ? NULL : &timeout_ts); } - while(task_ref_cnt_get(current, 0)) { // Wait for now, task is in one or more critical sections + while(task_ref_cnt_get(current, 0) > 1) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } poll_destroy(poll); diff --git a/kernel/task.c b/kernel/task.c index bedb88fdfb..685896a91a 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -79,7 +79,7 @@ struct pid *pid_get_last_allocated(void) { } dword_t get_count_of_blocked_tasks(void) { - task_ref_cnt_mod(current, 1); + // task_ref_cnt_mod(current, 1); // Not needed? 
dword_t res = 0; struct pid *pid_entry; complex_lockt(&pids_lock, 0); @@ -88,7 +88,7 @@ dword_t get_count_of_blocked_tasks(void) { res++; } } - task_ref_cnt_mod(current, -1); + // task_ref_cnt_mod(current, -1); unlock(&pids_lock); return res; } @@ -201,7 +201,7 @@ void task_destroy(struct task *task, int caller) { unlock(&pids_lock); } - if (task_ref_cnt_get(task, 1)) { + if (task_ref_cnt_get(task, 1)) { // Check to see if another thread is accessing this process. If yes, note that and defer freeing it struct task_pending_deletion *pd = malloc(sizeof(struct task_pending_deletion)); if (pd) { task->reference.ready_to_be_freed = true; @@ -212,7 +212,7 @@ void task_destroy(struct task *task, int caller) { list_add(&tasks_pending_deletion_queue, &pd->list); pthread_mutex_unlock(&tasks_pending_deletion_lock); } - // Let's clean up any pending deletions here for now + // Let's clean up any expired pending deletions here for now cleanup_pending_deletions(); return; } else { @@ -225,7 +225,7 @@ void cleanup_pending_deletions(void) { pthread_mutex_lock(&tasks_pending_deletion_lock); struct task_pending_deletion *pd, *tmp; list_for_each_entry_safe(&tasks_pending_deletion_queue, pd, tmp, list) { - if (difftime(time(NULL), pd->added_time) >= GRACE_PERIOD) { // Delete reaped tasks older than the grace period + if ((difftime(time(NULL), pd->added_time) >= GRACE_PERIOD) && (!pd->task->reference.count)) { // Delete reaped tasks that are old and no longer referenced if (task_ref_cnt_get(pd->task, 0) == 0) { free(pd->task); list_remove(&pd->list); @@ -258,6 +258,7 @@ void task_run_current(void) { tlb_refresh(&tlb, &current->mem->mmu); while (true) { + task_ref_cnt_mod(current, 1); read_lock(&current->mem->lock); if(!doEnableMulticore) { @@ -279,6 +280,8 @@ void task_run_current(void) { } else { handle_interrupt(interrupt); } + + task_ref_cnt_mod(current, -1); } } @@ -336,7 +339,7 @@ void update_thread_name(void) { void task_ref_cnt_mod(struct task *task, int value) { // value Should only be -1 or 1. -mke // Keep track of how many threads are referencing this task - if(!doEnableExtraLocking) {// If they want to fly by the seat of their pants... -mke + if(!doEnableExtraLocking) { // If they want to fly by the seat of their pants... -mke return; } diff --git a/util/ro_locks.c b/util/ro_locks.c index b3249b1140..56ef18115f 100644 --- a/util/ro_locks.c +++ b/util/ro_locks.c @@ -91,14 +91,14 @@ void mylock(lock_t *lock, int log_lock) { unlock(lock); if(!log_lock) { - task_ref_cnt_mod_wrapper(1); + // task_ref_cnt_mod_wrapper(1); pthread_mutex_lock(&lock->m); modify_locks_held_count_wrapper(1); lock->owner = pthread_self(); lock->pid = current_pid(); lock->uid = current_uid(); strlcpy(lock->comm, current_comm(), 16); - task_ref_cnt_mod_wrapper(-1); + // task_ref_cnt_mod_wrapper(-1); } else { pthread_mutex_lock(&lock->m); lock->owner = pthread_self(); diff --git a/util/ro_locks.h b/util/ro_locks.h index 119aef93f0..e3f15c37db 100644 --- a/util/ro_locks.h +++ b/util/ro_locks.h @@ -19,6 +19,11 @@ typedef struct { char comm[16]; char lname[16]; bool wait4; + struct { + pthread_mutex_t lock; + int count; // If positive, don't delete yet, wait_to_delete + bool ready_to_be_freed; // Should be false initially + } reference; #if LOCK_DEBUG struct lock_debug { const char *file; diff --git a/util/rw_locks.c b/util/rw_locks.c index 8896418719..e088b31179 100644 --- a/util/rw_locks.c +++ b/util/rw_locks.c @@ -20,7 +20,7 @@ bool current_is_valid(void); // it to prefer writers. not worrying about anything else right now.
void loop_lock_generic(wrlock_t *lock, int is_write) { - task_ref_cnt_mod_wrapper(1); + task_ref_cnt_mod(current, 1); modify_locks_held_count_wrapper(1); unsigned count = 0; @@ -40,14 +40,14 @@ void loop_lock_generic(wrlock_t *lock, int is_write) { atomic_l_lockf(is_write ? "llw\0" : "ll_read\0", 0); } - task_ref_cnt_mod_wrapper(-1); + task_ref_cnt_mod(current, -1); } void _read_lock(wrlock_t *lock) { + task_ref_cnt_mod(current, 1); loop_lock_read(lock); - task_ref_cnt_mod_wrapper(1); //pthread_rwlock_rdlock(&lock->l); // assert(lock->val >= 0); // If it isn't >= zero we have a problem since that means there is a write lock somehow. -mke if(lock->val) { @@ -62,7 +62,7 @@ void _read_lock(wrlock_t *lock) { if(lock->val > 1000) { // We likely have a problem. printk("WARNING: _read_lock(%x) has 1000+ pending read locks. (File: %s, Line: %d) Breaking likely deadlock/process corruption(PID: %d Process: %s.\n", lock, lock->file, lock->line,lock->pid, lock->comm); read_unlock_and_destroy(lock); - task_ref_cnt_mod_wrapper(-1); + task_ref_cnt_mod(current, -1); //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); return; } @@ -70,7 +70,7 @@ void _read_lock(wrlock_t *lock) { lock->pid = current_pid(); if(lock->pid > 9) strncpy((char *)lock->comm, current_comm(), 16); - task_ref_cnt_mod_wrapper(-1); + task_ref_cnt_mod(current, -1); //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); } @@ -151,9 +151,6 @@ void wrlock_init(wrlock_t *lock) { } void _lock_destroy(wrlock_t *lock) { - while((task_ref_cnt_get(current, 0) > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections - nanosleep(&lock_pause, NULL); - } #ifdef JUSTLOG if (pthread_rwlock_destroy(&lock->l) != 0) { printk("URGENT: lock_destroy(%x) on active lock. (PID: %d Process: %s Critical Region Count: %d)\n",&lock->l, current_pid(), current_comm(),task_ref_cnt_get(current, 0)); @@ -214,36 +211,37 @@ void handle_lock_error(wrlock_t *lock, const char *func) { } void read_to_write_lock(wrlock_t *lock) { // Try to atomically swap a RO lock to a Write lock. -mke - task_ref_cnt_mod_wrapper(1); + task_ref_cnt_mod(current, 1); atomic_l_lockf("rtw_lock\0", 0); _read_unlock(lock); _write_lock(lock); atomic_l_unlockf(); - task_ref_cnt_mod_wrapper(-1); + task_ref_cnt_mod(current, -1); } void write_to_read_lock(wrlock_t *lock) { // Try to atomically swap a Write lock to a RO lock. -mke - task_ref_cnt_mod_wrapper(1); + task_ref_cnt_mod(current, 1); atomic_l_lockf("wtr_lock\0", 0); _write_unlock(lock); _read_lock(lock); atomic_l_unlockf(); - task_ref_cnt_mod_wrapper(-1); + task_ref_cnt_mod(current, -1); } void write_unlock_and_destroy(wrlock_t *lock) { - task_ref_cnt_mod_wrapper(1); + task_ref_cnt_mod(current, 1); atomic_l_lockf("wuad_lock\0", 0); _write_unlock(lock); _lock_destroy(lock); atomic_l_unlockf(); - task_ref_cnt_mod_wrapper(-1); + task_ref_cnt_mod(current, -1); } void read_unlock_and_destroy(wrlock_t *lock) { atomic_l_lockf("ruad_lock", 0); if(trylockw(lock)) // It should be locked, but just in case. Likely masking underlying issue. -mke _read_unlock(lock); + _lock_destroy(lock); atomic_l_unlockf(); } From 68db1204d0b7c908bdec9ce2af821034ca15b455 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Sat, 9 Dec 2023 06:55:49 -0800 Subject: [PATCH 13/23] o Another checkpoint. Note that STRACE is currently defined to log to the kernel buffer. 
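Since STRACE now goes through printk(), its output lands in the same ring buffer that sys_syslog() serves, so it can be read back from inside the emulated system. Roughly, as an illustrative guest-side snippet (klogctl() is the libc wrapper for the Linux syslog(2) syscall, and type 3 is SYSLOG_ACTION_READ_ALL):

    #include <sys/klog.h>

    char buf[8192];
    int n = klogctl(3 /* SYSLOG_ACTION_READ_ALL */, buf, sizeof(buf));
    // running 'dmesg' in an iSH shell performs essentially this call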
WIP --- app/AppDelegate.m | 3 +++ debug.h | 3 ++- emu/memory.c | 15 +++++++-------- fs/poll.c | 4 ++-- jit/jit.h | 6 ++++++ kernel/exec.c | 15 +++++++++++---- kernel/log.c | 6 ++++-- kernel/signal.c | 6 +++--- kernel/task.c | 5 ++--- kernel/user.c | 8 ++++++++ 10 files changed, 48 insertions(+), 23 deletions(-) diff --git a/app/AppDelegate.m b/app/AppDelegate.m index bada4d63be..2732991f13 100644 --- a/app/AppDelegate.m +++ b/app/AppDelegate.m @@ -45,6 +45,7 @@ @interface AppDelegate () static void ios_handle_exit(struct task *task, int code) { // we are interested in init and in children of init // this is called with pids_lock as an implementation side effect, please do not cite as an example of good API design + task_ref_cnt_mod(task, 1); lock(&task->general_lock, 0); complex_lockt(&pids_lock, 0); if(task->pid > MAX_PID) {// Corruption @@ -57,6 +58,7 @@ static void ios_handle_exit(struct task *task, int code) { if (task->parent != NULL && task->parent->parent != NULL) { unlock(&pids_lock); unlock(&task->general_lock); + task_ref_cnt_mod(task, -1); return; } // pid should be saved now since task would be freed @@ -64,6 +66,7 @@ static void ios_handle_exit(struct task *task, int code) { unlock(&pids_lock); unlock(&task->general_lock); + task_ref_cnt_mod(task, -1); dispatch_async(dispatch_get_main_queue(), ^{ [[NSNotificationCenter defaultCenter] postNotificationName:ProcessExitedNotification object:nil diff --git a/debug.h b/debug.h index 6862585572..6fab7296f0 100644 --- a/debug.h +++ b/debug.h @@ -78,7 +78,8 @@ extern int log_override; extern void (*die_handler)(const char *msg); _Noreturn void die(const char *msg, ...); -#define STRACE(msg, ...) TRACE_(strace, msg, ##__VA_ARGS__) +// #define STRACE(msg, ...) TRACE_(strace, msg, ##__VA_ARGS__) +#define STRACE(fmt, ...)
printk(fmt, ##__VA_ARGS__) #if defined(__i386__) || defined(__x86_64__) #define debugger __asm__("int3") diff --git a/emu/memory.c b/emu/memory.c index ce44c774fa..19e5ea9355 100644 --- a/emu/memory.c +++ b/emu/memory.c @@ -193,7 +193,7 @@ int pt_unmap(struct mem *mem, page_t start, pages_t pages) { int pt_unmap_always(struct mem *mem, page_t start, pages_t pages) { for (page_t page = start; page < start + pages; mem_next_page(mem, &page)) { - while(task_ref_cnt_get(current, 0) >3) { + while(mem_ref_cnt_get(mem) > 1) { // Being 1 is normal as pt_copy_on_write() increments the ref count nanosleep(&lock_pause, NULL); } struct pt_entry *pt = mem_pt(mem, page); @@ -207,7 +207,7 @@ int pt_unmap_always(struct mem *mem, page_t start, pages_t pages) { if (--data->refcount == 0) { // vdso wasn't allocated with mmap, it's just in our data segment if (data->data != vdso_data) { - while(task_ref_cnt_get(current, 0) > 3) { + while(mem_ref_cnt_get(mem)) { nanosleep(&lock_pause, NULL); } int err = munmap(data->data, data->size); @@ -255,9 +255,8 @@ int pt_set_flags(struct mem *mem, page_t start, pages_t pages, int flags) { } int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t pages) { - while(task_ref_cnt_get(current, 0) > 1) { // Will be at least 1, anything higher means another thread is accessing - nanosleep(&lock_pause, NULL); - } + mem_ref_cnt_mod(src, 1); + mem_ref_cnt_mod(dst, 1); for (page_t page = start; page < start + pages; mem_next_page(src, &page)) { struct pt_entry *entry = mem_pt(src, page); if (entry == NULL) @@ -272,11 +271,11 @@ int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t page dst_entry->offset = entry->offset; dst_entry->flags = entry->flags; } - while(task_ref_cnt_get(current, 0) > 1) { // Wait for now, task is in one or more critical sections - nanosleep(&lock_pause, NULL); - } mem_changed(src); mem_changed(dst); + mem_ref_cnt_mod(src, -1); + mem_ref_cnt_mod(dst, -1); + return 0; } diff --git a/fs/poll.c b/fs/poll.c index 970fefecc4..1f465e5a3a 100644 --- a/fs/poll.c +++ b/fs/poll.c @@ -330,7 +330,7 @@ void poll_destroy(struct poll *poll) { struct poll_fd *poll_fd; struct poll_fd *tmp; - while(task_ref_cnt_get(current, 0)) { + while(task_ref_cnt_get(current, 0) > 1) { nanosleep(&lock_pause, NULL); } list_for_each_entry_safe(&poll->poll_fds, poll_fd, tmp, fds) { @@ -341,7 +341,7 @@ void poll_destroy(struct poll *poll) { free(poll_fd); } - while(task_ref_cnt_get(current, 0)) { + while(task_ref_cnt_get(current, 0) > 1) { nanosleep(&lock_pause, NULL); } diff --git a/jit/jit.h b/jit/jit.h index 392447d021..4057077a5e 100644 --- a/jit/jit.h +++ b/jit/jit.h @@ -29,6 +29,12 @@ struct jit { struct { struct list blocks[2]; } *page_hash; + + struct { + pthread_mutex_t lock; + int count; // If positive, don't delete yet, wait_to_delete + bool ready_to_be_freed; // Should be false initially + } reference; lock_t lock; wrlock_t jetsam_lock; diff --git a/kernel/exec.c b/kernel/exec.c index f7c847ca3b..59d21a7b3d 100644 --- a/kernel/exec.c +++ b/kernel/exec.c @@ -102,6 +102,7 @@ static int load_entry(struct prg_header ph, addr_t bias, struct fd *fd) { // of the load entry or the end of the page, whichever comes first addr_t file_end = addr + filesize; dword_t tail_size = PAGE_SIZE - PGOFFSET(file_end); + if (tail_size == PAGE_SIZE) // if you can calculate tail_size better and not have to do this please let me know tail_size = 0; @@ -109,20 +110,26 @@ static int load_entry(struct prg_header ph, addr_t bias, struct fd *fd) { if (tail_size != 0) { 
// Unlock and lock the mem because the user functions must be // called without locking mem. - if(trylockw(¤t->mem->lock)) // Test to see if it is actually locked. This is likely masking an underlying problem. -mke + if(trylockw(¤t->mem->lock)) // Test to see if it is actually locked. This is likely masking an underlying problem. -mke write_unlock(¤t->mem->lock); + + mem_ref_cnt_mod(current->mem, 1); user_memset(file_end, 0, tail_size); write_lock(¤t->mem->lock); + mem_ref_cnt_mod(current->mem, -1); } if (tail_size > bss_size) tail_size = bss_size; // then map the pages from after the file mapping up to and including the end of bss if (bss_size - tail_size != 0) - if ((err = pt_map_nothing(current->mem, PAGE_ROUND_UP(addr + filesize), - PAGE_ROUND_UP(bss_size - tail_size), flags)) < 0) - return err; + + if ((err = pt_map_nothing(current->mem, PAGE_ROUND_UP(addr + filesize), + PAGE_ROUND_UP(bss_size - tail_size), flags)) < 0) + + return err; } + return 0; } diff --git a/kernel/log.c b/kernel/log.c index 01b6b938a0..23e24339f9 100644 --- a/kernel/log.c +++ b/kernel/log.c @@ -214,8 +214,9 @@ inline int current_pid(void) { task_ref_cnt_mod(current, 1); if(current != NULL) { if (current->exiting != true) { + int tmp = current->pid; task_ref_cnt_mod(current, -1); - return current->pid; + return tmp; } else { task_ref_cnt_mod(current, -1); return -1; @@ -230,8 +231,9 @@ inline int current_uid(void) { task_ref_cnt_mod(current, 1); if(current != NULL) { if (current->exiting != true) { + int tmp = current->uid; task_ref_cnt_mod(current, -1); - return current->uid; + return tmp; } else { task_ref_cnt_mod(current, -1); return -1; diff --git a/kernel/signal.c b/kernel/signal.c index 0e1acb96d3..2e42b2ed61 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -466,9 +466,9 @@ struct sighand *sighand_copy(struct sighand *sighand) { } void sighand_release(struct sighand *sighand) { - while(task_ref_cnt_get(current, 0) > 2) { // Wait for now, task is in one or more critical sections - nanosleep(&lock_pause, NULL); - } + // while(task_ref_cnt_get(current, 0) > 1) { // Wait for now, task is in one or more critical sections + // nanosleep(&lock_pause, NULL); + // } if (--sighand->refcount == 0) { free(sighand); } diff --git a/kernel/task.c b/kernel/task.c index 685896a91a..7ed0e99d89 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -169,8 +169,8 @@ bool should_wait(struct task *t) { void task_destroy(struct task *task, int caller) { if(trylock(&task->general_lock) == (_EBUSY)) { // Get it if a lock does not exist - task->exiting = true; lock(&task->general_lock, 0); + task->exiting = true; } //printk("TD(%s:%d): Called by %d\n", task->comm, task->pid, caller); @@ -258,8 +258,8 @@ void task_run_current(void) { tlb_refresh(&tlb, ¤t->mem->mmu); while (true) { - task_ref_cnt_mod(current, 1); read_lock(¤t->mem->lock); + task_ref_cnt_mod(current, 1); if(!doEnableMulticore) { pthread_mutex_lock(&multicore_lock); @@ -369,7 +369,6 @@ void task_ref_cnt_mod(struct task *task, int value) { // value Should only be -1 return; } - task->reference.count = task->reference.count + value; pthread_mutex_unlock(&task->reference.lock); diff --git a/kernel/user.c b/kernel/user.c index d0c81a6a92..74dba162a3 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -61,15 +61,23 @@ int user_read(addr_t addr, void *buf, size_t count) { int user_write_task(struct task *task, addr_t addr, const void *buf, size_t count) { read_lock(&task->mem->lock); + task_ref_cnt_mod(current, 1); + mem_ref_cnt_mod(current->mem, 1); int res = 
__user_write_task(task, addr, buf, count, false); read_unlock(&task->mem->lock); + task_ref_cnt_mod(current, -1); + mem_ref_cnt_mod(current->mem, -1); return res; } int user_write_task_ptrace(struct task *task, addr_t addr, const void *buf, size_t count) { read_lock(&task->mem->lock); + task_ref_cnt_mod(current, 1); + mem_ref_cnt_mod(current->mem, 1); int res = __user_write_task(task, addr, buf, count, true); read_unlock(&task->mem->lock); + task_ref_cnt_mod(current, -1); + mem_ref_cnt_mod(current->mem, -1); return res; } From eb563fdcce6e18f5b6477f334392e1655e7a282b Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Sun, 10 Dec 2023 08:45:38 -0800 Subject: [PATCH 14/23] o Moved some stuff around to more logical places o Continued to tinker with reference counts o Kinda works, but still not particularly stable --- emu/decode.h | 10 +++-- fs/inode.c | 2 +- iSH-AOK.xcodeproj/project.pbxproj | 2 + jit/jit.c | 10 ++--- kernel/exit.c | 2 +- kernel/log.c | 61 ------------------------------- kernel/log.h | 17 +++++++++ kernel/task.c | 59 ++++++++++++++++++++++++++++++ kernel/task.h | 7 +++- kernel/time.h | 1 + util/ro_locks.c | 34 ++++++++--------- util/ro_locks.h | 1 - util/rw_locks.c | 31 +++++++--------- util/sync.h | 4 -- 14 files changed, 128 insertions(+), 113 deletions(-) create mode 100644 kernel/log.h diff --git a/emu/decode.h b/emu/decode.h index 2f5037312d..8764c1348e 100644 --- a/emu/decode.h +++ b/emu/decode.h @@ -2,6 +2,7 @@ #include "emu/cpu.h" #include "emu/modrm.h" #include "emu/interrupt.h" +#include "kernel/task.h" #undef oz #define oz OP_SIZE @@ -13,9 +14,10 @@ #undef DEFAULT_CHANNEL #define DEFAULT_CHANNEL instr #define TRACEI(msg, ...) TRACE(msg "\t", ##__VA_ARGS__) -extern int current_pid(void); -extern char* curent_comm(void); -#define TRACEIP() TRACE("%d %08x\t", current_pid(), state->ip) +//extern int current_pid(struct task *task); +//extern char* curent_comm(void); +//#define TRACEIP() TRACE("%d %08x\t", current_pid(current), state->ip) +//#define TRACEIP() TRACE("%d %08x\t", current_pid(), state->ip) // this will be the next PyEval_EvalFrameEx __no_instrument DECODER_RET glue(DECODER_NAME, OP_SIZE)(DECODER_ARGS) { @@ -34,7 +36,7 @@ __no_instrument DECODER_RET glue(DECODER_NAME, OP_SIZE)(DECODER_ARGS) { #define READMODRM_NOMEM READMODRM; if (modrm.type != modrm_reg) UNDEFINED restart: - TRACEIP(); + // TRACEIP(current); READINSN; switch (insn) { #define MAKE_OP(x, OP, op) \ diff --git a/fs/inode.c b/fs/inode.c index edd44e594f..8db0cf792d 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -8,7 +8,7 @@ lock_t inodes_lock = LOCK_INITIALIZER; #define INODES_HASH_SIZE (1 << 10) static struct list inodes_hash[INODES_HASH_SIZE]; -int current_pid(void); +int current_pid(struct task *task); static struct inode_data *inode_get_data(struct mount *mount, ino_t ino) { int index = ino % INODES_HASH_SIZE; diff --git a/iSH-AOK.xcodeproj/project.pbxproj b/iSH-AOK.xcodeproj/project.pbxproj index b62368d8da..476a8eb99c 100644 --- a/iSH-AOK.xcodeproj/project.pbxproj +++ b/iSH-AOK.xcodeproj/project.pbxproj @@ -595,6 +595,7 @@ 5D59DADA2B17999500FA995C /* rw_locks.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rw_locks.h; sourceTree = ""; }; 5D59DADB2B179A2300FA995C /* ro_locks.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = ro_locks.c; sourceTree = ""; }; 5D59DAE32B17EB1600FA995C /* rw_locks.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = rw_locks.c; sourceTree = ""; }; + 
5D84E3102B2614D700F56951 /* log.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = log.h; sourceTree = ""; }; 5D8ACEF9284BF122003C50D3 /* net.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = net.c; sourceTree = ""; }; 5D8ACEFB284CE096003C50D3 /* sys.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sys.h; sourceTree = ""; }; 5D8ACEFC284CE096003C50D3 /* sys.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = sys.c; sourceTree = ""; }; @@ -1167,6 +1168,7 @@ 497F6C97254E5C9800C82F46 /* init.h */, 497F6C7A254E5C9700C82F46 /* ipc.c */, 497F6C83254E5C9700C82F46 /* log.c */, + 5D84E3102B2614D700F56951 /* log.h */, 497F6C85254E5C9700C82F46 /* misc.c */, 497F6C77254E5C9700C82F46 /* mm.h */, 497F6C9D254E5C9800C82F46 /* mmap.c */, diff --git a/jit/jit.c b/jit/jit.c index 24c8c5993a..e273e267b0 100644 --- a/jit/jit.c +++ b/jit/jit.c @@ -10,7 +10,7 @@ #include "util/list.h" #include "util/sync.h" -extern int current_pid(void); +extern int current_pid(struct task *task); static void jit_block_disconnect(struct jit *jit, struct jit_block *block); static void jit_block_free(struct jit *jit, struct jit_block *block); @@ -97,7 +97,7 @@ void jit_invalidate_all(struct jit *jit) { } static void jit_resize_hash(struct jit *jit, size_t new_size) { - TRACE_(verbose, "%d resizing hash to %lu, using %lu bytes for gadgets\n", current_pid(), new_size, jit->mem_used); + TRACE_(verbose, "%d resizing hash to %lu, using %lu bytes for gadgets\n", current_pid(current), new_size, jit->mem_used); struct list *new_hash = calloc(new_size, sizeof(struct list)); for (size_t i = 0; i < jit->hash_size; i++) { if (list_null(&jit->hash[i])) @@ -140,7 +140,7 @@ static struct jit_block *jit_lookup(struct jit *jit, addr_t addr) { static struct jit_block *jit_block_compile(addr_t ip, struct tlb *tlb) { struct gen_state state; - TRACE("%d %08x --- compiling:\n", current_pid(), ip); + TRACE("%d %08x --- compiling:\n", current_pid(current), ip); gen_start(ip, &state); while (true) { @@ -225,7 +225,7 @@ static int cpu_step_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) { block = jit_block_compile(ip, tlb); jit_insert(jit, block); } else { - TRACE("%d %08x --- missed cache\n", current_pid(), ip); + TRACE("%d %08x --- missed cache\n", current_pid(current), ip); } cache[cache_index] = block; unlock(&jit->lock); @@ -255,7 +255,7 @@ static int cpu_step_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) { // block may be jetsam, but that's ok, because it can't be freed until // every thread on this jit is not executing anything - TRACE("%d %08x --- cycle %ld\n", current_pid(), ip, frame->cpu.cycle); + TRACE("%d %08x --- cycle %ld\n", current_pid(current), ip, frame->cpu.cycle); interrupt = jit_enter(block, frame, tlb); if (interrupt == INT_NONE && __atomic_exchange_n(cpu->poked_ptr, false, __ATOMIC_SEQ_CST)) diff --git a/kernel/exit.c b/kernel/exit.c index 6a8efa4f75..c43a6a6b1e 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -92,6 +92,7 @@ noreturn void do_exit(int status) { } while((task_ref_cnt_get(current, 0) > 1) || (locks_held_count(current)) || (signal_pending)); // Wait for now, task is in one or more critical + complex_lockt(&pids_lock, 0); mm_release(current->mm); current->mm = NULL; @@ -133,7 +134,6 @@ noreturn void do_exit(int status) { // the actual freeing needs pids_lock task_ref_cnt_mod(current, 1); - complex_lockt(&pids_lock, 0); // release the sighand signal_pending = 
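// The wait-loop just above reappears throughout do_exit() with slightly
// different thresholds. Factored out, the idea is simply "spin until this task
// is quiescent"; a sketch (the tree keeps the loop inline at each call site):
static void task_wait_quiescent_sketch(struct task *task, int allowed_refs) {
    while (task_ref_cnt_get(task, 0) > allowed_refs ||   // other threads still inside
           locks_held_count(task) ||                     // still holds locks
           (task->pending & ~task->blocked))             // signals still in flight
        nanosleep(&lock_pause, NULL);                    // the short pause used throughout
}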
!!(current->pending & ~current->blocked); while((task_ref_cnt_get(current, 0) > 2) || // We added one to the task reference count above, thus the check is 2, in case any other thread is accessing. diff --git a/kernel/log.c b/kernel/log.c index 23e24339f9..8e1edd9d45 100644 --- a/kernel/log.c +++ b/kernel/log.c @@ -208,64 +208,3 @@ void die(const char *msg, ...) { abort(); va_end(args); } - -// fun little utility function -inline int current_pid(void) { - task_ref_cnt_mod(current, 1); - if(current != NULL) { - if (current->exiting != true) { - int tmp = current->pid; - task_ref_cnt_mod(current, -1); - return tmp; - } else { - task_ref_cnt_mod(current, -1); - return -1; - } - } - // This should never happen - task_ref_cnt_mod(current, -1); - return -1; -} - -inline int current_uid(void) { - task_ref_cnt_mod(current, 1); - if(current != NULL) { - if (current->exiting != true) { - int tmp = current->uid; - task_ref_cnt_mod(current, -1); - return tmp; - } else { - task_ref_cnt_mod(current, -1); - return -1; - } - } - // This should never happen - task_ref_cnt_mod(current, -1); - return -1; -} - -inline char * current_comm(void) { - static char comm[16]; - task_ref_cnt_mod(current, 1); - if(current != NULL) { - if(strcmp(current->comm, "")) { - strncpy(comm, current->comm, 16); - } else { - task_ref_cnt_mod(current, -1); - return ""; - } - - if (current->exiting != true) { - task_ref_cnt_mod(current, -1); - return comm; - } else { - task_ref_cnt_mod(current, -1); - return ""; - } - } - - task_ref_cnt_mod(current, -1); - return ""; -} - - diff --git a/kernel/log.h b/kernel/log.h new file mode 100644 index 0000000000..90207607a2 --- /dev/null +++ b/kernel/log.h @@ -0,0 +1,17 @@ +// +// log.h +// iSH-AOK +// +// Created by Michael Miller on 12/10/23. +// + +#ifndef log_h +#define log_h + +// Function prototypes +size_t sys_syslog(int_t type, addr_t buf_addr, int_t len); +void ish_vprintk(const char *msg, va_list args); +void ish_printk(const char *msg, ...); +_Noreturn void die(const char *msg, ...); + +#endif /* log_h */ diff --git a/kernel/task.c b/kernel/task.c index 7ed0e99d89..9c4d06e883 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -441,3 +441,62 @@ void modify_locks_held_count_wrapper(int value) { // sync.h can't know about the modify_locks_held_count(current, value); return; } + +// fun little utility function +int current_pid(struct task *task) { + task_ref_cnt_mod(current, 1); + if(current != NULL) { + if (current->exiting != true) { + int tmp = current->pid; + task_ref_cnt_mod(current, -1); + return tmp; + } else { + task_ref_cnt_mod(current, -1); + return -1; + } + } + // This should never happen + task_ref_cnt_mod(current, -1); + return -1; +} + +int current_uid(void) { + task_ref_cnt_mod(current, 1); + if(current != NULL) { + if (current->exiting != true) { + int tmp = current->uid; + task_ref_cnt_mod(current, -1); + return tmp; + } else { + task_ref_cnt_mod(current, -1); + return -1; + } + } + // This should never happen + task_ref_cnt_mod(current, -1); + return -1; +} + +char * current_comm(void) { + static char comm[16]; + task_ref_cnt_mod(current, 1); + if(current != NULL) { + if(strcmp(current->comm, "")) { + strncpy(comm, current->comm, 16); + } else { + task_ref_cnt_mod(current, -1); + return ""; + } + + if (current->exiting != true) { + task_ref_cnt_mod(current, -1); + return comm; + } else { + task_ref_cnt_mod(current, -1); + return ""; + } + } + + task_ref_cnt_mod(current, -1); + return ""; +} diff --git a/kernel/task.h b/kernel/task.h index 3b8cbae443..94866f1617 100644 
--- a/kernel/task.h +++ b/kernel/task.h @@ -9,8 +9,8 @@ #include "kernel/resource.h" #include "fs/sockrestart.h" #include "util/list.h" -#include "util/sync.h" #include "util/timer.h" +#include "util/sync.h" extern void task_ref_cnt_mod_wrapper(int value); @@ -257,4 +257,9 @@ unsigned locks_held_count(struct task *task); void init_pending_queues(void); void cleanup_pending_deletions(void); +bool current_is_valid(void); +int current_pid(struct task *task); +int current_uid(void); +char * current_comm(void); + #endif diff --git a/kernel/time.h b/kernel/time.h index b82b53bbfa..ea6eea91d2 100644 --- a/kernel/time.h +++ b/kernel/time.h @@ -2,6 +2,7 @@ #define TIME_H #include "misc.h" #include +#include "kernel/fs.h" struct timespec64 { int64_t tv_sec; // seconds diff --git a/util/ro_locks.c b/util/ro_locks.c index 56ef18115f..703d2a4d75 100644 --- a/util/ro_locks.c +++ b/util/ro_locks.c @@ -10,12 +10,10 @@ #include "debug.h" #include "kernel/errno.h" #include "kernel/task.h" +#include "kernel/log.h" #include "util/sync.h" // The following are in log.c. There should probably be in a log.h that gets included instead. -extern int current_pid(void); -extern int current_uid(void); -extern char* current_comm(void); bool current_is_valid(void); // Lock to lock locks. Used to assure transition between RO<->RW is automic for RW locks @@ -69,11 +67,11 @@ void atomic_l_lockf(char lname[16], int skiplog) { return; int res = 0; if(atomic_l_lock.pid > 0) { - if(current_pid() != atomic_l_lock.pid) { // Potential deadlock situation. Also weird. --mke + if(current_pid(current) != atomic_l_lock.pid) { // Potential deadlock situation. Also weird. --mke res = pthread_mutex_lock(&atomic_l_lock.m); - atomic_l_lock.pid = current_pid(); + atomic_l_lock.pid = current_pid(current); } else if(!skiplog) { - printk("WARNING: Odd attempt by process (%s:%d) to attain same locking lock twice. Ignoring\n", current_comm(), current_pid()); + printk("WARNING: Odd attempt by process (%s:%d) to attain same locking lock twice. 
Ignoring\n", current_comm(), current_pid(current)); res = 0; } } @@ -95,14 +93,14 @@ void mylock(lock_t *lock, int log_lock) { pthread_mutex_lock(&lock->m); modify_locks_held_count_wrapper(1); lock->owner = pthread_self(); - lock->pid = current_pid(); + lock->pid = current_pid(current); lock->uid = current_uid(); strlcpy(lock->comm, current_comm(), 16); // task_ref_cnt_mod_wrapper(-1); } else { pthread_mutex_lock(&lock->m); lock->owner = pthread_self(); - lock->pid = current_pid(); + lock->pid = current_pid(current); lock->uid = current_uid(); strncpy(lock->comm, current_comm(), 16); } @@ -125,7 +123,7 @@ void atomic_l_unlockf(void) { } void complex_lockt(lock_t *lock, int log_lock) { - if (lock->pid == current_pid()) + if (lock->pid == current_pid(current)) return; unsigned int count = 0; @@ -141,7 +139,7 @@ void complex_lockt(lock_t *lock, int log_lock) { if (count > count_max) { if (!log_lock) { printk("ERROR: Possible deadlock, aborted lock attempt(PID: %d Process: %s) (Previously Owned:%s:%d)\n", - current_pid(), current_comm(), lock->comm, lock->pid); + current_pid(current), current_comm(), lock->comm, lock->pid); pthread_mutex_unlock(&lock->m); modify_locks_held_count_wrapper(-1); } @@ -154,11 +152,11 @@ void complex_lockt(lock_t *lock, int log_lock) { if (count > count_max * 0.90) { if (!log_lock) printk("Warning: large lock attempt count (%d), aborted lock attempt(PID: %d Process: %s) (Previously Owned:%s:%d) \n", - count, current_pid(), current_comm(), lock->comm, lock->pid); + count, current_pid(current), current_comm(), lock->comm, lock->pid); } lock->owner = pthread_self(); - lock->pid = current_pid(); + lock->pid = current_pid(current); lock->uid = current_uid(); strncpy(lock->comm, current_comm(), sizeof(lock->comm) - 1); lock->comm[sizeof(lock->comm) - 1] = '\0'; // Null-terminate just in case @@ -172,15 +170,15 @@ int trylock(lock_t *lock) { if (!status) { lock->debug.file = file; lock->debug.line = line; - extern int current_pid(void); - lock->debug.pid = current_pid(); + extern int current_pid(struct task *task); + lock->debug.pid = current_pid(current); } #endif - if((!status) && (current_pid() > 10)) {// iSH-AOK crashes if low number processes are not excluded. Might be able to go lower then 10? -mke + if((!status) && (current_pid(current) > 10)) {// iSH-AOK crashes if low number processes are not excluded. Might be able to go lower then 10? -mke modify_locks_held_count_wrapper(1); //STRACE("trylock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); - lock->pid = current_pid(); + lock->pid = current_pid(current); strncpy(lock->comm, current_comm(), 16); } return status; @@ -193,8 +191,8 @@ int trylocknl(lock_t *lock, char *comm, int pid) { if (!status) { lock->debug.file = file; lock->debug.line = line; - extern int current_pid(void); - lock->debug.pid = current_pid(); + extern int current_pid(current); + lock->debug.pid = current_pid(current); } #endif if(!status) {// iSH-AOK crashes if low number processes are not excluded. Might be able to go lower then 10? 
-mke diff --git a/util/ro_locks.h b/util/ro_locks.h index e3f15c37db..2f762d2670 100644 --- a/util/ro_locks.h +++ b/util/ro_locks.h @@ -34,7 +34,6 @@ typedef struct { #endif } lock_t; - void lock_init(lock_t *lock, char lname[16]); void unlock(lock_t *lock); void atomic_l_lockf(char lname[16], int skiplog); diff --git a/util/rw_locks.c b/util/rw_locks.c index e088b31179..42c2526577 100644 --- a/util/rw_locks.c +++ b/util/rw_locks.c @@ -9,9 +9,6 @@ #include "util/sync.h" // The following are in log.c. There should probably be in a log.h that gets included instead. -extern int current_pid(void); -extern int current_uid(void); -extern char* current_comm(void); bool current_is_valid(void); // this is a read-write lock that prefers writers, i.e. if there are any @@ -67,7 +64,7 @@ void _read_lock(wrlock_t *lock) { return; } - lock->pid = current_pid(); + lock->pid = current_pid(current); if(lock->pid > 9) strncpy((char *)lock->comm, current_comm(), 16); task_ref_cnt_mod(current, -1); @@ -82,7 +79,7 @@ void read_lock(wrlock_t *lock) { // Wrapper so that external calls lock, interna void _read_unlock(wrlock_t *lock) { if(lock->val <= 0) { - printk("ERROR: read_unlock(%x) error(PID: %d Process: %s count %d) (%s:%d)\n",lock, current_pid(), current_comm(), lock->val); + printk("ERROR: read_unlock(%x) error(PID: %d Process: %s count %d) (%s:%d)\n",lock, current_pid(current), current_comm(), lock->val); lock->val = 0; lock->pid = -1; lock->comm[0] = 0; @@ -92,29 +89,29 @@ void _read_unlock(wrlock_t *lock) { } assert(lock->val > 0); if (pthread_rwlock_unlock(&lock->l) != 0) - printk("URGENT: read_unlock(%x) error(PID: %d Process: %s)\n", lock, current_pid(), current_comm()); + printk("URGENT: read_unlock(%x) error(PID: %d Process: %s)\n", lock, current_pid(current), current_comm()); lock->val--; modify_locks_held_count_wrapper(-1); //STRACE("read_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); } void read_unlock(wrlock_t *lock) { - if(lock->pid != current_pid() && (lock->pid != -1)) { + if(lock->pid != current_pid(current) && (lock->pid != -1)) { atomic_l_lockf("r_unlock\0", 0); _read_unlock(lock); } else { // We can unlock our own lock without additional locking. 
-mke _read_unlock(lock); return; } - if(lock->pid != current_pid() && (lock->pid != -1)) + if(lock->pid != current_pid(current) && (lock->pid != -1)) atomic_l_unlockf(); } void _write_unlock(wrlock_t *lock) { if(pthread_rwlock_unlock(&lock->l) != 0) - printk("URGENT: write_unlock(%x:%d) error(PID: %d Process: %s) \n", lock, lock->val, current_pid(), current_comm()); + printk("URGENT: write_unlock(%x:%d) error(PID: %d Process: %s) \n", lock, lock->val, current_pid(current), current_comm()); if(lock->val != -1) { - printk("ERROR: write_unlock(%x) on lock with val of %d (PID: %d Process: %s )\n", lock, lock->val, current_pid(), current_comm()); + printk("ERROR: write_unlock(%x) on lock with val of %d (PID: %d Process: %s )\n", lock, lock->val, current_pid(current), current_comm()); } //assert(lock->val == -1); lock->val = lock->line = lock->pid = 0; @@ -142,7 +139,7 @@ void wrlock_init(wrlock_t *lock) { #endif #ifdef JUSTLOG if (pthread_rwlock_init(&lock->l, pattr)) - printk("URGENT: wrlock_init() error(PID: %d Process: %s)\n",current_pid(), current_comm()); + printk("URGENT: wrlock_init() error(PID: %d Process: %s)\n",current_pid(current), current_comm()); #else if (pthread_rwlock_init(&lock->l, pattr)) __builtin_trap(); #endif @@ -153,7 +150,7 @@ void wrlock_init(wrlock_t *lock) { void _lock_destroy(wrlock_t *lock) { #ifdef JUSTLOG if (pthread_rwlock_destroy(&lock->l) != 0) { - printk("URGENT: lock_destroy(%x) on active lock. (PID: %d Process: %s Critical Region Count: %d)\n",&lock->l, current_pid(), current_comm(),task_ref_cnt_get(current, 0)); + printk("URGENT: lock_destroy(%x) on active lock. (PID: %d Process: %s Critical Region Count: %d)\n",&lock->l, current_pid(current), current_comm(),task_ref_cnt_get(current, 0)); } #else if (pthread_rwlock_destroy(&lock->l) != 0) __builtin_trap(); @@ -161,7 +158,7 @@ void _lock_destroy(wrlock_t *lock) { } void lock_destroy(wrlock_t *lock) { - while((task_ref_cnt_get(current, 0) > 1) && (current_pid() != 1)) { // Wait for now, task is in one or more critical sections + while((task_ref_cnt_get(current, 0) > 1) && (current_pid(current) != 1)) { // Wait for now, task is in one or more critical sections nanosleep(&lock_pause, NULL); } @@ -177,7 +174,7 @@ void _write_lock(wrlock_t *lock) { // Write lock lock->val = -1; // lock->file = file; // lock->line = line; - lock->pid = current_pid(); + lock->pid = current_pid(current); if(lock->pid > 9) strncpy((char *)lock->comm, current_comm(), 16); //STRACE("write_lock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); @@ -254,14 +251,14 @@ int trylockw(wrlock_t *lock) { if (!status) { lock->debug.file = file; lock->debug.line = line; - extern int current_pid(void); - lock->debug.pid = current_pid(); + extern int current_pid(current); + lock->debug.pid = current_pid(current); } #endif if(status == 0) { modify_locks_held_count_wrapper(1); //STRACE("trylockw(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); - lock->pid = current_pid(); + lock->pid = current_pid(current); strncpy(lock->comm, current_comm(), 16); } return status; diff --git a/util/sync.h b/util/sync.h index 4141c0da71..b6a192bf88 100644 --- a/util/sync.h +++ b/util/sync.h @@ -17,10 +17,6 @@ #define LOCK_DEBUG 0 // The following are in log.c. There should probably be in a log.h that gets included instead. 
-extern int current_pid(void); -extern int current_uid(void); -extern char* current_comm(void); -bool current_is_valid(void); unsigned locks_held_count_wrapper(void); From 203871c58d169b1ff6e0aa6bd7f6c8498159f8db Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Fri, 15 Dec 2023 10:38:15 -0800 Subject: [PATCH 15/23] o Inline most of ro_lock.c, because that makes things much faster. Who knew? Clearly not me. o Various tweaking o Still very much a WIP --- app/AppDelegate.m | 8 +- debug.h | 4 +- emu/memory.c | 11 +- emu/memory.h | 2 +- fs/inode.c | 1 - fs/poll.c | 2 +- .../xcshareddata/xcschemes/iSH.xcscheme | 36 +++- jit/jit.c | 9 +- kernel/calls.c | 1 + kernel/calls.h | 2 +- kernel/exit.c | 128 ++++++------ kernel/init.c | 2 +- kernel/mmap.c | 2 +- kernel/task.c | 59 ------ kernel/task.h | 61 +++++- util/ro_locks.c | 160 --------------- util/ro_locks.h | 183 +++++++++++++++++- util/rw_locks.c | 19 +- util/sync.c | 3 +- 19 files changed, 362 insertions(+), 331 deletions(-) diff --git a/app/AppDelegate.m b/app/AppDelegate.m index 2732991f13..99fb49a3eb 100644 --- a/app/AppDelegate.m +++ b/app/AppDelegate.m @@ -47,16 +47,16 @@ static void ios_handle_exit(struct task *task, int code) { // this is called with pids_lock as an implementation side effect, please do not cite as an example of good API design task_ref_cnt_mod(task, 1); lock(&task->general_lock, 0); - complex_lockt(&pids_lock, 0); + // complex_lockt(&pids_lock, 0); if(task->pid > MAX_PID) {// Corruption printk("ERROR: Insane PID in ios_handle_exit(%d)\n", task->pid); - unlock(&pids_lock); + // unlock(&pids_lock); // No reason to unlock the task, it has already been freed. :-( //unlock(&task->general_lock); return; } if (task->parent != NULL && task->parent->parent != NULL) { - unlock(&pids_lock); + // unlock(&pids_lock); unlock(&task->general_lock); task_ref_cnt_mod(task, -1); return; } // pid should be saved now since task would be freed pid_t pid = task->pid; - unlock(&pids_lock); + //unlock(&pids_lock); unlock(&task->general_lock); task_ref_cnt_mod(task, -1); dispatch_async(dispatch_get_main_queue(), ^{ diff --git a/debug.h b/debug.h index 6fab7296f0..97560380ad 100644 --- a/debug.h +++ b/debug.h @@ -78,8 +78,8 @@ extern int log_override; extern void (*die_handler)(const char *msg); _Noreturn void die(const char *msg, ...); -// #define STRACE(msg, ...) TRACE_(strace, msg, ##__VA_ARGS__) -#define STRACE(fmt, ...) printk(fmt, ##__VA_ARGS__) +#define STRACE(msg, ...) TRACE_(strace, msg, ##__VA_ARGS__) +// #define STRACE(fmt, ...)
printk(fmt, ##__VA_ARGS__) #if defined(__i386__) || defined(__x86_64__) #define debugger __asm__("int3") diff --git a/emu/memory.c b/emu/memory.c index 19e5ea9355..06a457d144 100644 --- a/emu/memory.c +++ b/emu/memory.c @@ -42,7 +42,10 @@ void mem_init(struct mem *mem) { wrlock_init(&mem->lock); mem->reference.count = 0; mem->reference.ready_to_be_freed = false; - wrlock_init(&mem->reference.lock); + int rc = pthread_mutex_init(&mem->reference.lock, NULL); + if (rc != 0) { + // Handle error + } } void mem_destroy(struct mem *mem) { @@ -53,7 +56,7 @@ void mem_destroy(struct mem *mem) { pt_unmap_always(mem, 0, MEM_PAGES); #if ENGINE_JIT - while((mem_ref_cnt_get(current)) && (current->pid > 1) ){ // Wait for now, task is in one or more critical sections, and/or has locks + while((mem_ref_cnt_get(mem)) && (current->pid > 1) ){ // Wait for now, task is in one or more critical sections, and/or has locks nanosleep(&lock_pause, NULL); } jit_free(mem->mmu.jit); @@ -112,7 +115,7 @@ struct pt_entry *mem_pt(struct mem *mem, page_t page) { static void mem_pt_del(struct mem *mem, page_t page) { struct pt_entry *entry = mem_pt(mem, page); if (entry != NULL) { - while(mem_ref_cnt_get(mem)) { // Don't delete if memory is in use + while(mem_ref_cnt_get(mem) > 1) { // Don't delete if memory is in use nanosleep(&lock_pause, NULL); } entry->data = NULL; @@ -207,7 +210,7 @@ int pt_unmap_always(struct mem *mem, page_t start, pages_t pages) { if (--data->refcount == 0) { // vdso wasn't allocated with mmap, it's just in our data segment if (data->data != vdso_data) { - while(mem_ref_cnt_get(mem)) { + while(mem_ref_cnt_get(mem) > 1) { nanosleep(&lock_pause, NULL); } int err = munmap(data->data, data->size); diff --git a/emu/memory.h b/emu/memory.h index 43d1b6c8f0..43778b7430 100644 --- a/emu/memory.h +++ b/emu/memory.h @@ -21,7 +21,7 @@ struct mem { #endif struct mmu mmu; struct { - wrlock_t lock; + pthread_mutex_t lock; int count; // If positive, don't delete yet, wait_to_delete bool ready_to_be_freed; // Should be false initially } reference; diff --git a/fs/inode.c b/fs/inode.c index 8db0cf792d..b6c64c3e6e 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -8,7 +8,6 @@ lock_t inodes_lock = LOCK_INITIALIZER; #define INODES_HASH_SIZE (1 << 10) static struct list inodes_hash[INODES_HASH_SIZE]; -int current_pid(struct task *task); static struct inode_data *inode_get_data(struct mount *mount, ino_t ino) { int index = ino % INODES_HASH_SIZE; diff --git a/fs/poll.c b/fs/poll.c index 1f465e5a3a..265f4a8dbf 100644 --- a/fs/poll.c +++ b/fs/poll.c @@ -37,7 +37,7 @@ static int real_poll_update(struct real_poll *real, int fd, int types, void *dat // lock order: fd, then poll -struct poll *poll_create() { +struct poll *poll_create(void) { struct poll *poll = malloc(sizeof(struct poll)); if (poll == NULL) return ERR_PTR(_ENOMEM); diff --git a/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme b/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme index 40b29a55da..a4fe874306 100644 --- a/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme +++ b/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme @@ -91,10 +91,8 @@ buildConfiguration = "Debug-ApplePleaseFixFB19282108" selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" - enableAddressSanitizer = "YES" enableASanStackUseAfterReturn = "YES" - disableMainThreadChecker = "YES" - disablePerformanceAntipatternChecker = "YES" + enableUBSanitizer = "YES" launchStyle = "0" useCustomWorkingDirectory 
= "NO" ignoresPersistentStateOnLaunch = "NO" @@ -112,6 +110,38 @@ ReferencedContainer = "container:iSH-AOK.xcodeproj"> + + + + + + + + + + + + + + 4) { // It's all a bit magic, but I think this is doing something useful. -mke - nanosleep(&lock_pause, NULL); - } - // mofify_critical_region_count(current, 1, __FILE__, __LINE__); + int tmp = task_ref_cnt_get(current, 0); + //while(task_ref_cnt_get(current, 0) > 4) { // It's all a bit magic, but I think this is doing something useful. -mke + // nanosleep(&lock_pause, NULL); + //} jit_invalidate_range(jit, page, page + 1); - // mofify_critical_region_count(current, -1, __FILE__, __LINE__); } void jit_invalidate_all(struct jit *jit) { diff --git a/kernel/calls.c b/kernel/calls.c index d615e81db1..90a20d02b8 100644 --- a/kernel/calls.c +++ b/kernel/calls.c @@ -11,6 +11,7 @@ extern bool isGlibC; dword_t syscall_stub(void) { STRACE("syscall_stub()"); + // I should probably do a prink here. Not sure why I removed it return _ENOSYS; } dword_t syscall_stub_silent(void) { diff --git a/kernel/calls.h b/kernel/calls.h index 15eb7cd78a..e7833329a5 100644 --- a/kernel/calls.h +++ b/kernel/calls.h @@ -35,7 +35,7 @@ dword_t sys_vfork(void); ssize_t sys_execve(addr_t file, addr_t argv, addr_t envp); int do_execve(const char *file, size_t argc, const char *argv, const char *envp); dword_t sys_exit(dword_t status); -noreturn void do_exit(int status); +noreturn void do_exit(struct task *task, int status); noreturn void do_exit_group(int status); dword_t sys_exit_group(dword_t status); dword_t sys_wait4(pid_t_ pid, addr_t status_addr, dword_t options, addr_t rusage_addr); diff --git a/kernel/exit.c b/kernel/exit.c index c43a6a6b1e..a27d9e4462 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -58,27 +58,25 @@ static struct task *find_new_parent(struct task *task) { // Handles the termination of the current task. It releases resources, notifies the parent, // and re-parents any children. It ensures the task is not in a critical section and that // all locks are released before proceeding. 
At least in theory -noreturn void do_exit(int status) { - if(current->reference.ready_to_be_freed) { +noreturn void do_exit(struct task *task, int status) { + if(task->reference.ready_to_be_freed) { goto EXIT; } else { - //nanosleep(&lock_pause, NULL); // Stupid place holder + task->exiting = true; } - - current->exiting = true; - lock(¤t->general_lock, 0); + lock(&task->general_lock, 0); - bool signal_pending = !!(current->pending & ~current->blocked); + bool signal_pending = !!(task->pending & ~task->blocked); // has to happen before mm_release - while((task_ref_cnt_get(current, 0) > 1) || - (locks_held_count(current)) || + while((task_ref_cnt_get(task, 0) > 2) || + (locks_held_count(task)) || (signal_pending)) { // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); - signal_pending = !!(current->pending & ~current->blocked); + signal_pending = !!(task->pending & ~task->blocked); } - addr_t clear_tid = current->clear_tid; + addr_t clear_tid = task->clear_tid; if (clear_tid) { pid_t_ zero = 0; if (user_put(clear_tid, zero) == 0) @@ -88,91 +86,92 @@ noreturn void do_exit(int status) { // release all our resources do { nanosleep(&lock_pause, NULL); - signal_pending = !!(current->pending & ~current->blocked); - } while((task_ref_cnt_get(current, 0) > 1) || - (locks_held_count(current)) || + signal_pending = !!(task->pending & ~task->blocked); + int tmp = task_ref_cnt_get(task, 0); + nanosleep(&lock_pause, NULL); + } while((task_ref_cnt_get(task, 0) > 2) || + (locks_held_count(task)) || (signal_pending)); // Wait for now, task is in one or more critical complex_lockt(&pids_lock, 0); - mm_release(current->mm); - current->mm = NULL; + mm_release(task->mm); + task->mm = NULL; - signal_pending = !!(current->pending & ~current->blocked); - while((task_ref_cnt_get(current, 0) > 1) || - (locks_held_count(current)) || + signal_pending = !!(task->pending & ~task->blocked); + while((task_ref_cnt_get(task, 0) > 2) || + (locks_held_count(task)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); - signal_pending = !!(current->pending & ~current->blocked); + signal_pending = !!(task->pending & ~task->blocked); } - fdtable_release(current->files); - current->files = NULL; + fdtable_release(task->files); + task->files = NULL; - while((task_ref_cnt_get(current, 0) > 1) || - (locks_held_count(current)) || + while((task_ref_cnt_get(task, 0) > 2) || + (locks_held_count(task)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); - signal_pending = !!(current->pending & ~current->blocked); + signal_pending = !!(task->pending & ~task->blocked); } - fs_info_release(current->fs); - current->fs = NULL; - signal_pending = !!(current->pending & ~current->blocked); + fs_info_release(task->fs); + task->fs = NULL; + signal_pending = !!(task->pending & ~task->blocked); // sighand must be released below so it can be protected by pids_lock // since it can be accessed by other threads - while((task_ref_cnt_get(current, 0) > 1) || - (locks_held_count(current)) || + while((task_ref_cnt_get(task, 0) > 2) || + (locks_held_count(task)) || (signal_pending)) { // Wait for now, task is in one or more critical// Wait for now, task is in one or more critical sections, 
and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); - signal_pending = !!(current->pending & ~current->blocked); + signal_pending = !!(task->pending & ~task->blocked); } // save things that our parent might be interested in - current->exit_code = status; // FIXME locking + task->exit_code = status; // FIXME locking struct rusage_ rusage = rusage_get_current(); - lock(¤t->group->lock, 0); - rusage_add(¤t->group->rusage, &rusage); - struct rusage_ group_rusage = current->group->rusage; - unlock(¤t->group->lock); + lock(&task->group->lock, 0); + rusage_add(&task->group->rusage, &rusage); + struct rusage_ group_rusage = task->group->rusage; + unlock(&task->group->lock); // the actual freeing needs pids_lock - task_ref_cnt_mod(current, 1); // release the sighand - signal_pending = !!(current->pending & ~current->blocked); - while((task_ref_cnt_get(current, 0) > 2) || // We added one to the task reference count above, thus the check is 2, in case any other thread is accessing. - (locks_held_count(current)) || + signal_pending = !!(task->pending & ~task->blocked); + while((task_ref_cnt_get(task, 0) > 2) || // We added one to the task reference count above, thus the check is 2, in case any other thread is accessing. + (locks_held_count(task)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); - signal_pending = !!(current->pending & ~current->blocked); + signal_pending = !!(task->pending & ~task->blocked); } - sighand_release(current->sighand); - current->sighand = NULL; + sighand_release(task->sighand); + task->sighand = NULL; struct sigqueue *sigqueue, *sigqueue_tmp; - list_for_each_entry_safe(¤t->queue, sigqueue, sigqueue_tmp, queue) { + list_for_each_entry_safe(&task->queue, sigqueue, sigqueue_tmp, queue) { list_remove(&sigqueue->queue); free(sigqueue); } - struct task *leader = current->group->leader; + struct task *leader = task->group->leader; // reparent children - struct task *new_parent = find_new_parent(current); + struct task *new_parent = find_new_parent(task); struct task *child, *tmp; - list_for_each_entry_safe(¤t->children, child, tmp, siblings) { + list_for_each_entry_safe(&task->children, child, tmp, siblings) { child->parent = new_parent; list_remove(&child->siblings); list_add(&new_parent->children, &child->siblings); } - signal_pending = !!(current->pending & ~current->blocked); + signal_pending = !!(task->pending & ~task->blocked); - while((task_ref_cnt_get(current, 0) > 2) || - (locks_held_count(current)) || + while((task_ref_cnt_get(task, 0) > 2) || + (locks_held_count(task)) || (signal_pending)) { // Wait for now, task is in one or more critical // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight nanosleep(&lock_pause, NULL); - signal_pending = !!(current->pending & ~current->blocked); + signal_pending = !!(task->pending & ~task->blocked); } - if (exit_tgroup(current)) { + if (exit_tgroup(task)) { // notify parent that we died struct task *parent = leader->parent; lock(&parent->general_lock, 0); @@ -184,9 +183,9 @@ noreturn void do_exit(int status) { notify(&parent->group->child_exit); struct siginfo_ info = { //mkemkemke This is interesting. Need to think about possibilities. 
TODO .code = SI_KERNEL_, - .child.pid = current->pid, - .child.uid = current->uid, - .child.status = current->exit_code, + .child.pid = task->pid, + .child.uid = task->uid, + .child.status = task->exit_code, .child.utime = clock_from_timeval(group_rusage.utime), .child.stime = clock_from_timeval(group_rusage.stime), }; @@ -195,19 +194,17 @@ noreturn void do_exit(int status) { } if (exit_hook != NULL) - exit_hook(current, status); + exit_hook(task, status); unlock(&parent->general_lock); } - vfork_notify(current); + vfork_notify(task); - if(current != leader) { - task_ref_cnt_mod(current, -1); - task_destroy(current, 1); + if(task != leader) { + task_destroy(task, 1); } else { - unlock(¤t->general_lock); - task_ref_cnt_mod(current, -1); + unlock(&task->general_lock); } unlock(&pids_lock); @@ -247,10 +244,11 @@ noreturn void do_exit_group(int status) { unlock(&pids_lock); unlock(&group->lock); - task_ref_cnt_mod(current, -1); + struct task *foo = current; // debugging if(current->pid <= MAX_PID) // abort if crazy. -mke - do_exit(status); + do_exit(current, status); + task_ref_cnt_mod(current, -1); unlock(&pids_lock); // Shouldn't get here pthread_exit(NULL); } @@ -276,7 +274,7 @@ static void halt_system(void) { dword_t sys_exit(dword_t status) { STRACE("exit(%d)\n", status); - do_exit(status << 8); + do_exit(current, status << 8); } dword_t sys_exit_group(dword_t status) { diff --git a/kernel/init.c b/kernel/init.c index f731d6b4b7..30e7d65da1 100644 --- a/kernel/init.c +++ b/kernel/init.c @@ -125,7 +125,7 @@ intptr_t become_first_process(void) { struct task *task = construct_task(NULL); if (IS_ERR(task)) return PTR_ERR(task); - + //task_ref_cnt_mod(task, 1); current = task; return 0; } diff --git a/kernel/mmap.c b/kernel/mmap.c index b0791a3d61..228b33a4ed 100644 --- a/kernel/mmap.c +++ b/kernel/mmap.c @@ -51,7 +51,7 @@ void mm_release(struct mm *mm) { } mem_destroy(&mm->mem); - while(task_ref_cnt_get(current, 1) > 1) { //FIXME: Should now unlock after mem_destroy + while(task_ref_cnt_get(current, 1) > 2) { //FIXME: Should now unlock after mem_destroy nanosleep(&lock_pause, NULL); } diff --git a/kernel/task.c b/kernel/task.c index 9c4d06e883..7ed0e99d89 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -441,62 +441,3 @@ void modify_locks_held_count_wrapper(int value) { // sync.h can't know about the modify_locks_held_count(current, value); return; } - -// fun little utility function -int current_pid(struct task *task) { - task_ref_cnt_mod(current, 1); - if(current != NULL) { - if (current->exiting != true) { - int tmp = current->pid; - task_ref_cnt_mod(current, -1); - return tmp; - } else { - task_ref_cnt_mod(current, -1); - return -1; - } - } - // This should never happen - task_ref_cnt_mod(current, -1); - return -1; -} - -int current_uid(void) { - task_ref_cnt_mod(current, 1); - if(current != NULL) { - if (current->exiting != true) { - int tmp = current->uid; - task_ref_cnt_mod(current, -1); - return tmp; - } else { - task_ref_cnt_mod(current, -1); - return -1; - } - } - // This should never happen - task_ref_cnt_mod(current, -1); - return -1; -} - -char * current_comm(void) { - static char comm[16]; - task_ref_cnt_mod(current, 1); - if(current != NULL) { - if(strcmp(current->comm, "")) { - strncpy(comm, current->comm, 16); - } else { - task_ref_cnt_mod(current, -1); - return ""; - } - - if (current->exiting != true) { - task_ref_cnt_mod(current, -1); - return comm; - } else { - task_ref_cnt_mod(current, -1); - return ""; - } - } - - task_ref_cnt_mod(current, -1); - return ""; -} diff 
--git a/kernel/task.h b/kernel/task.h index 94866f1617..a9b14d128c 100644 --- a/kernel/task.h +++ b/kernel/task.h @@ -258,8 +258,63 @@ void init_pending_queues(void); void cleanup_pending_deletions(void); bool current_is_valid(void); -int current_pid(struct task *task); -int current_uid(void); -char * current_comm(void); +// fun little utility function +static inline int current_pid(struct task *task) { + if(task == NULL) // This should never happen + return -1; + task_ref_cnt_mod(task, 1); + if (task->exiting != true) { + int tmp = task->pid; + task_ref_cnt_mod(task, -1); + return tmp; + } else { + task_ref_cnt_mod(task, -1); + return -1; + } +} + +static inline int current_uid(struct task *task) { + if(task == NULL) // This should never happen + return -1; + task_ref_cnt_mod(task, 1); + if (task->exiting != true) { + int tmp = task->uid; + task_ref_cnt_mod(task, -1); + return tmp; + } else { + task_ref_cnt_mod(task, -1); + return -1; + } +} + +static inline char * current_comm(struct task *task) { + static char comm[16]; + if(task == NULL) + return ""; + task_ref_cnt_mod(task, 1); + if(strcmp(task->comm, "")) { + strncpy(comm, task->comm, 16); + } else { + task_ref_cnt_mod(task, -1); + return ""; + } + + if (task->exiting != true) { + task_ref_cnt_mod(task, -1); + return comm; + } else { + task_ref_cnt_mod(task, -1); + return ""; + } +} #endif diff --git a/util/ro_locks.c b/util/ro_locks.c index 703d2a4d75..1a452078c7 100644 --- a/util/ro_locks.c +++ b/util/ro_locks.c @@ -45,163 +45,3 @@ void lock_init(lock_t *lock, char lname[16]) { lock->uid = -1; }
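// A caveat with the inlined current_comm() above: it returns a static 16-byte
// buffer shared by every caller, so two threads can still race on its contents.
// A reentrant variant under the same refcount scheme might look like this
// (a sketch, not in the tree; strlcpy is already used elsewhere in the series):
static inline const char *task_comm_r_sketch(struct task *task, char *buf, size_t len) {
    buf[0] = '\0';
    if (task == NULL)
        return buf;
    task_ref_cnt_mod(task, 1);       // pin the task while we copy
    if (!task->exiting)
        strlcpy(buf, task->comm, len);
    task_ref_cnt_mod(task, -1);
    return buf;
}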
This means the lock is new, and SHOULD be unlocked - unlock(lock); - - if(!log_lock) { - // task_ref_cnt_mod_wrapper(1); - pthread_mutex_lock(&lock->m); - modify_locks_held_count_wrapper(1); - lock->owner = pthread_self(); - lock->pid = current_pid(current); - lock->uid = current_uid(); - strlcpy(lock->comm, current_comm(), 16); - // task_ref_cnt_mod_wrapper(-1); - } else { - pthread_mutex_lock(&lock->m); - lock->owner = pthread_self(); - lock->pid = current_pid(current); - lock->uid = current_uid(); - strncpy(lock->comm, current_comm(), 16); - } - return; -} - -void atomic_l_unlockf(void) { - if(!doEnableExtraLocking) - return; - int res = 0; - strncpy((char *)&atomic_l_lock.lname,"\0", 1); - res = pthread_mutex_unlock(&atomic_l_lock.m); - if(res) { - printk("ERROR: unlocking locking lock\n"); - } else { - atomic_l_lock.pid = -1; // Reset - } - - modify_locks_held_count_wrapper(-1); -} - -void complex_lockt(lock_t *lock, int log_lock) { - if (lock->pid == current_pid(current)) - return; - - unsigned int count = 0; - int random_wait = WAIT_SLEEP + rand() % WAIT_SLEEP; - struct timespec lock_pause = {0, random_wait}; - long count_max = (WAIT_MAX_UPPER - random_wait); - - while (pthread_mutex_trylock(&lock->m)) { - count++; - if (nanosleep(&lock_pause, NULL) == -1) { - // Handle error - } - if (count > count_max) { - if (!log_lock) { - printk("ERROR: Possible deadlock, aborted lock attempt(PID: %d Process: %s) (Previously Owned:%s:%d)\n", - current_pid(current), current_comm(), lock->comm, lock->pid); - pthread_mutex_unlock(&lock->m); - modify_locks_held_count_wrapper(-1); - } - return; - } - } - - modify_locks_held_count_wrapper(1); - - if (count > count_max * 0.90) { - if (!log_lock) - printk("Warning: large lock attempt count (%d), aborted lock attempt(PID: %d Process: %s) (Previously Owned:%s:%d) \n", - count, current_pid(current), current_comm(), lock->comm, lock->pid); - } - - lock->owner = pthread_self(); - lock->pid = current_pid(current); - lock->uid = current_uid(); - strncpy(lock->comm, current_comm(), sizeof(lock->comm) - 1); - lock->comm[sizeof(lock->comm) - 1] = '\0'; // Null-terminate just in case -} - -int trylock(lock_t *lock) { - atomic_l_lockf("trylock\0", 0); - int status = pthread_mutex_trylock(&lock->m); - atomic_l_unlockf(); -#if LOCK_DEBUG - if (!status) { - lock->debug.file = file; - lock->debug.line = line; - extern int current_pid(struct task *task); - lock->debug.pid = current_pid(current); - } -#endif - if((!status) && (current_pid(current) > 10)) {// iSH-AOK crashes if low number processes are not excluded. Might be able to go lower then 10? -mke - modify_locks_held_count_wrapper(1); - - //STRACE("trylock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); - lock->pid = current_pid(current); - strncpy(lock->comm, current_comm(), 16); - } - return status; -} - -int trylocknl(lock_t *lock, char *comm, int pid) { - //Don't log, avoid recursion - int status = pthread_mutex_trylock(&lock->m); -#if LOCK_DEBUG - if (!status) { - lock->debug.file = file; - lock->debug.line = line; - extern int current_pid(current); - lock->debug.pid = current_pid(current); - } -#endif - if(!status) {// iSH-AOK crashes if low number processes are not excluded. Might be able to go lower then 10? 
-mke - modify_locks_held_count_wrapper(1); - - //STRACE("trylock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); - lock->pid = pid; - strncpy(lock->comm, comm, 16); - } - return status; -} - diff --git a/util/ro_locks.h b/util/ro_locks.h index 2f762d2670..f94b26e282 100644 --- a/util/ro_locks.h +++ b/util/ro_locks.h @@ -7,9 +7,16 @@ #ifndef RO_LOCKS_H #define RO_LOCKS_H -#include -#include -#include +#include +#include "misc.h" +#include "debug.h" +#include "kernel/errno.h" +#include "kernel/log.h" +#include "pthread.h" +//#include "util/sync.h" + +extern void modify_locks_held_count_wrapper(int value); +extern void task_ref_cnt_mod_wrapper(int value); typedef struct { pthread_mutex_t m; @@ -34,14 +41,170 @@ typedef struct { #endif } lock_t; +extern lock_t atomic_l_lock; // Used to make all lock operations atomic, even read->write and right->read -mke +extern bool doEnableExtraLocking; + void lock_init(lock_t *lock, char lname[16]); -void unlock(lock_t *lock); -void atomic_l_lockf(char lname[16], int skiplog); -void mylock(lock_t *lock, int log_lock); -void atomic_l_unlockf(void); -void complex_lockt(lock_t *lock, int log_lock); -int trylock(lock_t *lock); -int trylocknl(lock_t *lock, char *comm, int pid); + +static inline void unlock(lock_t *lock) { + //pid_t pid = current_pid(); + + lock->owner = zero_init(pthread_t); + lock->pid = -1; // + lock->comm[0] = 0; + modify_locks_held_count_wrapper(-1); + pthread_mutex_unlock(&lock->m); + +#if LOCK_DEBUG + assert(lock->debug.initialized); + assert(lock->debug.file && "Attempting to unlock an unlocked lock"); + lock->debug = (struct lock_debug) { .initialized = true }; +#endif + return; +} + +static inline void atomic_l_lockf(char lname[16], int skiplog) { + if(!doEnableExtraLocking) + return; + + int res = 0; + /* if(atomic_l_lock.pid > 0) { + if(current_pid(current) != atomic_l_lock.pid) { // Potential deadlock situation. Also weird. --mke */ + res = pthread_mutex_lock(&atomic_l_lock.m); + /* atomic_l_lock.pid = current_pid(current); + } else if(!skiplog) { + printk("WARNING: Odd attempt by process (%s:%d) to attain same locking lock twice. Ignoring\n", current_comm(current), current_pid(current)); + res = 0; + } + } */ + if(!res) { + // strlcpy((char *)&atomic_l_lock.comm, current_comm(current), 16); + strlcpy((char *)&atomic_l_lock.lname, lname, 16); + modify_locks_held_count_wrapper(1); + } else if (!skiplog) { + printk("Error on locking lock (%s) Called from %s:%d\n", lname); + } +} + +static inline void mylock(lock_t *lock, int log_lock) { + // struct task *foo = current; // Debugging + if(!strcmp(lock->lname, "task_creat_gen")) // kluge. 
This means the lock is new, and SHOULD be unlocked + unlock(lock); + task_ref_cnt_mod_wrapper(1); + pthread_mutex_lock(&lock->m); + if(!log_lock) { + modify_locks_held_count_wrapper(1); + } + lock->owner = pthread_self(); + //lock->pid = current_pid(current); + //lock->uid = current_uid(current); + /* if(!log_lock) { + strlcpy(lock->comm, current_comm(current), 16); + } else { + strncpy(lock->comm, current_comm(current), 16); + } */ + task_ref_cnt_mod_wrapper(-1); + return; +} + +static inline void atomic_l_unlockf(void) { + if(!doEnableExtraLocking) + return; + int res = 0; + strncpy((char *)&atomic_l_lock.lname,"\0", 1); + res = pthread_mutex_unlock(&atomic_l_lock.m); + if(res) { + printk("ERROR: unlocking locking lock\n"); + } else { + atomic_l_lock.pid = -1; // Reset + } + + modify_locks_held_count_wrapper(-1); +} + +static inline void complex_lockt(lock_t *lock, int log_lock) { + //if (lock->pid == pid) + // return; + + unsigned int count = 0; + int random_wait = WAIT_SLEEP + rand() % WAIT_SLEEP; + struct timespec lock_pause = {0, random_wait}; + long count_max = (WAIT_MAX_UPPER - random_wait); + + while (pthread_mutex_trylock(&lock->m)) { + count++; + if (nanosleep(&lock_pause, NULL) == -1) { + // Handle error + } + if (count > count_max) { + if (!log_lock) { + // printk("ERROR: Possible deadlock, aborted lock attempt(PID: %d Process: %s) (Previously Owned:%s:%d)\n", + // current_pid(current), current_comm(current), lock->comm, lock->pid); + pthread_mutex_unlock(&lock->m); + modify_locks_held_count_wrapper(-1); + } + return; + } + } + + modify_locks_held_count_wrapper(1); + + /* if (count > count_max * 0.90) { + if (!log_lock) + printk("Warning: large lock attempt count (%d), aborted lock attempt(PID: %d Process: %s) (Previously Owned:%s:%d) \n", + count, current_pid(current), current_comm(current), lock->comm, lock->pid); + } */ + + lock->owner = pthread_self(); + //lock->pid = current_pid(current); + //lock->uid = current_uid(current); + // strncpy(lock->comm, current_comm(current), sizeof(lock->comm) - 1); + lock->comm[sizeof(lock->comm) - 1] = '\0'; // Null-terminate just in case +} + +static inline int trylock(lock_t *lock) { + atomic_l_lockf("trylock\0", 0); + int status = pthread_mutex_trylock(&lock->m); + atomic_l_unlockf(); +#if LOCK_DEBUG + if (!status) { + lock->debug.file = file; + lock->debug.line = line; + extern int current_pid(struct task *task); + lock->debug.pid = current_pid(current); + } +#endif + // if((!status) && (current_pid(current) > 10)) {// iSH-AOK crashes if low number processes are not excluded. Might be able to go lower then 10? -mke + modify_locks_held_count_wrapper(1); + + //STRACE("trylock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); + // lock->pid = current_pid(current); + //strncpy(lock->comm, current_comm(current), 16); + // } + return status; +} + +static inline int trylocknl(lock_t *lock, char *comm, int pid) { + //Don't log, avoid recursion + int status = pthread_mutex_trylock(&lock->m); +#if LOCK_DEBUG + if (!status) { + lock->debug.file = file; + lock->debug.line = line; + extern int current_pid(current); + lock->debug.pid = current_pid(current); + } +#endif + if(!status) {// iSH-AOK crashes if low number processes are not excluded. Might be able to go lower then 10? 
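/*
 * Aside: atomic_l_lockf()/atomic_l_unlockf() serialize all lock bookkeeping
 * behind one global mutex -- a "lock for locks".  Reduced to a sketch with
 * hypothetical names: the essential point, mirrored from the rw-lock loops
 * later in this series, is that the global mutex must be dropped while
 * waiting, or threads that need it to release their own locks would wedge.
 */
#include <pthread.h>
#include <time.h>

static pthread_mutex_t meta = PTHREAD_MUTEX_INITIALIZER;  /* global "lock for locks" */

static void read_to_write_sketch(pthread_rwlock_t *l) {
    struct timespec pause = {0, 100000};       /* 100us between retries */
    pthread_mutex_lock(&meta);
    pthread_rwlock_unlock(l);                  /* drop the read side */
    while (pthread_rwlock_trywrlock(l) != 0) {
        pthread_mutex_unlock(&meta);           /* let other transitions proceed */
        nanosleep(&pause, NULL);
        pthread_mutex_lock(&meta);
    }
    pthread_mutex_unlock(&meta);               /* now exclusively held */
}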
-mke + modify_locks_held_count_wrapper(1); + + //STRACE("trylock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); + lock->pid = pid; + strncpy(lock->comm, comm, 16); + } + return status; +} + #define lock(lock, log_lock) mylock(lock, log_lock) //#define trylock(lock) trylock(lock, __FILE__, __LINE__) diff --git a/util/rw_locks.c b/util/rw_locks.c index 42c2526577..839af56d55 100644 --- a/util/rw_locks.c +++ b/util/rw_locks.c @@ -66,7 +66,7 @@ void _read_lock(wrlock_t *lock) { lock->pid = current_pid(current); if(lock->pid > 9) - strncpy((char *)lock->comm, current_comm(), 16); + strncpy((char *)lock->comm, current_comm(current), 16); task_ref_cnt_mod(current, -1); //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); } @@ -79,7 +79,7 @@ void read_lock(wrlock_t *lock) { // Wrapper so that external calls lock, interna void _read_unlock(wrlock_t *lock) { if(lock->val <= 0) { - printk("ERROR: read_unlock(%x) error(PID: %d Process: %s count %d) (%s:%d)\n",lock, current_pid(current), current_comm(), lock->val); + printk("ERROR: read_unlock(%x) error(PID: %d Process: %s count %d) (%s:%d)\n",lock, current_pid(current), current_comm(current), lock->val); lock->val = 0; lock->pid = -1; lock->comm[0] = 0; @@ -89,7 +89,7 @@ void _read_unlock(wrlock_t *lock) { } assert(lock->val > 0); if (pthread_rwlock_unlock(&lock->l) != 0) - printk("URGENT: read_unlock(%x) error(PID: %d Process: %s)\n", lock, current_pid(current), current_comm()); + printk("URGENT: read_unlock(%x) error(PID: %d Process: %s)\n", lock, current_pid(current), current_comm(current)); lock->val--; modify_locks_held_count_wrapper(-1); //STRACE("read_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); @@ -109,9 +109,9 @@ void read_unlock(wrlock_t *lock) { void _write_unlock(wrlock_t *lock) { if(pthread_rwlock_unlock(&lock->l) != 0) - printk("URGENT: write_unlock(%x:%d) error(PID: %d Process: %s) \n", lock, lock->val, current_pid(current), current_comm()); + printk("URGENT: write_unlock(%x:%d) error(PID: %d Process: %s) \n", lock, lock->val, current_pid(current), current_comm(current)); if(lock->val != -1) { - printk("ERROR: write_unlock(%x) on lock with val of %d (PID: %d Process: %s )\n", lock, lock->val, current_pid(current), current_comm()); + printk("ERROR: write_unlock(%x) on lock with val of %d (PID: %d Process: %s )\n", lock, lock->val, current_pid(current), current_comm(current)); } //assert(lock->val == -1); lock->val = lock->line = lock->pid = 0; @@ -139,7 +139,7 @@ void wrlock_init(wrlock_t *lock) { #endif #ifdef JUSTLOG if (pthread_rwlock_init(&lock->l, pattr)) - printk("URGENT: wrlock_init() error(PID: %d Process: %s)\n",current_pid(current), current_comm()); + printk("URGENT: wrlock_init() error(PID: %d Process: %s)\n",current_pid(current), current_comm(current)); #else if (pthread_rwlock_init(&lock->l, pattr)) __builtin_trap(); #endif @@ -148,9 +148,10 @@ void wrlock_init(wrlock_t *lock) { } void _lock_destroy(wrlock_t *lock) { + int tmp = current->reference.count; #ifdef JUSTLOG if (pthread_rwlock_destroy(&lock->l) != 0) { - printk("URGENT: lock_destroy(%x) on active lock. (PID: %d Process: %s Critical Region Count: %d)\n",&lock->l, current_pid(current), current_comm(),task_ref_cnt_get(current, 0)); + printk("URGENT: lock_destroy(%x) on active lock. 
(PID: %d Process: %s Critical Region Count: %d)\n",&lock->l, current_pid(current), current_comm(current),task_ref_cnt_get(current, 0)); } #else if (pthread_rwlock_destroy(&lock->l) != 0) __builtin_trap(); @@ -176,7 +177,7 @@ void _write_lock(wrlock_t *lock) { // Write lock // lock->line = line; lock->pid = current_pid(current); if(lock->pid > 9) - strncpy((char *)lock->comm, current_comm(), 16); + strncpy((char *)lock->comm, current_comm(current), 16); //STRACE("write_lock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); } @@ -259,7 +260,7 @@ int trylockw(wrlock_t *lock) { modify_locks_held_count_wrapper(1); //STRACE("trylockw(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); lock->pid = current_pid(current); - strncpy(lock->comm, current_comm(), 16); + strncpy(lock->comm, current_comm(current), 16); } return status; } diff --git a/util/sync.c b/util/sync.c index d9e51f0390..01f49acfd8 100644 --- a/util/sync.c +++ b/util/sync.c @@ -67,10 +67,11 @@ int wait_for_ignore_signals(cond_t *cond, lock_t *lock, struct timespec *timeout if(current->uid == 501) { // This is here for testing of the process lockup issue. -mke rc = pthread_cond_timedwait_relative_np(&cond->cond, &lock->m, &trigger_time); - //if((rc == ETIMEDOUT) && current->parent != NULL) { + // if((rc == ETIMEDOUT) && current->parent != NULL) { if(rc == ETIMEDOUT) { if(current->children.next != NULL) { notify(cond); // This is a terrible hack that seems to avoid processes getting stuck. + // return 0; } } From f6f3cf6d3c23ff2f84c07af879f5be503fc5f802 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Sat, 16 Dec 2023 03:19:44 -0800 Subject: [PATCH 16/23] o Made most of the rw locking static inline to improve performance. Reliability on parallel compile test has never been this good before o Still a WIP though --- util/ro_locks.h | 1 - util/rw_locks.c | 209 ------------------------------------------ util/rw_locks.h | 235 ++++++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 219 insertions(+), 226 deletions(-) diff --git a/util/ro_locks.h b/util/ro_locks.h index f94b26e282..f086fa6fcf 100644 --- a/util/ro_locks.h +++ b/util/ro_locks.h @@ -13,7 +13,6 @@ #include "kernel/errno.h" #include "kernel/log.h" #include "pthread.h" -//#include "util/sync.h" extern void modify_locks_held_count_wrapper(int value); extern void task_ref_cnt_mod_wrapper(int value); diff --git a/util/rw_locks.c b/util/rw_locks.c index 839af56d55..7b56c2c951 100644 --- a/util/rw_locks.c +++ b/util/rw_locks.c @@ -16,118 +16,6 @@ bool current_is_valid(void); // on darwin pthread_rwlock_t is already like this, on linux you can configure // it to prefer writers. not worrying about anything else right now. -void loop_lock_generic(wrlock_t *lock, int is_write) { - task_ref_cnt_mod(current, 1); - modify_locks_held_count_wrapper(1); - - unsigned count = 0; - int random_wait = is_write ? WAIT_SLEEP + rand() % 100 : WAIT_SLEEP + rand() % WAIT_SLEEP/4; - struct timespec lock_pause = {0, random_wait}; - long count_max = (WAIT_MAX_UPPER - random_wait); - count_max = (is_write && count_max < 25000) ? 25000 : count_max; - - while((is_write ? pthread_rwlock_trywrlock(&lock->l) : pthread_rwlock_tryrdlock(&lock->l))) { - count++; - if(count > count_max) { - handle_lock_error(lock, is_write ? "loop_lock_write" : "loop_lock_read"); - count = 0; - } - atomic_l_unlockf(); - nanosleep(&lock_pause, NULL); - atomic_l_lockf(is_write ? 
"llw\0" : "ll_read\0", 0); - } - - task_ref_cnt_mod(current, -1); -} - - - -void _read_lock(wrlock_t *lock) { - task_ref_cnt_mod(current, 1); - loop_lock_read(lock); - //pthread_rwlock_rdlock(&lock->l); - // assert(lock->val >= 0); // If it isn't >= zero we have a problem since that means there is a write lock somehow. -mke - if(lock->val) { - lock->val++; - } else if (lock->val > -1){ // Deal with insanity. -mke - lock->val++; - } else { - printk("ERROR: _read_lock() val is %d\n", lock->val); - lock->val++; - } - - if(lock->val > 1000) { // We likely have a problem. - printk("WARNING: _read_lock(%x) has 1000+ pending read locks. (File: %s, Line: %d) Breaking likely deadlock/process corruption(PID: %d Process: %s.\n", lock, lock->file, lock->line,lock->pid, lock->comm); - read_unlock_and_destroy(lock); - task_ref_cnt_mod(current, -1); - //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); - return; - } - - lock->pid = current_pid(current); - if(lock->pid > 9) - strncpy((char *)lock->comm, current_comm(current), 16); - task_ref_cnt_mod(current, -1); - //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); -} - -void read_lock(wrlock_t *lock) { // Wrapper so that external calls lock, internal calls using _read_unlock() don't -mke - atomic_l_lockf("r_lock\0", 0); - _read_lock(lock); - atomic_l_unlockf(); -} - -void _read_unlock(wrlock_t *lock) { - if(lock->val <= 0) { - printk("ERROR: read_unlock(%x) error(PID: %d Process: %s count %d) (%s:%d)\n",lock, current_pid(current), current_comm(current), lock->val); - lock->val = 0; - lock->pid = -1; - lock->comm[0] = 0; - modify_locks_held_count_wrapper(-1); - //STRACE("read_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); - return; - } - assert(lock->val > 0); - if (pthread_rwlock_unlock(&lock->l) != 0) - printk("URGENT: read_unlock(%x) error(PID: %d Process: %s)\n", lock, current_pid(current), current_comm(current)); - lock->val--; - modify_locks_held_count_wrapper(-1); - //STRACE("read_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); -} - -void read_unlock(wrlock_t *lock) { - if(lock->pid != current_pid(current) && (lock->pid != -1)) { - atomic_l_lockf("r_unlock\0", 0); - _read_unlock(lock); - } else { // We can unlock our own lock without additional locking. -mke - _read_unlock(lock); - return; - } - if(lock->pid != current_pid(current) && (lock->pid != -1)) - atomic_l_unlockf(); -} - -void _write_unlock(wrlock_t *lock) { - if(pthread_rwlock_unlock(&lock->l) != 0) - printk("URGENT: write_unlock(%x:%d) error(PID: %d Process: %s) \n", lock, lock->val, current_pid(current), current_comm(current)); - if(lock->val != -1) { - printk("ERROR: write_unlock(%x) on lock with val of %d (PID: %d Process: %s )\n", lock, lock->val, current_pid(current), current_comm(current)); - } - //assert(lock->val == -1); - lock->val = lock->line = lock->pid = 0; - lock->pid = -1; - lock->comm[0] = 0; - //STRACE("write_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); - lock->file = NULL; - modify_locks_held_count_wrapper(-1); -} - -void write_unlock(wrlock_t *lock) { // Wrap it. 
External calls lock, internal calls using _write_unlock() don't -mke - atomic_l_lockf("w_unlock\0", 0); - _write_unlock(lock); - atomic_l_unlockf(); - return; -} void wrlock_init(wrlock_t *lock) { pthread_rwlockattr_t *pattr = NULL; @@ -168,102 +56,5 @@ void lock_destroy(wrlock_t *lock) { atomic_l_unlockf(); } -void _write_lock(wrlock_t *lock) { // Write lock - loop_lock_write(lock); - - // assert(lock->val == 0); - lock->val = -1; - // lock->file = file; - // lock->line = line; - lock->pid = current_pid(current); - if(lock->pid > 9) - strncpy((char *)lock->comm, current_comm(current), 16); - //STRACE("write_lock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); -} - -void write_lock(wrlock_t *lock) { - atomic_l_lockf("_w_lock", 0); - _write_lock(lock); - atomic_l_unlockf(); -} - -void handle_lock_error(wrlock_t *lock, const char *func) { - printk("ERROR: %s(%x) tries exceeded %d, dealing with likely deadlock. (Lock held by PID: %d Process: %s) \n", - func, lock, WAIT_MAX_UPPER, lock->pid, lock->comm); - - if(pid_get((dword_t)lock->pid) == NULL) { - printk("ERROR: %s(%x) locking PID(%d) is gone for task %s\n", func, lock, lock->pid, lock->comm); - pthread_rwlock_unlock(&lock->l); - } else { - printk("ERROR: %s(%x) locking PID(%d), %s is apparently wedged\n", func, lock, lock->pid, lock->comm); - pthread_rwlock_unlock(&lock->l); - } - - if(lock->val > 1) { - lock->val--; - } else if(lock->val == 1) { - _read_unlock(lock); - } else if(lock->val < 0) { - _write_unlock(lock); - } -} - -void read_to_write_lock(wrlock_t *lock) { // Try to atomically swap a RO lock to a Write lock. -mke - task_ref_cnt_mod(current, 1); - atomic_l_lockf("rtw_lock\0", 0); - _read_unlock(lock); - _write_lock(lock); - atomic_l_unlockf(); - task_ref_cnt_mod(current, -1); -} - -void write_to_read_lock(wrlock_t *lock) { // Try to atomically swap a Write lock to a RO lock. -mke - task_ref_cnt_mod(current, 1); - atomic_l_lockf("wtr_lock\0", 0); - _write_unlock(lock); - _read_lock(lock); - atomic_l_unlockf(); - task_ref_cnt_mod(current, -1); -} - -void write_unlock_and_destroy(wrlock_t *lock) { - task_ref_cnt_mod(current, 1); - atomic_l_lockf("wuad_lock\0", 0); - _write_unlock(lock); - _lock_destroy(lock); - atomic_l_unlockf(); - task_ref_cnt_mod(current, -1); -} - -void read_unlock_and_destroy(wrlock_t *lock) { - atomic_l_lockf("ruad_lock", 0); - if(trylockw(lock)) // It should be locked, but just in case. Likely masking underlying issue. -mke - _read_unlock(lock); - - _lock_destroy(lock); - atomic_l_unlockf(); -} - -int trylockw(wrlock_t *lock) { - atomic_l_lockf("trylockw\0", 0); - int status = pthread_rwlock_trywrlock(&lock->l); - atomic_l_unlockf(); -#if LOCK_DEBUG - if (!status) { - lock->debug.file = file; - lock->debug.line = line; - extern int current_pid(current); - lock->debug.pid = current_pid(current); - } -#endif - if(status == 0) { - modify_locks_held_count_wrapper(1); - //STRACE("trylockw(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); - lock->pid = current_pid(current); - strncpy(lock->comm, current_comm(current), 16); - } - return status; -} - //#define trylockw(lock) trylockw(lock, __FILE__, __LINE__) diff --git a/util/rw_locks.h b/util/rw_locks.h index ca632bbd4b..b7f8658ffb 100644 --- a/util/rw_locks.h +++ b/util/rw_locks.h @@ -4,13 +4,24 @@ // // Created by Michael Miller on 11/29/23. 
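/*
 * Aside: the payoff of moving these helpers into the header as static inline
 * is that every translation unit can inline them rather than paying a
 * cross-object call for a few instructions of work.  A minimal illustration
 * with a hypothetical counter (not project code):
 */
#include <stdatomic.h>

/* Each includer gets its own inlinable copy; with optimization enabled this
 * typically compiles to a single atomic add instead of an out-of-line call. */
static inline void counter_inc(atomic_int *c) {
    atomic_fetch_add_explicit(c, 1, memory_order_relaxed);
}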
// - #ifndef RW_LOCK_H #define RW_LOCK_H +#include +#include "misc.h" +#include "debug.h" +#include "kernel/errno.h" +#include "kernel/log.h" +#include "pthread.h" #include #include +extern void modify_locks_held_count_wrapper(int value); +extern void task_ref_cnt_mod_wrapper(int value); + +#define loop_lock_read(lock) loop_lock_generic(lock, 0) +#define loop_lock_write(lock) loop_lock_generic(lock, 1) + typedef struct { pthread_rwlock_t l; atomic_int val; @@ -20,24 +31,216 @@ typedef struct { int pid; char comm[16]; char lname[16]; + struct { + pthread_mutex_t lock; + int count; // If positive, don't delete yet, wait_to_delete + bool ready_to_be_freed; // Should be false initially + } reference; } wrlock_t; void wrlock_init(wrlock_t *lock); -void read_lock(wrlock_t *lock); -void write_lock(wrlock_t *lock); -void read_unlock(wrlock_t *lock); -void write_unlock(wrlock_t *lock); -void read_to_write_lock(wrlock_t *lock); -void write_to_read_lock(wrlock_t *lock); -void write_unlock_and_destroy(wrlock_t *lock); -void read_unlock_and_destroy(wrlock_t *lock); -void read_to_write_lock(wrlock_t *lock); -void read_unlock_and_destroy(wrlock_t *lock); -// void lock_destroy(wrlock_t *lock); // Not used outside of rw_locks.c, no need to be exposed -void handle_lock_error(wrlock_t *lock, const char *func); -int trylockw(wrlock_t *lock); +static inline void read_unlock_and_destroy(wrlock_t *lock); +static inline int trylockw(wrlock_t *lock); -#define loop_lock_read(lock) loop_lock_generic(lock, 0) -#define loop_lock_write(lock) loop_lock_generic(lock, 1) +extern void _lock_destroy(wrlock_t *lock); + +static inline void _read_unlock(wrlock_t *lock) { + if(lock->val <= 0) { + //printk("ERROR: read_unlock(%x) error(PID: %d Process: %s count %d) (%s:%d)\n",lock, current_pid(current), current_comm(current), lock->val); + printk("ERROR: read_unlock(%x) error(val: %d)\n",lock, lock->val); + lock->val = 0; + lock->pid = -1; + lock->comm[0] = 0; + //modify_locks_held_count_wrapper(-1); + //STRACE("read_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); + return; + } + assert(lock->val > 0); + if (pthread_rwlock_unlock(&lock->l) != 0) +// printk("URGENT: read_unlock(%x) error(PID: %d Process: %s)\n", lock, current_pid(current), current_comm(current)); + printk("URGENT: read_unlock(%x) failed\n", lock); + lock->val--; + //modify_locks_held_count_wrapper(-1); + //STRACE("read_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); +} + +static inline void read_unlock(wrlock_t *lock) { + /* if(lock->pid != current_pid(current) && (lock->pid != -1)) { + atomic_l_lockf("r_unlock\0", 0); + _read_unlock(lock); + } else { */ // We can unlock our own lock without additional locking. 
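/*
 * Aside: throughout these helpers the atomic val field shadows the state of
 * the underlying pthread_rwlock_t: positive counts readers, -1 marks a held
 * writer, 0 means free.  Stripped to its essentials with a hypothetical type
 * (no diagnostics or error handling, unlike the real wrlock_t):
 */
#include <pthread.h>
#include <stdatomic.h>

typedef struct {
    pthread_rwlock_t l;
    atomic_int val;   /* >0: reader count, -1: writer held, 0: free */
} rw_sketch_t;

static inline void rd_lock(rw_sketch_t *rw)   { pthread_rwlock_rdlock(&rw->l); rw->val++; }
static inline void rd_unlock(rw_sketch_t *rw) { rw->val--; pthread_rwlock_unlock(&rw->l); }
static inline void wr_lock(rw_sketch_t *rw)   { pthread_rwlock_wrlock(&rw->l); rw->val = -1; }
static inline void wr_unlock(rw_sketch_t *rw) { rw->val = 0;  pthread_rwlock_unlock(&rw->l); }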
-mke + _read_unlock(lock); + return; + //} + //if(lock->pid != current_pid(current) && (lock->pid != -1)) + // atomic_l_unlockf(); +} + +static inline void _write_unlock(wrlock_t *lock) { + if(pthread_rwlock_unlock(&lock->l) != 0) + // printk("URGENT: write_unlock(%x:%d) error(PID: %d Process: %s) \n", lock, lock->val, current_pid(current), current_comm(current)); + printk("URGENT: write_unlock(%x:%d) error on unlock\n", lock, lock->val); + if(lock->val != -1) { + //printk("ERROR: write_unlock(%x) on lock with val of %d (PID: %d Process: %s )\n", lock, lock->val, current_pid(current), current_comm(current)); + printk("ERROR: write_unlock(%x) on lock with val of %d\n", lock, lock->val); + } + //assert(lock->val == -1); + lock->val = lock->line = lock->pid = 0; + lock->pid = -1; + lock->comm[0] = 0; + //STRACE("write_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); + lock->file = NULL; + //modify_locks_held_count_wrapper(-1); +} + +static inline void write_unlock(wrlock_t *lock) { // Wrap it. External calls lock, internal calls using _write_unlock() don't -mke + atomic_l_lockf("w_unlock\0", 0); + _write_unlock(lock); + atomic_l_unlockf(); + return; +} + +static inline void loop_lock_generic(wrlock_t *lock, int is_write) { + task_ref_cnt_mod_wrapper(1); + //modify_locks_held_count_wrapper(1); + + unsigned count = 0; + int random_wait = is_write ? WAIT_SLEEP + rand() % 100 : WAIT_SLEEP + rand() % WAIT_SLEEP/4; + struct timespec lock_pause = {0, random_wait}; + long count_max = (WAIT_MAX_UPPER - random_wait); + count_max = (is_write && count_max < 25000) ? 25000 : count_max; + + while((is_write ? pthread_rwlock_trywrlock(&lock->l) : pthread_rwlock_tryrdlock(&lock->l))) { + count++; + if(count > count_max) { + if(lock->val > 1) { + lock->val--; + } else if(lock->val == 1) { + _read_unlock(lock); + } else if(lock->val < 0) { + _write_unlock(lock); + } + count = 0; + } + atomic_l_unlockf(); + nanosleep(&lock_pause, NULL); + atomic_l_lockf(is_write ? "llw\0" : "ll_read\0", 0); + } + + task_ref_cnt_mod_wrapper(-1); +} + +static inline void _read_lock(wrlock_t *lock) { + task_ref_cnt_mod_wrapper(1); + loop_lock_read(lock); + //pthread_rwlock_rdlock(&lock->l); + // assert(lock->val >= 0); // If it isn't >= zero we have a problem since that means there is a write lock somehow. -mke + if(lock->val) { + lock->val++; + } else if (lock->val > -1){ // Deal with insanity. -mke + lock->val++; + } else { + printk("ERROR: _read_lock() val is %d\n", lock->val); + lock->val++; + } + + if(lock->val > 1000) { // We likely have a problem. + printk("WARNING: _read_lock(%x) has 1000+ pending read locks. 
(File: %s, Line: %d) Breaking likely deadlock/process corruption(PID: %d Process: %s.\n", lock, lock->file, lock->line,lock->pid, lock->comm); + read_unlock_and_destroy(lock); + task_ref_cnt_mod_wrapper(-1); + //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); + return; + } + + /* lock->pid = current_pid(current); + if(lock->pid > 9) + strncpy((char *)lock->comm, current_comm(current), 16); */ + task_ref_cnt_mod_wrapper(-1); + //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); +} + +static inline void read_lock(wrlock_t *lock) { // Wrapper so that external calls lock, internal calls using _read_unlock() don't -mke + atomic_l_lockf("r_lock\0", 0); + _read_lock(lock); + atomic_l_unlockf(); +} + + +static inline void _write_lock(wrlock_t *lock) { // Write lock + loop_lock_write(lock); + + // assert(lock->val == 0); + lock->val = -1; + // lock->file = file; + // lock->line = line; + /* lock->pid = current_pid(current); + if(lock->pid > 9) + strncpy((char *)lock->comm, current_comm(current), 16); */ + //STRACE("write_lock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); +} + +static inline void write_lock(wrlock_t *lock) { + atomic_l_lockf("_w_lock", 0); + _write_lock(lock); + atomic_l_unlockf(); +} + + +static inline void read_to_write_lock(wrlock_t *lock) { // Try to atomically swap a RO lock to a Write lock. -mke + task_ref_cnt_mod_wrapper(1); + atomic_l_lockf("rtw_lock\0", 0); + _read_unlock(lock); + _write_lock(lock); + atomic_l_unlockf(); + task_ref_cnt_mod_wrapper(-1); +} + +static inline void write_to_read_lock(wrlock_t *lock) { // Try to atomically swap a Write lock to a RO lock. -mke + task_ref_cnt_mod_wrapper(1); + atomic_l_lockf("wtr_lock\0", 0); + _write_unlock(lock); + _read_lock(lock); + atomic_l_unlockf(); + task_ref_cnt_mod_wrapper(-1); +} + +static inline void write_unlock_and_destroy(wrlock_t *lock) { + task_ref_cnt_mod_wrapper(1); + atomic_l_lockf("wuad_lock\0", 0); + _write_unlock(lock); + _lock_destroy(lock); + atomic_l_unlockf(); + task_ref_cnt_mod_wrapper(-1); +} + +static inline void read_unlock_and_destroy(wrlock_t *lock) { + atomic_l_lockf("ruad_lock", 0); + if(trylockw(lock)) // It should be locked, but just in case. Likely masking underlying issue. -mke + _read_unlock(lock); + + _lock_destroy(lock); + atomic_l_unlockf(); +} + +static inline int trylockw(wrlock_t *lock) { + atomic_l_lockf("trylockw\0", 0); + int status = pthread_rwlock_trywrlock(&lock->l); + atomic_l_unlockf(); +#if LOCK_DEBUG + if (!status) { + lock->debug.file = file; + lock->debug.line = line; + extern int current_pid(current); + lock->debug.pid = current_pid(current); + } +#endif + if(status == 0) { + //modify_locks_held_count_wrapper(1); + //STRACE("trylockw(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); + //lock->pid = current_pid(current); + //strncpy(lock->comm, current_comm(current), 16); + } + return status; +} #endif // RW_LOCK_H From 08c8b844d9f911b2b74285ba295072d3a125bacc Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Sat, 16 Dec 2023 16:03:48 -0800 Subject: [PATCH 17/23] o Mostly converting some stuff in task.c and sync.c to static inline. 
Possibly a small performance improvement o Still very unstable --- kernel/resource_locking.h | 10 ---- kernel/task.c | 90 ++---------------------------------- kernel/task.h | 96 ++++++++++++++++++++++++++++++++++++--- util/rw_locks.h | 4 +- util/sync.c | 5 -- 5 files changed, 96 insertions(+), 109 deletions(-) diff --git a/kernel/resource_locking.h b/kernel/resource_locking.h index 86ea394fc8..dfc332dade 100644 --- a/kernel/resource_locking.h +++ b/kernel/resource_locking.h @@ -2,13 +2,3 @@ // Because sometimes we can't #include "kernel/task.h" -mke // Deprecated - -/* void task_ref_cnt_mod(struct task *task, int value); -void task_ref_cnt_mod_wrapper(int); -int task_ref_cnt_get(struct task *task); -void mem_ref_cnt_mod(struct mem*, int, char*, int); -int mem_ref_cnt_val_get(struct mem *mem); -unsigned locks_held_count(struct task*); -void modify_locks_held_count(struct task*, int); -bool current_is_valid(void); */ - diff --git a/kernel/task.c b/kernel/task.c index 7ed0e99d89..c873ce99d1 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -337,45 +337,6 @@ void update_thread_name(void) { #endif } -void task_ref_cnt_mod(struct task *task, int value) { // value Should only be -1 or 1. -mke - // Keep track of how many threads are referencing this task - if(!doEnableExtraLocking) { // If they want to fly by the seat of their pants... -mke - return; - } - - if(task == NULL) { - if(current != NULL) { - task = current; - } else { - return; - } - } - - bool ilocked = false; - - if (trylocknl(&task->general_lock, task->comm, task->pid) != _EBUSY) { - ilocked = true; // Make sure this is locked, and unlock it later if we had to lock it. - } - - pthread_mutex_lock(&task->reference.lock); - - if(((task->reference.count + value) < 0) && (task->pid > 9)) { // Prevent our unsigned value attempting to go negative. -mke - printk("ERROR: Attempt to decrement task reference count to be negative, ignoring(%s:%d) (%d - %d)\n", task->comm, task->pid, task->reference.count, value); - if(ilocked == true) - unlock(&task->general_lock); - - pthread_mutex_unlock(&task->reference.lock); - - return; - } - - task->reference.count = task->reference.count + value; - - pthread_mutex_unlock(&task->reference.lock); - - if(ilocked == true) - unlock(&task->general_lock); -} void task_ref_cnt_mod_wrapper(int value) { // sync.h can't know about the definition of task struct due to recursive include files. -mke @@ -385,33 +346,10 @@ void task_ref_cnt_mod_wrapper(int value) { return; } -void modify_locks_held_count(struct task *task, int value) { // value Should only be -1 or 1. -mke - if((task == NULL) && (current != NULL)) { - task = current; - } else { - return; - } - - pthread_mutex_lock(&task->locks_held.lock); - if((task->locks_held.count + value < 0) && task->pid > 9) { - // if((task->pid > 2) && (!strcmp(task->comm, "init"))) // Why ask why? -mke - printk("ERROR: Attempt to decrement locks_held count below zero, ignoring\n"); - return; - } - task->locks_held.count = task->locks_held.count + value; - pthread_mutex_unlock(&task->locks_held.lock); -} - -// -unsigned task_ref_cnt_get(struct task *task, unsigned lock_if_zero) { - unsigned tmp = 0; - pthread_mutex_lock(&task->reference.lock); // This would make more - tmp = task->reference.count; - if(tmp > 1000) // Work around brain damage. 
Remove when said brain damage is fixed - tmp = 0; - pthread_mutex_unlock(&task->reference.lock); - - return tmp; +void modify_locks_held_count_wrapper(int value) { // sync.h can't know about the definition of struct due to recursive include files. -mke + if(current != NULL) + modify_locks_held_count(current, value); + return; } bool current_is_valid(void) { @@ -421,23 +359,3 @@ bool current_is_valid(void) { return false; } -unsigned locks_held_count(struct task *task) { - // return 0; // Short circuit for now - if(task->pid < 10) // Here be monsters. -mke - return 0; - if(task->locks_held.count > 0) { - return(task->locks_held.count -1); - } - unsigned tmp = 0; - pthread_mutex_lock(&task->locks_held.lock); - tmp = task->locks_held.count; - pthread_mutex_unlock(&task->locks_held.lock); - - return tmp; -} - -void modify_locks_held_count_wrapper(int value) { // sync.h can't know about the definition of struct due to recursive include files. -mke - if(current != NULL) - modify_locks_held_count(current, value); - return; -} diff --git a/kernel/task.h b/kernel/task.h index a9b14d128c..1484282e62 100644 --- a/kernel/task.h +++ b/kernel/task.h @@ -12,7 +12,9 @@ #include "util/timer.h" #include "util/sync.h" -extern void task_ref_cnt_mod_wrapper(int value); +// extern void task_ref_cnt_mod_wrapper(int value); + +void task_ref_cnt_mod_wrapper(int value); // Define a structure for the pending deletion queue struct task_pending_deletion { @@ -248,15 +250,97 @@ __attribute__((always_inline)) inline int task_may_block_end(void) { #define TASK_MAY_BLOCK for (int i = task_may_block_start(); i < 1; task_may_block_end(), i++) -void task_ref_cnt_mod(struct task *task, int value); -void task_ref_cnt_mod_wrapper(int value); -unsigned task_ref_cnt_get(struct task *task, unsigned lock_if_zero); -void modify_locks_held_count(struct task *task, int value); void modify_locks_held_count_wrapper(int value); -unsigned locks_held_count(struct task *task); void init_pending_queues(void); void cleanup_pending_deletions(void); +static inline void task_ref_cnt_mod(struct task *task, int value) { // value Should only be -1 or 1. -mke + // Keep track of how many threads are referencing this task + if(!doEnableExtraLocking) { // If they want to fly by the seat of their pants... -mke + return; + } + + if(task == NULL) { + if(current != NULL) { + task = current; + } else { + return; + } + } + + bool ilocked = false; + + if (trylocknl(&task->general_lock, task->comm, task->pid) != _EBUSY) { + ilocked = true; // Make sure this is locked, and unlock it later if we had to lock it. + } + + pthread_mutex_lock(&task->reference.lock); + + if(((task->reference.count + value) < 0) && (task->pid > 9)) { // Prevent our unsigned value attempting to go negative. -mke + printk("ERROR: Attempt to decrement task reference count to be negative, ignoring(%s:%d) (%d - %d)\n", task->comm, task->pid, task->reference.count, value); + if(ilocked == true) + unlock(&task->general_lock); + + pthread_mutex_unlock(&task->reference.lock); + + return; + } + + task->reference.count = task->reference.count + value; + + pthread_mutex_unlock(&task->reference.lock); + + if(ilocked == true) + unlock(&task->general_lock); +} + + +static inline void modify_locks_held_count(struct task *task, int value) { // value Should only be -1 or 1. 
-mke + if((task == NULL) && (current != NULL)) { + task = current; + } else { + return; + } + + pthread_mutex_lock(&task->locks_held.lock); + if((task->locks_held.count + value < 0) && task->pid > 9) { + // if((task->pid > 2) && (!strcmp(task->comm, "init"))) // Why ask why? -mke + printk("ERROR: Attempt to decrement locks_held count below zero, ignoring\n"); + return; + } + task->locks_held.count = task->locks_held.count + value; + pthread_mutex_unlock(&task->locks_held.lock); +} + +// +static inline unsigned task_ref_cnt_get(struct task *task, unsigned lock_if_zero) { + unsigned tmp = 0; + pthread_mutex_lock(&task->reference.lock); // This would make more + tmp = task->reference.count; + if(tmp > 1000) // Work around brain damage. Remove when said brain damage is fixed + tmp = 0; + pthread_mutex_unlock(&task->reference.lock); + + return tmp; +} + + +static inline unsigned locks_held_count(struct task *task) { + // return 0; // Short circuit for now + if(task->pid < 10) // Here be monsters. -mke + return 0; + if(task->locks_held.count > 0) { + return(task->locks_held.count -1); + } + unsigned tmp = 0; + pthread_mutex_lock(&task->locks_held.lock); + tmp = task->locks_held.count; + pthread_mutex_unlock(&task->locks_held.lock); + + return tmp; +} + + bool current_is_valid(void); // fun little utility function static inline int current_pid(struct task *task) { diff --git a/util/rw_locks.h b/util/rw_locks.h index b7f8658ffb..9d6f287cc0 100644 --- a/util/rw_locks.h +++ b/util/rw_locks.h @@ -16,8 +16,8 @@ #include #include -extern void modify_locks_held_count_wrapper(int value); -extern void task_ref_cnt_mod_wrapper(int value); +//extern void modify_locks_held_count_wrapper(int value); +//extern void task_ref_cnt_mod_wrapper(int value); #define loop_lock_read(lock) loop_lock_generic(lock, 0) #define loop_lock_write(lock) loop_lock_generic(lock, 1) diff --git a/util/sync.c b/util/sync.c index 01f49acfd8..af000f0606 100644 --- a/util/sync.c +++ b/util/sync.c @@ -123,11 +123,6 @@ void notify_once(cond_t *cond) { __thread sigjmp_buf unwind_buf; __thread bool should_unwind = false; -unsigned locks_held_count_wrapper(void) { // sync.h can't know about the definition of struct due to recursive include files. 
-mke - if(current != NULL) - return(locks_held_count(current)); - return 0; -} void sigusr1_handler(int sig) { if (should_unwind) { From 34e8f9eb6212d64fb0970a39ea931c41271c1443 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Sun, 17 Dec 2023 11:42:09 -0800 Subject: [PATCH 18/23] o Fix bug in task reference tracking o Removed deprecated resource_locking.h file --- app/AppDelegate.m | 4 +-- .../xcshareddata/xcschemes/iSH.xcscheme | 35 ++----------------- kernel/resource_locking.h | 4 --- kernel/user.c | 5 ++- 4 files changed, 8 insertions(+), 40 deletions(-) delete mode 100644 kernel/resource_locking.h diff --git a/app/AppDelegate.m b/app/AppDelegate.m index 99fb49a3eb..5f596322fe 100644 --- a/app/AppDelegate.m +++ b/app/AppDelegate.m @@ -58,7 +58,7 @@ static void ios_handle_exit(struct task *task, int code) { if (task->parent != NULL && task->parent->parent != NULL) { // unlock(&pids_lock); unlock(&task->general_lock); - task_ref_cnt_mod(task, 1); + task_ref_cnt_mod(task, -1); return; } // pid should be saved now since task would be freed @@ -66,7 +66,7 @@ static void ios_handle_exit(struct task *task, int code) { //unlock(&pids_lock); unlock(&task->general_lock); - task_ref_cnt_mod(task, 1); + task_ref_cnt_mod(task, -1); dispatch_async(dispatch_get_main_queue(), ^{ [[NSNotificationCenter defaultCenter] postNotificationName:ProcessExitedNotification object:nil diff --git a/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme b/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme index a4fe874306..cf366ef4b3 100644 --- a/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme +++ b/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme @@ -92,7 +92,8 @@ selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" enableASanStackUseAfterReturn = "YES" - enableUBSanitizer = "YES" + disableMainThreadChecker = "YES" + disablePerformanceAntipatternChecker = "YES" launchStyle = "0" useCustomWorkingDirectory = "NO" ignoresPersistentStateOnLaunch = "NO" @@ -110,38 +111,6 @@ ReferencedContainer = "container:iSH-AOK.xcodeproj"> - - - - - - - - - - - - - - addr + count) chunk_end = addr + count; + const char *ptr = mem_ptr(task->mem, p, MEM_READ); + if (ptr == NULL) { return 1; } @@ -47,11 +49,12 @@ static int __user_write_task(struct task *task, addr_t addr, const void *buf, si } int user_read_task(struct task *task, addr_t addr, void *buf, size_t count) { + mem_ref_cnt_mod(task->mem, 1); read_lock(&task->mem->lock); - int res = __user_read_task(task, addr, buf, count); read_unlock(&task->mem->lock); + mem_ref_cnt_mod(task->mem, -1); return res; } From f9332a49dffe0f70422dbc976ec8497ab6623e76 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Sun, 17 Dec 2023 14:35:33 -0800 Subject: [PATCH 19/23] o Build is now properly noted as 506 o Fixed bug --- iSH-AOK.xcodeproj/project.pbxproj | 24 ++++++++++++------------ kernel/mmap.c | 3 ++- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/iSH-AOK.xcodeproj/project.pbxproj b/iSH-AOK.xcodeproj/project.pbxproj index 476a8eb99c..e6393bcf99 100644 --- a/iSH-AOK.xcodeproj/project.pbxproj +++ b/iSH-AOK.xcodeproj/project.pbxproj @@ -2573,7 +2573,7 @@ CLANG_ENABLE_MODULES = YES; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 505; + CURRENT_PROJECT_VERSION = 506; DEVELOPMENT_TEAM = UYU5FM4LQ4; LD_RUNPATH_SEARCH_PATHS = ( "$(inherited)", @@ -2599,7 +2599,7 @@ CLANG_ENABLE_MODULES = YES; CODE_SIGN_IDENTITY = "Apple Development"; 
CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 505; + CURRENT_PROJECT_VERSION = 506; DEVELOPMENT_TEAM = UYU5FM4LQ4; LD_RUNPATH_SEARCH_PATHS = ( "$(inherited)", @@ -2634,7 +2634,7 @@ CLANG_ENABLE_MODULES = YES; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 505; + CURRENT_PROJECT_VERSION = 506; DEVELOPMENT_TEAM = UYU5FM4LQ4; LD_RUNPATH_SEARCH_PATHS = ( "$(inherited)", @@ -2658,7 +2658,7 @@ buildSettings = { CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 505; + CURRENT_PROJECT_VERSION = 506; DEVELOPMENT_TEAM = UYU5FM4LQ4; MARKETING_VERSION = 1.3; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK"; @@ -2683,7 +2683,7 @@ CODE_SIGN_ENTITLEMENTS = app/FileProvider/iSHFileProvider.entitlements; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 505; + CURRENT_PROJECT_VERSION = 506; DEVELOPMENT_TEAM = UYU5FM4LQ4; INFOPLIST_FILE = app/FileProvider/Info.plist; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK.FileProvider"; @@ -2794,7 +2794,7 @@ CODE_SIGN_ENTITLEMENTS = app/FileProvider/iSHFileProvider.entitlements; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 505; + CURRENT_PROJECT_VERSION = 506; DEVELOPMENT_TEAM = UYU5FM4LQ4; INFOPLIST_FILE = app/FileProvider/Info.plist; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK.FileProvider"; @@ -2812,7 +2812,7 @@ CODE_SIGN_ENTITLEMENTS = iSHFileProviderRelease.entitlements; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 505; + CURRENT_PROJECT_VERSION = 506; DEVELOPMENT_TEAM = UYU5FM4LQ4; INFOPLIST_FILE = app/FileProvider/Info.plist; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK.FileProvider"; @@ -2915,7 +2915,7 @@ buildSettings = { CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 505; + CURRENT_PROJECT_VERSION = 506; DEVELOPMENT_TEAM = UYU5FM4LQ4; MARKETING_VERSION = 1.3; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK"; @@ -2930,7 +2930,7 @@ buildSettings = { CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 505; + CURRENT_PROJECT_VERSION = 506; DEVELOPMENT_TEAM = UYU5FM4LQ4; MARKETING_VERSION = 1.3; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK"; @@ -2958,7 +2958,7 @@ CLANG_ENABLE_MODULES = YES; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 505; + CURRENT_PROJECT_VERSION = 506; DEVELOPMENT_TEAM = UYU5FM4LQ4; LD_RUNPATH_SEARCH_PATHS = ( "$(inherited)", @@ -2981,7 +2981,7 @@ buildSettings = { CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 505; + CURRENT_PROJECT_VERSION = 506; DEVELOPMENT_TEAM = UYU5FM4LQ4; MARKETING_VERSION = 1.3; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK"; @@ -3006,7 +3006,7 @@ CODE_SIGN_ENTITLEMENTS = app/FileProvider/iSHFileProvider.entitlements; CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_STYLE = Automatic; - CURRENT_PROJECT_VERSION = 505; + CURRENT_PROJECT_VERSION = 506; DEVELOPMENT_TEAM = UYU5FM4LQ4; INFOPLIST_FILE = app/FileProvider/Info.plist; PRODUCT_BUNDLE_IDENTIFIER = "app.ish.iSH-AOK.FileProvider"; diff --git a/kernel/mmap.c b/kernel/mmap.c index 228b33a4ed..f9ed3f3f23 100644 --- a/kernel/mmap.c +++ b/kernel/mmap.c @@ -182,8 +182,9 @@ int_t sys_mremap(addr_t addr, dword_t old_len, dword_t new_len, dword_t flags) { pages_t new_pages = PAGE(new_len); // shrinking always 
works + int tmp = current->mem->reference.count; // Debugging if (new_pages <= old_pages) { - while(task_ref_cnt_get(current, 0)) { + while(task_ref_cnt_get(current, 0) > 1) { // Sometimes this is one. Figure out if this is OK. FIXME nanosleep(&lock_pause, NULL); } int err = pt_unmap(current->mem, PAGE(addr) + new_pages, old_pages - new_pages); From 4db220de56d6a0a7a7cb143f08ce4c55f714f4d8 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Mon, 18 Dec 2023 09:43:46 -0800 Subject: [PATCH 20/23] o Upgrade Xcode options to make it happy o Remove wrapper functions that weren't needed o Minor code cleanup --- app/iOSFS.m | 6 +- fs/poll.c | 3 +- fs/sock.c | 2 +- iSH-AOK.xcodeproj/project.pbxproj | 5 +- .../xcschemes/Screenshots.xcscheme | 2 +- .../xcshareddata/xcschemes/iSH+Linux.xcscheme | 2 +- .../xcshareddata/xcschemes/iSH.xcscheme | 2 +- .../xcshareddata/xcschemes/ish-cli.xcscheme | 2 +- kernel/task.c | 66 +++++++++++++++---- kernel/task.h | 66 +------------------ util/ro_locks.h | 26 ++++---- util/rw_locks.h | 36 +++++----- util/sync.h | 4 +- 13 files changed, 103 insertions(+), 119 deletions(-) diff --git a/app/iOSFS.m b/app/iOSFS.m index d9ea705944..3abac999e5 100644 --- a/app/iOSFS.m +++ b/app/iOSFS.m @@ -16,6 +16,8 @@ #include "fs/path.h" #include "fs/real.h" +extern inline void task_ref_cnt_mod(struct task *task, int value); + const NSFileCoordinatorWritingOptions NSFileCoordinatorWritingForCreating = NSFileCoordinatorWritingForMerging; @interface DirectoryPicker : NSObject @@ -240,7 +242,7 @@ static int combine_error(NSError *coordinatorError, int err) { __block NSError *error = nil; __block struct fd *fd; __block dispatch_semaphore_t file_opened = dispatch_semaphore_create(0); - task_ref_cnt_mod_wrapper(1); + task_ref_cnt_mod(current, 1); dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void){ void (^operation)(NSURL *url) = ^(NSURL *url) { fd = realfs_open(mount, path_for_url_in_mount(mount, url, path), flags, mode); @@ -266,7 +268,7 @@ static int combine_error(NSError *coordinatorError, int err) { } [coordinator coordinateReadingItemAtURL:url options:options error:&error byAccessor:operation]; }); - task_ref_cnt_mod_wrapper(-1); + task_ref_cnt_mod(current, -1); dispatch_semaphore_wait(file_opened, DISPATCH_TIME_FOREVER); diff --git a/fs/poll.c b/fs/poll.c index 265f4a8dbf..7be8137b0e 100644 --- a/fs/poll.c +++ b/fs/poll.c @@ -330,7 +330,8 @@ void poll_destroy(struct poll *poll) { struct poll_fd *poll_fd; struct poll_fd *tmp; - while(task_ref_cnt_get(current, 0) > 1) { + int fug = current->reference.count; // Debugging. Xcode 15.1 can't 'decode' 'current' or any of its components. 
:-( + while(task_ref_cnt_get(current, 0) > 2) { nanosleep(&lock_pause, NULL); } list_for_each_entry_safe(&poll->poll_fds, poll_fd, tmp, fds) { diff --git a/fs/sock.c b/fs/sock.c index 9dcccffc54..1e5872e08d 100644 --- a/fs/sock.c +++ b/fs/sock.c @@ -79,7 +79,7 @@ static struct fd *sock_getfd(fd_t sock_fd) { return sock; } -static uint32_t unix_socket_next_id() { +static uint32_t unix_socket_next_id(void) { static uint32_t next_id = 0; static lock_t next_id_lock = LOCK_INITIALIZER; lock(&next_id_lock, 0); diff --git a/iSH-AOK.xcodeproj/project.pbxproj b/iSH-AOK.xcodeproj/project.pbxproj index e6393bcf99..78ff01c86a 100644 --- a/iSH-AOK.xcodeproj/project.pbxproj +++ b/iSH-AOK.xcodeproj/project.pbxproj @@ -114,7 +114,6 @@ 5D8CFA852B1198B300D50E57 /* RTCDevice.m in Sources */ = {isa = PBXBuildFile; fileRef = 5D8CFA842B1198B300D50E57 /* RTCDevice.m */; }; 5D8CFA862B1198B300D50E57 /* RTCDevice.m in Sources */ = {isa = PBXBuildFile; fileRef = 5D8CFA842B1198B300D50E57 /* RTCDevice.m */; }; 5D8CFA872B1198B300D50E57 /* RTCDevice.m in Sources */ = {isa = PBXBuildFile; fileRef = 5D8CFA842B1198B300D50E57 /* RTCDevice.m */; }; - 5D9897D028B6B953003D3670 /* AppStore.xcconfig in Resources */ = {isa = PBXBuildFile; fileRef = 5D9897CE28B6B953003D3670 /* AppStore.xcconfig */; }; 5DA0A8342AAE21D000397280 /* BatteryStatus.m in Sources */ = {isa = PBXBuildFile; fileRef = 5DA0A8332AAE21D000397280 /* BatteryStatus.m */; }; 5DD383EB2AAE33330013A847 /* UIDevice.m in Sources */ = {isa = PBXBuildFile; fileRef = 5DD383EA2AAE33330013A847 /* UIDevice.m */; }; BB0F552E239F8A790032A2A1 /* Icons.plist in Resources */ = {isa = PBXBuildFile; fileRef = BB0F552D239F8A790032A2A1 /* Icons.plist */; }; @@ -1646,8 +1645,9 @@ BB792B461F96D8E000FFB7A4 /* Project object */ = { isa = PBXProject; attributes = { + BuildIndependentTargetsInParallel = YES; CLASSPREFIX = ""; - LastUpgradeCheck = 1410; + LastUpgradeCheck = 1510; TargetAttributes = { 497F6CE3254E5E4C00C82F46 = { CreatedOnToolsVersion = 12.2; @@ -1792,7 +1792,6 @@ BB38599827BCEE6B000A1082 /* metal.png in Resources */, BB2B4DAD231D998300CB578B /* term.js in Resources */, BB38599A27BCEE6B000A1082 /* is.png in Resources */, - 5D9897D028B6B953003D3670 /* AppStore.xcconfig in Resources */, BB38599E27BCEE6B000A1082 /* circular.png in Resources */, BB38599F27BCEE6B000A1082 /* icon1337.png in Resources */, BB3859A127BCEE6B000A1082 /* iinhash.png in Resources */, diff --git a/iSH-AOK.xcodeproj/xcshareddata/xcschemes/Screenshots.xcscheme b/iSH-AOK.xcodeproj/xcshareddata/xcschemes/Screenshots.xcscheme index 403381165e..4930dc5ca0 100644 --- a/iSH-AOK.xcodeproj/xcshareddata/xcschemes/Screenshots.xcscheme +++ b/iSH-AOK.xcodeproj/xcshareddata/xcschemes/Screenshots.xcscheme @@ -1,6 +1,6 @@ general_lock, task->comm, task->pid) != _EBUSY) { + ilocked = true; // Make sure this is locked, and unlock it later if we had to lock it. + } + + pthread_mutex_lock(&task->reference.lock); + + if(((task->reference.count + value) < 0) && (task->pid > 9)) { // Prevent our unsigned value attempting to go negative. 
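/*
 * Aside: this guard exists because reference.count is unsigned, and unsigned
 * arithmetic is modular -- "going negative" actually wraps to a huge value.
 * A tiny standalone demonstration of the failure mode being prevented:
 */
#include <stdio.h>

int main(void) {
    unsigned count = 0;
    count = count - 1;      /* wraps around instead of becoming -1 */
    printf("%u\n", count);  /* prints 4294967295 with a 32-bit unsigned int */
    return 0;
}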
-mke + printk("ERROR: Attempt to decrement task reference count to be negative, ignoring(%s:%d) (%d - %d)\n", task->comm, task->pid, task->reference.count, value); + if(ilocked == true) + unlock(&task->general_lock); + + pthread_mutex_unlock(&task->reference.lock); + + return; + } + + task->reference.count = task->reference.count + value; + + pthread_mutex_unlock(&task->reference.lock); + + if(ilocked == true) + unlock(&task->general_lock); +} + dword_t get_count_of_blocked_tasks(void) { // task_ref_cnt_mod(current, 1); // Not needed? dword_t res = 0; @@ -337,19 +377,21 @@ void update_thread_name(void) { #endif } - -void task_ref_cnt_mod_wrapper(int value) { - // sync.h can't know about the definition of task struct due to recursive include files. -mke - if((current != NULL) && (doEnableExtraLocking)) - task_ref_cnt_mod(current, value); +inline void modify_locks_held_count(struct task *task, int value) { // value Should only be -1 or 1. -mke + if((task == NULL) && (current != NULL)) { + task = current; + } else { + return; + } - return; -} - -void modify_locks_held_count_wrapper(int value) { // sync.h can't know about the definition of struct due to recursive include files. -mke - if(current != NULL) - modify_locks_held_count(current, value); - return; + pthread_mutex_lock(&task->locks_held.lock); + if((task->locks_held.count + value < 0) && task->pid > 9) { + // if((task->pid > 2) && (!strcmp(task->comm, "init"))) // Why ask why? -mke + printk("ERROR: Attempt to decrement locks_held count below zero, ignoring\n"); + return; + } + task->locks_held.count = task->locks_held.count + value; + pthread_mutex_unlock(&task->locks_held.lock); } bool current_is_valid(void) { diff --git a/kernel/task.h b/kernel/task.h index 1484282e62..963643993e 100644 --- a/kernel/task.h +++ b/kernel/task.h @@ -12,9 +12,7 @@ #include "util/timer.h" #include "util/sync.h" -// extern void task_ref_cnt_mod_wrapper(int value); - -void task_ref_cnt_mod_wrapper(int value); +extern inline void task_ref_cnt_mod(struct task *task, int value); // Define a structure for the pending deletion queue struct task_pending_deletion { @@ -237,80 +235,22 @@ void update_thread_name(void); // of functions which can block the task, we mark our task as blocked and // unblock it after the function is executed. __attribute__((always_inline)) inline int task_may_block_start(void) { - task_ref_cnt_mod_wrapper(1); + task_ref_cnt_mod(current, 1); current->io_block = 1; return 0; } __attribute__((always_inline)) inline int task_may_block_end(void) { current->io_block = 0; - task_ref_cnt_mod_wrapper(-1); + task_ref_cnt_mod(current, -1); return 0; } #define TASK_MAY_BLOCK for (int i = task_may_block_start(); i < 1; task_may_block_end(), i++) -void modify_locks_held_count_wrapper(int value); void init_pending_queues(void); void cleanup_pending_deletions(void); -static inline void task_ref_cnt_mod(struct task *task, int value) { // value Should only be -1 or 1. -mke - // Keep track of how many threads are referencing this task - if(!doEnableExtraLocking) { // If they want to fly by the seat of their pants... -mke - return; - } - - if(task == NULL) { - if(current != NULL) { - task = current; - } else { - return; - } - } - - bool ilocked = false; - - if (trylocknl(&task->general_lock, task->comm, task->pid) != _EBUSY) { - ilocked = true; // Make sure this is locked, and unlock it later if we had to lock it. 
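/*
 * Aside: the ilocked flag above is the usual "conditional ownership" idiom:
 * take the lock only if it was free, remember whether we did, and release
 * only what we took.  In isolation, with hypothetical names -- and note that
 * a failed trylock can also mean a different thread holds the lock, so the
 * idiom is only safe where that case is acceptable:
 */
#include <pthread.h>

static void with_lock_if_free(pthread_mutex_t *m, void (*work)(void)) {
    int we_locked = (pthread_mutex_trylock(m) == 0);  /* 0 => we acquired it */
    work();
    if (we_locked)
        pthread_mutex_unlock(m);                      /* release only what we took */
}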
- } - - pthread_mutex_lock(&task->reference.lock); - - if(((task->reference.count + value) < 0) && (task->pid > 9)) { // Prevent our unsigned value attempting to go negative. -mke - printk("ERROR: Attempt to decrement task reference count to be negative, ignoring(%s:%d) (%d - %d)\n", task->comm, task->pid, task->reference.count, value); - if(ilocked == true) - unlock(&task->general_lock); - - pthread_mutex_unlock(&task->reference.lock); - - return; - } - - task->reference.count = task->reference.count + value; - - pthread_mutex_unlock(&task->reference.lock); - - if(ilocked == true) - unlock(&task->general_lock); -} - - -static inline void modify_locks_held_count(struct task *task, int value) { // value Should only be -1 or 1. -mke - if((task == NULL) && (current != NULL)) { - task = current; - } else { - return; - } - - pthread_mutex_lock(&task->locks_held.lock); - if((task->locks_held.count + value < 0) && task->pid > 9) { - // if((task->pid > 2) && (!strcmp(task->comm, "init"))) // Why ask why? -mke - printk("ERROR: Attempt to decrement locks_held count below zero, ignoring\n"); - return; - } - task->locks_held.count = task->locks_held.count + value; - pthread_mutex_unlock(&task->locks_held.lock); -} // static inline unsigned task_ref_cnt_get(struct task *task, unsigned lock_if_zero) { diff --git a/util/ro_locks.h b/util/ro_locks.h index f086fa6fcf..f7a6d6821c 100644 --- a/util/ro_locks.h +++ b/util/ro_locks.h @@ -14,8 +14,10 @@ #include "kernel/log.h" #include "pthread.h" -extern void modify_locks_held_count_wrapper(int value); -extern void task_ref_cnt_mod_wrapper(int value); +extern __thread struct task *current; + +extern inline void modify_locks_held_count(struct task *task, int value); +extern inline void task_ref_cnt_mod(struct task *task, int value); typedef struct { pthread_mutex_t m; @@ -51,7 +53,7 @@ static inline void unlock(lock_t *lock) { lock->owner = zero_init(pthread_t); lock->pid = -1; // lock->comm[0] = 0; - modify_locks_held_count_wrapper(-1); + modify_locks_held_count(current, -1); pthread_mutex_unlock(&lock->m); #if LOCK_DEBUG @@ -79,7 +81,7 @@ static inline void atomic_l_lockf(char lname[16], int skiplog) { if(!res) { // strlcpy((char *)&atomic_l_lock.comm, current_comm(current), 16); strlcpy((char *)&atomic_l_lock.lname, lname, 16); - modify_locks_held_count_wrapper(1); + modify_locks_held_count(current, 1); } else if (!skiplog) { printk("Error on locking lock (%s) Called from %s:%d\n", lname); } @@ -89,10 +91,10 @@ static inline void mylock(lock_t *lock, int log_lock) { // struct task *foo = current; // Debugging if(!strcmp(lock->lname, "task_creat_gen")) // kluge. 
This means the lock is new, and SHOULD be unlocked unlock(lock); - task_ref_cnt_mod_wrapper(1); + task_ref_cnt_mod(current, 1); pthread_mutex_lock(&lock->m); if(!log_lock) { - modify_locks_held_count_wrapper(1); + modify_locks_held_count(current, 1); } lock->owner = pthread_self(); //lock->pid = current_pid(current); @@ -102,7 +104,7 @@ static inline void mylock(lock_t *lock, int log_lock) { } else { strncpy(lock->comm, current_comm(current), 16); } */ - task_ref_cnt_mod_wrapper(-1); + task_ref_cnt_mod(current, -1); return; } @@ -118,7 +120,7 @@ static inline void atomic_l_unlockf(void) { atomic_l_lock.pid = -1; // Reset } - modify_locks_held_count_wrapper(-1); + modify_locks_held_count(current, -1); } static inline void complex_lockt(lock_t *lock, int log_lock) { @@ -140,13 +142,13 @@ static inline void complex_lockt(lock_t *lock, int log_lock) { // printk("ERROR: Possible deadlock, aborted lock attempt(PID: %d Process: %s) (Previously Owned:%s:%d)\n", // current_pid(current), current_comm(current), lock->comm, lock->pid); pthread_mutex_unlock(&lock->m); - modify_locks_held_count_wrapper(-1); + modify_locks_held_count(current, -1); } return; } } - modify_locks_held_count_wrapper(1); + modify_locks_held_count(current, 1); /* if (count > count_max * 0.90) { if (!log_lock) @@ -174,7 +176,7 @@ static inline int trylock(lock_t *lock) { } #endif // if((!status) && (current_pid(current) > 10)) {// iSH-AOK crashes if low number processes are not excluded. Might be able to go lower then 10? -mke - modify_locks_held_count_wrapper(1); + modify_locks_held_count(current, 1); //STRACE("trylock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); // lock->pid = current_pid(current); @@ -195,7 +197,7 @@ static inline int trylocknl(lock_t *lock, char *comm, int pid) { } #endif if(!status) {// iSH-AOK crashes if low number processes are not excluded. Might be able to go lower then 10? 
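/*
 * Aside, for readers following the !status checks in trylock()/trylocknl():
 * pthread_mutex_trylock() returns 0 on success and an error number (normally
 * EBUSY) when the mutex is already held, so !status reads as "we got the
 * lock".  Standalone usage example:
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

int main(void) {
    int status = pthread_mutex_trylock(&m);
    if (!status) {                    /* 0 => acquired */
        puts("got the lock");
        pthread_mutex_unlock(&m);
    } else {
        puts("busy, not blocking");   /* EBUSY => held elsewhere */
    }
    return 0;
}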
-mke - modify_locks_held_count_wrapper(1); + modify_locks_held_count(current, 1); //STRACE("trylock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); lock->pid = pid; diff --git a/util/rw_locks.h b/util/rw_locks.h index 9d6f287cc0..6beb86dae9 100644 --- a/util/rw_locks.h +++ b/util/rw_locks.h @@ -16,8 +16,8 @@ #include #include -//extern void modify_locks_held_count_wrapper(int value); -//extern void task_ref_cnt_mod_wrapper(int value); +extern inline void modify_locks_held_count(struct task *task, int value); +extern inline void task_ref_cnt_mod(struct task *task, int value); #define loop_lock_read(lock) loop_lock_generic(lock, 0) #define loop_lock_write(lock) loop_lock_generic(lock, 1) @@ -51,7 +51,7 @@ static inline void _read_unlock(wrlock_t *lock) { lock->val = 0; lock->pid = -1; lock->comm[0] = 0; - //modify_locks_held_count_wrapper(-1); + //modify_locks_held_count(current, -1); //STRACE("read_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); return; } @@ -60,7 +60,7 @@ static inline void _read_unlock(wrlock_t *lock) { // printk("URGENT: read_unlock(%x) error(PID: %d Process: %s)\n", lock, current_pid(current), current_comm(current)); printk("URGENT: read_unlock(%x) failed\n", lock); lock->val--; - //modify_locks_held_count_wrapper(-1); + //modify_locks_held_count(current, -1); //STRACE("read_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); } @@ -90,7 +90,7 @@ static inline void _write_unlock(wrlock_t *lock) { lock->comm[0] = 0; //STRACE("write_unlock(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); lock->file = NULL; - //modify_locks_held_count_wrapper(-1); + //modify_locks_held_count(current, -1); } static inline void write_unlock(wrlock_t *lock) { // Wrap it. External calls lock, internal calls using _write_unlock() don't -mke @@ -101,8 +101,8 @@ static inline void write_unlock(wrlock_t *lock) { // Wrap it. External calls lo } static inline void loop_lock_generic(wrlock_t *lock, int is_write) { - task_ref_cnt_mod_wrapper(1); - //modify_locks_held_count_wrapper(1); + task_ref_cnt_mod(current, 1); + //modify_locks_held_count(current, 1); unsigned count = 0; int random_wait = is_write ? WAIT_SLEEP + rand() % 100 : WAIT_SLEEP + rand() % WAIT_SLEEP/4; @@ -127,11 +127,11 @@ static inline void loop_lock_generic(wrlock_t *lock, int is_write) { atomic_l_lockf(is_write ? "llw\0" : "ll_read\0", 0); } - task_ref_cnt_mod_wrapper(-1); + task_ref_cnt_mod(current, -1); } static inline void _read_lock(wrlock_t *lock) { - task_ref_cnt_mod_wrapper(1); + task_ref_cnt_mod(current, 1); loop_lock_read(lock); //pthread_rwlock_rdlock(&lock->l); // assert(lock->val >= 0); // If it isn't >= zero we have a problem since that means there is a write lock somehow. -mke @@ -147,7 +147,7 @@ static inline void _read_lock(wrlock_t *lock) { if(lock->val > 1000) { // We likely have a problem. printk("WARNING: _read_lock(%x) has 1000+ pending read locks. 
(File: %s, Line: %d) Breaking likely deadlock/process corruption(PID: %d Process: %s.\n", lock, lock->file, lock->line,lock->pid, lock->comm); read_unlock_and_destroy(lock); - task_ref_cnt_mod_wrapper(-1); + task_ref_cnt_mod(current, -1); //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); return; } @@ -155,7 +155,7 @@ static inline void _read_lock(wrlock_t *lock) { /* lock->pid = current_pid(current); if(lock->pid > 9) strncpy((char *)lock->comm, current_comm(current), 16); */ - task_ref_cnt_mod_wrapper(-1); + task_ref_cnt_mod(current, -1); //STRACE("read_lock(%d, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); } @@ -187,30 +187,30 @@ static inline void write_lock(wrlock_t *lock) { static inline void read_to_write_lock(wrlock_t *lock) { // Try to atomically swap a RO lock to a Write lock. -mke - task_ref_cnt_mod_wrapper(1); + task_ref_cnt_mod(current, 1); atomic_l_lockf("rtw_lock\0", 0); _read_unlock(lock); _write_lock(lock); atomic_l_unlockf(); - task_ref_cnt_mod_wrapper(-1); + task_ref_cnt_mod(current, -1); } static inline void write_to_read_lock(wrlock_t *lock) { // Try to atomically swap a Write lock to a RO lock. -mke - task_ref_cnt_mod_wrapper(1); + task_ref_cnt_mod(current, 1); atomic_l_lockf("wtr_lock\0", 0); _write_unlock(lock); _read_lock(lock); atomic_l_unlockf(); - task_ref_cnt_mod_wrapper(-1); + task_ref_cnt_mod(current, -1); } static inline void write_unlock_and_destroy(wrlock_t *lock) { - task_ref_cnt_mod_wrapper(1); + task_ref_cnt_mod(current, 1); atomic_l_lockf("wuad_lock\0", 0); _write_unlock(lock); _lock_destroy(lock); atomic_l_unlockf(); - task_ref_cnt_mod_wrapper(-1); + task_ref_cnt_mod(current, -1); } static inline void read_unlock_and_destroy(wrlock_t *lock) { @@ -235,7 +235,7 @@ static inline int trylockw(wrlock_t *lock) { } #endif if(status == 0) { - //modify_locks_held_count_wrapper(1); + //modify_locks_held_count(current, 1); //STRACE("trylockw(%x, %s(%d), %s, %d\n", lock, lock->comm, lock->pid, file, line); //lock->pid = current_pid(current); //strncpy(lock->comm, current_comm(current), 16); diff --git a/util/sync.h b/util/sync.h index b6a192bf88..c5b1486944 100644 --- a/util/sync.h +++ b/util/sync.h @@ -16,9 +16,8 @@ #define LOCK_DEBUG 0 -// The following are in log.c. There should probably be in a log.h that gets included instead. +extern inline void modify_locks_held_count(struct task *task, int value); -unsigned locks_held_count_wrapper(void); // The following is in task.c extern struct pid *pid_get(dword_t id); @@ -79,7 +78,6 @@ static inline void sigunwind_end(void) { void cond_init(cond_t *cond); void cond_destroy(cond_t *cond); //static bool is_signal_pending(lock_t *lock); // Not used externally to sync.c, doesn't eneed to be exposed -unsigned locks_held_count_wrapper(void); int wait_for(cond_t *cond, lock_t *lock, struct timespec *timeout); int wait_for_ignore_signals(cond_t *cond, lock_t *lock, struct timespec *timeout); void notify(cond_t *cond); From e27707f6db90f1c49296bf63423573e9b18d2638 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Mon, 18 Dec 2023 12:34:24 -0800 Subject: [PATCH 21/23] o Changed build options as part of debugging o For Alpine this build seems very stable. 
Less so for Debian 10, for reasons that are not clear --- .../xcshareddata/xcschemes/iSH.xcscheme | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme b/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme index 12ef660172..33f18b8a60 100644 --- a/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme +++ b/iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme @@ -100,7 +100,9 @@ debugDocumentVersioning = "YES" debugServiceExtension = "internal" enableGPUValidationMode = "1" - allowLocationSimulation = "YES"> + allowLocationSimulation = "YES" + consoleMode = "0" + structuredConsoleMode = "1"> + + + + + + Date: Sat, 23 Dec 2023 15:13:04 -0800 Subject: [PATCH 22/23] o Mostly tweaking, with some groundwork for future enhancements --- fs/proc/pid.c | 6 +++- kernel/exec.c | 75 +++++++++++++++++++++++++------------------------ kernel/task.c | 23 +++++++-------- util/ro_locks.c | 1 + util/ro_locks.h | 68 ++++++++++++++++++++++++++++++++++---------- util/sync.c | 17 +++++++---- util/sync.h | 5 ++++ 7 files changed, 126 insertions(+), 69 deletions(-) diff --git a/fs/proc/pid.c b/fs/proc/pid.c index 9d02d72050..b36ac83495 100644 --- a/fs/proc/pid.c +++ b/fs/proc/pid.c @@ -23,10 +23,12 @@ static struct task *proc_get_task(struct proc_entry *entry) { struct task *task = pid_get_task(entry->pid); if (task == NULL) unlock(&pids_lock); + //task_ref_cnt_mod(task, 1); return task; } -static void proc_put_task(struct task *UNUSED(task)) { +static void proc_put_task(struct task *task) { unlock(&pids_lock); + //task_ref_cnt_mod(task, -1); } static int proc_pid_stat_show(struct proc_entry *entry, struct proc_data *buf) { @@ -307,10 +309,12 @@ static int proc_pid_fd_readlink(struct proc_entry *entry, char *buf) { static int proc_pid_exe_readlink(struct proc_entry *entry, char *buf) { struct task *task = proc_get_task(entry); + // task->mm->exefile->refcount++; // Always note interest as soon as possible if ((task == NULL) || task->exiting == true) return _ESRCH; lock(&task->general_lock, 0); int err = generic_getpath(task->mm->exefile, buf); + // task->mm->exefile->refcount--; unlock(&task->general_lock); proc_put_task(task); return err; diff --git a/kernel/exec.c b/kernel/exec.c index 59d21a7b3d..d51d776bb7 100644 --- a/kernel/exec.c +++ b/kernel/exec.c @@ -212,13 +212,14 @@ static intptr_t elf_exec(struct fd *fd, const char *file, struct exec_args argv, // general_lock protects current->mm. otherwise procfs might read the // pointer before it's released and then try to lock it after it's // released. 
-    lock(&current->general_lock, 0);
-    mm_release(current->mm);
-    task_set_mm(current, mm_new());
-    unlock(&current->general_lock);
-    write_lock(&current->mem->lock);
+    struct task* save = current;
+    lock(&save->general_lock, 0);
+    mm_release(save->mm);
+    task_set_mm(save, mm_new());
+    unlock(&save->general_lock);
+    write_lock(&save->mem->lock);
 
-    current->mm->exefile = fd_retain(fd);
+    save->mm->exefile = fd_retain(fd);
 
     addr_t load_addr = 0; // used for AX_PHDR
     bool load_addr_set = false;
@@ -248,8 +249,8 @@ static intptr_t elf_exec(struct fd *fd, const char *file, struct exec_args argv,
 
             // we have to know where the brk starts
             addr_t brk = bias + ph[i].vaddr + ph[i].memsize;
-            if (brk > current->mm->start_brk)
-                current->mm->start_brk = current->mm->brk = BYTES_ROUND_UP(brk);
+            if (brk > save->mm->start_brk)
+                save->mm->start_brk = save->mm->brk = BYTES_ROUND_UP(brk);
         }
 
     addr_t entry = bias + header.entry_point;
@@ -273,31 +274,31 @@ static intptr_t elf_exec(struct fd *fd, const char *file, struct exec_args argv,
     // FIXME disgusting hack: musl's dynamic linker has a one-page hole, and
     // I'd rather not put the vdso in that hole. so find a two-page hole and
     // add one.
-    page_t vdso_page = pt_find_hole(current->mem, vdso_pages + 1);
+    page_t vdso_page = pt_find_hole(save->mem, vdso_pages + 1);
     if (vdso_page == BAD_PAGE)
         goto beyond_hope;
     vdso_page += 1;
-    if ((err = pt_map(current->mem, vdso_page, vdso_pages, (void *) vdso_data, 0, 0)) < 0)
+    if ((err = pt_map(save->mem, vdso_page, vdso_pages, (void *) vdso_data, 0, 0)) < 0)
         goto beyond_hope;
-    mem_pt(current->mem, vdso_page)->data->name = "[vdso]";
-    current->mm->vdso = vdso_page << PAGE_BITS;
-    addr_t vdso_entry = current->mm->vdso + ((struct elf_header *) vdso_data)->entry_point;
+    mem_pt(save->mem, vdso_page)->data->name = "[vdso]";
+    save->mm->vdso = vdso_page << PAGE_BITS;
+    addr_t vdso_entry = save->mm->vdso + ((struct elf_header *) vdso_data)->entry_point;
 
     // map 3 empty "vvar" pages to satisfy ptraceomatic
-    page_t vvar_page = pt_find_hole(current->mem, VVAR_PAGES);
+    page_t vvar_page = pt_find_hole(save->mem, VVAR_PAGES);
     if (vvar_page == BAD_PAGE)
         goto beyond_hope;
-    if ((err = pt_map_nothing(current->mem, vvar_page, VVAR_PAGES, 0)) < 0)
+    if ((err = pt_map_nothing(save->mem, vvar_page, VVAR_PAGES, 0)) < 0)
         goto beyond_hope;
-    mem_pt(current->mem, vvar_page)->data->name = "[vvar]";
+    mem_pt(save->mem, vvar_page)->data->name = "[vvar]";
 
     // STACK TIME!
     // allocate 1 page of stack at 0xffffd, and let it grow down
-    if ((err = pt_map_nothing(current->mem, 0xffffd, 1, P_WRITE | P_GROWSDOWN)) < 0)
+    if ((err = pt_map_nothing(save->mem, 0xffffd, 1, P_WRITE | P_GROWSDOWN)) < 0)
         goto beyond_hope;
 
     // that was the last memory mapping
-    write_unlock(&current->mem->lock);
+    write_unlock(&save->mem->lock);
     dword_t sp = 0xffffe000;
     // on 32-bit linux, there's 4 empty bytes at the very bottom of the stack.
     // on 64-bit linux, there's 8. make ptraceomatic happy. (a major theme in this file)
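
The hunks above, and the ones that continue below, mechanically swap current-> for save->. The underlying idiom deserves one plain statement: read the thread-local pointer once at the top of the function and use only the snapshot afterward, so every later access is provably against the same task even if the thread-local were somehow reassigned mid-flight. A minimal sketch of just that idiom; the struct, fields, and do_work() are illustrative names, not iSH's definitions:

    #include <assert.h>
    #include <stdio.h>

    struct task { int pid; };              // stand-in for iSH's struct task
    static __thread struct task *current;  // thread-local, like the emulator's current

    /* Hypothetical worker: read the thread-local exactly once, then use only
     * the snapshot, so the function cannot observe two different tasks. */
    static void do_work(void) {
        struct task *save = current;       // the single read
        assert(save != NULL);
        printf("working on pid %d\n", save->pid);
        /* ... dozens more save->field accesses, as in elf_exec() ... */
    }

    int main(void) {
        struct task t = { .pid = 42 };
        current = &t;
        do_work();
        return 0;
    }

One caveat: if current really can change under a running function, the snapshot narrows the window but does not explain the corruption; the comment in kernel/task.c later in this patch says as much.
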
@@ -312,11 +313,11 @@
     addr_t envp_addr = sp = args_copy(sp, envp);
     if (sp == 0)
         goto beyond_hope;
-    current->mm->argv_end = sp;
+    save->mm->argv_end = sp;
     addr_t argv_addr = sp = args_copy(sp, argv);
     if (sp == 0)
         goto beyond_hope;
-    current->mm->argv_start = sp;
+    save->mm->argv_start = sp;
     sp = align_stack(sp);
 
     addr_t platform_addr = sp = copy_string(sp, "i686");
@@ -336,7 +337,7 @@
     // declare elf aux now so we can know how big it is
     struct aux_ent aux[] = {
         {AX_SYSINFO, vdso_entry},
-        {AX_SYSINFO_EHDR, current->mm->vdso},
+        {AX_SYSINFO_EHDR, save->mm->vdso},
         {AX_HWCAP, 0x00000000}, // suck that
         {AX_PAGESZ, PAGE_SIZE},
         {AX_CLKTCK, 0x64},
@@ -390,30 +391,30 @@
     p += sizeof(dword_t); // null terminator
 
     // copy auxv
-    current->mm->auxv_start = p;
+    save->mm->auxv_start = p;
     if (user_put(p, aux))
         goto beyond_hope;
     p += sizeof(aux);
-    current->mm->auxv_end = p;
+    save->mm->auxv_end = p;
 
-    current->mm->stack_start = sp;
-    current->cpu.esp = sp;
-    current->cpu.eip = entry;
-    current->cpu.fcw = 0x37f;
+    save->mm->stack_start = sp;
+    save->cpu.esp = sp;
+    save->cpu.eip = entry;
+    save->cpu.fcw = 0x37f;
 
     // This code was written when I discovered that the glibc entry point
     // interprets edx as the address of a function to call on exit, as
     // specified in the ABI. This register is normally set by the dynamic
     // linker, so everything works fine until you run a static executable.
-    current->cpu.eax = 0;
-    current->cpu.ebx = 0;
-    current->cpu.ecx = 0;
-    current->cpu.edx = 0;
-    current->cpu.esi = 0;
-    current->cpu.edi = 0;
-    current->cpu.ebp = 0;
-    collapse_flags(&current->cpu);
-    current->cpu.eflags = 0;
+    save->cpu.eax = 0;
+    save->cpu.ebx = 0;
+    save->cpu.ecx = 0;
+    save->cpu.edx = 0;
+    save->cpu.esi = 0;
+    save->cpu.edi = 0;
+    save->cpu.ebp = 0;
+    collapse_flags(&save->cpu);
+    save->cpu.eflags = 0;
 
     err = 0;
 out_free_interp:
@@ -429,7 +430,7 @@ static intptr_t elf_exec(struct fd *fd, const char *file, struct exec_args argv,
 beyond_hope:
     // TODO force sigsegv
-    write_unlock(&current->mem->lock);
+    write_unlock(&save->mem->lock);
     goto out_free_interp;
 }
 
diff --git a/kernel/task.c b/kernel/task.c
index df0a9f8ae0..b055ac8f7f 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -198,6 +198,7 @@ struct task *task_create_(struct task *parent) {
     task->locks_held.count = 0; // counter used to keep track of pending locks associated with task. Do not delete when locks are present. -mke
     task->reference.count = 0; // counter used to delay task deletion if positive. --mke
     task->reference.ready_to_be_freed = false;
+    pthread_mutex_init(&task->reference.lock, NULL);
     return task;
 }
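
The new pthread_mutex_init() line in task_create_() closes an init-order gap: reference.count was previously guarded by a mutex that nothing initialized. Reduced to a standalone sketch of the pattern (struct refcount and its helpers are illustrative names, not the iSH types):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* A counter with its own small mutex, so reference changes don't have to
     * take the big task locks. */
    struct refcount {
        pthread_mutex_t lock;
        int count;
        bool ready_to_be_freed;
    };

    static void refcount_init(struct refcount *r) {
        pthread_mutex_init(&r->lock, NULL); // must run before the first mod
        r->count = 0;
        r->ready_to_be_freed = false;
    }

    static void refcount_mod(struct refcount *r, int value) {
        pthread_mutex_lock(&r->lock);
        if (r->count + value < 0) {
            // underflow guard, same idea as task.c: complain and ignore
            fprintf(stderr, "refcount would go negative, ignoring\n");
        } else {
            r->count += value;
        }
        pthread_mutex_unlock(&r->lock); // every path releases the mutex
    }

    int main(void) {
        struct refcount r;
        refcount_init(&r);
        refcount_mod(&r, 1);
        refcount_mod(&r, -1);
        refcount_mod(&r, -1); // caught by the guard
        printf("count = %d\n", r.count);
        return 0;
    }
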
@@ -293,13 +294,14 @@ void run_at_boot(void) {  // Stuff we run only once, at boot time.
 }
 
 void task_run_current(void) {
-    struct cpu_state *cpu = &current->cpu;
+    struct task* save = current; // Because I kinda suspect that current gets messed up sometimes
+    struct cpu_state *cpu = &save->cpu;
     struct tlb tlb = {};
-    tlb_refresh(&tlb, &current->mem->mmu);
+    tlb_refresh(&tlb, &save->mem->mmu);
     while (true) {
-        read_lock(&current->mem->lock);
-        task_ref_cnt_mod(current, 1);
+        read_lock(&save->mem->lock);
+        task_ref_cnt_mod(save, 1);
 
         if(!doEnableMulticore) {
             pthread_mutex_lock(&multicore_lock);
@@ -307,21 +309,21 @@
 
         int interrupt = cpu_run_to_interrupt(cpu, &tlb);
 
-        read_unlock(&current->mem->lock);
+        read_unlock(&save->mem->lock);
         if(!doEnableMulticore)
             pthread_mutex_unlock(&multicore_lock);
 
         //struct timespec while_pause = {0 /*secs*/, WAIT_SLEEP /*nanosecs*/};
-        if(current->parent != NULL) {
-            current->parent->group->group_count_in_int++; // Keep track of how many children the parent has
+        if(save->parent != NULL) {
+            save->parent->group->group_count_in_int++; // Keep track of how many children the parent has
             handle_interrupt(interrupt);
-            current->parent->group->group_count_in_int--;
+            save->parent->group->group_count_in_int--;
         } else {
             handle_interrupt(interrupt);
        }
 
-        task_ref_cnt_mod(current, -1);
+        task_ref_cnt_mod(save, -1);
     }
 }
 
@@ -386,8 +388,7 @@ inline void modify_locks_held_count(struct task *task, int value) { // value Sho
 
     pthread_mutex_lock(&task->locks_held.lock);
     if((task->locks_held.count + value < 0) && task->pid > 9) {
-        // if((task->pid > 2) && (!strcmp(task->comm, "init")))  // Why ask why? -mke
-        printk("ERROR: Attempt to decrement locks_held count below zero, ignoring\n");
+        printk("ERROR: Attempt to decrement locks_held count below zero, ignoring\n");
         return;
     }
     task->locks_held.count = task->locks_held.count + value;
diff --git a/util/ro_locks.c b/util/ro_locks.c
index 1a452078c7..9fd2c8bd0b 100644
--- a/util/ro_locks.c
+++ b/util/ro_locks.c
@@ -23,6 +23,7 @@ lock_t atomic_l_lock;
 
 void lock_init(lock_t *lock, char lname[16]) {
     int ret = pthread_mutex_init(&lock->m, NULL);
+    // pthread_cond_init(&lock->cond, NULL);
    if (ret != 0) {
         // Handle the error according to your application's needs
         printk("ERROR: Failed to initialize mutex: %s:(%s)\n", lname, strerror(ret));
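
One thing the modify_locks_held_count() hunk above leaves in place: when the underflow guard fires, the function returns while task->locks_held.lock is still held (the pthread_mutex_unlock() further down is never reached), which would, as far as I can tell, deadlock the next caller. The usual cure is a single exit path; a minimal sketch with illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;
    static int counter;

    static void counter_mod(int value) {
        pthread_mutex_lock(&counter_lock);
        if (counter + value < 0) {
            fprintf(stderr, "ERROR: would go negative, ignoring\n");
            goto out;            // fall through to the single unlock
        }
        counter += value;
    out:
        pthread_mutex_unlock(&counter_lock); // reached on every path
    }

    int main(void) {
        counter_mod(-1);  // guarded, and the lock is still released
        counter_mod(5);
        printf("counter = %d\n", counter);
        return 0;
    }
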
diff --git a/util/ro_locks.h b/util/ro_locks.h
index f7a6d6821c..c9c3c2cddf 100644
--- a/util/ro_locks.h
+++ b/util/ro_locks.h
@@ -13,6 +13,7 @@
 #include "kernel/errno.h"
 #include "kernel/log.h"
 #include "pthread.h"
+#include <time.h> // For timespec and clock_gettime
 
 extern __thread struct task *current;
 
@@ -20,24 +21,25 @@ extern inline void modify_locks_held_count(struct task *task, int value);
 extern inline void task_ref_cnt_mod(struct task *task, int value);
 
 typedef struct {
-    pthread_mutex_t m;
-    pthread_t owner;
-    int pid;
-    int uid;
-    char comm[16];
-    char lname[16];
-    bool wait4;
+    pthread_mutex_t m; // Mutex for the lock
+    pthread_cond_t cond; // Condition variable for timeout
+    pthread_t owner; // Thread ID of the owner
+    int pid; // Process ID of the owner
+    int uid; // User ID of the owner
+    char comm[16]; // Command name associated with the owner
+    char lname[16]; // Name of the lock (for debugging/logging)
+    bool wait4; // Flag to indicate if the lock is in use
     struct {
-        pthread_mutex_t lock;
-        int count; // If positive, don't delete yet, wait_to_delete
-        bool ready_to_be_freed; // Should be false initially
+        pthread_mutex_t lock; // Additional lock for reference management
+        int count; // Reference count
+        bool ready_to_be_freed; // Flag to indicate if the object is ready to be freed
     } reference;
 #if LOCK_DEBUG
     struct lock_debug {
-        const char *file;
-        int line;
-        int pid;
-        bool initialized;
+        const char *file; // File where the lock was acquired (for debugging)
+        int line; // Line number where the lock was acquired (for debugging)
+        int pid; // Process ID when the lock was acquired (for debugging)
+        bool initialized; // Flag to indicate if the lock is initialized (for debugging)
     } debug;
 #endif
 } lock_t;
@@ -49,10 +51,15 @@ void lock_init(lock_t *lock, char lname[16]);
 
 static inline void unlock(lock_t *lock) {
     //pid_t pid = current_pid();
-
+    
     lock->owner = zero_init(pthread_t);
     lock->pid = -1;
     // lock->comm[0] = 0;
+    
+    /* lock->wait4 = false;
+    pthread_cond_signal(&lock->cond);
+    pthread_mutex_unlock(&lock->m);
+     */
     modify_locks_held_count(current, -1);
 
     pthread_mutex_unlock(&lock->m);
@@ -108,6 +115,34 @@ static inline void mylock(lock_t *lock, int log_lock) {
     return;
 }
 
+#define LOCK_TIMEOUT_SECONDS 5
+
+static inline int mylock_with_timeout(lock_t *lock, int log_lock) {
+    struct timespec timeout;
+    clock_gettime(CLOCK_REALTIME, &timeout);
+    timeout.tv_sec += LOCK_TIMEOUT_SECONDS;
+
+    pthread_mutex_lock(&lock->m);
+    while (lock->wait4) {
+        int res = pthread_cond_timedwait(&lock->cond, &lock->m, &timeout);
+        if (res == ETIMEDOUT) {
+            // Handle timeout: unlock mutex and return error code
+            pthread_mutex_unlock(&lock->m);
+            printk("ERROR: lock(%p) timeout\n", (void *)lock);
+            return ETIMEDOUT;
+        }
+    }
+    lock->wait4 = true;
+    pthread_mutex_unlock(&lock->m);
+
+    // Rest of the locking logic
+    task_ref_cnt_mod(current, 1);
+    lock->owner = pthread_self();
+    task_ref_cnt_mod(current, -1);
+
+    return 0; // Success
+}
+
 static inline void atomic_l_unlockf(void) {
     if(!doEnableExtraLocking)
         return;
@@ -206,8 +241,11 @@ static inline int trylocknl(lock_t *lock, char *comm, int pid) {
     return status;
 }
 
+//#define complex_lockt(lock, log_lock) mylock_with_timeout(lock, log_lock) // Let's try simplifying locking for now
+//#define complex_lockt(lock, log_lock) mylock(lock, log_lock) // Let's try simplifying locking for now
 
 #define lock(lock, log_lock) mylock(lock, log_lock)
+//#define lock(lock, log_lock) mylock_with_timeout(lock, log_lock)
 
 //#define trylock(lock) trylock(lock, __FILE__, __LINE__)
 //#define trylocknl(lock, comm, pid) trylocknl(lock, comm, pid, __FILE__, __LINE__)
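
mylock_with_timeout() above has the right condvar-based shape, but two supporting pieces are still disabled in this same patch: lock_init() leaves pthread_cond_init() commented out, and unlock() keeps its wait4-clearing/signal block commented out, so once wait4 goes true every later caller would time out. A self-contained version of the working pattern (tlock and its functions are my names, and the five-second deadline is as arbitrary as LOCK_TIMEOUT_SECONDS):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    struct tlock {
        pthread_mutex_t m;
        pthread_cond_t c;
        bool busy;
    };

    static int tlock_acquire(struct tlock *l, int timeout_secs) {
        struct timespec deadline;
        clock_gettime(CLOCK_REALTIME, &deadline); // default condvar clock
        deadline.tv_sec += timeout_secs;

        pthread_mutex_lock(&l->m);
        while (l->busy) {
            int res = pthread_cond_timedwait(&l->c, &l->m, &deadline);
            if (res == ETIMEDOUT) {
                pthread_mutex_unlock(&l->m);
                return ETIMEDOUT;   // caller must handle the failure
            }
        }
        l->busy = true;             // we own the lock now
        pthread_mutex_unlock(&l->m);
        return 0;
    }

    static void tlock_release(struct tlock *l) {
        pthread_mutex_lock(&l->m);
        l->busy = false;
        pthread_cond_signal(&l->c); // without this, waiters can only time out
        pthread_mutex_unlock(&l->m);
    }

    int main(void) {
        struct tlock l = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false };
        if (tlock_acquire(&l, 5) == 0) {
            tlock_release(&l);
            puts("acquired and released");
        }
        return 0;
    }

pthread_mutex_timedlock() would be simpler, but it is not available on Apple platforms, which is presumably why the mutex-plus-condvar route is attractive here.
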
diff --git a/util/sync.c b/util/sync.c
index af000f0606..90df48e697 100644
--- a/util/sync.c
+++ b/util/sync.c
@@ -11,13 +11,20 @@ extern bool doEnableExtraLocking;
 extern pthread_mutex_t wait_for_lock; // Synchronization lock
 
 void cond_init(cond_t *cond) {
-    pthread_condattr_t attr;
-    pthread_condattr_init(&attr);
+    pthread_condattr_t cond_attr;
+    pthread_condattr_init(&cond_attr);
 #if __linux__
-    pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
+    pthread_condattr_setclock(&cond_attr, CLOCK_MONOTONIC);
 #endif
-    pthread_cond_init(&cond->cond, &attr);
+    pthread_cond_init(&cond->cond, &cond_attr);
+    pthread_condattr_destroy(&cond_attr); // Clean up the condition variable attribute
+
+    // Initialize the mutex without specific attributes
+    pthread_mutex_init(&cond->reference.lock, NULL);
+
+    cond->reference.count = 0;
 }
+
 void cond_destroy(cond_t *cond) {
     pthread_cond_destroy(&cond->cond);
 }
@@ -59,7 +66,7 @@ int wait_for_ignore_signals(cond_t *cond, lock_t *lock, struct timespec *timeout
     struct lock_debug lock_tmp = lock->debug;
     lock->debug = (struct lock_debug) { .initialized = lock->debug.initialized };
 #endif
-    if (!timeout) { // We timeout anyway after fifteen seconds. It appears the process wakes up briefly before returning here if there is nothing else pending. This is kluge. -mke
+    if (!timeout) { // We timeout anyway after fifteen seconds. It appears the process wakes up briefly before returning here if there is nothing else pending. This is KLUGE. -mke
         struct timespec trigger_time;
         trigger_time.tv_sec = 15;
         trigger_time.tv_nsec = 0;
diff --git a/util/sync.h b/util/sync.h
index c5b1486944..6a10b5c671 100644
--- a/util/sync.h
+++ b/util/sync.h
@@ -39,6 +39,11 @@ extern lock_t atomic_l_lock; // Used to make all lock operations atomic, even re
 
 typedef struct {
     pthread_cond_t cond;
+    struct {
+        pthread_mutex_t lock;
+        int count; // If positive, don't delete yet, wait_to_delete
+        bool ready_to_be_freed; // Should be false initially
+    } reference;
 } cond_t;
 #define COND_INITIALIZER ((cond_t) {PTHREAD_COND_INITIALIZER})
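
A side effect of growing cond_t above: COND_INITIALIZER still spells out only the first member. C zero-fills the rest, and while count = 0 and ready_to_be_freed = false are the intended defaults, a zero-filled pthread_mutex_t is not guaranteed to be a usable mutex everywhere (Apple's PTHREAD_MUTEX_INITIALIZER, for one, is not all zeros), so statically initialized conds that never pass through cond_init() could misbehave. A sketch of a fuller initializer, with stand-in names:

    #include <pthread.h>
    #include <stdbool.h>

    /* Shape of cond_t after the hunk above (stand-in names). */
    typedef struct {
        pthread_cond_t cond;
        struct {
            pthread_mutex_t lock;
            int count;
            bool ready_to_be_freed;
        } reference;
    } cond_t_x;

    /* Name every member so static initialization stays portable. */
    #define COND_INITIALIZER_X { .cond = PTHREAD_COND_INITIALIZER, \
        .reference = { .lock = PTHREAD_MUTEX_INITIALIZER, .count = 0, \
                       .ready_to_be_freed = false } }

    int main(void) {
        cond_t_x example = COND_INITIALIZER_X;
        pthread_mutex_lock(&example.reference.lock);
        example.reference.count++;
        pthread_mutex_unlock(&example.reference.lock);
        return 0;
    }
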
From 083aec8e5400e6001d7775d89e23c53e4feb3c5a Mon Sep 17 00:00:00 2001
From: Mike Miller
Date: Sun, 24 Dec 2023 05:38:45 -0800
Subject: [PATCH 23/23] o Fixed one rare crash-the-app bug, failed to fix
 another. o Releasing anyway as it is still much more stable than previous
 releases

---
 fs/generic.c    | 24 ++++++++++++++----------
 kernel/user.c   |  4 ++--
 util/rw_locks.h |  2 +-
 3 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/fs/generic.c b/fs/generic.c
index 614ea2c083..532e4a8aeb 100644
--- a/fs/generic.c
+++ b/fs/generic.c
@@ -104,16 +104,20 @@ struct fd *generic_open(const char *path, int flags, int mode) {
 }
 
 int generic_getpath(struct fd *fd, char *buf) {
-    int err = fd->mount->fs->getpath(fd, buf);
-    if (err < 0)
-        return err;
-    if (strlen(buf) + strlen(fd->mount->point) >= MAX_PATH)
-        return _ENAMETOOLONG;
-    memmove(buf + strlen(fd->mount->point), buf, strlen(buf) + 1);
-    memcpy(buf, fd->mount->point, strlen(fd->mount->point));
-    if (buf[0] == '\0')
-        memcpy(buf, "/", 2);
-    return 0;
+    if(fd->ops != NULL) {
+        int err = fd->mount->fs->getpath(fd, buf);
+        if (err < 0)
+            return err;
+        if (strlen(buf) + strlen(fd->mount->point) >= MAX_PATH)
+            return _ENAMETOOLONG;
+        memmove(buf + strlen(fd->mount->point), buf, strlen(buf) + 1);
+        memcpy(buf, fd->mount->point, strlen(fd->mount->point));
+        if (buf[0] == '\0')
+            memcpy(buf, "/", 2);
+        return 0;
+    } else {
+        return -EBADF;
+    }
 }
 
 int generic_accessat(struct fd *dirfd, const char *path_raw, int mode) {
diff --git a/kernel/user.c b/kernel/user.c
index 0de32bee94..e798f11157 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -49,12 +49,12 @@ static int __user_write_task(struct task *task, addr_t addr, const void *buf, si
 }
 
 int user_read_task(struct task *task, addr_t addr, void *buf, size_t count) {
-    mem_ref_cnt_mod(task->mem, 1);
+    mem_ref_cnt_mod(task->mem, 10); // Try a large number. Oddly ends up being zero sometimes when __user_read_task() is invoked below
     read_lock(&task->mem->lock);
     int res = __user_read_task(task, addr, buf, count);
     read_unlock(&task->mem->lock);
 
-    mem_ref_cnt_mod(task->mem, -1);
+    mem_ref_cnt_mod(task->mem, -10);
     return res;
 }
 
diff --git a/util/rw_locks.h b/util/rw_locks.h
index 6beb86dae9..842d7660a5 100644
--- a/util/rw_locks.h
+++ b/util/rw_locks.h
@@ -82,7 +82,7 @@ static inline void _write_unlock(wrlock_t *lock) {
         printk("URGENT: write_unlock(%x:%d) error on unlock\n", lock, lock->val);
     if(lock->val != -1) {
         //printk("ERROR: write_unlock(%x) on lock with val of %d (PID: %d Process: %s )\n", lock, lock->val, current_pid(current), current_comm(current));
-        printk("ERROR: write_unlock(%x) on lock with val of %d\n", lock, lock->val);
+        // printk("ERROR: write_unlock(%x) on lock with val of %d\n", lock, lock->val); // Comment out for now. Much noise, little impact (So far as I can tell)
     }
     //assert(lock->val == -1);
     lock->val = lock->line = lock->pid = 0;