Commit 68db120

Another checkpoint. Note that STRACE is currently defined to log to the kernel buffer. WIP

Mike Miller committed Dec 9, 2023
1 parent 0f8681f commit 68db120
Showing 10 changed files with 48 additions and 23 deletions.
3 changes: 3 additions & 0 deletions app/AppDelegate.m
@@ -45,6 +45,7 @@ @interface AppDelegate ()
static void ios_handle_exit(struct task *task, int code) {
// we are interested in init and in children of init
// this is called with pids_lock as an implementation side effect, please do not cite as an example of good API design
task_ref_cnt_mod(task, 1);
lock(&task->general_lock, 0);
complex_lockt(&pids_lock, 0);
if(task->pid > MAX_PID) {// Corruption
@@ -57,13 +58,15 @@ static void ios_handle_exit(struct task *task, int code) {
if (task->parent != NULL && task->parent->parent != NULL) {
unlock(&pids_lock);
unlock(&task->general_lock);
task_ref_cnt_mod(task, 1);
return;
}
// pid should be saved now since task would be freed
pid_t pid = task->pid;

unlock(&pids_lock);
unlock(&task->general_lock);
task_ref_cnt_mod(task, 1);
dispatch_async(dispatch_get_main_queue(), ^{
[[NSNotificationCenter defaultCenter] postNotificationName:ProcessExitedNotification
object:nil
3 changes: 2 additions & 1 deletion debug.h
@@ -78,7 +78,8 @@ extern int log_override;
extern void (*die_handler)(const char *msg);
_Noreturn void die(const char *msg, ...);

#define STRACE(msg, ...) TRACE_(strace, msg, ##__VA_ARGS__)
// #define STRACE(msg, ...) TRACE_(strace, msg, ##__VA_ARGS__)
#define STRACE(fmt, ...) printk(fmt, ##__VA_ARGS__)

#if defined(__i386__) || defined(__x86_64__)
#define debugger __asm__("int3")
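
For context on the commit message: with this change every STRACE() call site expands to printk(), so syscall traces land in the kernel log buffer rather than the strace trace channel. A minimal sketch of the expansion (the openat example and its arguments are illustrative, not taken from this commit):

// old: STRACE routed through the strace trace channel
//   #define STRACE(msg, ...) TRACE_(strace, msg, ##__VA_ARGS__)
// new: STRACE goes straight to the kernel log buffer
//   #define STRACE(fmt, ...) printk(fmt, ##__VA_ARGS__)
STRACE("openat(%d, \"%s\", %#x)", fd, path, flags);
// now preprocesses to
printk("openat(%d, \"%s\", %#x)", fd, path, flags);
// ##__VA_ARGS__ swallows the trailing comma when no varargs are supplied,
// so STRACE("plain message") still compiles.
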
15 changes: 7 additions & 8 deletions emu/memory.c
@@ -193,7 +193,7 @@ int pt_unmap(struct mem *mem, page_t start, pages_t pages) {

int pt_unmap_always(struct mem *mem, page_t start, pages_t pages) {
for (page_t page = start; page < start + pages; mem_next_page(mem, &page)) {
while(task_ref_cnt_get(current, 0) >3) {
while(mem_ref_cnt_get(mem) > 1) { // Being 1 is normal as pt_copy_on_write() increments the ref count
nanosleep(&lock_pause, NULL);
}
struct pt_entry *pt = mem_pt(mem, page);
@@ -207,7 +207,7 @@ int pt_unmap_always(struct mem *mem, page_t start, pages_t pages) {
if (--data->refcount == 0) {
// vdso wasn't allocated with mmap, it's just in our data segment
if (data->data != vdso_data) {
while(task_ref_cnt_get(current, 0) > 3) {
while(mem_ref_cnt_get(mem)) {
nanosleep(&lock_pause, NULL);
}
int err = munmap(data->data, data->size);
@@ -255,9 +255,8 @@ int pt_set_flags(struct mem *mem, page_t start, pages_t pages, int flags) {
}

int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t pages) {
while(task_ref_cnt_get(current, 0) > 1) { // Will be at least 1, anything higher means another thread is accessing
nanosleep(&lock_pause, NULL);
}
mem_ref_cnt_mod(src, 1);
mem_ref_cnt_mod(dst, 1);
for (page_t page = start; page < start + pages; mem_next_page(src, &page)) {
struct pt_entry *entry = mem_pt(src, page);
if (entry == NULL)
@@ -272,11 +271,11 @@ int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t page
dst_entry->offset = entry->offset;
dst_entry->flags = entry->flags;
}
while(task_ref_cnt_get(current, 0) > 1) { // Wait for now, task is in one or more critical sections
nanosleep(&lock_pause, NULL);
}
mem_changed(src);
mem_changed(dst);
mem_ref_cnt_mod(src, -1);
mem_ref_cnt_mod(dst, -1);

return 0;
}

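The net effect in this file: pt_copy_on_write() no longer spins on the task reference count; it pins both address spaces with mem_ref_cnt_mod() for the duration of the copy, and the unmap paths wait on mem_ref_cnt_get() for those pins to drain. A condensed sketch of that interplay, using only the helpers already visible in these hunks:

// copier side (pt_copy_on_write): pin src and dst around the page loop
mem_ref_cnt_mod(src, 1);
mem_ref_cnt_mod(dst, 1);
// ... copy the page table entries ...
mem_changed(src);
mem_changed(dst);
mem_ref_cnt_mod(src, -1);
mem_ref_cnt_mod(dst, -1);

// unmapper side (pt_unmap_always): wait for the pins to drain before touching
// the page tables; a count of 1 is treated as normal here, per the in-line
// comment, since a copier holds one pin while it works
while (mem_ref_cnt_get(mem) > 1)
    nanosleep(&lock_pause, NULL);
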
4 changes: 2 additions & 2 deletions fs/poll.c
@@ -330,7 +330,7 @@ void poll_destroy(struct poll *poll) {
struct poll_fd *poll_fd;
struct poll_fd *tmp;

while(task_ref_cnt_get(current, 0)) {
while(task_ref_cnt_get(current, 0) > 1) {
nanosleep(&lock_pause, NULL);
}
list_for_each_entry_safe(&poll->poll_fds, poll_fd, tmp, fds) {
@@ -341,7 +341,7 @@
free(poll_fd);
}

while(task_ref_cnt_get(current, 0)) {
while(task_ref_cnt_get(current, 0) > 1) {
nanosleep(&lock_pause, NULL);
}

6 changes: 6 additions & 0 deletions jit/jit.h
@@ -29,6 +29,12 @@ struct jit {
struct {
struct list blocks[2];
} *page_hash;

struct {
pthread_mutex_t lock;
int count; // If positive, don't delete yet, wait_to_delete
bool ready_to_be_freed; // Should be false initially
} reference;

lock_t lock;
wrlock_t jetsam_lock;
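
The new reference block mirrors the task-side counter that kernel/task.c already drives with task_ref_cnt_mod(). A hypothetical modifier is sketched below to show how the fields are presumably meant to interact; jit_ref_cnt_mod() is not part of this commit and the name is assumed:

// Hypothetical helper, not in this commit: bump or drop the jit reference
// count under its own mutex, mirroring task_ref_cnt_mod() in kernel/task.c.
static void jit_ref_cnt_mod(struct jit *jit, int value) { // value should be -1 or 1
    pthread_mutex_lock(&jit->reference.lock);
    jit->reference.count += value;
    pthread_mutex_unlock(&jit->reference.lock);
}

// A teardown path would then wait for the count to drain before marking the
// jit as safe to free:
//   while (jit->reference.count > 0)
//       nanosleep(&lock_pause, NULL);
//   jit->reference.ready_to_be_freed = true;
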
15 changes: 11 additions & 4 deletions kernel/exec.c
@@ -102,27 +102,34 @@ static int load_entry(struct prg_header ph, addr_t bias, struct fd *fd) {
// of the load entry or the end of the page, whichever comes first
addr_t file_end = addr + filesize;
dword_t tail_size = PAGE_SIZE - PGOFFSET(file_end);

if (tail_size == PAGE_SIZE)
// if you can calculate tail_size better and not have to do this please let me know
tail_size = 0;

if (tail_size != 0) {
// Unlock and lock the mem because the user functions must be
// called without locking mem.
if(trylockw(&current->mem->lock)) // Test to see if it is actually locked. This is likely masking an underlying problem. -mke
if(trylockw(&current->mem->lock)) // Test to see if it is actually locked. This is likely masking an underlying problem. -mke
write_unlock(&current->mem->lock);

mem_ref_cnt_mod(current->mem, 1);
user_memset(file_end, 0, tail_size);
write_lock(&current->mem->lock);
mem_ref_cnt_mod(current->mem, -1);
}
if (tail_size > bss_size)
tail_size = bss_size;

// then map the pages from after the file mapping up to and including the end of bss
if (bss_size - tail_size != 0)
if ((err = pt_map_nothing(current->mem, PAGE_ROUND_UP(addr + filesize),
PAGE_ROUND_UP(bss_size - tail_size), flags)) < 0)
return err;

if ((err = pt_map_nothing(current->mem, PAGE_ROUND_UP(addr + filesize),
PAGE_ROUND_UP(bss_size - tail_size), flags)) < 0)

return err;
}

return 0;
}

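For orientation, the reshuffled tail handling zero-fills the partial page after the file mapping while mem->lock is dropped, pinning the address space with the new mem reference count instead of holding the write lock across user_memset(). A condensed sketch of the intended sequence (simplified; the trylockw() guard in the actual hunk is omitted here):

// zero the tail of the last file-backed page; the user_* helpers must be
// called without mem->lock held, so pin the mem instead
write_unlock(&current->mem->lock);
mem_ref_cnt_mod(current->mem, 1);        // keep the address space alive
user_memset(file_end, 0, tail_size);     // clear up to the page boundary
write_lock(&current->mem->lock);
mem_ref_cnt_mod(current->mem, -1);
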
6 changes: 4 additions & 2 deletions kernel/log.c
@@ -214,8 +214,9 @@ inline int current_pid(void) {
task_ref_cnt_mod(current, 1);
if(current != NULL) {
if (current->exiting != true) {
int tmp = current->pid;
task_ref_cnt_mod(current, -1);
return current->pid;
return tmp;
} else {
task_ref_cnt_mod(current, -1);
return -1;
@@ -230,8 +231,9 @@ inline int current_uid(void) {
task_ref_cnt_mod(current, 1);
if(current != NULL) {
if (current->exiting != true) {
int tmp = current->uid;
task_ref_cnt_mod(current, -1);
return current->uid;
return tmp;
} else {
task_ref_cnt_mod(current, -1);
return -1;
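
Both hunks apply the same fix: copy the field while the task reference is still held, drop the reference, then return the copy, so current is never read after the count has been released. The shape of the pattern:

// before: the reference was dropped and current->pid was read afterwards
// after:  read while pinned, unpin, then return the saved copy
task_ref_cnt_mod(current, 1);   // pin
int tmp = current->pid;         // read while pinned
task_ref_cnt_mod(current, -1);  // unpin
return tmp;                     // no access to current past this point
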
6 changes: 3 additions & 3 deletions kernel/signal.c
@@ -466,9 +466,9 @@ struct sighand *sighand_copy(struct sighand *sighand) {
}

void sighand_release(struct sighand *sighand) {
while(task_ref_cnt_get(current, 0) > 2) { // Wait for now, task is in one or more critical sections
nanosleep(&lock_pause, NULL);
}
// while(task_ref_cnt_get(current, 0) > 1) { // Wait for now, task is in one or more critical sections
// nanosleep(&lock_pause, NULL);
// }
if (--sighand->refcount == 0) {
free(sighand);
}
5 changes: 2 additions & 3 deletions kernel/task.c
@@ -169,8 +169,8 @@ bool should_wait(struct task *t) {

void task_destroy(struct task *task, int caller) {
if(trylock(&task->general_lock) == (_EBUSY)) { // Get it if a lock does not exist
task->exiting = true;
lock(&task->general_lock, 0);
task->exiting = true;
}

//printk("TD(%s:%d): Called by %d\n", task->comm, task->pid, caller);
Expand Down Expand Up @@ -258,8 +258,8 @@ void task_run_current(void) {
tlb_refresh(&tlb, &current->mem->mmu);

while (true) {
task_ref_cnt_mod(current, 1);
read_lock(&current->mem->lock);
task_ref_cnt_mod(current, 1);

if(!doEnableMulticore) {
pthread_mutex_lock(&multicore_lock);
@@ -369,7 +369,6 @@ void task_ref_cnt_mod(struct task *task, int value) { // value Should only be -1
return;
}


task->reference.count = task->reference.count + value;

pthread_mutex_unlock(&task->reference.lock);
8 changes: 8 additions & 0 deletions kernel/user.c
@@ -61,15 +61,23 @@ int user_read(addr_t addr, void *buf, size_t count) {

int user_write_task(struct task *task, addr_t addr, const void *buf, size_t count) {
read_lock(&task->mem->lock);
task_ref_cnt_mod(current, 1);
mem_ref_cnt_mod(current->mem, 1);
int res = __user_write_task(task, addr, buf, count, false);
read_unlock(&task->mem->lock);
task_ref_cnt_mod(current, -1);
mem_ref_cnt_mod(current->mem, -1);
return res;
}

int user_write_task_ptrace(struct task *task, addr_t addr, const void *buf, size_t count) {
read_lock(&task->mem->lock);
task_ref_cnt_mod(current, 1);
mem_ref_cnt_mod(current->mem, 1);
int res = __user_write_task(task, addr, buf, count, true);
read_unlock(&task->mem->lock);
task_ref_cnt_mod(current, -1);
mem_ref_cnt_mod(current->mem, -1);
return res;
}

