Skip to content

Commit

Permalink
Code cleanup and fix to mitigate ongoing brain damage
Browse files Browse the repository at this point in the history
  • Loading branch information
Mike Miller committed Nov 29, 2023
1 parent 73e9d22 commit c733475
Show file tree
Hide file tree
Showing 16 changed files with 16 additions and 111 deletions.
19 changes: 0 additions & 19 deletions emu/memory.c
Original file line number Diff line number Diff line change
Expand Up @@ -67,8 +67,6 @@ void mem_destroy(struct mem *mem) {
free(mem->pgdir[i]);
}

//mofify_critical_region_counter(current, 1, __FILE__, __LINE__);

do {
nanosleep(&lock_pause, NULL);
} while((critical_region_count(current) > 1) && (current->pid > 1) ); // Wait for now, task is in one or more critical sections
Expand All @@ -79,8 +77,6 @@ void mem_destroy(struct mem *mem) {

write_unlock_and_destroy(&mem->lock);

//mofify_critical_region_counter(current, -1, __FILE__, __LINE__);

}

#define PGDIR_TOP(page) ((page) >> 10)
Expand All @@ -97,52 +93,41 @@ static struct pt_entry *mem_pt_new(struct mem *mem, page_t page) {

// Look up the page-table entry for `page`, or return NULL if nothing is
// mapped there. Performs no locking; the caller must hold mem->lock.
// Likely still leaves a potential race condition as no locking currently. -MKE FIXME
struct pt_entry *mem_pt(struct mem *mem, page_t page) {
    struct pt_entry *pgdir = mem->pgdir[PGDIR_TOP(page)];
    if (pgdir == NULL)
        return NULL; // No second-level table allocated for this region.

    struct pt_entry *entry = &pgdir[PGDIR_BOTTOM(page)];
    if (entry->data == NULL)
        return NULL; // Slot exists but maps no data: treat as unmapped.

    return entry;
}

// Unmap `page` by clearing the data pointer in its page-table entry.
static void mem_pt_del(struct mem *mem, page_t page) {
    struct pt_entry *entry = mem_pt(mem, page);
    if (entry == NULL)
        return; // Nothing mapped at this page.
    // Back off while the task is deep inside critical sections. // mark
    while (critical_region_count(current) > 4)
        nanosleep(&lock_pause, NULL);
    entry->data = NULL;
}

// Advance *page to the next candidate mapped page, skipping over whole
// page-directory chunks that have no second-level table allocated.
void mem_next_page(struct mem *mem, page_t *page) {
    page_t p = *page + 1;
    // Jump chunk by chunk: when a top-level slot is empty, move straight
    // to the first page of the following chunk.
    while (p < MEM_PAGES && mem->pgdir[PGDIR_TOP(p)] == NULL)
        p = (p - PGDIR_BOTTOM(p)) + MEM_PGDIR_SIZE;
    *page = p;
}

page_t pt_find_hole(struct mem *mem, pages_t size) {
Expand Down Expand Up @@ -346,8 +331,6 @@ void *mem_ptr(struct mem *mem, addr_t addr, int type) {
if (type != MEM_WRITE_PTRACE && !(entry->flags & P_WRITE))
return NULL;

////mofify_critical_region_counter(current, 1, __FILE__, __LINE__);

if (type == MEM_WRITE_PTRACE) {
// TODO: Is P_WRITE really correct? The page shouldn't be writable without ptrace.
entry->flags |= P_WRITE | P_COW;
Expand All @@ -361,11 +344,9 @@ void *mem_ptr(struct mem *mem, addr_t addr, int type) {

if (entry->flags & P_COW) {
lock(&current->general_lock, 0); // prevent elf_exec from doing mm_release while we are in flight? -mke
//mofify_critical_region_counter(current, 1, __FILE__, __LINE__);
read_to_write_lock(&mem->lock);
void *copy = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
void *data = (char *) entry->data->data + entry->offset;
//mofify_critical_region_counter(current, -1, __FILE__, __LINE__);

// copy/paste from above
modify_critical_region_count(current, 1,__FILE__, __LINE__);
Expand Down
13 changes: 0 additions & 13 deletions emu/tlb.c
Original file line number Diff line number Diff line change
Expand Up @@ -5,16 +5,13 @@
#include "kernel/resource_locking.h"

// Re-bind the TLB to `mmu` and flush it, unless the TLB is already
// tracking this mmu at the current generation of memory changes.
void tlb_refresh(struct tlb *tlb, struct mmu *mmu) {
    bool already_current = (tlb->mmu == mmu) && (tlb->mem_changes == mmu->changes);
    if (already_current)
        return;
    tlb->mmu = mmu;
    tlb->mem_changes = mmu->changes;
    tlb->dirty_page = TLB_PAGE_EMPTY;
    tlb_flush(tlb);
}

void tlb_flush(struct tlb *tlb) {
Expand All @@ -24,48 +21,38 @@ void tlb_flush(struct tlb *tlb) {
}

// Release a heap-allocated TLB. Does not flush or detach it; the caller
// must ensure the TLB is no longer referenced.
void tlb_free(struct tlb *tlb) {
free(tlb);
}

// Read `size` bytes at `addr` when the range straddles a page boundary:
// resolve each of the two pages separately and stitch the halves into
// `value`. Returns false if either page cannot be resolved.
bool __tlb_read_cross_page(struct tlb *tlb, addr_t addr, char *value, unsigned size) {
    char *first_page = __tlb_read_ptr(tlb, addr);
    if (first_page == NULL)
        return false;
    char *second_page = __tlb_read_ptr(tlb, (PAGE(addr) + 1) << PAGE_BITS);
    if (second_page == NULL)
        return false;
    size_t head = PAGE_SIZE - PGOFFSET(addr);
    assert(head < size); // The access must genuinely cross the boundary.
    memcpy(value, first_page, head);
    memcpy(value + head, second_page, size - head);
    return true;
}

// Write `size` bytes at `addr` when the range straddles a page boundary:
// resolve each of the two pages separately and copy the matching slice of
// `value` into each. Returns false if either page cannot be resolved.
bool __tlb_write_cross_page(struct tlb *tlb, addr_t addr, const char *value, unsigned size) {
    char *first_page = __tlb_write_ptr(tlb, addr);
    if (first_page == NULL)
        return false;
    char *second_page = __tlb_write_ptr(tlb, (PAGE(addr) + 1) << PAGE_BITS);
    if (second_page == NULL)
        return false;
    size_t head = PAGE_SIZE - PGOFFSET(addr);
    assert(head < size); // The access must genuinely cross the boundary.
    memcpy(first_page, value, head);
    memcpy(second_page, value + head, size - head);
    return true;
}

Expand Down
4 changes: 0 additions & 4 deletions fs/proc/pid.c
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,6 @@ static int proc_pid_stat_show(struct proc_entry *entry, struct proc_data *buf) {
if ((task == NULL) || (task->exiting == true))
return _ESRCH;

////mofify_critical_region_counter(task, 1, __FILE__, __LINE__);
if(!strcmp(task->general_lock.lname, "task_creat_gen")) // Work around. Sometimes the general lock is locked when it shouldn't be
unlock(&task->general_lock);
lock(&task->general_lock, 0);
Expand Down Expand Up @@ -117,7 +116,6 @@ static int proc_pid_stat_show(struct proc_entry *entry, struct proc_data *buf) {
//unlock(&task->sighand->lock);
unlock(&task->group->lock);
unlock(&task->general_lock);
////mofify_critical_region_counter(task, -1, __FILE__, __LINE__);
proc_put_task(task);
return 0;
}
Expand Down Expand Up @@ -167,8 +165,6 @@ static int proc_pid_cmdline_show(struct proc_entry *entry, struct proc_data *buf
if ((task == NULL) || (task->exiting == true))
return _ESRCH;

////mofify_critical_region_counter(task, 1, __FILE__, __LINE__);

int err = 0;
lock(&task->general_lock, 0);

Expand Down
4 changes: 2 additions & 2 deletions fs/proc/root.c
Original file line number Diff line number Diff line change
Expand Up @@ -334,13 +334,13 @@ static bool proc_root_readdir(struct proc_entry *UNUSED(entry), unsigned long *i
pid++;
} while (pid <= MAX_PID && pid_get_task(pid) == NULL);
//unlock(&pids_lock);
modify_critical_region_count(current, -1, __FILE__, __LINE__);
if (pid > MAX_PID) {
modify_critical_region_count(current, -1, __FILE__, __LINE__);
return false;
}
*next_entry = (struct proc_entry) {&proc_pid, .pid = pid};
*index = pid + PROC_ROOT_LEN;
//mofify_critical_region_counter(current, -1, __FILE__, __LINE__);
modify_critical_region_count(current, -1, __FILE__, __LINE__);
return true;
}

Expand Down
19 changes: 2 additions & 17 deletions jit/jit.c
Original file line number Diff line number Diff line change
Expand Up @@ -87,9 +87,9 @@ void jit_invalidate_page(struct jit *jit, page_t page) {
while(critical_region_count(current) > 4) { // It's all a bit magic, but I think this is doing something useful. -mke
nanosleep(&lock_pause, NULL);
}
//mofify_critical_region_counter(current, 1, __FILE__, __LINE__);
// mofify_critical_region_count(current, 1, __FILE__, __LINE__);
jit_invalidate_range(jit, page, page + 1);
//mofify_critical_region_counter(current, -1, __FILE__, __LINE__);
// mofify_critical_region_count(current, -1, __FILE__, __LINE__);
}

void jit_invalidate_all(struct jit *jit) {
Expand Down Expand Up @@ -185,20 +185,11 @@ static void jit_block_disconnect(struct jit *jit, struct jit_block *block) {
}

// Detach `block` from the jit's bookkeeping structures, then release its
// memory. Disconnect must happen before free so no dangling links remain.
static void jit_block_free(struct jit *jit, struct jit_block *block) {
jit_block_disconnect(jit, block);
free(block);
}

static void jit_free_jetsam(struct jit *jit) {
/* if(!strcmp(current->comm, "go")) {
// Sleep for a bit if this is go. Kludge alert. -mke
struct timespec wait;
wait.tv_sec = 3; // Be anal and set both to zero. -mke
wait.tv_nsec = 0;
nanosleep(&wait, NULL);
} */
struct jit_block *block, *tmp;
list_for_each_entry_safe(&jit->jetsam, block, tmp, jetsam) {
list_remove(&block->jetsam);
Expand Down Expand Up @@ -227,7 +218,6 @@ static int cpu_step_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) {
addr_t ip = frame->cpu.eip;
size_t cache_index = jit_cache_hash(ip);
struct jit_block *block = cache[cache_index];
//////mofify_critical_region_counter(current, 1, __FILE__, __LINE__);
if (block == NULL || block->addr != ip) {
lock(&jit->lock, 0);
block = jit_lookup(jit, ip);
Expand All @@ -240,7 +230,6 @@ static int cpu_step_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) {
cache[cache_index] = block;
unlock(&jit->lock);
}
//////mofify_critical_region_counter(current, -1, __FILE__, __LINE__);
struct jit_block *last_block = frame->last_block;
if (last_block != NULL &&
(last_block->jump_ip[0] != NULL ||
Expand All @@ -253,18 +242,14 @@ static int cpu_step_to_interrupt(struct cpu_state *cpu, struct tlb *tlb) {
if (last_block->jump_ip[i] != NULL &&
(*last_block->jump_ip[i] & 0xffffffff) == block->addr) {
*last_block->jump_ip[i] = (unsigned long) block->code;
//mofify_critical_region_counter(current, 1, __FILE__, __LINE__);
list_add(&block->jumps_from[i], &last_block->jumps_from_links[i]);
//mofify_critical_region_counter(current, -1, __FILE__, __LINE__);
}
}
}

unlock(&jit->lock);
}

//////mofify_critical_region_counter(current, -1, __FILE__, __LINE__);

frame->last_block = block;

// block may be jetsam, but that's ok, because it can't be freed until
Expand Down
1 change: 0 additions & 1 deletion kernel/calls.c
Original file line number Diff line number Diff line change
Expand Up @@ -357,7 +357,6 @@ void handle_page_fault_interrupt(struct cpu_state *cpu) {
.code = mem_segv_reason(current->mem, cpu->segfault_addr),
.fault.addr = cpu->segfault_addr,
};
//current->zombie = true;
dump_stack(8);
deliver_signal(current, SIGSEGV_, info);
}
Expand Down
2 changes: 0 additions & 2 deletions kernel/exec.c
Original file line number Diff line number Diff line change
Expand Up @@ -109,12 +109,10 @@ static int load_entry(struct prg_header ph, addr_t bias, struct fd *fd) {
if (tail_size != 0) {
// Unlock and lock the mem because the user functions must be
// called without locking mem.
//mofify_critical_region_counter(current, 1, __FILE__, __LINE__);
if(trylockw(&current->mem->lock)) // Test to see if it is actually locked. This is likely masking an underlying problem. -mke
write_unlock(&current->mem->lock, __FILE__, __LINE__);
user_memset(file_end, 0, tail_size);
write_lock(&current->mem->lock);
//mofify_critical_region_counter(current, -1, __FILE__, __LINE__);
}
if (tail_size > bss_size)
tail_size = bss_size;
Expand Down
4 changes: 2 additions & 2 deletions kernel/futex.c
Original file line number Diff line number Diff line change
Expand Up @@ -301,10 +301,10 @@ dword_t sys_futex(addr_t uaddr, dword_t op, dword_t val, addr_t timeout_or_val2,
switch (op & FUTEX_CMD_MASK_) {
case FUTEX_WAIT_:
STRACE("futex(FUTEX_WAIT, %#x, %d, 0x%x {%ds %dns}) = ...\n", uaddr, val, timeout_or_val2, timeout.tv_sec, timeout.tv_nsec);
modify_critical_region_count(current, 1, __FILE__, __LINE__);
//modify_critical_region_count(current, 1, __FILE__, __LINE__);
dword_t return_val;
return_val = futex_wait(uaddr, val, timeout_or_val2 ? &timeout : NULL);
modify_critical_region_count(current, -1, __FILE__, __LINE__);
//modify_critical_region_count(current, -1, __FILE__, __LINE__);
return return_val;
case FUTEX_WAKE_:
STRACE("futex(FUTEX_WAKE, %#x, %d)", uaddr, val);
Expand Down
4 changes: 1 addition & 3 deletions kernel/log.c
Original file line number Diff line number Diff line change
Expand Up @@ -117,11 +117,9 @@ static size_t do_syslog(int type, addr_t buf_addr, int_t len) {
}
}
// syscall entry for syslog(2): serialize access to the kernel log buffer
// with log_lock and delegate the real work to do_syslog.
size_t sys_syslog(int_t type, addr_t buf_addr, int_t len) {
    size_t result;
    lock(&log_lock, 0);
    result = do_syslog(type, buf_addr, len);
    unlock(&log_lock);
    return result;
}

Expand Down Expand Up @@ -264,8 +262,8 @@ int current_uid(void) {
}

char * current_comm(void) {
static char comm[16];
modify_critical_region_count(current, 1, __FILE__, __LINE__);
static char comm[16];
if(current != NULL) {
if(strcmp(current->comm, "")) {
strncpy(comm, current->comm, 16);
Expand Down
4 changes: 0 additions & 4 deletions kernel/mmap.c
Original file line number Diff line number Diff line change
Expand Up @@ -106,11 +106,9 @@ static addr_t mmap_common(addr_t addr, dword_t len, dword_t prot, dword_t flags,
if ((flags & MMAP_PRIVATE) && (flags & MMAP_SHARED))
return _EINVAL;

//mofify_critical_region_counter(current, 1, __FILE__, __LINE__);
write_lock(&current->mem->lock);
addr_t res = do_mmap(addr, len, prot, flags, fd_no, offset);
write_unlock(&current->mem->lock, __FILE__, __LINE__);
//mofify_critical_region_counter(current, -1, __FILE__, __LINE__);
return res;
}

Expand Down Expand Up @@ -156,11 +154,9 @@ int_t sys_munmap(addr_t addr, uint_t len) {
if (len == 0)
return _EINVAL;

//mofify_critical_region_counter(current, 1, __FILE__, __LINE__);
write_lock(&current->mem->lock);
int err = pt_unmap_always(current->mem, PAGE(addr), PAGE_ROUND_UP(len));
write_unlock(&current->mem->lock, __FILE__, __LINE__);
//mofify_critical_region_counter(current, -1, __FILE__, __LINE__);

if (err < 0)
return _EINVAL;
Expand Down
Loading

0 comments on commit c733475

Please sign in to comment.