Skip to content

Commit

Permalink
o Made most of the rw locking static inline to improve performance. Reliability on the parallel compile test has never been this good before.
Browse files Browse the repository at this point in the history

o Still a WIP though
  • Loading branch information
Mike Miller committed Dec 16, 2023
1 parent 203871c commit f6f3cf6
Show file tree
Hide file tree
Showing 3 changed files with 219 additions and 226 deletions.
1 change: 0 additions & 1 deletion util/ro_locks.h
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@
#include "kernel/errno.h"
#include "kernel/log.h"
#include "pthread.h"
//#include "util/sync.h"

extern void modify_locks_held_count_wrapper(int value);
extern void task_ref_cnt_mod_wrapper(int value);
Expand Down
209 changes: 0 additions & 209 deletions util/rw_locks.c
Original file line number Diff line number Diff line change
Expand Up @@ -16,118 +16,6 @@ bool current_is_valid(void);
// on darwin pthread_rwlock_t is already like this, on linux you can configure
// it to prefer writers. not worrying about anything else right now.

// Spin until the underlying pthread rwlock can be acquired: write lock when
// is_write is nonzero, read lock otherwise. Between failed attempts the
// global atomic lock is dropped, we nanosleep for a small randomized
// interval (jitter reduces lock-step contention between spinning tasks),
// then the atomic lock is re-taken. After roughly count_max failed attempts
// handle_lock_error() is invoked to break a presumed deadlock.
// NOTE(review): modify_locks_held_count_wrapper(1) runs before the lock is
// actually held; the matching decrement appears to live in the unlock
// paths -- confirm the accounting balances on the error path too.
// NOTE(review): `rand() % WAIT_SLEEP/4` parses as `(rand() % WAIT_SLEEP)/4`
// -- confirm that is the intended jitter range.
void loop_lock_generic(wrlock_t *lock, int is_write) {
task_ref_cnt_mod(current, 1); // keep the current task pinned while we spin
modify_locks_held_count_wrapper(1);

unsigned count = 0;
// Randomized pause (nanoseconds) and the retry budget before declaring trouble.
int random_wait = is_write ? WAIT_SLEEP + rand() % 100 : WAIT_SLEEP + rand() % WAIT_SLEEP/4;
struct timespec lock_pause = {0, random_wait};
long count_max = (WAIT_MAX_UPPER - random_wait);
count_max = (is_write && count_max < 25000) ? 25000 : count_max; // writers get a minimum retry budget
while((is_write ? pthread_rwlock_trywrlock(&lock->l) : pthread_rwlock_tryrdlock(&lock->l))) {
count++;
if(count > count_max) {
// Retry budget exhausted: assume deadlock and attempt recovery.
handle_lock_error(lock, is_write ? "loop_lock_write" : "loop_lock_read");
count = 0;
}
// Drop the atomic lock while sleeping so other tasks can make progress,
// then re-take it before the next try.
atomic_l_unlockf();
nanosleep(&lock_pause, NULL);
atomic_l_lockf(is_write ? "llw\0" : "ll_read\0", 0);
}

task_ref_cnt_mod(current, -1);
}



// Acquire a read (shared) lock on `lock` by spinning in loop_lock_read().
// Caller must already hold the global atomic lock (see read_lock() wrapper).
// lock->val counts active readers; a negative value would mean a writer
// somehow holds the lock, which should be impossible here.
void _read_lock(wrlock_t *lock) {
    task_ref_cnt_mod(current, 1); // pin current task while we touch the lock
    loop_lock_read(lock);

    // Bug fix: the old chain (if(val) / else if(val > -1) / else) incremented
    // in every branch, but the ERROR branch was unreachable because any
    // negative val already matched the first `if(lock->val)` test.
    // Report the insane state first, then count the reader.
    if (lock->val < 0)
        printk("ERROR: _read_lock() val is %d\n", lock->val);
    lock->val++;

    if (lock->val > 1000) { // We likely have a problem: piled-up readers.
        printk("WARNING: _read_lock(%x) has 1000+ pending read locks. (File: %s, Line: %d) Breaking likely deadlock/process corruption(PID: %d Process: %s.\n", lock, lock->file, lock->line,lock->pid, lock->comm);
        read_unlock_and_destroy(lock);
        task_ref_cnt_mod(current, -1);
        return;
    }

    // Record the owner for diagnostics. PIDs <= 9 skip the comm copy
    // (presumably early/kernel-ish tasks -- TODO confirm).
    lock->pid = current_pid(current);
    if (lock->pid > 9) {
        strncpy((char *)lock->comm, current_comm(current), 16);
        // strncpy does not NUL-terminate when the source fills the 16-byte
        // bound; force-terminate so later "%s" printing of comm is safe.
        ((char *)lock->comm)[15] = '\0';
    }
    task_ref_cnt_mod(current, -1);
}

// Public entry point for taking a read lock: serializes lock bookkeeping
// through the global atomic lock, then defers to _read_lock().
void read_lock(wrlock_t *lock) { // Wrapper so that external calls lock, internal calls using _read_unlock() don't -mke
atomic_l_lockf("r_lock\0", 0);
_read_lock(lock);
atomic_l_unlockf();
}

// Drop one reader from `lock`. Caller holds the global atomic lock unless
// it owns the lock itself (see read_unlock()).
void _read_unlock(wrlock_t *lock) {
    if (lock->val <= 0) {
        // Unbalanced unlock: repair the bookkeeping instead of going negative.
        // Bug fix: the old format string ended in "(%s:%d)" with no matching
        // arguments -- a format/argument mismatch and undefined behavior.
        printk("ERROR: read_unlock(%x) error(PID: %d Process: %s count %d)\n", lock, current_pid(current), current_comm(current), lock->val);
        lock->val = 0;
        lock->pid = -1;
        lock->comm[0] = 0;
        modify_locks_held_count_wrapper(-1);
        return;
    }
    assert(lock->val > 0);
    if (pthread_rwlock_unlock(&lock->l) != 0)
        printk("URGENT: read_unlock(%x) error(PID: %d Process: %s)\n", lock, current_pid(current), current_comm(current));
    lock->val--;
    modify_locks_held_count_wrapper(-1);
}

// Release a read lock taken via read_lock(). The global atomic lock is only
// needed when some *other* task owns the bookkeeping fields; unlocking our
// own lock is safe without it. -mke
void read_unlock(wrlock_t *lock) {
    // Bug fix: the old code re-read lock->pid *after* _read_unlock(), whose
    // error path resets pid to -1. When that happened, the second check went
    // false and atomic_l_unlockf() was skipped, leaking the atomic lock.
    // Decide once, up front, and remember the decision.
    bool needs_atomic = (lock->pid != current_pid(current)) && (lock->pid != -1);
    if (needs_atomic) {
        atomic_l_lockf("r_unlock\0", 0);
        _read_unlock(lock);
        atomic_l_unlockf();
    } else { // We can unlock our own lock without additional locking. -mke
        _read_unlock(lock);
    }
}

// Release a write lock and clear the owner bookkeeping. Caller must hold
// the global atomic lock (see write_unlock()).
void _write_unlock(wrlock_t *lock) {
    if (pthread_rwlock_unlock(&lock->l) != 0)
        printk("URGENT: write_unlock(%x:%d) error(PID: %d Process: %s) \n", lock, lock->val, current_pid(current), current_comm(current));
    if (lock->val != -1) {
        // val should be exactly -1, the write-held sentinel set in _write_lock().
        printk("ERROR: write_unlock(%x) on lock with val of %d (PID: %d Process: %s )\n", lock, lock->val, current_pid(current), current_comm(current));
    }
    // Bug fix: the old code assigned pid = 0 and then immediately pid = -1.
    // -1 is the "unowned" sentinel, so set it directly and drop the dead store.
    lock->val = 0;
    lock->line = 0;
    lock->pid = -1;
    lock->comm[0] = 0;
    lock->file = NULL;
    modify_locks_held_count_wrapper(-1);
}

// Public entry point for releasing a write lock: wraps _write_unlock() with
// the global atomic lock. Internal calls that already hold the atomic lock
// use _write_unlock() directly. -mke
// (Removed the dead trailing `return;` at the end of this void function.)
void write_unlock(wrlock_t *lock) {
    atomic_l_lockf("w_unlock\0", 0);
    _write_unlock(lock);
    atomic_l_unlockf();
}

void wrlock_init(wrlock_t *lock) {
pthread_rwlockattr_t *pattr = NULL;
Expand Down Expand Up @@ -168,102 +56,5 @@ void lock_destroy(wrlock_t *lock) {
atomic_l_unlockf();
}

// Take `lock` for writing. Caller must hold the global atomic lock (see
// write_lock()). On return lock->val is -1, the write-held sentinel that
// _write_unlock() checks.
void _write_lock(wrlock_t *lock) { // Write lock
    loop_lock_write(lock);

    lock->val = -1; // sentinel: exactly one writer holds the lock
    // (Removed long-dead commented-out file/line assignments and asserts.)
    lock->pid = current_pid(current);
    if (lock->pid > 9) { // PIDs <= 9 skip the comm copy -- TODO confirm why
        strncpy((char *)lock->comm, current_comm(current), 16);
        // Bug fix: strncpy does not NUL-terminate when the source fills the
        // 16-byte bound; force-terminate so later "%s" printing is safe.
        ((char *)lock->comm)[15] = '\0';
    }
}

// Public entry point for taking a write lock: serializes lock bookkeeping
// through the global atomic lock, then defers to _write_lock().
void write_lock(wrlock_t *lock) {
atomic_l_lockf("_w_lock", 0);
_write_lock(lock);
atomic_l_unlockf();
}

// Last-resort recovery when loop_lock_generic() exhausts its retry budget:
// log who holds `lock`, force the pthread rwlock open, and rewind the val
// bookkeeping by one step.
// NOTE(review): when val == 1 or val < 0 below, _read_unlock()/_write_unlock()
// call pthread_rwlock_unlock() *again* after the explicit unlock above --
// confirm the double unlock is intentional (it likely just fails and logs
// an URGENT message).
void handle_lock_error(wrlock_t *lock, const char *func) {
printk("ERROR: %s(%x) tries exceeded %d, dealing with likely deadlock. (Lock held by PID: %d Process: %s) \n",
func, lock, WAIT_MAX_UPPER, lock->pid, lock->comm);

// Distinguish "owner is gone" from "owner is wedged" for the log; the
// recovery action (force the rwlock open) is the same either way.
if(pid_get((dword_t)lock->pid) == NULL) {
printk("ERROR: %s(%x) locking PID(%d) is gone for task %s\n", func, lock, lock->pid, lock->comm);
pthread_rwlock_unlock(&lock->l);
} else {
printk("ERROR: %s(%x) locking PID(%d), %s is apparently wedged\n", func, lock, lock->pid, lock->comm);
pthread_rwlock_unlock(&lock->l);
}

// Rewind the reader/writer count one step to match the forced unlock.
if(lock->val > 1) {
lock->val--;
} else if(lock->val == 1) {
_read_unlock(lock);
} else if(lock->val < 0) {
_write_unlock(lock);
}
}

// Swap a held read lock for a write lock. The global atomic lock is held
// across the unlock/relock pair so no other task can interleave its lock
// bookkeeping; note the pthread rwlock itself is briefly released between
// the two calls.
void read_to_write_lock(wrlock_t *lock) { // Try to atomically swap a RO lock to a Write lock. -mke
task_ref_cnt_mod(current, 1); // keep current task alive across the swap
atomic_l_lockf("rtw_lock\0", 0);
_read_unlock(lock);
_write_lock(lock);
atomic_l_unlockf();
task_ref_cnt_mod(current, -1);
}

// Swap a held write lock for a read lock under the global atomic lock.
// As with read_to_write_lock(), the pthread rwlock itself is briefly
// released between the unlock and the relock.
void write_to_read_lock(wrlock_t *lock) { // Try to atomically swap a Write lock to a RO lock. -mke
task_ref_cnt_mod(current, 1); // keep current task alive across the swap
atomic_l_lockf("wtr_lock\0", 0);
_write_unlock(lock);
_read_lock(lock);
atomic_l_unlockf();
task_ref_cnt_mod(current, -1);
}

// Release a held write lock and destroy the lock object in one atomic
// section. _lock_destroy() is defined elsewhere in this file.
void write_unlock_and_destroy(wrlock_t *lock) {
task_ref_cnt_mod(current, 1); // keep current task alive through teardown
atomic_l_lockf("wuad_lock\0", 0);
_write_unlock(lock);
_lock_destroy(lock);
atomic_l_unlockf();
task_ref_cnt_mod(current, -1);
}

// Drop a read lock (if actually held) and destroy the lock object.
// NOTE(review): trylockw() calls atomic_l_lockf() itself while we already
// hold the atomic lock here -- confirm that lock tolerates this nesting.
// NOTE(review): trylockw() returns 0 on success; so the _read_unlock()
// below runs only when the try *fails* (lock already held). When the try
// succeeds we destroy a freshly acquired write lock without releasing it
// -- confirm _lock_destroy() handles that.
void read_unlock_and_destroy(wrlock_t *lock) {
atomic_l_lockf("ruad_lock", 0);
if(trylockw(lock)) // It should be locked, but just in case. Likely masking underlying issue. -mke
_read_unlock(lock);

_lock_destroy(lock);
atomic_l_unlockf();
}

// Try to take `lock` for writing without blocking. Returns the status from
// pthread_rwlock_trywrlock(): 0 on success, nonzero (e.g. EBUSY) when the
// lock is already held. On success, records the caller as owner and bumps
// the held-locks count.
// NOTE(review): this takes the atomic lock itself, yet
// read_unlock_and_destroy() calls it while already holding that lock --
// confirm atomic_l_lockf is safe to nest.
int trylockw(wrlock_t *lock) {
    atomic_l_lockf("trylockw\0", 0);
    int status = pthread_rwlock_trywrlock(&lock->l);
    atomic_l_unlockf();
#if LOCK_DEBUG
    if (!status) {
        // Bug fix: this branch referenced undeclared `file`/`line` variables
        // and contained an invalid declaration (`extern int current_pid(current);`),
        // so it could not compile with LOCK_DEBUG enabled. Without the
        // __FILE__/__LINE__-forwarding macro (see the commented-out #define
        // after this function) the call site is unknown here.
        lock->debug.file = NULL;
        lock->debug.line = 0;
        lock->debug.pid = current_pid(current);
    }
#endif
    if (status == 0) {
        modify_locks_held_count_wrapper(1);
        lock->pid = current_pid(current);
        strncpy(lock->comm, current_comm(current), 16);
        // strncpy may leave comm unterminated when the source is >= 16 bytes;
        // force-terminate so later "%s" printing is safe.
        ((char *)lock->comm)[15] = '\0';
    }
    return status;
}

//#define trylockw(lock) trylockw(lock, __FILE__, __LINE__)

Loading

0 comments on commit f6f3cf6

Please sign in to comment.