-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathblocking.c
198 lines (169 loc) · 6.23 KB
/
blocking.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
#include "blocking.h"

// Scheduler state shared with the rest of the kernel (defined elsewhere).
extern task_list_t running;              // list whose head is the running task
extern task_list_t ready[MAX_NUM_TASKS]; // ready queues, indexed by priority
                                         // NOTE(review): sized MAX_NUM_TASKS but
                                         // indexed by priority below — confirm
                                         // MAX_NUM_TASKS >= NUM_PRIORITIES
extern uint32_t ready_mask;              // bitmask of non-empty ready lists
extern bool running_handled;             // tells the scheduler the running task
                                         // was already queued somewhere else

// Static pool backing every semaphore AND mutex; ids index into this array.
os_semaphore_t sem_list[MAX_SEMAPHORES];
// Create a counting semaphore backed by a slot in the static sem_list pool.
//
// On success writes the new semaphore id through sem_id and returns OS_OK.
// Returns OS_ERR_MEM once all MAX_SEMAPHORES slots have been handed out
// (slots are never recycled). Also used by os_new_mutex(), so the pool is
// shared between semaphores and mutexes.
//
// Fix: the pool counter and slot are shared mutable state, so they are now
// guarded with disable_irq()/enable_irq() like every other routine in this
// file — without the guard, two tasks creating semaphores concurrently could
// race on num_sems and be handed the same id.
os_error_t os_new_semaphore(os_semaphore_id_t *sem_id, uint32_t init_count) {
  // used to keep track of number of semaphores and mutexes
  static uint32_t num_sems = 0;
  disable_irq();
  // all static memory so there is a fixed number of semaphores allowed
  if (num_sems >= MAX_SEMAPHORES) {
    enable_irq();
    return OS_ERR_MEM;
  }
  // claim the next free slot and report its index as the id
  os_semaphore_t *sem = &sem_list[num_sems];
  *sem_id = num_sems;
  num_sems++;
  sem->count = init_count;
  // set the blocked lists to be empty
  for (int i = 0; i < NUM_PRIORITIES; i++) {
    sem->blocked[i].head = NULL;
    sem->blocked[i].tail = NULL;
  }
  // lists start empty
  sem->blocked_mask = 0;
  enable_irq();
  return OS_OK;
}
// Decrement (take) the semaphore, blocking the running task while the count
// is zero. Must be called from task context, never an ISR, because it can
// block. All shared semaphore/scheduler state is touched with interrupts
// disabled; the enable/disable pair inside the loop opens the window in
// which the pended context switch actually fires.
void os_wait(os_semaphore_id_t sem_id) {
  disable_irq();
  // check if we need to block first
  while (sem_list[sem_id].count == 0) {
    // park the running task on this semaphore's blocked list for its priority
    enqueue(running.head, sem_list[sem_id].blocked + running.head->priority,
            &sem_list[sem_id].blocked_mask);
    // running task has been put somewhere, so scheduler shouldn't also add
    // to ready
    running_handled = true;
    os_schedule();
    enable_irq();
    // interrupt will trigger here if a context switch is needed
    disable_irq();
    // loop instead of assuming success: another task may have consumed the
    // count between os_signal() and this task resuming
  }
  sem_list[sem_id].count--;
  enable_irq();
}
// Increment (give) the semaphore and wake the highest-priority task blocked
// on it, if any. The woken task is moved to its ready list; the scheduler
// runs only when that task outranks the caller (lower numeric value means
// higher priority). Safe to call with a positive count and no waiters.
void os_signal(os_semaphore_id_t sem_id) {
  disable_irq();
  os_semaphore_t *sem = &sem_list[sem_id];
  sem->count++;
  // locate the non-empty blocked list with the best (lowest-valued) priority
  task_list_t *waiters = highest_priority_list(sem->blocked, sem->blocked_mask);
  // a NULL list means nobody is blocked on this semaphore
  if (waiters != NULL) {
    // pop the front waiter and make it runnable at its own priority
    tcb_t *woken = dequeue(waiters, &sem->blocked_mask);
    enqueue(woken, ready + woken->priority, &ready_mask);
    // reschedule only when the woken task should preempt the caller
    if (woken->priority < running.head->priority) {
      os_schedule();
    }
  }
  enable_irq();
}
// Create a mutex: a binary semaphore (initial count 1) from the shared pool,
// plus the ownership/priority-inheritance bookkeeping selected by attribs.
// Writes the new id through mutex_id on success; propagates OS_ERR_MEM from
// os_new_semaphore() when the pool is exhausted.
os_error_t os_new_mutex(os_semaphore_id_t *mutex_id, os_mutex_attribs_t attribs) {
  os_error_t status = os_new_semaphore(mutex_id, 1);
  if (status == OS_OK) {
    // fill in the mutex-only fields of the freshly claimed slot
    os_semaphore_t *mtx = &sem_list[*mutex_id];
    mtx->current_owner = NULL;
    mtx->owner_orig_prio = LOWEST_PRIORITY;
    mtx->mode = attribs;
  }
  return status;
}
// Acquire a mutex, blocking until it is free. With MUTEX_MODE_OWNER set,
// re-acquiring a mutex you already hold is a no-op. With MUTEX_MODE_INHER
// set, a higher-priority waiter temporarily boosts the owner's priority
// (priority inheritance) to bound priority inversion. Lower numeric
// priority value means higher priority. Task context only — may block.
void os_acquire(os_mutex_id_t mutex_id) {
  disable_irq();
  // if you already have it acquired, you good, keep it up man!
  if (sem_list[mutex_id].mode & MUTEX_MODE_OWNER &&
      sem_list[mutex_id].current_owner != NULL &&
      sem_list[mutex_id].current_owner->id == running.head->id) {
    enable_irq();
    return;
  }
  // check if we need to block first
  while (sem_list[mutex_id].count == 0) {
    // add this task to correct blocked list
    enqueue(running.head, sem_list[mutex_id].blocked + running.head->priority,
            &sem_list[mutex_id].blocked_mask);
    // if prio inheritance is enabled
    // see if the current mutex owner needs to acquire higher prio
    // NOTE(review): current_owner is dereferenced without a NULL check here;
    // looks safe because count == 0 implies a prior acquire set it, but
    // confirm no path leaves count == 0 with current_owner NULL.
    if (sem_list[mutex_id].mode & MUTEX_MODE_INHER &&
        running.head->priority < sem_list[mutex_id].current_owner->priority) {
      // temporarily change its priority
      sem_list[mutex_id].current_owner->priority = running.head->priority;
      // add in lower priority owner to ready list again, but at new prio
      // NOTE(review): the owner is enqueued at the boosted priority without
      // being removed from its old ready slot — presumably os_release's
      // running_handled bookkeeping accounts for the stale entry; verify.
      enqueue(sem_list[mutex_id].current_owner,
              ready + running.head->priority, &ready_mask);
    }
    // let scheduler know it should not add this task to ready list
    running_handled = true;
    os_schedule();
    enable_irq();
    // context switch will fire now, while interrupts are enabled
    disable_irq();
  }
  sem_list[mutex_id].count--;
  // we have now been unblocked/never blocked, so need to change ownership;
  // owner_orig_prio records the pre-boost priority to restore on release
  sem_list[mutex_id].current_owner = running.head;
  sem_list[mutex_id].owner_orig_prio = running.head->priority;
  enable_irq();
}
// Release a mutex. With MUTEX_MODE_OWNER set, returns OS_ERR_PERM if the
// caller is not the current owner. With MUTEX_MODE_INHER set, the caller's
// priority is restored — either to its original value or to the priority of
// the next-highest waiter still blocked on this mutex. Finally the best
// waiter (if any) is made ready, rescheduling when it outranks the caller.
// Lower numeric priority value means higher priority throughout.
os_error_t os_release(os_mutex_id_t mutex_id) {
  // if ownership is enabled, fail and return if this is not the owner
  disable_irq();
  if (sem_list[mutex_id].mode & MUTEX_MODE_OWNER &&
      sem_list[mutex_id].current_owner != NULL &&
      running.head->id != sem_list[mutex_id].current_owner->id) {
    enable_irq();
    return OS_ERR_PERM;
  }
  // successfully released
  sem_list[mutex_id].count++;
  // reset owner to NULL if ownership is enabled
  if (sem_list[mutex_id].mode & MUTEX_MODE_OWNER) {
    sem_list[mutex_id].current_owner = NULL;
  }
  // get the highest priority blocked task list
  // (computed once here; the inheritance section below must read list->head
  // BEFORE the dequeue near the end changes it)
  task_list_t *list = highest_priority_list(sem_list[mutex_id].blocked,
                                            sem_list[mutex_id].blocked_mask);
  // if priority inheritance is enabled
  // set its prio to the highest blocked task on this mutex
  // or its original prio, whatever prio is highest
  if (sem_list[mutex_id].mode & MUTEX_MODE_INHER) {
    // save the prio val coming into this part
    uint32_t init_prio = running.head->priority;
    // fix the prio value to either original or next highest
    // currently blocked on the mutex
    if (list != NULL && list->head->priority < running.head->priority) {
      running.head->priority = list->head->priority;
    } else {
      running.head->priority = sem_list[mutex_id].owner_orig_prio;
    }
    // if this section changes the priority at all, this means the tcb
    // exists already in the ready lists at the original prio
    // so it should NOT get added back to ready by the scheduler
    if (init_prio != running.head->priority) {
      // if it is still not back to its orig prio, that means another
      // task was also higher prio and blocked on it,
      // so it needs to be added onto that prio ready list
      if (running.head->priority != sem_list[mutex_id].owner_orig_prio) {
        enqueue(running.head, ready + running.head->priority, &ready_mask);
      }
      // tell the scheduler to NOT add running task to ready list
      running_handled = true;
    }
  }
  // check if there are any tasks currently blocked on it
  // if list is NULL there is nothing blocked
  if (list != NULL) {
    // add its head to the appropriate ready list
    tcb_t *task = dequeue(list, &sem_list[mutex_id].blocked_mask);
    enqueue(task, ready + task->priority, &ready_mask);
    // only call the scheduler if the unblocked task is higher prio
    if (running.head->priority > task->priority) {
      os_schedule();
    }
  }
  enable_irq();
  return OS_OK;
}