author    Christian Cunningham <cc@localhost>  2022-02-19 16:47:22 -0700
committer Christian Cunningham <cc@localhost>  2022-02-19 16:47:22 -0700
commit    871d1663aba635dd41c48e7def0a58c5267fd1e7 (patch)
tree      0f220985396373849f596b3a271d718a413aa774 /src
parent    9de5e07bc3920d4b03903eb5b78847973075059b (diff)
Commented Scheduler
Temporarily comment out the old mutex locking
Diffstat (limited to 'src')
-rw-r--r--  src/sys/schedule.c  54
-rw-r--r--  src/util/mutex.c    74
2 files changed, 72 insertions(+), 56 deletions(-)
diff --git a/src/sys/schedule.c b/src/sys/schedule.c
index 347b91a..b068963 100644
--- a/src/sys/schedule.c
+++ b/src/sys/schedule.c
@@ -55,23 +55,6 @@ struct RStack get_stack(void)
return r;
}
-void draw_stacks(void)
-{
- unsigned long ioff = 0;
- unsigned long yoff = STACK_DRAW_YOFF;
- for(int i = 0; i < MAX_THREADS; i++) {
- if(stacks_table[i])
- draw_cbox(ioff, yoff, STACK_DRAW_SIZE, STACK_DRAW_SIZE, 0xFFFFFF);
- else
- draw_cbox(ioff, yoff, STACK_DRAW_SIZE, STACK_DRAW_SIZE, 0x000000);
- ioff += STACK_DRAW_SIZE;
- if(ioff % STACK_DRAW_WIDTH == 0) {
- yoff += STACK_DRAW_SIZE;
- ioff = 0;
- }
- }
-}
-
void add_thread(void* pc, void* arg, unsigned char priority)
{
//void* sp = get_stack();
@@ -79,6 +62,7 @@ void add_thread(void* pc, void* arg, unsigned char priority)
//struct Thread* thread = (struct Thread*)malloca(sizeof(struct Thread), 4);
struct Thread* thread = (struct Thread*)kmalloc(sizeof(struct Thread));
thread->pc = pc;
+ // A valid stack was obtained for this thread
if (r.sp) {
thread->sp_base = r.idx;
unsigned long* argp = r.sp;
@@ -89,26 +73,35 @@ void add_thread(void* pc, void* arg, unsigned char priority)
thread->sp = (void*)argp;
thread->status = THREAD_READY;
sched_stack_count++;
- } else {
+ }
+ // Couldn't allocate a proper stack
+ else {
thread->sp_base = r.idx;
thread->sp = r.sp;
thread->status = THREAD_SERROR;
}
thread->mptr = (void*)0;
thread->pid = nextpid++;
+ // Reset next pid on overflow
+ if (nextpid < FIRST_AVAIL_PID) {
+ nextpid = FIRST_AVAIL_PID;
+ }
thread->priority = priority % PRIORITIES;
thread->old_priority = -1;
thread->preempt = 0;
// Add Thread* to scheduler's appropriate buffer
struct ThreadQueues* tq = &scheduler.thread_queues[thread->priority];
struct ThreadRotBuffer* trb;
+ // Add to stack error queue if stack was not obtained
if (thread->status == THREAD_SERROR) {
trb = &tq->serror;
- } else {
+ }
+ else {
trb = &tq->ready;
}
trb->queue[trb->woffset++] = thread;
trb->woffset %= TQUEUE_MAX;
+ // Schedule if this was called in usermode
unsigned long mode = getmode() & 0x1F;
if (mode == 0x10) {
sys0(SYS_YIELD_HIGH);
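
Here getmode() & 0x1F isolates the processor mode field, and 0x10 identifies usermode, so the syscall-based yield is only issued from user code. For reference, a sketch of the relevant mode values, assuming AArch32 CPSR encoding (the macro names are illustrative, not from this repository):

// AArch32 CPSR mode field (M[4:0]) values, for reference only.
#define MODE_MASK 0x1F
#define MODE_USR  0x10 /* User mode: yield via syscall */
#define MODE_SVC  0x13 /* Supervisor mode: scheduler is driven directly */
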
@@ -154,12 +147,14 @@ void uart_scheduler(void)
struct Thread* next_thread(void)
{
struct Thread* next = &usrloopthread;
+ // Iterate over all priorities to find a ready thread
for (int p = 0; p < PRIORITIES; p++) {
struct ThreadRotBuffer* rb = &scheduler.thread_queues[p].ready;
if (rb->roffset == rb->woffset)
continue;
return rb->queue[rb->roffset];
}
+ // No thread found; run the basic usrloopthread while waiting for a new thread
return next;
}
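
Each ready queue is a rotating buffer that is empty exactly when its read offset equals its write offset, which is why next_thread can skip a whole priority level with a single comparison. A minimal sketch of that invariant, with illustrative helper names that are not part of the source:

// Sketch of the rotating-buffer occupancy rules used by next_thread.
static inline int trb_is_empty(const struct ThreadRotBuffer* trb)
{
	return trb->roffset == trb->woffset; // nothing left unread
}

static inline unsigned long trb_count(const struct ThreadRotBuffer* trb)
{
	// Number of queued threads, accounting for wraparound
	return (trb->woffset + TQUEUE_MAX - trb->roffset) % TQUEUE_MAX;
}
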
@@ -171,8 +166,11 @@ void* get_rthread_roffset(void)
void yield(void)
{
struct Thread* rthread = scheduler.rthread;
+ // usrloopthread should not be yielded
if (rthread == &usrloopthread)
return;
+ // Put the current thread at the end of its ready queue,
+ // so other threads of the same priority can run first
unsigned char priority = rthread->priority;
struct ThreadRotBuffer* trb = &scheduler.thread_queues[priority].ready;
trb->roffset += 1;
@@ -184,40 +182,51 @@ void yield(void)
void sched_mutex_yield(void* m)
{
struct Thread* rthread = scheduler.rthread;
+ // usrloopthread should not be yielded
if (rthread == &usrloopthread)
return;
unsigned char priority = rthread->priority;
+ // Record which lock this thread is waiting for
rthread->mptr = m;
struct ThreadRotBuffer* trbb = &scheduler.thread_queues[priority].ready;
struct ThreadRotBuffer* trbm = &scheduler.thread_queues[priority].mwait;
+ // Advance past the current thread in its priority's ready queue
trbb->roffset += 1;
trbb->roffset %= TQUEUE_MAX;
+ // Add thread to waiting queue
trbm->queue[trbm->woffset++] = rthread;
trbm->woffset %= TQUEUE_MAX;
// Find the thread with the mutex
struct ThreadQueues* tq;
+ // Search through each priority
for (int i = 0; i < PRIORITIES; i++) {
tq = &scheduler.thread_queues[i];
struct ThreadRotBuffer* trb = &tq->ready;
+ // Search through each queue at the current priority
for (int i = 0; i < TQUEUE_CNT; i++) {
unsigned long roffset = trb->roffset;
unsigned long woffset = trb->woffset;
+ // Search through the threads
while(roffset != woffset) {
// Found thread
if (trb->queue[roffset]->pid == ((struct Mutex*)m)->pid) {
// Promote the thread to the new priority
if (trb->queue[roffset]->priority > priority) {
trbb->queue[trbb->woffset++] = trb->queue[roffset];
+ // Set the old priority if not set
if(trb->queue[roffset]->old_priority == 0xFF)
trb->queue[roffset]->old_priority = trb->queue[roffset]->priority;
+ // Promote the priority
trb->queue[roffset]->priority = priority;
trbb->woffset %= TQUEUE_MAX;
unsigned long coffset = roffset;
+ // Fill the gap where the thread was removed
while (coffset != woffset) {
trb->queue[coffset] = trb->queue[(coffset+1)%TQUEUE_MAX];
coffset++;
coffset %= TQUEUE_MAX;
}
+ // Move the woffset back since the gap was filled in
if (trb->woffset == 0)
trb->woffset = TQUEUE_MAX-1;
else
@@ -228,6 +237,7 @@ void sched_mutex_yield(void* m)
roffset++;
roffset %= TQUEUE_MAX;
}
+ // Check the next queue at this priority
trb += 1;
}
}
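
The search above implements priority inheritance: when a higher-priority waiter (numerically lower priority value) blocks on a mutex, the holder is promoted to the waiter's priority so it can finish its critical section sooner, and old_priority keeps the value to restore later. Condensed into an illustrative helper (not source code; the real logic is inlined above):

// Sketch: the promotion step of the priority-inheritance pass.
static void promote_holder(struct Thread* holder, unsigned char waiter_priority)
{
	// Lower value means higher priority, so promote only when the
	// holder currently runs at a numerically larger value
	if (holder->priority > waiter_priority) {
		if (holder->old_priority == 0xFF) // 0xFF marks "not boosted"
			holder->old_priority = holder->priority;
		holder->priority = waiter_priority;
	}
}
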
@@ -235,11 +245,15 @@ void sched_mutex_yield(void* m)
void sched_mutex_resurrect(void* m)
{
+ // Look through each priority
for (int p = 0; p < PRIORITIES; p++) {
struct ThreadRotBuffer* trbm = &scheduler.thread_queues[p].mwait;
unsigned long roffset = trbm->roffset;
+ // Look through the lock wait queue
while (roffset != trbm->woffset) {
+ // Check if the thread is waiting for the released mutex
if (trbm->queue[roffset]->mptr == m) {
+ // Resurrect the thread
trbm->queue[roffset]->mptr = 0;
struct ThreadRotBuffer* trb = &scheduler.thread_queues[trbm->queue[roffset]->priority].ready;
trb->queue[trb->woffset++] = trbm->queue[roffset];
@@ -251,12 +265,14 @@ void sched_mutex_resurrect(void* m)
coffset++;
coffset %= TQUEUE_MAX;
}
+ // Move the woffset back since the space was filled
if(trbm->woffset == 0)
trbm->woffset = TQUEUE_MAX-1;
else
trbm->woffset--;
// Move the read pointer ahead
struct Thread* rthread = scheduler.rthread;
+ // Move the current thread to its old priority if it was promoted earlier
if (rthread->old_priority != 0xFF) {
struct ThreadRotBuffer* rtrb = &scheduler.thread_queues[rthread->priority].ready;
struct ThreadRotBuffer* ntrb = &scheduler.thread_queues[rthread->old_priority].ready;
diff --git a/src/util/mutex.c b/src/util/mutex.c
index 5583d09..995ef62 100644
--- a/src/util/mutex.c
+++ b/src/util/mutex.c
@@ -4,43 +4,43 @@
#include <sys/schedule.h>
#include <util/mutex.h>
-unsigned char lock_mutex(struct Mutex* m, unsigned long pid)
-{
- if (m->pid == NULL_PID) {
- // Use currently running thread's PID if no pid given
- if (pid == 0) {
- struct Thread* thread = scheduler.rthread;
- atm_lock(thread->pid, &m->pid);
- } else {
- atm_lock(pid, &m->pid);
- }
- return 0;
- }
- struct Thread* thread = scheduler.rthread;
- thread->status = THREAD_MWAIT;
- thread->mptr = m;
- return 1;
-}
-
-// Eventually, there will need to be a hook into the scheduling mechanism
-// that checks the currently running process and checks that against the
-// mutex's pid lock
-unsigned char release_mutex(struct Mutex* m, unsigned long pid)
-{
- // Use current thread's PID if no pid
- if (pid == 0) {
- struct Thread* thread = scheduler.rthread;
- if (m->pid == thread->pid) {
- atm_release(&m->pid);
- return 0;
- }
- }
- else if (m->pid == pid) {
- atm_release(&m->pid);
- return 0;
- }
- return 1;
-}
+///unsigned char lock_mutex(struct Mutex* m, unsigned long pid)
+///{
+/// if (m->pid == NULL_PID) {
+/// // Use currently running thread's PID if no pid given
+/// if (pid == 0) {
+/// struct Thread* thread = scheduler.rthread;
+/// atm_lock(thread->pid, &m->pid);
+/// } else {
+/// atm_lock(pid, &m->pid);
+/// }
+/// return 0;
+/// }
+/// struct Thread* thread = scheduler.rthread;
+/// thread->status = THREAD_MWAIT;
+/// thread->mptr = m;
+/// return 1;
+///}
+///
+///// Eventually, there will need to be a hook into the scheduling mechanism
+///// that checks the currently running process and checks that against the
+///// mutex's pid lock
+///unsigned char release_mutex(struct Mutex* m, unsigned long pid)
+///{
+/// // Use current thread's PID if no pid
+/// if (pid == 0) {
+/// struct Thread* thread = scheduler.rthread;
+/// if (m->pid == thread->pid) {
+/// atm_release(&m->pid);
+/// return 0;
+/// }
+/// }
+/// else if (m->pid == pid) {
+/// atm_release(&m->pid);
+/// return 0;
+/// }
+/// return 1;
+///}
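
With the old bodies commented out above, the locking path is presumably meant to route through the scheduler hooks added in src/sys/schedule.c. A minimal sketch of what that rewiring could look like, assuming the atm_lock/atm_release atomics and the sched_mutex_yield/sched_mutex_resurrect hooks from this commit (an assumption, not code from the repository):

// Hypothetical replacement, NOT from this commit: delegate blocking and
// wakeup to the scheduler instead of marking THREAD_MWAIT here.
unsigned char lock_mutex(struct Mutex* m, unsigned long pid)
{
	unsigned long self = pid ? pid : scheduler.rthread->pid;
	if (m->pid == NULL_PID) {
		atm_lock(self, &m->pid); // uncontended: take the lock atomically
		return 0;
	}
	// Contended: park this thread on the mwait queue and boost the holder
	sched_mutex_yield(m);
	return 1;
}

unsigned char release_mutex(struct Mutex* m, unsigned long pid)
{
	unsigned long self = pid ? pid : scheduler.rthread->pid;
	if (m->pid != self)
		return 1; // only the holder may release
	atm_release(&m->pid);
	sched_mutex_resurrect(m); // move any waiter back to its ready queue
	return 0;
}
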
struct Mutex* create_mutex(void* addr)
{