From 0b812480a8b81607802cdceb273b69680cd5082c Mon Sep 17 00:00:00 2001 From: Christian Cunningham Date: Fri, 11 Mar 2022 13:41:49 -0800 Subject: Statically Allocated Scheduling --- include/sys/schedule.h | 25 ++-- src/cpu/irq.c | 4 +- src/exceptions/svc.S | 2 +- src/sys/schedule.S | 23 ++-- src/sys/schedule.c | 329 +++++++++++++++++++++++++++++-------------------- src/tests/test.c | 26 ++-- 6 files changed, 237 insertions(+), 172 deletions(-) diff --git a/include/sys/schedule.h b/include/sys/schedule.h index 397ab2e..0205f54 100644 --- a/include/sys/schedule.h +++ b/include/sys/schedule.h @@ -30,24 +30,29 @@ struct Thread { unsigned short s_reserved; }; -struct ThreadRotBuffer { - unsigned int roffset; - unsigned int woffset; - struct Thread* queue[TQUEUE_MAX]; +struct ThreadEntry { + struct Thread* thread; + struct ThreadEntry* prev; + struct ThreadEntry* next; }; -struct ThreadQueues { - struct ThreadRotBuffer ready; - struct ThreadRotBuffer mwait; - struct ThreadRotBuffer swait; +struct ThreadEntryIterator { + struct ThreadEntry* entry; +}; + +struct ThreadQueue { + struct ThreadEntry entry[TQUEUE_MAX]; + struct ThreadEntryIterator read; + struct ThreadEntryIterator write; }; struct Scheduler { struct Thread* rthread; - struct ThreadQueues thread_queues[PRIORITIES]; + struct ThreadQueue ready[PRIORITIES]; + struct ThreadQueue mwait[PRIORITIES]; + struct ThreadQueue swait[PRIORITIES]; }; - void init_scheduler(void); void add_thread(void* pc, void* arg, unsigned char priority); void uart_scheduler(void); diff --git a/src/cpu/irq.c b/src/cpu/irq.c index d9d50d8..51bc7f2 100644 --- a/src/cpu/irq.c +++ b/src/cpu/irq.c @@ -47,7 +47,7 @@ void c_irq_handler(void) } // Add task to handle the data else { - //add_thread(handle_data, (void*)data, 1); + add_thread(handle_data, (void*)data, 1); } return; } @@ -60,7 +60,7 @@ void c_irq_handler(void) static char timer_lock = 0; if (!timer_lock) { timer_lock = 1; - add_thread(test_entry, 0, 2); + //add_thread(test_entry, 0, 2); timer_lock = 0; } *nexttime = *timer_chi + 30000000; diff --git a/src/exceptions/svc.S b/src/exceptions/svc.S index 1b0cc1f..3fb534e 100644 --- a/src/exceptions/svc.S +++ b/src/exceptions/svc.S @@ -39,7 +39,7 @@ svc_000003: // Free Thread in Table ldr r1, [r2, #0x1c] // thread offset mov r0, #0 ldr r2, =thread_table - add r2, r1 + add r2, r1, r2 str r0, [r2] b svc_exit svc_000004: // Lock Lock (usr_r0 = struct Lock*) diff --git a/src/sys/schedule.S b/src/sys/schedule.S index d7301fc..903e967 100644 --- a/src/sys/schedule.S +++ b/src/sys/schedule.S @@ -26,17 +26,18 @@ schedule: .globl cleanup cleanup: - // roffset++ - bl get_rthread_roffset - ldr r1, [r0, #0] - add r1, #1 - cmp r1, #0x100 /* TQUEUE_MAX */ - blo 1f - mov r1, #0 -1: - str r1, [r0, #0] - // Free thread in table - svc #3 + bl c_cleanup +// // roffset++ +// bl get_rthread_roffset +// ldr r1, [r0, #0] +// add r1, #1 +// cmp r1, #0x100 /* TQUEUE_MAX */ +// blo 1f +// mov r1, #0 +//1: +// str r1, [r0, #0] +// // Free thread in table +// svc #3 // usrloop -> rthread ldr r3, =scheduler ldr r2, =usrloopthread diff --git a/src/sys/schedule.c b/src/sys/schedule.c index 8c65c6f..3050e6e 100644 --- a/src/sys/schedule.c +++ b/src/sys/schedule.c @@ -20,31 +20,58 @@ void init_scheduler(void) usrloopthread.priority = -1; usrloopthread.old_priority = -1; usrloopthread.status = THREAD_READY; + usrloopthread.offset = -1; scheduler.rthread = &usrloopthread; - // Initialize Rotating Buffers - struct ThreadQueues* tq; - for (int i = 0; i < PRIORITIES; i++) { - tq = 
&scheduler.thread_queues[i]; - struct ThreadRotBuffer* trb = &tq->ready; - for (int i = 0; i < TQUEUE_CNT; i++) { - trb->roffset = 0; - trb->woffset = 0; - for (int j = 0; j < TQUEUE_MAX; j++) - trb->queue[j] = 0; - trb += 1; + + // Initialize Scheduling Queues + for (unsigned long p = 0; p < PRIORITIES; p++) { + for (unsigned long t = 0; t < TQUEUE_MAX; t++) { + /// Ready Queue + scheduler.ready[p].entry[t].thread = 0; + if (t == 0) + scheduler.ready[p].entry[t].prev = &scheduler.ready[p].entry[TQUEUE_MAX-1]; + else + scheduler.ready[p].entry[t].prev = &scheduler.ready[p].entry[(t-1)%TQUEUE_MAX]; + scheduler.ready[p].entry[t].next = &scheduler.ready[p].entry[(t+1)%TQUEUE_MAX]; + /// Mutex Wait Queue + scheduler.mwait[p].entry[t].thread = 0; + if (t == 0) + scheduler.mwait[p].entry[t].prev = &scheduler.mwait[p].entry[TQUEUE_MAX-1]; + else + scheduler.mwait[p].entry[t].prev = &scheduler.mwait[p].entry[(t-1)%TQUEUE_MAX]; + scheduler.mwait[p].entry[t].next = &scheduler.mwait[p].entry[(t+1)%TQUEUE_MAX]; + /// Signal Wait Queue + scheduler.swait[p].entry[t].thread = 0; + if (t == 0) + scheduler.swait[p].entry[t].prev = &scheduler.swait[p].entry[TQUEUE_MAX-1]; + else + scheduler.swait[p].entry[t].prev = &scheduler.swait[p].entry[(t-1)%TQUEUE_MAX]; + scheduler.swait[p].entry[t].next = &scheduler.swait[p].entry[(t+1)%TQUEUE_MAX]; } + // Ready Queue + scheduler.ready[p].read.entry = &scheduler.ready[p].entry[0]; + scheduler.ready[p].write.entry = &scheduler.ready[p].entry[0]; + // Mutex Wait Queue + scheduler.mwait[p].read.entry = &scheduler.mwait[p].entry[0]; + scheduler.mwait[p].write.entry = &scheduler.mwait[p].entry[0]; + // Signal Wait Queue + scheduler.swait[p].read.entry = &scheduler.swait[p].entry[0]; + scheduler.swait[p].write.entry = &scheduler.swait[p].entry[0]; } + // Initialize nextpid nextpid = FIRST_AVAIL_PID; + // Initialize Threads - Stack Base and Offsets for (unsigned long i = 0; i < MAX_THREADS; i++) { struct Thread* t = &threads[i]; t->offset = i; t->sp_base = 0x20000000 - STACK_SIZE*i; + thread_table[i] = 0; } } -struct Thread* get_available_thread(void) +struct Thread* get_unused_thread(void) { for(unsigned long i = 0; i < MAX_THREADS; i++) { if (thread_table[i] == 0) @@ -55,7 +82,7 @@ struct Thread* get_available_thread(void) void add_thread(void* pc, void* arg, unsigned char priority) { - struct Thread* thread = get_available_thread(); + struct Thread* thread = get_unused_thread(); thread_table[thread->offset] = 1; thread->pc = pc; unsigned long* argp = (void*)thread->sp_base; @@ -77,13 +104,17 @@ void add_thread(void* pc, void* arg, unsigned char priority) thread->priority = priority; thread->old_priority = -1; thread->preempt = 0; - // Add Thread* to scheduler's appropriate buffer - struct ThreadQueues* tq = &scheduler.thread_queues[thread->priority]; - struct ThreadRotBuffer* trb; - // Add to stack error queue if stack was not obtained - trb = &tq->ready; - trb->queue[trb->woffset++] = thread; - trb->woffset %= TQUEUE_MAX; + // Get the Ready Queue + struct ThreadQueue* ready_queue = &scheduler.ready[thread->priority]; + // Get the write pointer + struct ThreadEntryIterator* ready_write = &ready_queue->write; + // TODO: Check if it is possible to add + //if (ready_write->queue->thread == 0) { + // Add the thread to the write pointer + ready_write->entry->thread = thread; + // Move the write pointer to the next entry + ready_write->entry = ready_write->entry->next; + //} // Schedule if this was called in usermode unsigned long mode = getmode() & 0x1F; if (mode == 0x10) { 
@@ -96,32 +127,41 @@ void uart_scheduler(void) uart_string("Scheduler Info\n==============\nCurrent\n"); uart_hex((unsigned long)scheduler.rthread); uart_char(' '); - kmemshow32((void*)scheduler.rthread, 7); - struct ThreadQueues* tq; + kmemshow32((void*)scheduler.rthread, 9); for(int p = 0; p < PRIORITIES; p++) { uart_string("Priority "); uart_10(p); uart_char('\n'); - tq = &scheduler.thread_queues[p]; - struct ThreadRotBuffer* trb; - trb = &tq->ready; - for(int i = 0; i < TQUEUE_CNT; i++) { - if (trb->roffset == trb->woffset) { - trb += 1; - continue; - } - uart_string("Queue "); - uart_10(i); - uart_char('\n'); - unsigned long roffset = trb->roffset; - while (roffset != trb->woffset) { - uart_hex((unsigned long)trb->queue[roffset]); - uart_char(' '); - kmemshow32((void*)trb->queue[roffset], 7); - roffset++; - roffset %= TQUEUE_MAX; - } - trb += 1; + struct ThreadEntryIterator iter; + + struct ThreadQueue* rq = &scheduler.ready[p]; + uart_string("Ready Queue\n"); + iter.entry = rq->read.entry; + while (iter.entry != rq->write.entry) { + uart_hex((unsigned long)iter.entry->thread); + uart_char(' '); + kmemshow32((void*)iter.entry->thread, 9); + iter.entry = iter.entry->next; + } + + struct ThreadQueue* mq = &scheduler.mwait[p]; + uart_string("Mutex Wait Queue\n"); + iter.entry = mq->read.entry; + while (iter.entry != mq->write.entry) { + uart_hex((unsigned long)iter.entry->thread); + uart_char(' '); + kmemshow32((void*)iter.entry->thread, 9); + iter.entry = iter.entry->next; + } + + struct ThreadQueue* sq = &scheduler.swait[p]; + uart_string("Signal Wait Queue\n"); + iter.entry = sq->read.entry; + while (iter.entry != sq->write.entry) { + uart_hex((unsigned long)iter.entry->thread); + uart_char(' '); + kmemshow32((void*)iter.entry->thread, 9); + iter.entry = iter.entry->next; } } uart_string("==============\n"); @@ -129,21 +169,27 @@ void uart_scheduler(void) struct Thread* next_thread(void) { - struct Thread* next = &usrloopthread; // Recurse through all priorities to try to find a ready thread for (int p = 0; p < PRIORITIES; p++) { - struct ThreadRotBuffer* rb = &scheduler.thread_queues[p].ready; - if (rb->roffset == rb->woffset) + struct ThreadQueue* rq = &scheduler.ready[p]; + if (rq->read.entry == rq->write.entry) continue; - return rb->queue[rb->roffset]; + return rq->read.entry->thread; } // No thread found, use basic usrloopthread while waiting for new thread - return next; + return &usrloopthread; } -void* get_rthread_roffset(void) +void c_cleanup(void) { - return &scheduler.thread_queues[scheduler.rthread->priority].ready.roffset; + struct Thread* rt = scheduler.rthread; + struct ThreadEntryIterator* read = &scheduler.ready[rt->priority].read; + // Clear the thread pointer + read->entry->thread = 0; + // Move read pointer forward + read->entry = read->entry->next; + // Mark Thread Unused + thread_table[rt->offset] = 0; } void yield(void) @@ -155,11 +201,10 @@ void yield(void) // Put current thread at the end of its ready queue, // thus any threads of the same priority can be run first unsigned char priority = rthread->priority; - struct ThreadRotBuffer* trb = &scheduler.thread_queues[priority].ready; - trb->roffset += 1; - trb->roffset %= TQUEUE_MAX; - trb->queue[trb->woffset++] = rthread; - trb->woffset %= TQUEUE_MAX; + struct ThreadQueue* trq = &scheduler.ready[priority]; + trq->read.entry = trq->read.entry->next; + trq->write.entry->thread = rthread; + trq->write.entry = trq->write.entry->next; } void sched_mutex_yield(void* m) @@ -171,57 +216,52 @@ void 
sched_mutex_yield(void* m) unsigned char priority = rthread->priority; // Signify which lock this thread is waiting for rthread->mptr = m; - struct ThreadRotBuffer* trbb = &scheduler.thread_queues[priority].ready; - struct ThreadRotBuffer* trbm = &scheduler.thread_queues[priority].mwait; + struct ThreadQueue* trq = &scheduler.ready[priority]; + struct ThreadQueue* tmq = &scheduler.mwait[priority]; // Move to next thread in the current thread priority's ready queue - trbb->roffset += 1; - trbb->roffset %= TQUEUE_MAX; + trq->read.entry = trq->read.entry->next; // Add thread to waiting queue - trbm->queue[trbm->woffset++] = rthread; - trbm->woffset %= TQUEUE_MAX; + tmq->write.entry->thread = rthread; + tmq->write.entry = tmq->write.entry->next; // Find the thread with the mutex - struct ThreadQueues* tq; + struct ThreadEntryIterator iter; // Search through each priority - for (int i = 0; i < PRIORITIES; i++) { - tq = &scheduler.thread_queues[i]; - struct ThreadRotBuffer* trb = &tq->ready; - // Search through each queue at the current priority - for (int i = 0; i < TQUEUE_CNT; i++) { - unsigned long roffset = trb->roffset; - unsigned long woffset = trb->woffset; - // Search through the threads - while(roffset != woffset) { - // Found thread - if (trb->queue[roffset]->pid == ((struct Mutex*)m)->pid) { - // Promote the thread to the new priority - if (trb->queue[roffset]->priority > priority) { - trbb->queue[trbb->woffset++] = trb->queue[roffset]; - // Set the old priority if not set - if(trb->queue[roffset]->old_priority == 0xFF) - trb->queue[roffset]->old_priority = trb->queue[roffset]->priority; - // Promote the priority - trb->queue[roffset]->priority = priority; - trbb->woffset %= TQUEUE_MAX; - unsigned long coffset = roffset; - // Fill gap where the thread was removed - while (coffset != woffset) { - trb->queue[coffset] = trb->queue[(coffset+1)%TQUEUE_MAX]; - coffset++; - coffset %= TQUEUE_MAX; - } - // Move the woffset back since the gap was filled in - if (trb->woffset == 0) - trb->woffset = TQUEUE_MAX-1; - else - trb->woffset--; - } - return; + for (unsigned long p = 0; p < PRIORITIES; p++) { + struct ThreadQueue* rq = &scheduler.ready[p]; + iter = rq->read; + while (iter.entry != rq->write.entry) { + // Check if it is the Mutex's thread + if (iter.entry->thread->pid == ((struct Mutex*)m)->pid) { + // Promote the thread's priority + if (iter.entry->thread->priority > priority) { + // Add it to the higher priority queue + scheduler.ready[priority].write.entry->thread = iter.entry->thread; + // Move the Write Iterator Forward + scheduler.ready[priority].write.entry = scheduler.ready[priority].write.entry->next; + // Set the old priority if not set + if (iter.entry->thread->old_priority == 0xFF) + iter.entry->thread->old_priority = p; + + // Set the new priority + iter.entry->thread->priority = priority; + + // Prune the old priority entry + struct ThreadEntry* prev = iter.entry->prev; + struct ThreadEntry* next = iter.entry->next; + prev->next = next; + next->prev = prev; + // Put the newly freed entry at end of write queue + //iter.entry->prev = rq->write.entry; + //iter.entry->next = rq->write.entry->next; + if (iter.entry == rq->read.entry) + rq->read.entry = rq->read.entry->next; + rq->write.entry->next->prev = iter.entry; + rq->write.entry->next = iter.entry; + iter.entry->thread = 0; } - roffset++; - roffset %= TQUEUE_MAX; + return; } - // Check next queue in given priority - trb += 1; + iter.entry = iter.entry->next; } } } @@ -230,49 +270,68 @@ void sched_mutex_resurrect(void* m) { 
// Look through each priority for (int p = 0; p < PRIORITIES; p++) { - struct ThreadRotBuffer* trbm = &scheduler.thread_queues[p].mwait; - unsigned long roffset = trbm->roffset; - // Look through the lock wait queue - while (roffset != trbm->woffset) { - // Check if the thread is waiting for the released mutex - if (trbm->queue[roffset]->mptr == m) { - // Ressurect the thread - trbm->queue[roffset]->mptr = 0; - struct ThreadRotBuffer* trb = &scheduler.thread_queues[trbm->queue[roffset]->priority].ready; - trb->queue[trb->woffset++] = trbm->queue[roffset]; - trb->woffset %= TQUEUE_MAX; - // Copy all next backward to fill space - unsigned long coffset = roffset; - while (coffset != trbm->woffset) { - trbm->queue[coffset] = trbm->queue[(coffset+1)%TQUEUE_MAX]; - coffset++; - coffset %= TQUEUE_MAX; - } - // Move the woffset back since the space was filled - if(trbm->woffset == 0) - trbm->woffset = TQUEUE_MAX-1; - else - trbm->woffset--; - // Move the read pointer ahead + /// struct ThreadRotBuffer* trbm = &scheduler.thread_queues[p].mwait; + struct ThreadQueue* tmq = &scheduler.mwait[p]; + /// unsigned long roffset = trbm->roffset; + struct ThreadEntryIterator iter; + iter = tmq->read; + /// // Look through the lock wait queue + /// while (roffset != trbm->woffset) { + while (iter.entry != tmq->write.entry) { + /// // Check if the thread is waiting for the released mutex + /// if (trbm->queue[roffset]->mptr == m) { + if (iter.entry->thread->mptr == m) { + /// // Ressurect the thread + /// trbm->queue[roffset]->mptr = 0; + iter.entry->thread->mptr = 0; + /// struct ThreadRotBuffer* trb = &scheduler.thread_queues[trbm->queue[roffset]->priority].ready; + /// trb->queue[trb->woffset++] = trbm->queue[roffset]; + scheduler.ready[iter.entry->thread->priority].write.entry->thread = iter.entry->thread; + /// trb->woffset %= TQUEUE_MAX; + scheduler.ready[iter.entry->thread->priority].write.entry = scheduler.ready[iter.entry->thread->priority].write.entry->next; + /// // Move the read pointer ahead + if (iter.entry == tmq->read.entry) + tmq->read.entry = tmq->read.entry->next; + iter.entry->prev->next = iter.entry->next; + iter.entry->next->prev = iter.entry->prev; + tmq->write.entry->next->prev = iter.entry; + tmq->write.entry->next = iter.entry; + /// struct Thread* rthread = scheduler.rthread; struct Thread* rthread = scheduler.rthread; - // Move the current thread to its old priority if it was promoted earlier + /// // Move the current thread to its old priority if it was promoted earlier + /// if (rthread->old_priority != 0xFF) { if (rthread->old_priority != 0xFF) { - struct ThreadRotBuffer* rtrb = &scheduler.thread_queues[rthread->priority].ready; - struct ThreadRotBuffer* ntrb = &scheduler.thread_queues[rthread->old_priority].ready; - rtrb->roffset++; - rtrb->roffset %= TQUEUE_MAX; - if (ntrb->roffset == 0) - ntrb->roffset = TQUEUE_MAX-1; - else - ntrb->roffset--; - ntrb->queue[ntrb->roffset] = rthread; + /// struct ThreadRotBuffer* rtrb = &scheduler.thread_queues[rthread->priority].ready; + struct ThreadQueue* cur_trq = &scheduler.ready[rthread->priority]; + /// struct ThreadRotBuffer* ntrb = &scheduler.thread_queues[rthread->old_priority].ready; + struct ThreadQueue* old_trq = &scheduler.ready[rthread->old_priority]; + // Prune from the current ready queue + /// rtrb->roffset++; + /// rtrb->roffset %= TQUEUE_MAX; + cur_trq->read.entry->thread = 0; + cur_trq->read.entry = cur_trq->read.entry->next; + // Prepend to original ready queue + /// if (ntrb->roffset == 0) + /// ntrb->roffset = 
TQUEUE_MAX-1; + /// else + /// ntrb->roffset--; + /// ntrb->queue[ntrb->roffset] = rthread; + old_trq->read.entry = old_trq->read.entry->prev; + old_trq->read.entry->thread = rthread; + /// rthread->priority = rthread->old_priority; + /// rthread->old_priority = -1; rthread->priority = rthread->old_priority; rthread->old_priority = -1; + /// } } + /// return; return; + /// } } - roffset++; - roffset %= TQUEUE_MAX; + /// roffset++; + /// roffset %= TQUEUE_MAX; + iter.entry = iter.entry->next; + /// } } } } diff --git a/src/tests/test.c b/src/tests/test.c index 7f18627..2a5a100 100644 --- a/src/tests/test.c +++ b/src/tests/test.c @@ -1,5 +1,5 @@ #include -//#include +#include #include #include #include @@ -47,48 +47,48 @@ void ctest4(void); void ctest1(void) { draw_cletter(x++, y+2, 'S', 0xFF0000); - //uart_string("1 Started\n"); + uart_string("1 Started\n"); draw_cletter(x++, y+2, 'L', 0xFF0000); - //uart_string("1 Locking\n"); + uart_string("1 Locking\n"); lock(&testm); add_thread(ctest3, 0, 2); draw_cletter(x++, y+2, 'U', 0xFF0000); - //uart_string("1 Unlocking\n"); + uart_string("1 Unlocking\n"); unlock(&testm); draw_cletter(x++, y+2, 'F', 0xFF0000); - //uart_string("1 Finished\n"); + uart_string("1 Finished\n"); } void ctest2(void) { draw_cletter(x++, y+0, 'S', 0x0000FF); - //uart_string("2 Started\n"); + uart_string("2 Started\n"); add_thread(ctest4, 0, 3); draw_cletter(x++, y+0, 'L', 0x0000FF); - //uart_string("2 Locking\n"); + uart_string("2 Locking\n"); lock(&testm); draw_cletter(x++, y+0, 'U', 0x0000FF); - //uart_string("2 Unlocking\n"); + uart_string("2 Unlocking\n"); unlock(&testm); draw_cletter(x++, y+0, 'F', 0x0000FF); - //uart_string("2 Finished\n"); + uart_string("2 Finished\n"); } void ctest3(void) { draw_cletter(x++, y+1, 'S', 0x00FF00); - //uart_string("3 Started\n"); + uart_string("3 Started\n"); add_thread(ctest2, 0, 1); draw_cletter(x++, y+1, 'F', 0x00FF00); - //uart_string("3 Finished\n"); + uart_string("3 Finished\n"); } void ctest4(void) { draw_cletter(x++, y+2, 'S', 0xAFAF00); - //uart_string("4 Started\n"); + uart_string("4 Started\n"); draw_cletter(x++, y+2, 'F', 0xAFAF00); - //uart_string("4 Finished\n"); + uart_string("4 Finished\n"); } void btest(void) -- cgit v1.2.1
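
The following is a minimal, standalone sketch of the statically allocated queue scheme this commit introduces in include/sys/schedule.h and wires up in init_scheduler(), add_thread(), c_cleanup(), and yield(): a fixed array of ThreadEntry slots pre-linked into a circular doubly-linked list, with a read pointer marking the oldest occupied slot and a write pointer marking the next free one. It is an illustration, not code from the tree: TQUEUE_MAX is shrunk, struct Thread is reduced to three fields, and main() is a host-side demo.

#include <stdio.h>

#define TQUEUE_MAX 8                     /* kernel uses 0x100; shrunk for the demo */

struct Thread {
        unsigned long pid;
        unsigned char priority;
        unsigned char old_priority;      /* 0xFF (-1 in the patch) = not promoted */
};

struct ThreadEntry {
        struct Thread* thread;           /* 0 means the slot is unoccupied */
        struct ThreadEntry* prev;
        struct ThreadEntry* next;
};

struct ThreadQueue {
        struct ThreadEntry entry[TQUEUE_MAX];
        struct ThreadEntry* read;        /* oldest occupied slot (dequeue side) */
        struct ThreadEntry* write;       /* next free slot (enqueue side) */
};

/* Pre-link every slot into a circular doubly-linked list, once, at init time.
 * (t + TQUEUE_MAX - 1) % TQUEUE_MAX matches the patch's special case for t == 0. */
static void queue_init(struct ThreadQueue* q)
{
        for (unsigned long t = 0; t < TQUEUE_MAX; t++) {
                q->entry[t].thread = 0;
                q->entry[t].prev = &q->entry[(t + TQUEUE_MAX - 1) % TQUEUE_MAX];
                q->entry[t].next = &q->entry[(t + 1) % TQUEUE_MAX];
        }
        q->read = &q->entry[0];
        q->write = &q->entry[0];
}

/* Enqueue: fill the write slot and step the write pointer forward, as at the
 * end of add_thread().  Like the patch, no full-queue check is made yet. */
static void queue_push(struct ThreadQueue* q, struct Thread* t)
{
        q->write->thread = t;
        q->write = q->write->next;
}

/* Dequeue: clear the read slot and step the read pointer forward, as in
 * c_cleanup().  An empty queue is read == write, the test next_thread() uses. */
static struct Thread* queue_pop(struct ThreadQueue* q)
{
        if (q->read == q->write)
                return 0;
        struct Thread* t = q->read->thread;
        q->read->thread = 0;
        q->read = q->read->next;
        return t;
}

int main(void)
{
        struct ThreadQueue q;
        struct Thread a = {1, 2, 0xFF};
        struct Thread b = {2, 2, 0xFF};

        queue_init(&q);
        queue_push(&q, &a);
        queue_push(&q, &b);

        /* yield()-style rotation: the running thread re-enters at the back. */
        struct Thread* running = queue_pop(&q);
        queue_push(&q, running);

        struct Thread* t;
        while ((t = queue_pop(&q)) != 0)
                printf("pid %lu\n", t->pid); /* prints 2, then 1 */
        return 0;
}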
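
A second sketch, building on the types and queue_push() above, models the priority-inheritance path of sched_mutex_yield(): the holder of a contended mutex is appended to the blocked thread's higher-priority ready queue, its priority fields are updated, and its entry in the old ready queue is spliced out of the ring and parked just after that ring's write pointer so every queue keeps all TQUEUE_MAX slots. The helper names queue_unlink_to_spare() and promote_holder() and their parameters are illustrative, not taken from the patch.

/* Remove entry e from its current position in q's ring and re-insert it
 * directly after q->write, leaving it free for later enqueues. */
void queue_unlink_to_spare(struct ThreadQueue* q, struct ThreadEntry* e)
{
        if (e == q->read)                /* entry was the head: advance read */
                q->read = q->read->next;
        e->prev->next = e->next;         /* splice the entry out...          */
        e->next->prev = e->prev;
        e->prev = q->write;              /* ...and park it after the write   */
        e->next = q->write->next;
        q->write->next->prev = e;
        q->write->next = e;
        e->thread = 0;
}

/* Promote the mutex holder, currently queued in `low` at priority `lowp`,
 * into the ready queue `high` of the blocked thread's priority `highp`. */
void promote_holder(struct ThreadQueue* high, unsigned char highp,
                    struct ThreadQueue* low, unsigned char lowp,
                    struct ThreadEntry* holder_entry)
{
        struct Thread* holder = holder_entry->thread;

        queue_push(high, holder);                 /* run at the new priority     */
        if (holder->old_priority == 0xFF)         /* remember the original level */
                holder->old_priority = lowp;
        holder->priority = highp;
        queue_unlink_to_spare(low, holder_entry); /* recycle the stale entry     */
}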
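
A last sketch, again building on the earlier definitions, models the demotion step at the end of sched_mutex_resurrect(): once the woken waiter has been moved back to its ready queue, a running thread that was promoted earlier is dropped from the head of its current (promoted) ready queue and prepended to its original priority's ready queue by stepping that queue's read pointer backwards, so it is the next thread dispatched there. demote_running() and its parameters are illustrative names; like the patch, it assumes the target ring is not full.

void demote_running(struct ThreadQueue* cur, struct ThreadQueue* orig,
                    struct Thread* running)
{
        /* Drop the running thread from the head of the promoted queue. */
        cur->read->thread = 0;
        cur->read = cur->read->next;

        /* Prepend to the original queue: step read back one slot and fill it. */
        orig->read = orig->read->prev;
        orig->read->thread = running;

        /* Restore the bookkeeping; the patch stores -1, which is 0xFF in the
         * unsigned char old_priority field. */
        running->priority = running->old_priority;
        running->old_priority = 0xFF;
}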