From aa8d179b2df2ce031a84172a70be88697dfda7c4 Mon Sep 17 00:00:00 2001
From: Christian Cunningham
Date: Thu, 6 Jan 2022 23:20:04 -0800
Subject: Rewrote scheduler in Assembly

---
 src/sys/schedule.c | 120 +++++++++++++++++++++++++++++++++++------------------
 1 file changed, 80 insertions(+), 40 deletions(-)

(limited to 'src/sys/schedule.c')

diff --git a/src/sys/schedule.c b/src/sys/schedule.c
index 1bb02c2..4b071c3 100644
--- a/src/sys/schedule.c
+++ b/src/sys/schedule.c
@@ -1,3 +1,4 @@
+#include
 #include
 #include
 #include
@@ -15,6 +16,12 @@ struct Scheduler scheduler = {
 	},
 	.rthread_ll = 0,
 };
+unsigned long syssp = 0;
+struct cpu_context syscpu = {
+	.r4 = 0, .r5 = 0, .r6 = 0, .r7 = 0,
+	.r8 = 0, .r9 = 0, .r10 = 0, .r11 = 0,
+	.r12 = 0, .lr = 0,
+};
 
 void init_scheduler(void)
 {
@@ -24,6 +31,7 @@ void init_scheduler(void)
 		scheduler.tlist[i].data = 0;
 	}
 	scheduler.rthread_ll = 0;
+	scheduler.ctx = &syscpu;
 }
 
 unsigned char stacks_table[MAX_THREADS] = {0, };
@@ -42,12 +50,12 @@ void* get_stack(void)
 static unsigned long nextpid = 3;
 void add_thread(void (*thread_fxn)(void), unsigned char priority)
 {
-	struct Thread* thread = (struct Thread*)malloc(sizeof(struct Thread));
+	struct Thread* thread = (struct Thread*)malloca(sizeof(struct Thread), 4);
 	// Set the program counter to the entry
 	thread->thread = thread_fxn;
 	// Get a stack frame
-	thread->stack = get_stack();
-	thread->stack_base = thread->stack;
+	thread->stack_base = get_stack();
+	thread->stack = thread->stack_base;
 	// Put in error state for no stack
 	if(thread->stack == 0)
 		thread->data.status = THREAD_STACK_ERROR;
@@ -57,6 +65,8 @@ void add_thread(void (*thread_fxn)(void), unsigned char priority)
 	thread->data.mutex_waiting = 0;
 	// Set PID
 	thread->data.pid = nextpid++;
+	thread->data.preempt_count = 0;
+	thread->data.cpu_context.lr = (unsigned long)cleanup;
 	unsigned char p = priority;
 	if (p >= PRIORITIES) {
 		p = PRIORITIES - 1;
@@ -81,70 +91,100 @@ struct LL* get_next_thread(void)
 	return 0;
 }
 
-unsigned long syssp = 0;
-void schedule(void)
+void schedule_c(void)
 {
-	// Preserve current process's registers
-	// in the current stack
-	asm volatile ("push {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}");
+	// Preserve registers in current context
+	preserve_ctx(scheduler.ctx);
+
 	// Get current thread
 	struct LL* current_thread_ll = scheduler.rthread_ll;
 	// Get next thread
 	struct LL* next_thread_ll = get_next_thread();
 	// If there is a current thread
-	if (current_thread_ll) {
+	if (current_thread_ll != 0) {
 		// If we are switching the thread
 		if (current_thread_ll != next_thread_ll) {
 			// Context switch
 			struct Thread* current_thread = current_thread_ll->data;
 			struct Thread* next_thread = next_thread_ll->data;
-			preservestack(current_thread);
-			preservepc(current_thread);
-			restorestack(next_thread);
-			//restoreregs(next_thread);
+			preserve_stack(current_thread);
+			//preserve_pc(current_thread);
+			current_thread->thread = (void*)current_thread->data.cpu_context.lr;
+			restore_stack(next_thread);
 			scheduler.rthread_ll = next_thread_ll;
+			scheduler.ctx = &next_thread->data.cpu_context;
 		}
 	}
-	else if (next_thread_ll) {
+	else if (next_thread_ll != 0) {
 		struct Thread* next_thread = next_thread_ll->data;
-		preservesysstack(&syssp);
-		//preservesysregs(&regloc)
-		restorestack(next_thread);
-		//restoreregs(next_thread);
+		preserve_sys_stack(&syssp);
+		restore_stack(next_thread);
 		scheduler.rthread_ll = next_thread_ll;
+		scheduler.ctx = &next_thread->data.cpu_context;
 	}
 	if (scheduler.rthread_ll) {
 		struct Thread* rthread = scheduler.rthread_ll->data;
-		// Restore process's registers
-		asm volatile ("pop {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}");
-		// Run the thread - i.e. jump to the pc
-		asm volatile ("blx lr");
-		//rthread->thread();
-		// Remove the currently running thread after completion
-		remove_running_thread();
-		// Schedule the next thread
-		schedule();
+		restore_ctx(scheduler.ctx);
+		asm volatile ("bx %0" :: "r"(rthread->thread));
 	}
 	else {
-		//restoresysregs(&regloc);
-		restoresysstack(&syssp);
+		scheduler.ctx = &syscpu;
+		restore_sys_stack(&syssp);
+		restore_ctx(scheduler.ctx);
 	}
 }
 
-void remove_running_thread(void)
+void cleanup(void)
 {
 	if (scheduler.rthread_ll != 0) {
+		// Mark the thread as finished
+		struct Thread* t = scheduler.rthread_ll->data;
+		uart_string("Cleaning up thread ");
+		uart_10(t->data.pid);
+		uart_char('\n');
+		t->data.status = THREAD_FINISHED;
+		// Mark the stack space as free
+		unsigned long sidx = (unsigned long)(heap_end() - t->stack_base)/STACK_SIZE;
+		stacks_table[sidx] = 0;
+		// Remove the thread
 		struct LL* ll = scheduler.rthread_ll;
-		if ((ll->next == ll->prev) && (ll->next == ll)) {
-			ll->data = 0;
-		}
-		else {
-			struct LL* prev = ll->prev;
-			struct LL* next = ll->next;
-			prev->next = ll->next;
-			next->prev = ll->prev;
-			free(ll);
-		}
+		struct LL* prev = ll->prev;
+		struct LL* next = ll->next;
+		prev->next = ll->next;
+		next->prev = ll->prev;
+		free(ll);
 		scheduler.rthread_ll = 0;
 	}
+	// Schedule next thread
+	//uart_string("Scheduling from cleanup!\n");
+	//sched_info();
+	//schedule();
+	schedule();
+}
+
+void sched_info(void)
+{
+	disableirq();
+	uart_string("Scheduler Information\n");
+	for(unsigned long i = 0; i < PRIORITIES; i++) {
+		struct LL* ll = scheduler.tlist[i].next;
+		uart_string("Queue ");
+		uart_10(i);
+		while (ll != &scheduler.tlist[i]) {
+			uart_string("\nThread ");
+			struct Thread* t = ll->data;
+			uart_hex((unsigned long)t->thread);uart_char(' ');
+			uart_hex((unsigned long)t->stack);uart_char(' ');
+			uart_hex((unsigned long)t->stack_base);uart_char(' ');
+			uart_10(t->data.priority);uart_char(' ');
+			uart_10(t->data.preempt_count);uart_char(' ');
+			uart_10(t->data.status);uart_char(' ');
+			uart_hex((unsigned long)t->data.mutex_waiting);uart_char(' ');
+			uart_10(t->data.pid);uart_char('\n');
+			memshow32((unsigned long*)&t->data.cpu_context, 10);
+			ll = ll->next;
+		}
+		uart_char('\n');
+	}
+	enableirq();
 }
-- 
cgit v1.2.1
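
Note: the primitives this patch starts using (preserve_ctx, restore_ctx, preserve_stack, restore_stack, preserve_sys_stack, restore_sys_stack) and the cpu_context type are defined outside src/sys/schedule.c, so they do not appear in this diff. The sketch below shows one way such helpers could be written as AArch32 inline-assembly macros, assuming the register set initialized in syscpu (r4-r12, lr) and a struct Thread whose stack field holds the saved stack pointer; it is an illustration of the idea, not the commit's actual implementation.

/* Assumed layout: matches the fields initialized in syscpu above. */
struct cpu_context {
	unsigned long r4, r5, r6, r7, r8, r9, r10, r11;
	unsigned long r12, lr;
};

/* Save/restore r4-r12 and lr to/from a struct cpu_context.
 * stmia/ldmia transfer the register list starting at the address in %0. */
#define preserve_ctx(ctx) \
	asm volatile ("stmia %0, {r4-r12, lr}" :: "r"(ctx) : "memory")
#define restore_ctx(ctx) \
	asm volatile ("ldmia %0, {r4-r12, lr}" :: "r"(ctx))

/* Swap the stack pointer when entering/leaving a thread. */
#define preserve_stack(thread) \
	asm volatile ("mov %0, sp" : "=r"((thread)->stack))
#define restore_stack(thread) \
	asm volatile ("mov sp, %0" :: "r"((thread)->stack))

/* Same idea for the scheduler's own (system) stack pointer. */
#define preserve_sys_stack(spp) \
	asm volatile ("mov %0, sp" : "=r"(*(spp)))
#define restore_sys_stack(spp) \
	asm volatile ("mov sp, %0" :: "r"(*(spp)))

With primitives along these lines, schedule_c() saves the outgoing context, switches stacks, and branches to the next thread with "bx"; a thread that runs to completion lands in cleanup() because add_thread() preloads cpu_context.lr with the address of cleanup. The schedule() called from cleanup() is presumably the new assembly entry point referred to in the commit subject, defined elsewhere, which eventually reaches schedule_c().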