Diffstat (limited to 'src/sys')
-rw-r--r-- | src/sys/core.c     |  36
-rw-r--r-- | src/sys/schedule.S | 126
-rw-r--r-- | src/sys/schedule.c | 120
3 files changed, 234 insertions, 48 deletions
diff --git a/src/sys/core.c b/src/sys/core.c
index 204eb55..302e629 100644
--- a/src/sys/core.c
+++ b/src/sys/core.c
@@ -62,21 +62,41 @@ void sysinit(void)
 	add_thread(testlocal, 0);
 	add_thread(testlocal, 5);
 	add_thread(testlocal, 8);
-	delay(0x80000000);
+	delay(0x20000000);
 	schedule();
 }
 
 struct Mutex testm = {.addr = (void*)0xDEADBEEF, .pid = NULL_PID};
+void testlocal1(void)
+{
+	unsigned long a = 5;
+	struct Thread* t = scheduler.rthread_ll->data;
+	uart_string("vRan Thread ");
+	uart_10(t->data.pid);
+	uart_string(" Pri. ");
+	uart_10(t->data.priority);
+	uart_string(" ...\n");
+	add_thread(testlocal, 0);
+	schedule();
+	a += t->data.pid;
+	uart_10(a);
+	uart_string(" Done!\n");
+}
 void testlocal(void)
 {
-	unsigned char testd = 0xDE;
 	struct Thread* t = scheduler.rthread_ll->data;
-	delay(0x04000000);
-	testd -= 50;
 	uart_string("Ran Thread ");
-	delay(0x04000000);
 	uart_10(t->data.pid);
-	uart_char(' ');
-	uart_10(testd);
-	uart_char('\n');
+	uart_string(" Pri. ");
+	uart_10(t->data.priority);
+	uart_string(" ...\n");
+	//delay(0x80000000);
+	if (t->data.pid == 6) {
+		add_thread(testlocal, 0);
+	} else if (t->data.pid == 5) {
+		add_thread(testlocal1, 1);
+		schedule();
+		sched_info();
+	}
+	uart_string("Done!\n");
 }
diff --git a/src/sys/schedule.S b/src/sys/schedule.S
new file mode 100644
index 0000000..18130d0
--- /dev/null
+++ b/src/sys/schedule.S
@@ -0,0 +1,126 @@
+.section .text
+.globl schedule
+// Implemented the scheduler in Assembly since the C defined was messing around with the program stacks
+// This way, I can be confident that the stacks will be unchanged
+//
+// TODO: Mark threads as READY and RUNNING
+//
+schedule:
+	ldr r3, =scheduler
+	// r3 = struct Scheduler*
+	// Preserve context
+	//add r0, r3, #4 // struct cpu_context* ctx
+	ldr r0, [r3, #4]
+	// r0 = struct cpu_context*
+	str r4, [r0, #0x00]
+	str r5, [r0, #0x04]
+	str r6, [r0, #0x08]
+	str r7, [r0, #0x0c]
+	str r8, [r0, #0x10]
+	str r9, [r0, #0x14]
+	str r10, [r0, #0x18]
+	str r11, [r0, #0x1c]
+	str r12, [r0, #0x20]
+	str lr, [r0, #0x24]
+	// Get the next available thread
+	push {r3, lr}
+	bl get_next_thread
+	// r0 = struct LL* next_thread_ll
+	pop {r3, lr}
+	ldr r1, [r3, #0]
+	// r1 = struct LL* current_thread_ll
+	// Check if there is a valid currently running thread
+	cmp r1, #0
+	beq schedule.current_thread_nexists
+schedule.current_thread_exists:
+	cmp r0, r1
+	beq schedule.run_current
+	// Next is not the same as the current
+	// Preserve stack of current
+	ldr r2, [r1, #0x8] // struct Thread* current
+	str sp, [r2, #0x4] // void* stack // Preserve stack
+	// Preserve program counter of current
+	str lr, [r2, #0x0] // void* thread // Preserve pc
+	ldr r2, [r0, #0x8] // struct Thread* next
+	// Set new stack pointer
+	ldr sp, [r2, #0x4]
+	add r2, r2, #0x18
+	// Set new running thread
+	str r0, [r3, #0x0] // struct LL* next_thread_ll // Set new running thread
+	// Set new context
+	str r2, [r3, #0x4] // struct cpu_context* ctx // Set new context
+	b schedule.run_current
+schedule.current_thread_nexists:
+	// r0 = struct LL* next_thread_ll
+	// r1 = 0 = struct LL* current_thread_ll
+	cmp r0, #0
+	beq schedule.no_next_thread
+	ldr r1, [r0, #0x8]
+	// r1 = struct Thread* next_thread
+	// Store system stack pointer
+	ldr r2, =syssp
+	push {r1}
+	ldr r1, [r2]
+	cmp r1, #0
+	pop {r1}
+	bne schedule.dont_overwrite_sys_stack
+	// Store if zero system stack
+	str sp, [r2]
+schedule.dont_overwrite_sys_stack:
+	// Move stack to next thread's stack pointer
+	ldr sp, [r1, #0x4] // void* stack
+	// Store the running thread ll entry
+	str r0, [r3, #0x0] // struct LL* rthread_ll
+	// Set context
+	add r1, r1, #0x18 // struct cpu_context*
+	str r1, [r3, #0x4] // store to scheduler.ctx
+schedule.run_current:
+	// Restore context
+	ldr r2, [r3, #0x4] // struct cpu_context* ctx // Set new context
+	ldr r4, [r2, #0x00]
+	ldr r5, [r2, #0x04]
+	ldr r6, [r2, #0x08]
+	ldr r7, [r2, #0x0c]
+	ldr r8, [r2, #0x10]
+	ldr r9, [r2, #0x14]
+	ldr r10, [r2, #0x18]
+	ldr r11, [r2, #0x1c]
+	ldr r12, [r2, #0x20]
+	ldr lr, [r2, #0x24]
+	// Run
+	ldr r1, [r3, #0]
+	// r1 = struct LL* rthread_ll
+	ldr r1, [r1, #0x8]
+	// r1 = struct Thread* rthread
+	ldr r0, [r1, #0x0]
+	// r0 = void* thread
+	bx r0
+schedule.no_next_thread:
+	// r0 = 0 = struct LL* next_thread_ll
+	// r1 = 0 = struct LL* current_thread_ll
+	// No thread to run
+	// Restore sys context
+	ldr r0, =syscpu
+	str r0, [r3, #0x4] // Store context
+	ldr r0, =syssp
+	ldr r1, [r0]
+	cmp r1, #0
+	beq schedule.exit
+	mov sp, r1 // Restore stack pointer
+	mov r1, #0
+	str r1, [r0] // Clear stack pointer
+schedule.exit:
+	// Restore context
+	ldr r2, [r3, #0x4] // struct cpu_context* ctx // Set new context
+	// Restore register context
+	ldr r4, [r2, #0x00]
+	ldr r5, [r2, #0x04]
+	ldr r6, [r2, #0x08]
+	ldr r7, [r2, #0x0c]
+	ldr r8, [r2, #0x10]
+	ldr r9, [r2, #0x14]
+	ldr r10, [r2, #0x18]
+	ldr r11, [r2, #0x1c]
+	ldr r12, [r2, #0x20]
+	ldr lr, [r2, #0x24]
+	bx lr
diff --git a/src/sys/schedule.c b/src/sys/schedule.c
index 1bb02c2..4b071c3 100644
--- a/src/sys/schedule.c
+++ b/src/sys/schedule.c
@@ -1,3 +1,4 @@
+#include <cpu/irq.h>
 #include <drivers/uart.h>
 #include <sys/core.h>
 #include <sys/schedule.h>
@@ -15,6 +16,12 @@ struct Scheduler scheduler = {
 	},
 	.rthread_ll = 0,
 };
+unsigned long syssp = 0;
+struct cpu_context syscpu = {
+	.r4 = 0, .r5 = 0, .r6 = 0, .r7 = 0,
+	.r8 = 0, .r9 = 0, .r10 = 0, .r11 = 0,
+	.r12 = 0, .lr = 0,
+};
 
 void init_scheduler(void)
 {
@@ -24,6 +31,7 @@ void init_scheduler(void)
 		scheduler.tlist[i].data = 0;
 	}
 	scheduler.rthread_ll = 0;
+	scheduler.ctx = &syscpu;
 }
 
 unsigned char stacks_table[MAX_THREADS] = {0, };
@@ -42,12 +50,12 @@ void* get_stack(void)
 static unsigned long nextpid = 3;
 void add_thread(void (*thread_fxn)(void), unsigned char priority)
 {
-	struct Thread* thread = (struct Thread*)malloc(sizeof(struct Thread));
+	struct Thread* thread = (struct Thread*)malloca(sizeof(struct Thread), 4);
 	// Set the program counter to the entry
 	thread->thread = thread_fxn;
 	// Get a stack frame
-	thread->stack = get_stack();
-	thread->stack_base = thread->stack;
+	thread->stack_base = get_stack();
+	thread->stack = thread->stack_base;
 	// Put in error state for no stack
 	if(thread->stack == 0)
 		thread->data.status = THREAD_STACK_ERROR;
@@ -57,6 +65,8 @@ void add_thread(void (*thread_fxn)(void), unsigned char priority)
 	thread->data.mutex_waiting = 0;
 	// Set PID
 	thread->data.pid = nextpid++;
+	thread->data.preempt_count = 0;
+	thread->data.cpu_context.lr = (unsigned long)cleanup;
 	unsigned char p = priority;
 	if (p >= PRIORITIES) {
 		p = PRIORITIES - 1;
@@ -81,70 +91,100 @@ struct LL* get_next_thread(void)
 	return 0;
 }
 
-unsigned long syssp = 0;
-void schedule(void)
+void schedule_c(void)
 {
-	// Preserve current process's registers
-	// in the current stack
-	asm volatile ("push {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}");
+	// Preserve registers in current context
+	preserve_ctx(scheduler.ctx);
+	// Get current thread
 	struct LL* current_thread_ll = scheduler.rthread_ll;
 	// Get next thread
 	struct LL* next_thread_ll = get_next_thread();
 	// If there is a current thread
-	if (current_thread_ll) {
+	if (current_thread_ll != 0) {
 		// If we are switching the thread
 		if (current_thread_ll != next_thread_ll) {
 			// Context switch
 			struct Thread* current_thread = current_thread_ll->data;
 			struct Thread* next_thread = next_thread_ll->data;
-			preservestack(current_thread);
-			preservepc(current_thread);
-			restorestack(next_thread);
-			//restoreregs(next_thread);
+			preserve_stack(current_thread);
+			//preserve_pc(current_thread);
+			current_thread->thread = (void*)current_thread->data.cpu_context.lr;
+			restore_stack(next_thread);
 			scheduler.rthread_ll = next_thread_ll;
+			scheduler.ctx = &next_thread->data.cpu_context;
 		}
 	}
-	else if (next_thread_ll) {
+	else if (next_thread_ll != 0) {
 		struct Thread* next_thread = next_thread_ll->data;
-		preservesysstack(&syssp);
-		//preservesysregs(&regloc)
-		restorestack(next_thread);
-		//restoreregs(next_thread);
+		preserve_sys_stack(&syssp);
+		restore_stack(next_thread);
 		scheduler.rthread_ll = next_thread_ll;
+		scheduler.ctx = &next_thread->data.cpu_context;
 	}
 	if (scheduler.rthread_ll) {
 		struct Thread* rthread = scheduler.rthread_ll->data;
-		// Restore process's registers
-		asm volatile ("pop {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}");
-		// Run the thread - i.e. jump to the pc
-		asm volatile ("blx lr");
-		//rthread->thread();
-		// Remove the currently running thread after completion
-		remove_running_thread();
-		// Schedule the next thread
-		schedule();
+		restore_ctx(scheduler.ctx);
+		asm volatile ("bx %0" :: "r"(rthread->thread));
 	}
 	else {
-		//restoresysregs(&regloc);
-		restoresysstack(&syssp);
+		scheduler.ctx = &syscpu;
+		restore_sys_stack(&syssp);
+		restore_ctx(scheduler.ctx);
 	}
 }
 
-void remove_running_thread(void)
+void cleanup(void)
 {
 	if (scheduler.rthread_ll != 0) {
+		// Mark the thread as finished
+		struct Thread* t = scheduler.rthread_ll->data;
+		uart_string("Cleaning up thread ");
+		uart_10(t->data.pid);
+		uart_char('\n');
+		t->data.status = THREAD_FINISHED;
+		// Mark the stack space as free
+		unsigned long sidx = (unsigned long)(heap_end() - t->stack_base)/STACK_SIZE;
+		stacks_table[sidx] = 0;
+		// Remove the thread
 		struct LL* ll = scheduler.rthread_ll;
-		if ((ll->next == ll->prev) && (ll->next == ll)) {
-			ll->data = 0;
-		}
-		else {
-			struct LL* prev = ll->prev;
-			struct LL* next = ll->next;
-			prev->next = ll->next;
-			next->prev = ll->prev;
-			free(ll);
-		}
+		struct LL* prev = ll->prev;
+		struct LL* next = ll->next;
+		prev->next = ll->next;
+		next->prev = ll->prev;
+		free(ll);
 		scheduler.rthread_ll = 0;
 	}
+	// Schedule next thread
+	//uart_string("Scheduling from cleanup!\n");
+	//sched_info();
+	//schedule();
+	schedule();
+}
+
+void sched_info(void)
+{
+	disableirq();
+	uart_string("Scheduler Information\n");
+	for(unsigned long i = 0; i < PRIORITIES; i++) {
+		struct LL* ll = scheduler.tlist[i].next;
+		uart_string("Queue ");
+		uart_10(i);
+		while (ll != &scheduler.tlist[i]) {
+			uart_string("\nThread ");
+			struct Thread* t = ll->data;
+			uart_hex((unsigned long)t->thread);uart_char(' ');
+			uart_hex((unsigned long)t->stack);uart_char(' ');
+			uart_hex((unsigned long)t->stack_base);uart_char(' ');
+			uart_10(t->data.priority);uart_char(' ');
+			uart_10(t->data.preempt_count);uart_char(' ');
+			uart_10(t->data.status);uart_char(' ');
+			uart_hex((unsigned long)t->data.mutex_waiting);uart_char(' ');
+			uart_10(t->data.pid);uart_char('\n');
+			memshow32((unsigned long*)&t->data.cpu_context, 10);
+			ll = ll->next;
+		}
+		uart_char('\n');
+	}
+	enableirq();
 }
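Note on the layout contract between schedule.S and the C side: the assembly hard-codes field offsets (rthread_ll at +0x00 and ctx at +0x04 in struct Scheduler, data at +0x08 in struct LL, the saved pc at +0x00, the saved sp at +0x04 and the register context at +0x18 in struct Thread, r4 through lr at +0x00 through +0x24 in struct cpu_context). The real definitions live in sys/schedule.h, which is not part of this diff; the sketch below is only a hypothetical layout that satisfies those offsets on a 32-bit ARM target, with the field order, the padding byte, and the PRIORITIES value assumed rather than taken from the source.

/* Hypothetical layout only -- not taken from sys/schedule.h. */
#include <stddef.h>

#define PRIORITIES 4                        /* assumed placeholder value */

struct cpu_context {
	unsigned long r4, r5, r6, r7, r8, r9, r10, r11;
	unsigned long r12, lr;              /* lr lands at +0x24 */
};

struct LL {
	struct LL* prev;
	struct LL* next;
	void* data;                         /* read by "ldr r2, [r1, #0x8]" */
};

struct ThreadData {
	unsigned char priority;
	unsigned char preempt_count;
	unsigned char status;
	unsigned char pad;                  /* assumed explicit padding */
	unsigned long pid;
	void* mutex_waiting;
	struct cpu_context cpu_context;     /* +0x0c inside ThreadData */
};

struct Thread {
	void (*thread)(void);               /* +0x00: saved pc, "str lr, [r2, #0x0]" */
	void* stack;                        /* +0x04: saved sp, "str sp, [r2, #0x4]" */
	void* stack_base;
	struct ThreadData data;             /* puts cpu_context at +0x18 overall */
};

struct Scheduler {
	struct LL* rthread_ll;              /* +0x00: "ldr r1, [r3, #0]" */
	struct cpu_context* ctx;            /* +0x04: "ldr r0, [r3, #4]" */
	struct LL tlist[PRIORITIES];        /* one run queue per priority */
};

/* Compile-time checks (32-bit ARM assumed) that the C layout matches the
 * offsets used by schedule.S. */
_Static_assert(offsetof(struct Scheduler, rthread_ll) == 0x00, "rthread_ll offset");
_Static_assert(offsetof(struct Scheduler, ctx) == 0x04, "ctx offset");
_Static_assert(offsetof(struct LL, data) == 0x08, "LL data offset");
_Static_assert(offsetof(struct Thread, thread) == 0x00, "saved pc offset");
_Static_assert(offsetof(struct Thread, stack) == 0x04, "saved sp offset");
_Static_assert(offsetof(struct Thread, data)
               + offsetof(struct ThreadData, cpu_context) == 0x18, "cpu_context offset");
_Static_assert(offsetof(struct cpu_context, lr) == 0x24, "saved lr offset");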
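Similarly, preserve_ctx() and restore_ctx() are only referenced by schedule_c() here; their definitions are not shown in this diff. Assuming they are inline-assembly macros that mirror the register block saved and restored in schedule.S, a minimal sketch could be:

/* Sketch only: assumed shape of the context save/restore helpers used by
 * schedule_c(); the real definitions are not part of this diff. */
#define preserve_ctx(ctx) \
	asm volatile("stmia %0, {r4-r12, lr}" : : "r"(ctx) : "memory")
#define restore_ctx(ctx) \
	asm volatile("ldmia %0, {r4-r12, lr}" : : "r"(ctx) : "memory")

stmia/ldmia walk the ten slots of struct cpu_context in order (r4 at +0x00 through lr at +0x24), but loading r4-r12 and lr behind the compiler's back is exactly the interference the comment at the top of schedule.S warns about, which is why this commit implements schedule itself in assembly and renames the C version to schedule_c().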