Diffstat (limited to 'src/sys')
-rw-r--r--  src/sys/core.c       70
-rw-r--r--  src/sys/schedule.S  155
-rw-r--r--  src/sys/schedule.c  164
3 files changed, 54 insertions, 335 deletions
diff --git a/src/sys/core.c b/src/sys/core.c
index cb9b765..1329c12 100644
--- a/src/sys/core.c
+++ b/src/sys/core.c
@@ -71,43 +71,43 @@ void sysinit(void)
void testlocal1(void)
{
- unsigned long a = 5;
- struct Thread* t = scheduler.rthread_ll->data;
- uart_string("vRan Thread ");
- uart_10(t->data.pid);
- uart_string(" Pri. ");
- uart_10(t->data.priority);
- uart_string(" ...\n");
- add_thread(testlocal, 0);
- schedule();
- a += t->data.pid;
- uart_10(a);
- uart_string(" Done!\n");
+ //unsigned long a = 5;
+ //struct Thread* t = scheduler.rthread_ll->data;
+ //uart_string("vRan Thread ");
+ //uart_10(t->data.pid);
+ //uart_string(" Pri. ");
+ //uart_10(t->data.priority);
+ //uart_string(" ...\n");
+ //add_thread(testlocal, 0);
+ //schedule();
+ //a += t->data.pid;
+ //uart_10(a);
+ //uart_string(" Done!\n");
}
void testlocal(void)
{
- struct Thread* t = scheduler.rthread_ll->data;
- uart_string("Ran Thread ");
- uart_10(t->data.pid);
- uart_string(" Pri. ");
- uart_10(t->data.priority);
- uart_string(" ...\n");
- //delay(0x80000000);
- if (t->data.pid == 5) {
- add_thread(testlocal1, 1);
- schedule();
- }
- if (t->data.pid == 3) {
- // Example
- /*
- while (uart_tx_full) {
- t->data.status = THREAD_WAITING;
- schedule();
- } // Will wait until uart_tx is not full
- */
- }
- uart_string("Done! ");
- uart_10(t->data.pid);
- uart_char('\n');
+ //struct Thread* t = scheduler.rthread_ll->data;
+ //uart_string("Ran Thread ");
+ //uart_10(t->data.pid);
+ //uart_string(" Pri. ");
+ //uart_10(t->data.priority);
+ //uart_string(" ...\n");
+ ////delay(0x80000000);
+ //if (t->data.pid == 5) {
+ // add_thread(testlocal1, 1);
+ // schedule();
+ //}
+ //if (t->data.pid == 3) {
+ // // Example
+ // /*
+ // while (uart_tx_full) {
+ // t->data.status = THREAD_WAITING;
+ // schedule();
+ // } // Will wait until uart_tx is not full
+ // */
+ //}
+ //uart_string("Done! ");
+ //uart_10(t->data.pid);
+ //uart_char('\n');
}
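
The block commented out in testlocal preserved an example of cooperatively waiting on a full UART transmitter. A minimal sketch of that pattern, reconstructed from the comment above and assuming the scheduler API this commit is reworking (scheduler, schedule(), THREAD_WAITING); uart_tx_full is a hypothetical driver flag, not something defined in this diff:

#include <sys/schedule.h>  /* scheduler, schedule(), THREAD_WAITING (assumed) */

/* Sketch only: yield until the UART TX FIFO has room again. */
static void yield_while_tx_full(void)
{
	struct Thread* t = scheduler.rthread_ll->data;
	while (uart_tx_full) {
		t->data.status = THREAD_WAITING; /* hint: skip this thread for now */
		schedule();                      /* let another thread run         */
	}
}
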
diff --git a/src/sys/schedule.S b/src/sys/schedule.S
index a46654c..298646a 100644
--- a/src/sys/schedule.S
+++ b/src/sys/schedule.S
@@ -1,143 +1,26 @@
.section ".text"
.globl schedule
-// TODO: Implement Scheduler for IRQ
-
-// Save Context
-// reg = struct cpu_context*
-.macro save_context reg
- str r4, [\reg, #0x00]
- str r5, [\reg, #0x04]
- str r6, [\reg, #0x08]
- str r7, [\reg, #0x0c]
- str r8, [\reg, #0x10]
- str r9, [\reg, #0x14]
- str r10, [\reg, #0x18]
- str r11, [\reg, #0x1c]
- str r12, [\reg, #0x20]
- str lr, [\reg, #0x24]
+// Implicit: SVC Mode
+.macro preserve_ctx
+ cps #0x1f // Sys mode
+ push {r0-r12, lr} // Save usr regs
+ cps #0x10 // Svc mode
+ ldr r3, =scheduler // struct Scheduler
+ ldr r3, [r3, #4] // struct Thread* rthread
+ str lr, [r3, #0] // svc_lr -> void* pc
.endm
-// Restore Context
-// reg = struct cpu_context*
-.macro restore_context reg
- ldr r4, [\reg, #0x00]
- ldr r5, [\reg, #0x04]
- ldr r6, [\reg, #0x08]
- ldr r7, [\reg, #0x0c]
- ldr r8, [\reg, #0x10]
- ldr r9, [\reg, #0x14]
- ldr r10, [\reg, #0x18]
- ldr r11, [\reg, #0x1c]
- ldr r12, [\reg, #0x20]
- ldr lr , [\reg, #0x24]
+
+.macro restore_ctx
+ ldr r3, =scheduler // struct Scheduler
+ ldr r3, [r3, #4] // struct Thread* rthread
+ ldr lr, [r3, #0] // void* pc -> lr_svc
+ cps #0x1f // Sys mode
+ pop {r0-r12,lr} // Restore usr regs
+ cps #0x10 // Svc mode
.endm
-// Implemented the scheduler in Assembly since the C defined was messing around with the program stacks
-// This way, I can be confident that the stacks will be unchanged
schedule:
- ldr r3, =scheduler
- // Preserve context
- ldr r0, [r3, #4]
- // r0 = struct cpu_context*
- save_context r0
- // Get the next available thread
- push {r1-r3, lr}
- bl get_next_thread
- pop {r1-r3, lr}
- ldr r1, [r3, #0]
- // r3 = struct Scheduler*
- // r0 = struct LL* next_thread_ll
- // r1 = struct LL* current_thread_ll
- // Check if there is a valid currently running thread
- cmp r1, #0
- beq schedule.current_thread_nexists
-schedule.current_thread_exists:
- cmp r0, r1
- beq schedule.run_current
- cmp r0, #0
- moveq r0, r1 // Make the current running thread the next running thread if no next running thread
- // Next is not the same as the current
- // Preserve stack of current
- ldr r2, [r1, #0x8] // struct Thread* current
- ldrh r1, [r2, #0x0e]
- cmp r1, #2 // THREAD_WAITING
- beq schedule.temp_status
- cmp r1, #1 // THREAD_RUNNING
- bne schedule.dont_modify_status
-schedule.temp_status:
- mov r1, #0 // THREAD_READY
- strh r1, [r2, #0x0e]
-schedule.dont_modify_status:
- str sp, [r2, #0x4] // void* stack // Preserve stack
- // Preserve program counter of current
- str lr, [r2, #0x0] // void* thread // Preserve pc
- ldr r2, [r0, #0x8] // struct Thread* next
- // Set new stack pointer
- ldr sp, [r2, #0x4]
- // Set the thread as running
- mov r1, #1 // THREAD_RUNNING
- strh r1, [r2, #0x0e] // unsigned short status
- add r2, r2, #0x18
- // Set new running thread
- str r0, [r3, #0x0] // struct LL* next_thread_ll // Set new running thread
- // Set new context
- str r2, [r3, #0x4] // struct cpu_context* ctx // Set new context
- b schedule.run_current
-schedule.current_thread_nexists:
- // r0 = struct LL* next_thread_ll
- // r1 = 0 = struct LL* current_thread_ll
- cmp r0, #0
- beq schedule.no_next_thread
- ldr r1, [r0, #0x8]
- // r1 = struct Thread* next_thread
- // Store system stack pointer
- ldr r2, =svcsp
- push {r1}
- ldr r1, [r2]
- cmp r1, #0
- pop {r1}
- bne schedule.dont_overwrite_sys_stack
- // Store if zero system stack
- str sp, [r2]
-schedule.dont_overwrite_sys_stack:
- // Move stack to next thread's stack pointer
- ldr sp, [r1, #0x4] // void* stack
- // Store the running thread ll entry
- str r0, [r3, #0x0] // struct LL* rthread_ll
- ldr r2, [r0, #0x8] // struct Thread* thread
- mov r0, #1 // THREAD_RUNNING
- strh r0, [r2, #0x0e]
- // Set context
- add r1, r1, #0x18 // struct cpu_context*
- str r1, [r3, #0x4] // store to scheduler.ctx
-schedule.run_current:
- // Restore context
- ldr r2, [r3, #0x4] // struct cpu_context* ctx // Set new context
- restore_context r2
- // Run
- ldr r1, [r3, #0]
- // r1 = struct LL* rthread_ll
- ldr r1, [r1, #0x8]
- // r1 = struct Thread* rthread
- ldr r0, [r1, #0x0]
- // r0 = void* thread
- bx r0
-schedule.no_next_thread:
- // r0 = 0 = struct LL* next_thread_ll
- // r1 = 0 = struct LL* current_thread_ll
- // No thread to run
- // Restore sys context
- ldr r0, =svccpu
- str r0, [r3, #0x4] // Store context
- ldr r0, =svcsp
- ldr r1, [r0]
- cmp r1, #0
- beq schedule.exit
- mov sp, r1 // Restore stack pointer
- mov r1, #0
- str r1, [r0] // Clear stack pointer
-schedule.exit:
- // Restore register context
- ldr r2, [r3, #0x4] // struct cpu_context* ctx // Set new context
- restore_context r2
- bx lr
+ //preserve_ctx
+ //restore_ctx
+ //bx lr
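
The new preserve_ctx/restore_ctx macros bank into System mode (CPS #0x1F) to push and pop the interrupted thread's user-mode registers on that thread's own stack (System mode shares the User-mode sp), then record the SVC-mode lr into the running thread. A minimal sketch of the structure layout those offsets imply; field names are illustrative and taken from the macro comments, not from the repo's headers. For reference, the ARM CPS mode encodings are 0x10 (User), 0x13 (Supervisor), and 0x1F (System).

/* Sketch of the layout the macros above appear to assume. */
struct Thread {
	void* pc;                /* +0x0: lr_svc is saved here on entry to schedule */
	/* ... stack pointer, status, priority, saved registers ...                 */
};

struct Scheduler {
	void*          field0;   /* +0x0: not referenced by these macros            */
	struct Thread* rthread;  /* +0x4: currently running thread                  */
	/* ...                                                                       */
};
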
diff --git a/src/sys/schedule.c b/src/sys/schedule.c
index c300ae0..96dc678 100644
--- a/src/sys/schedule.c
+++ b/src/sys/schedule.c
@@ -1,167 +1,3 @@
-#include <cpu/irq.h>
-#include <drivers/uart.h>
-#include <globals.h>
-#include <sys/core.h>
-#include <sys/schedule.h>
-#include <util/mutex.h>
-
void init_scheduler(void)
{
- for(int i = 0; i < PRIORITIES; i++) {
- scheduler.tlist[i].prev = &scheduler.tlist[i];
- scheduler.tlist[i].next = &scheduler.tlist[i];
- scheduler.tlist[i].data = 0;
- }
- scheduler.rthread_ll = 0;
- scheduler.ctx = &svccpu;
-}
-
-void* get_stack(void)
-{
- for (int i = 0; i < MAX_THREADS; i++) {
- if (stacks_table[i] == 0) {
- stacks_table[i] = 1;
- return (void*)heap_end() - STACK_SIZE*i;
- }
- }
- return 0;
-}
-
-void add_thread(void (*thread_fxn)(void), unsigned char priority)
-{
- struct Thread* thread = (struct Thread*)malloca(sizeof(struct Thread), 4);
- // Set the program counter to the entry
- thread->thread = thread_fxn;
- // Get a stack frame
- thread->stack_base = get_stack();
- thread->stack = thread->stack_base;
- // Put in error state for no stack
- if(thread->stack == 0)
- thread->data.status = THREAD_STACK_ERROR;
- else
- thread->data.status = THREAD_READY;
- // Doesn't wait for mutex at start
- thread->data.mutex_waiting = 0;
- // Set PID
- thread->data.pid = nextpid++;
- thread->data.preempt_count = 0;
- thread->data.cpu_context.lr = (unsigned long)cleanup;
- unsigned char p = priority;
- if (p >= PRIORITIES) {
- p = PRIORITIES - 1;
- }
- thread->data.priority = p;
- push_ll(&scheduler.tlist[p], thread);
-}
-
-struct LL* get_next_thread(void)
-{
- for(unsigned long i = 0; i < PRIORITIES; i++) {
- struct LL* thread_ll = scheduler.tlist[i].next;
- if (thread_ll == &scheduler.tlist[i])
- continue;
- do {
- struct Thread* thread = thread_ll->data;
- if((thread->data.status == THREAD_RUNNING) || (thread->data.status == THREAD_READY))
- return thread_ll;
- thread_ll = thread_ll->next;
- } while(thread_ll != &scheduler.tlist[i]);
- }
- return 0;
-}
-
-void schedule_c(void)
-{
- // Preserve registers in current context
- preserve_ctx(scheduler.ctx);
-
- // Get current thread
- struct LL* current_thread_ll = scheduler.rthread_ll;
- // Get next thread
- struct LL* next_thread_ll = get_next_thread();
-
- // If there is a current thread
- if (current_thread_ll != 0) {
- // If we are switching the thread
- if (current_thread_ll != next_thread_ll) {
- // Context switch
- struct Thread* current_thread = current_thread_ll->data;
- struct Thread* next_thread = next_thread_ll->data;
- preserve_stack(current_thread);
- //preserve_pc(current_thread);
- current_thread->thread = (void*)current_thread->data.cpu_context.lr;
- restore_stack(next_thread);
- scheduler.rthread_ll = next_thread_ll;
- scheduler.ctx = &next_thread->data.cpu_context;
- }
- }
- else if (next_thread_ll != 0) {
- struct Thread* next_thread = next_thread_ll->data;
- preserve_sys_stack(&svcsp);
- restore_stack(next_thread);
- scheduler.rthread_ll = next_thread_ll;
- scheduler.ctx = &next_thread->data.cpu_context;
- }
- if (scheduler.rthread_ll) {
- struct Thread* rthread = scheduler.rthread_ll->data;
- restore_ctx(scheduler.ctx);
- asm volatile ("bx %0" :: "r"(rthread->thread));
- } else {
- scheduler.ctx = &svccpu;
- restore_sys_stack(&svcsp);
- restore_ctx(scheduler.ctx);
- }
-}
-
-void cleanup(void)
-{
- if (scheduler.rthread_ll != 0) {
- // Mark the thread as finished
- struct Thread* t = scheduler.rthread_ll->data;
- //uart_string("Cleaning up thread ");
- //uart_10(t->data.pid);
- //uart_char('\n');
- t->data.status = THREAD_FINISHED;
- // Mark the stack space as free
- unsigned long sidx = (unsigned long)(heap_end() - t->stack_base)/STACK_SIZE;
- stacks_table[sidx] = 0;
- // Remove the thread
- struct LL* ll = scheduler.rthread_ll;
- struct LL* prev = ll->prev;
- struct LL* next = ll->next;
- prev->next = ll->next;
- next->prev = ll->prev;
- free(ll->data);
- free(ll);
- scheduler.rthread_ll = 0;
- }
- // Schedule next thread
- schedule();
-}
-
-void sched_info(void)
-{
- uart_string("Scheduler Information\n");
- for(unsigned long i = 0; i < PRIORITIES; i++) {
- struct LL* ll = scheduler.tlist[i].next;
- uart_string("Queue ");
- uart_10(i);
- while (ll != &scheduler.tlist[i]) {
- uart_string("\nThread ");
- struct Thread* t = ll->data;
- uart_hex((unsigned long)t->thread);uart_char(' ');
- uart_hex((unsigned long)t->stack);uart_char(' ');
- uart_hex((unsigned long)t->stack_base);uart_char(' ');
- uart_10(t->data.priority);uart_char(' ');
- uart_10(t->data.preempt_count);uart_char(' ');
- uart_10(t->data.status);uart_char(' ');
- uart_hex((unsigned long)t->data.mutex_waiting);uart_char(' ');
- uart_10(t->data.pid);uart_char('\n');
- memshow32((unsigned long*)&t->data.cpu_context, 10);
- ll = ll->next;
- }
- uart_char('\n');
- }
- uart_string("Stacks:\n");
- memshow32((unsigned long*)stacks_table, 6);
}
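
The removed C implementation handed out fixed-size stacks descending from heap_end() and recovered the slot index from a thread's stack_base when the thread finished. A minimal sketch of that bookkeeping, mirroring the removed code; free_stack() is a hypothetical helper that factors out the computation cleanup() did inline, and STACK_SIZE, MAX_THREADS, heap_end() and stacks_table are assumed from the repo's globals:

/* Sketch only: allocate the first free fixed-size stack slot below heap_end(). */
void* get_stack(void)
{
	for (int i = 0; i < MAX_THREADS; i++) {
		if (stacks_table[i] == 0) {
			stacks_table[i] = 1;                        /* claim slot i      */
			return (void*)heap_end() - STACK_SIZE * i;  /* slot base address */
		}
	}
	return 0;                                           /* no free slot      */
}

/* Hypothetical helper: recover the slot index from a base address and release it. */
void free_stack(void* stack_base)
{
	unsigned long i = (unsigned long)(heap_end() - stack_base) / STACK_SIZE;
	stacks_table[i] = 0;
}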