aboutsummaryrefslogtreecommitdiff
path: root/kernel/sys
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sys')
-rw-r--r--kernel/sys/core.c58
-rw-r--r--kernel/sys/kernel.S32
-rw-r--r--kernel/sys/power.c39
-rw-r--r--kernel/sys/schedule.S53
-rw-r--r--kernel/sys/schedule.c468
5 files changed, 650 insertions, 0 deletions
diff --git a/kernel/sys/core.c b/kernel/sys/core.c
new file mode 100644
index 0000000..d76b712
--- /dev/null
+++ b/kernel/sys/core.c
@@ -0,0 +1,58 @@
+#include <cpu/irq.h>
+#include <cpu.h>
+#include <drivers/uart.h>
+#include <globals.h>
+#include <graphics/lfb.h>
+#include <lib/kmem.h>
+#include <lib/mmu.h>
+#include <lib/strings.h>
+#include <symbols.h>
+#include <sys/core.h>
+#include <sys/power.h>
+#include <sys/schedule.h>
+#include <util/mutex.h>
+#include <util/status.h>
+#include <util/time.h>
+
+// Initialize IRQs
+// Bring the system up: system timer, UART, interrupt routing,
+// generic timer FIQ, framebuffer, MMU, mutexes, and the scheduler.
+void sysinit(void)
+{
+ // Initialize System Globals
+ stimeh = *(unsigned long*)SYS_TIMER_CHI;
+ stimel = *(unsigned long*)SYS_TIMER_CLO;
+ // BUG FIX: the system timer compare register C0 is matched against
+ // the LOW counter word (CLO), so the trigger must be seeded from
+ // stimel; the original used stimeh (CHI)
+ *(unsigned long*) SYS_TIMER_C0 = 2000000 + stimel; // 2 second trigger
+ uart_init();
+ ///...
+
+ // Route GPU interrupts to Core 0
+ store32(0x00, GPU_INTERRUPTS_ROUTING);
+
+ // Mask Overrun of UART0
+ store32(1<<4, UART0_IMSC);
+ // Enable UART GPU IRQ
+ store32(1<<25, IRQ_ENABLE2);
+ // Enable Timer
+ //// Get the frequency
+ cntfrq = read_cntfrq();
+ // Clear cntv interrupt and set next 1 second timer
+ write_cntv_tval(cntfrq);
+ // Route timer to core0 fiq
+ routing_core0cntv_to_core0fiq();
+ // Enable timer
+ enablecntv();
+ // Enable system timer
+ store32(SYS_TIMER_SC_M0, IRQ_ENABLE1);
+
+ // Graphics Initialize
+ lfb_init();
+ lfb_showpicture();
+
+ // Initialize Memory Management Unit
+ mmu_init();
+
+ // Initialize Mutex Manager
+ mutex_init();
+
+ // Start Scheduler
+ init_scheduler();
+}
diff --git a/kernel/sys/kernel.S b/kernel/sys/kernel.S
new file mode 100644
index 0000000..71b22a1
--- /dev/null
+++ b/kernel/sys/kernel.S
@@ -0,0 +1,32 @@
+.section ".text.kernel"
+
+.include "macros.inc"
+
+.globl kernel_main
+// Kernel entry: bring the system up, print status, enable the cycle
+// counter, then drop to user mode and hand control to the scheduler.
+kernel_main:
+ bl sysinit
+ bl status
+ // Draw ttbr_msg at row 23, column 0, color 0xFF00
+ ldr r2, =ttbr_msg
+ mov r0, #23
+ mov r1, #0
+ mov r3, #0xFF00
+ bl draw_cstring
+ // Initialize System Cycle Counter
+ // PMUSERENR: allow user-mode access to the performance counters
+ mov r0, #1
+ mcr p15, 0, r0, c9, c14, 0
+ // PMCR: set E bit to enable all performance counters
+ mov r0, #1
+ mcr p15, 0, r0, c9, c12, 0
+ // PMCNTENSET: enable the cycle counter (bit 31)
+ mov r0, #0x80000000
+ mcr p15, 0, r0, c9, c12, 1
+
+ // Intentional undefined instruction
+ // .word 0xf7f0a000
+ // Unmask aborts and IRQs and switch to user mode (0x10)
+ cpsie ai, #0x10
+ svc #2 // Start scheduling!
+2:
+ // Idle here once the scheduler has taken over
+ wfe
+ b 2b
+
+.section .data
+ttbr_msg:
+ .asciz "MMU Initialized!"
diff --git a/kernel/sys/power.c b/kernel/sys/power.c
new file mode 100644
index 0000000..c4f12a9
--- /dev/null
+++ b/kernel/sys/power.c
@@ -0,0 +1,39 @@
+#include <symbols.h>
+#include <sys/core.h>
+#include <sys/power.h>
+
+//https://github.com/raspberrypi/linux/blob/aeaa2460db088fb2c97ae56dec6d7d0058c68294/drivers/watchdog/bcm2835_wdt.c
+// Arm the hardware watchdog with a 100 second timeout and enable a
+// full reset when it expires. Every PM register write must carry
+// PM_PASSWORD to take effect.
+void wdt_start(void)
+{
+ store32(BCM2835_PERI_BASE + PM_WDOG, PM_PASSWORD | (SECS_TO_WDOG_TICS(100) & PM_WDOG_TIME_SET));
+ // Read-modify-write PM_RSTC, preserving bits outside the wrcfg field
+ unsigned long cur = load32(BCM2835_PERI_BASE + PM_RSTC);
+ store32(BCM2835_PERI_BASE + PM_RSTC, PM_PASSWORD | (cur & PM_RSTC_WRCFG_CLR) | PM_RSTC_WRCFG_FULL_RESET);
+}
+
+// Disarm the hardware watchdog by writing the reset value to PM_RSTC.
+void wdt_stop(void)
+{
+ store32(BCM2835_PERI_BASE + PM_RSTC, PM_PASSWORD | PM_RSTC_RESET);
+}
+
+// Reboot the SoC into the given partition (mirrors the Linux
+// bcm2835_wdt driver). The 6 partition bits are spread across the
+// even bit positions of PM_RSTS; partition 63 tells the firmware
+// to halt instead of rebooting.
+void __bcm2835_restart(unsigned char partition)
+{
+ unsigned long val, rsts;
+ rsts = (partition & 1) | ((partition & 0b10) << 1) |
+ ((partition & 0b100) << 2) | ((partition & 0b1000) << 3) |
+ ((partition & 0b10000) << 4) | ((partition & 0b100000) << 5);
+ // Preserve the non-partition bits of PM_RSTS
+ val = load32(BCM2835_PERI_BASE + PM_RSTS);
+ val &= PM_RSTS_PARTITION_CLR;
+ val |= PM_PASSWORD | rsts;
+ store32(BCM2835_PERI_BASE + PM_RSTS, val);
+ // Very short watchdog timeout (10 ticks) so the reset fires almost
+ // immediately
+ store32(BCM2835_PERI_BASE + PM_WDOG, 10 | PM_PASSWORD);
+ val = load32(BCM2835_PERI_BASE + PM_RSTC);
+ val &= PM_RSTC_WRCFG_CLR;
+ val |= PM_PASSWORD | PM_RSTC_WRCFG_FULL_RESET;
+ store32(BCM2835_PERI_BASE + PM_RSTC, val);
+ // Give the watchdog time to fire before returning
+ delay(1);
+}
+
+// Power the board off by "rebooting" into partition 63, which the
+// firmware treats as halt.
+void bcm2835_power_off(void)
+{
+ __bcm2835_restart(63); // Partition 63 => Halt
+}
diff --git a/kernel/sys/schedule.S b/kernel/sys/schedule.S
new file mode 100644
index 0000000..a47252c
--- /dev/null
+++ b/kernel/sys/schedule.S
@@ -0,0 +1,53 @@
+.section ".text"
+.globl schedule
+
+.include "macros.inc"
+
+// Assumption: Enter in SVC mode
+// Context switch: save the current thread's registers, fold in a
+// preempted LR recorded by the IRQ handler (irqlr) if present, pick
+// the next ready thread, and resume it via exception return.
+schedule:
+ preserve_ctx
+ ldr r1, =irqlr
+ ldr r0, [r1]
+ cmp r0, #0
+ beq 1f
+ // Replace LR with IRQ's LR
+ ldr r3, =scheduler
+ ldr r2, [r3, #0] // struct Thread* rthread
+ str r0, [r2, #0] // svc_lr -> void* pc
+ // Clear IRQ's LR
+ mov r0, #0
+ str r0, [r1]
+1:
+ bl next_thread // Thread* next -> r0
+ ldr r3, =scheduler
+ str r0, [r3, #0] // next -> rthread
+ restore_ctx
+ subs pc, lr, #0 // exception return (also restores SPSR -> CPSR)
+
+.globl cleanup
+// Trampoline run when a thread's entry function returns: recycle the
+// thread in C, switch onto usrloopthread's saved context, then
+// re-enter the scheduler with svc #2.
+cleanup:
+ bl c_cleanup
+ // usrloop -> rthread
+ ldr r3, =scheduler
+ ldr r2, =usrloopthread
+ str r2, [r3, #0]
+ ldr sp, [r2, #4] // usrloopthread.sp
+ ldmfd sp!,{lr}
+ ldmfd sp!,{r0-r12}
+ ldr lr, =kernel_usr_task_loop
+ // svc sched
+ svc #2
+.globl kernel_usr_task_loop
+// Idle loop executed when no user thread is runnable
+kernel_usr_task_loop:
+ wfe
+ b kernel_usr_task_loop
+
+.globl add_thread
+// C-callable thread creation shim: in user mode (0x10) trap into the
+// kernel via svc #3, otherwise tail-call svc_add_thread directly.
+add_thread:
+ mrs r3, cpsr
+ and r3, #0x1F // extract the mode bits
+ cmp r3, #0x10
+ beq 1f
+ b svc_add_thread
+1: svc #3
+ bx lr
diff --git a/kernel/sys/schedule.c b/kernel/sys/schedule.c
new file mode 100644
index 0000000..9b6d46e
--- /dev/null
+++ b/kernel/sys/schedule.c
@@ -0,0 +1,468 @@
+#include <cpu.h>
+#include <globals.h>
+#include <graphics/lfb.h>
+#include <drivers/uart.h>
+#include <lib/kmem.h>
+#include <sys/schedule.h>
+#include <util/mutex.h>
+
+extern void kernel_usr_task_loop(void);
+
+// Set up the scheduler: the always-runnable usrloopthread, the
+// per-priority ready/mutex-wait/signal-wait queues, and the pool of
+// free thread slots.
+void init_scheduler(void)
+{
+ // Set rthread to usrloopthread - an infinitely running thread so that the pointer will never be null
+ usrloopthread.pc = (void*)kernel_usr_task_loop;
+ // NOTE(review): 0x5FC8 is a magic stack address - verify it against
+ // the kernel memory map
+ usrloopthread.sp = (void*)0x5FC8;
+ *(unsigned long**)usrloopthread.sp = (unsigned long*)kernel_usr_task_loop;
+ usrloopthread.sp_base = -1;
+ usrloopthread.mptr = 0;
+ usrloopthread.pid = -1;
+ usrloopthread.priority = -1;
+ usrloopthread.old_priority = -1;
+ usrloopthread.status = THREAD_READY;
+ usrloopthread.offset = -1;
+ scheduler.rthread = &usrloopthread;
+
+ // Initialize Scheduling Queues
+ // Each queue is a ring: start.next is the head, end.next tracks the
+ // tail (pointing back at start while the queue is empty)
+ for (unsigned long p = 0; p < PRIORITIES; p++) {
+ // Ready Init
+ scheduler.ready[p].start.value = 0;
+ scheduler.ready[p].start.next = &scheduler.ready[p].end;
+ scheduler.ready[p].start.entry_type = START_ENTRY;
+ scheduler.ready[p].end.value = 0;
+ scheduler.ready[p].end.next = &scheduler.ready[p].start;
+ scheduler.ready[p].end.entry_type = END_ENTRY;
+ // Mutex Wait Init
+ scheduler.mwait[p].start.value = 0;
+ scheduler.mwait[p].start.next = &scheduler.mwait[p].end;
+ scheduler.mwait[p].start.entry_type = START_ENTRY;
+ scheduler.mwait[p].end.value = 0;
+ scheduler.mwait[p].end.next = &scheduler.mwait[p].start;
+ scheduler.mwait[p].end.entry_type = END_ENTRY;
+ // Signal Wait Init
+ scheduler.swait[p].start.value = 0;
+ scheduler.swait[p].start.next = &scheduler.swait[p].end;
+ scheduler.swait[p].start.entry_type = START_ENTRY;
+ scheduler.swait[p].end.value = 0;
+ scheduler.swait[p].end.next = &scheduler.swait[p].start;
+ scheduler.swait[p].end.entry_type = END_ENTRY;
+ }
+
+ // Initialize nextpid
+ nextpid = FIRST_AVAIL_PID;
+
+ // Initialize Threads - Stack Base and Offsets
+ // Stacks grow downward from 0x20000000 in STACK_SIZE strides; each
+ // thread's queue entry is pre-chained to the next
+ for (unsigned long i = 0; i < MAX_THREADS; i++) {
+ struct Thread* t = &threads[i];
+ t->offset = i;
+ t->sp_base = 0x20000000 - STACK_SIZE*i;
+ thread_entries[i].value = t;
+ thread_entries[i].next = &thread_entries[(i+1)];
+ thread_entries[i].entry_type = VALUE_ENTRY;
+ }
+ // Initialize the free queue
+ // All MAX_THREADS entries start on the free list; the last entry's
+ // next (set in the loop above) is patched to close the ring
+ scheduler.free_threads.start.value = 0;
+ scheduler.free_threads.start.entry_type = START_ENTRY;
+ scheduler.free_threads.end.value = 0;
+ scheduler.free_threads.end.entry_type = END_ENTRY;
+ scheduler.free_threads.start.next = &thread_entries[0];
+ scheduler.free_threads.end.next = &thread_entries[MAX_THREADS-1];
+ thread_entries[MAX_THREADS-1].next = &scheduler.free_threads.end;
+}
+
+// Append thread t to the tail of the ready/mutex-wait/signal-wait
+// queue at the given priority. Unknown queue types are ignored.
+void push_thread_to_queue(struct Thread* t, unsigned char type, unsigned char priority)
+{
+ struct Queue* target;
+ switch (type) {
+ case THREAD_READY:
+  target = &scheduler.ready[priority];
+  break;
+ case THREAD_MWAIT:
+  target = &scheduler.mwait[priority];
+  break;
+ case THREAD_SWAIT:
+  target = &scheduler.swait[priority];
+  break;
+ default:
+  return;
+ }
+ push_to_queue(&thread_entries[t->offset], target);
+}
+
+// Insert thread t at the head of the ready/mutex-wait/signal-wait
+// queue at the given priority. Unknown queue types are ignored.
+void prepend_thread_to_queue(struct Thread* t, unsigned char type, unsigned char priority)
+{
+ struct Queue* target;
+ switch (type) {
+ case THREAD_READY:
+  target = &scheduler.ready[priority];
+  break;
+ case THREAD_MWAIT:
+  target = &scheduler.mwait[priority];
+  break;
+ case THREAD_SWAIT:
+  target = &scheduler.swait[priority];
+  break;
+ default:
+  return;
+ }
+ prepend_to_queue(&thread_entries[t->offset], target);
+}
+
+// Detach and return the head entry of the selected queue;
+// returns 0 for an unknown queue type.
+struct Entry* pop_thread_from_queue(unsigned char type, unsigned char priority)
+{
+ struct Queue* source;
+ switch (type) {
+ case THREAD_READY:
+  source = &scheduler.ready[priority];
+  break;
+ case THREAD_MWAIT:
+  source = &scheduler.mwait[priority];
+  break;
+ case THREAD_SWAIT:
+  source = &scheduler.swait[priority];
+  break;
+ default:
+  return 0;
+ }
+ return pop_from_queue(source);
+}
+
+// Locate the queue entry *preceding* the thread with the given pid
+// (so the caller can unlink it with remove_next_from_queue), scanning
+// every ready, mutex-wait, and signal-wait queue from the highest
+// priority down. Returns 0 if no queued thread has that pid.
+// (Deduplicates the original's three identical scan loops.)
+struct Entry* find_pid(unsigned long pid)
+{
+ for (unsigned char p = 0; p < PRIORITIES; p++) {
+  // The same linear scan applies to each of the three queues
+  struct Queue* queues[3] = {
+   &scheduler.ready[p],
+   &scheduler.mwait[p],
+   &scheduler.swait[p],
+  };
+  for (unsigned char q = 0; q < 3; q++) {
+   struct Entry* prev = &queues[q]->start;
+   struct Entry* entry = prev->next;
+   while (entry->entry_type != END_ENTRY) {
+    if (((struct Thread*)entry->value)->pid == pid)
+     return prev;
+    prev = entry;
+    entry = entry->next;
+   }
+  }
+ }
+ return 0;
+}
+
+// Return the entry *preceding* the first thread blocked on mutex m
+// (suitable for remove_next_from_queue), searching the mutex-wait
+// queues from the highest priority down. Returns 0 if nothing waits
+// on m.
+struct Entry* find_mutex_wait_next(void* m)
+{
+ for (unsigned char p = 0; p < PRIORITIES; p++) {
+ struct Queue* queue = &scheduler.mwait[p];
+ struct Entry* prev = &queue->start;
+ struct Entry* entry = prev->next;
+ while (entry->entry_type != END_ENTRY) {
+ if (((struct Thread*)entry->value)->mptr == m)
+ return prev;
+ prev = entry;
+ entry = entry->next;
+ }
+ }
+ return 0;
+}
+
+// Return the entry *preceding* the first thread blocked on signal/
+// semaphore s, searching the signal-wait queues from the highest
+// priority down. mptr doubles as the semaphore pointer here (set by
+// sched_semaphore_yield). Returns 0 if nothing waits on s.
+struct Entry* find_signal_wait_next(void* s)
+{
+ for (unsigned char p = 0; p < PRIORITIES; p++) {
+ struct Queue* queue = &scheduler.swait[p];
+ struct Entry* prev = &queue->start;
+ struct Entry* entry = prev->next;
+ while (entry->entry_type != END_ENTRY) {
+ if (((struct Thread*)entry->value)->mptr == s)
+ return prev;
+ prev = entry;
+ entry = entry->next;
+ }
+ }
+ return 0;
+}
+
+// Take a thread entry off the free pool; returns the null pointer
+// when every thread slot is in use.
+struct Entry* get_unused_thread(void)
+{
+ struct Queue* pool = &scheduler.free_threads;
+ if (pool->start.next->entry_type == END_ENTRY)
+  return 0; // pool exhausted
+ return pop_from_queue(pool);
+}
+
+// Check whether a READY thread already exists with entry point pc.
+// Returns 1 when a duplicate is found, 0 otherwise.
+unsigned char find_duplicate(void* pc)
+{
+ for (unsigned char p = 0; p < PRIORITIES; p++) {
+  struct Queue* queue = &scheduler.ready[p];
+  struct Entry* entry = queue->start.next;
+  while (entry->entry_type == VALUE_ENTRY) {
+   if (((struct Thread*)entry->value)->pc == pc) {
+    return 1;
+   }
+   // BUG FIX: the original never advanced the cursor, so any
+   // non-empty, non-matching queue spun here forever
+   entry = entry->next;
+  }
+ }
+ return 0;
+}
+
+// Spawn a thread at pc only when no ready thread already runs pc.
+// Returns 1 if a duplicate exists (or thread creation fails),
+// otherwise add_thread's result (0 on success).
+unsigned char add_thread_without_duplicate(void* pc, void* arg, unsigned char priority)
+{
+ if (find_duplicate(pc))
+  return 1;
+ return add_thread(pc, arg, priority);
+}
+
+// Create a new thread that runs pc with r0 = arg at the given
+// priority. Returns 0 on success, 1 if no free thread slot exists.
+unsigned char svc_add_thread(void* pc, void* arg, unsigned char priority)
+{
+ struct Entry* thread_entry = get_unused_thread();
+ // The only point-of-failure is not having a thread available
+ if (thread_entry == 0)
+ return 1;
+ struct Thread* thread = thread_entry->value;
+ /// Thread Setup
+ thread->pc = pc;
+ // Build the initial stack frame to match the { lr, r0-r12 } pop
+ // order used by cleanup/restore_ctx: lr sits at the lowest address,
+ // r0 one word above it
+ unsigned long* argp = (void*)thread->sp_base;
+ argp -= 13;
+ *argp = (unsigned long)arg; // Set r0 to the argument
+ argp -= 1;
+ *(unsigned long**)argp = (unsigned long*)cleanup; // Set lr to the cleanup function
+ thread->sp = argp;
+ thread->status = THREAD_READY;
+ thread->mptr = (void*)0;
+ thread->pid = nextpid++;
+ // Reset next pid on overflow
+ if (nextpid < FIRST_AVAIL_PID) {
+ nextpid = FIRST_AVAIL_PID;
+ }
+ // Cap Priority Level
+ if (priority >= PRIORITIES)
+ thread->priority = PRIORITIES - 1;
+ else
+ thread->priority = priority;
+ // This thread is new
+ thread->old_priority = -1;
+ // Reserved for non-preemptible tasking
+ thread->preempt = 0;
+ /// Add Thread to Scheduler
+ push_thread_to_queue(thread, THREAD_READY, thread->priority);
+ return 0;
+}
+
+// Print one queue's threads to the UART, then its length.
+// Returns the number of entries printed.
+static unsigned long uart_print_queue(struct Queue* queue)
+{
+ unsigned long length = 0;
+ struct Entry* entry = queue->start.next;
+ while (entry->entry_type != END_ENTRY) {
+  uart_hex((unsigned long)entry->value);
+  uart_char(' ');
+  kmemshow32((void*)entry->value, 9);
+  entry = entry->next;
+  length++;
+ }
+ uart_hexn(length);
+ return length;
+}
+
+// Dump the full scheduler state to the UART: the running thread,
+// every per-priority queue, and the free-thread count.
+void uart_scheduler(void)
+{
+ uart_string("Scheduler Info\n==============\nCurrent\n");
+ uart_hex((unsigned long)scheduler.rthread);
+ uart_char(' ');
+ kmemshow32((void*)scheduler.rthread, 9);
+ for(int p = 0; p < PRIORITIES; p++) {
+  uart_string("Priority ");
+  uart_10(p);
+  uart_char('\n');
+  uart_string("Ready Queue\n");
+  uart_print_queue(&scheduler.ready[p]);
+  uart_string("Mutex Wait Queue\n");
+  uart_print_queue(&scheduler.mwait[p]);
+  uart_string("Signal Wait Queue\n");
+  uart_print_queue(&scheduler.swait[p]);
+ }
+ // Count number of free threads
+ // BUG FIX: the counter must restart at zero here; the original
+ // carried over the count left by the last wait queue above
+ unsigned long length = 0;
+ struct Entry* entry = scheduler.free_threads.start.next;
+ while (entry->entry_type != END_ENTRY) {
+  entry = entry->next;
+  length++;
+ }
+ uart_hexn(length);
+ uart_string("==============\n");
+}
+
+// Pick the thread to run next: the head of the highest-priority
+// non-empty ready queue (lower index = higher priority). Falls back
+// to usrloopthread when nothing is runnable.
+struct Thread* next_thread(void)
+{
+ for (int level = 0; level < PRIORITIES; level++) {
+  struct Entry* head = scheduler.ready[level].start.next;
+  if (head->entry_type != END_ENTRY)
+   return head->value;
+ }
+ // No thread found, use basic usrloopthread while waiting for new thread
+ return &usrloopthread;
+}
+
+// Called from the cleanup trampoline when the running thread's entry
+// function returns: recycle its slot into the free-thread pool.
+// Assumes the running thread sits at the head of its ready queue.
+void c_cleanup(void)
+{
+ struct Thread* rt = scheduler.rthread;
+ struct Entry* e = pop_thread_from_queue(THREAD_READY, rt->priority);
+ // Add to free threads
+ push_to_queue(e, &scheduler.free_threads);
+}
+
+// Cooperative yield: rotate the running thread from the head to the
+// tail of its ready queue so same-priority peers get to run first.
+void yield(void)
+{
+ struct Thread* rthread = scheduler.rthread;
+ // usrloopthread should not be yielded
+ if (rthread == &usrloopthread)
+ return;
+ // Put current thread at the end of its ready queue,
+ // thus any threads of the same priority can be run first
+ unsigned char priority = rthread->priority;
+ struct Entry* tq;
+ // Remove from top of queue
+ tq = pop_thread_from_queue(THREAD_READY, priority);
+ if (tq != 0) {
+ // Add to bottom of queue
+ push_thread_to_queue(tq->value, THREAD_READY, priority);
+ }
+}
+
+// Block the running thread on mutex m: move it from its ready queue
+// to the mutex-wait queue, then apply priority inheritance - if the
+// current lock holder runs at a lower priority, boost it to ours so
+// it can release the lock sooner.
+void sched_mutex_yield(void* m)
+{
+ struct Thread* rthread = scheduler.rthread;
+ // usrloopthread should not be yielded
+ if (rthread == &usrloopthread)
+ return;
+ unsigned char priority = rthread->priority;
+ // Signify which lock this thread is waiting for
+ rthread->mptr = m;
+ struct Entry* rt;
+ // Remove from top of running queue
+ rt = pop_thread_from_queue(THREAD_READY, priority);
+ if (rt != 0)
+ // Push to bottom of wait queue
+ push_thread_to_queue(rt->value, THREAD_MWAIT, priority);
+ // Find the thread that has the mutex locked
+ struct Mutex* mtex = m;
+ struct Entry* mutex_next = find_pid(mtex->pid);
+ if (mutex_next == 0)
+ return;
+ // The next thread is the one with the lock
+ struct Entry* mutex_thread_entry = mutex_next->next;
+ // Check if it is lower priority (larger number = lower priority)
+ if (((struct Thread*)mutex_thread_entry->value)->priority > priority) {
+ // Remove it from the old priority queue
+ remove_next_from_queue(mutex_next);
+ struct Thread* t = mutex_thread_entry->value;
+ // Preserve the old priority (0xFF means "no saved priority")
+ if (t->old_priority == 0xFF)
+ t->old_priority = t->priority;
+ t->priority = priority;
+ // Add it to the higher priority queue
+ push_thread_to_queue(t, THREAD_READY, priority);
+ }
+}
+
+// Block the running thread on semaphore s: move it from its ready
+// queue to the signal-wait queue. No priority inheritance here -
+// semaphores have no single owner to boost.
+void sched_semaphore_yield(void* s)
+{
+ struct Thread* rthread = scheduler.rthread;
+ // usrloopthread should not be yielded
+ if (rthread == &usrloopthread)
+ return;
+ unsigned char priority = rthread->priority;
+ // Signify which lock this thread is waiting for
+ rthread->mptr = s;
+ struct Entry* rt;
+ // Remove from top of running queue
+ rt = pop_thread_from_queue(THREAD_READY, priority);
+ if (rt != 0)
+ // Push to bottom of wait queue
+ push_thread_to_queue(rt->value, THREAD_SWAIT, priority);
+}
+
+// Wake the highest-priority thread waiting on mutex m (if any) and
+// move it back to its ready queue; then, if the releasing thread was
+// priority-boosted by sched_mutex_yield, demote it back to its
+// original priority.
+void sched_mutex_resurrect(void* m)
+{
+ // Find any mutex to resurrect
+ struct Entry* prev = find_mutex_wait_next(m);
+ if (prev == 0)
+ return;
+ struct Entry* entry = prev->next;
+ struct Thread* thread = entry->value;
+ // Resurrect the thread
+ thread->mptr = 0;
+ // Remove from wait queue
+ entry = remove_next_from_queue(prev);
+ if (entry == 0)
+ return;
+ // Add to ready queue
+ push_thread_to_queue(entry->value, THREAD_READY, ((struct Thread*)entry->value)->priority);
+ // Demote current thread
+ struct Thread* rthread = scheduler.rthread;
+ unsigned long p = rthread->priority;
+ unsigned long op = rthread->old_priority;
+ // Restore the original priority level (0xFF = never boosted)
+ // Pops the running thread from the head of the boosted queue and
+ // prepends it at its original priority so it keeps running soonest
+ if (op != 0xFF) {
+ struct Entry* tentry = pop_thread_from_queue(THREAD_READY, p);
+ ((struct Thread*)tentry->value)->priority = op;
+ ((struct Thread*)tentry->value)->old_priority = 0xFF;
+ prepend_thread_to_queue(tentry->value, THREAD_READY, op);
+ }
+}
+
+// Wake up to `count` threads waiting on semaphore s, moving each from
+// its signal-wait queue back to its ready queue. Stops early when no
+// more waiters exist.
+void sched_semaphore_resurrect(void* s, unsigned long count)
+{
+ while (count--) {
+ // Find any signal/ semaphore to resurrect
+ struct Entry* prev = find_signal_wait_next(s);
+ if (prev == 0)
+ return;
+ struct Entry* entry = prev->next;
+ struct Thread* thread = entry->value;
+ // Resurrect the thread
+ thread->mptr = 0;
+ // Remove from wait queue
+ entry = remove_next_from_queue(prev);
+ if (entry == 0)
+ return;
+ // Add to ready queue
+ push_thread_to_queue(entry->value, THREAD_READY, ((struct Thread*)entry->value)->priority);
+ }
+}