 include/sys/schedule.h | 166
 later/schedule.S       | 195
 later/schedule.c       | 206
 later/schedule.flat.c  |  32
 later/schedule.h       | 159
 later/schedule.ll.c    |  13
 later/schedule.q.c     | 118
 src/cpu/irq.c          |  17
 src/sys/core.c         |  70
 src/sys/schedule.S     | 155
 src/sys/schedule.c     | 164
 src/util/mutex.c       |  14
 12 files changed, 525 insertions(+), 784 deletions(-)
diff --git a/include/sys/schedule.h b/include/sys/schedule.h
index e1cde57..b153bdb 100644
--- a/include/sys/schedule.h
+++ b/include/sys/schedule.h
@@ -1,144 +1,58 @@
#ifndef SYS_SCHEDULE_H
#define SYS_SCHEDULE_H
-#include <cpu.h>
-#include <lib/mem.h>
-#include <lib/ll.h>
-#include <sys/core.h>
-enum ThreadStatus {
- THREAD_READY = 0,
- THREAD_RUNNING = 1,
- THREAD_WAITING = 2,
- THREAD_WAITING_FOR_MUTEX = 3,
- THREAD_FINISHED = 4,
- THREAD_STACK_ERROR = 5,
-};
+#define TQUEUE_MAX 0x100
+#define STACK_SIZE 0x1000
+#define TQUEUE_CNT 5
+#define PRIORITIES 6
+#define MAX_THREADS TQUEUE_MAX*PRIORITIES*TQUEUE_CNT
-struct cpu_context {
- unsigned long r4;
- unsigned long r5;
- unsigned long r6;
- unsigned long r7;
- unsigned long r8;
- unsigned long r9;
- unsigned long r10;
- unsigned long r11;
- unsigned long r12;
- unsigned long lr;
+enum ThreadStatus {
+ THREAD_READY = 0,
+ THREAD_MWAIT = 1,
+ THREAD_SWAIT = 2,
+ THREAD_FINISH = 3, // Need to clean up
+ THREAD_SERROR = 4, // Stack Error
};
-struct ThreadData {
+struct Thread {
+ void* pc;
+ void* sp; // Store r0-r12,lr on stack
+ void* sp_base;
+ void* mptr;
+ unsigned long pid;
unsigned char priority;
- unsigned char preempt_count;
+ unsigned char preempt;
unsigned short status;
- void* mutex_waiting;
- unsigned long pid;
- struct cpu_context cpu_context;
};
-struct Thread {
- //void (*thread)(void);
- void* thread;
- void* stack;
- void* stack_base;
- struct ThreadData data;
+struct ThreadRotBuffer {
+ unsigned int roffset;
+ unsigned int woffset;
+ struct Thread* queue[TQUEUE_MAX];
};
-#define MAX_THREADS 0x100
-#define STACK_SIZE 0x1000
-#define PRIORITIES 6
-struct Scheduler {
- struct LL* rthread_ll;
- struct cpu_context* ctx;
- struct LL tlist[PRIORITIES];
+struct ThreadQueues {
+ struct ThreadRotBuffer ready;
+ struct ThreadRotBuffer mwait;
+ struct ThreadRotBuffer swait;
+ struct ThreadRotBuffer finish;
+ struct ThreadRotBuffer serror;
};
-void init_scheduler(void);
-void add_thread(void (*thread_fxn)(void), unsigned char priority);
-extern void schedule(void);
-void schedule_c(void);
-void schedule_irq(void);
-void cleanup(void);
-void sched_info(void);
-struct LL* get_next_thread(void);
-
-static inline void preserve_stack(struct Thread* thread)
-{
- // Get current mode
- unsigned long mode = getmode();
- // Set supervisor mode - "User mode"
- setsvc();
- // Store the stack pointer
- void* ssp = getsp() + 4*4; // Ignore 4 words pushed on by (schedule)
- thread->stack = ssp;
- // Restore mode
- setmode(mode);
-}
-
-static inline void restore_stack(struct Thread* thread)
-{
- // Get current mode
- unsigned long mode = getmode();
- // Set supervisor mode - "User mode"
- setsvc();
- // Set stack pointer to thread's stack pointer
- asm volatile("mov sp, %0" :: "r"(thread->stack));
- // Restore mode
- setmode(mode);
-}
-
-static inline void preserve_sys_stack(unsigned long* sp)
-{
- if (*sp == 0) {
- unsigned long mode = getmode();
- setsvc();
- *sp = (unsigned long)getsp();
- setmode(mode);
- }
-}
-
-static inline void restore_sys_stack(unsigned long* sp)
-{
- if (*sp) {
- unsigned long mode = getmode();
- setsvc();
- setsp((void*)*sp);
- setmode(mode);
- *sp = 0;
- }
-}
-
-static inline void preserve_pc(struct Thread* t)
-{
- t->thread = (void*)t->data.cpu_context.lr;
-}
+struct Scheduler {
+ void* sp;
+ struct Thread* rthread;
+ struct ThreadQueues thread_queues[PRIORITIES];
+};
-static inline void preserve_ctx(struct cpu_context* cpuctx)
-{
- asm volatile ("mov %0, r4" : "=r"(cpuctx->r4));
- asm volatile ("mov %0, r5" : "=r"(cpuctx->r5));
- asm volatile ("mov %0, r6" : "=r"(cpuctx->r6));
- asm volatile ("mov %0, r7" : "=r"(cpuctx->r7));
- asm volatile ("mov %0, r8" : "=r"(cpuctx->r8));
- asm volatile ("mov %0, r9" : "=r"(cpuctx->r9));
- asm volatile ("mov %0, r10" : "=r"(cpuctx->r10));
- asm volatile ("mov %0, r11" : "=r"(cpuctx->r11));
- asm volatile ("mov %0, r12" : "=r"(cpuctx->r12));
- asm volatile ("mov %0, lr" : "=r"(cpuctx->lr));
-}
-static inline void restore_ctx(struct cpu_context* cpuctx)
-{
- asm volatile ("mov r4, %0" :: "r"(cpuctx->r4));
- asm volatile ("mov r5, %0" :: "r"(cpuctx->r5));
- asm volatile ("mov r6, %0" :: "r"(cpuctx->r6));
- asm volatile ("mov r7, %0" :: "r"(cpuctx->r7));
- asm volatile ("mov r8, %0" :: "r"(cpuctx->r8));
- asm volatile ("mov r9, %0" :: "r"(cpuctx->r9));
- asm volatile ("mov r10, %0" :: "r"(cpuctx->r10));
- asm volatile ("mov r11, %0" :: "r"(cpuctx->r11));
- asm volatile ("mov r12, %0" :: "r"(cpuctx->r12));
- asm volatile ("mov lr, %0" :: "r"(cpuctx->lr));
-}
+void init_scheduler(void);
+// void add_thread(void* pc, void* arg, unsigned char priority);
+/// TODO: ENSURE IRQ/ FIQ entry switches
+/// to user mode then calls the SVC call
+// extern void schedule(void);
+// void yield(void);
+// struct Thread* next_thread(void);
#endif
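
The new header replaces the linked-list run queues with fixed-size rotating buffers indexed by a read offset and a write offset. Below is a minimal host-side sketch of how such a buffer could be driven, assuming the usual convention of leaving one slot free to tell full from empty; the helper names rotbuffer_push and rotbuffer_pop are illustrative and are not part of this commit.

#include <stddef.h>

#define TQUEUE_MAX 0x100

struct Thread; // layout as declared in include/sys/schedule.h

struct ThreadRotBuffer {
	unsigned int roffset;
	unsigned int woffset;
	struct Thread* queue[TQUEUE_MAX];
};

// Enqueue a thread; returns 0 on success, -1 when the buffer is full.
static int rotbuffer_push(struct ThreadRotBuffer* rb, struct Thread* t)
{
	unsigned int next = (rb->woffset + 1) % TQUEUE_MAX;
	if (next == rb->roffset)
		return -1; // full: one slot is sacrificed to distinguish full from empty
	rb->queue[rb->woffset] = t;
	rb->woffset = next;
	return 0;
}

// Dequeue the oldest thread, or NULL when the buffer is empty.
static struct Thread* rotbuffer_pop(struct ThreadRotBuffer* rb)
{
	if (rb->roffset == rb->woffset)
		return NULL;
	struct Thread* t = rb->queue[rb->roffset];
	rb->roffset = (rb->roffset + 1) % TQUEUE_MAX;
	return t;
}

Since TQUEUE_MAX is a power of two, the modulo could also be written as a mask, (woffset + 1) & (TQUEUE_MAX - 1).
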
diff --git a/later/schedule.S b/later/schedule.S
index 1811c7c..a46654c 100644
--- a/later/schedule.S
+++ b/later/schedule.S
@@ -1,58 +1,143 @@
-.section .text
-.globl preserveregs
-preserveregs:
- str r4, [r0, #0x10]
- str r5, [r0, #0x14]
- str r6, [r0, #0x18]
- str r7, [r0, #0x1c]
- str r8, [r0, #0x20]
- str r9, [r0, #0x24]
- str r10, [r0, #0x28]
- str r11, [r0, #0x2c]
- bx lr
-
-.globl restoreregs
-restoreregs:
- ldr r4, [r0, #0x10]
- ldr r5, [r0, #0x14]
- ldr r6, [r0, #0x18]
- ldr r7, [r0, #0x1c]
- ldr r8, [r0, #0x20]
- ldr r9, [r0, #0x24]
- ldr r10, [r0, #0x28]
- ldr r11, [r0, #0x2c]
- bx lr
+.section ".text"
+.globl schedule
+// TODO: Implement Scheduler for IRQ
-.globl exetask
-exetask:
- push {r0, r4, r5, r6, r7, r8, r9, r10, r11, lr}
- // Restore registers from context switch
- ldr r4, [r0, #0x10]
- ldr r5, [r0, #0x14]
- ldr r6, [r0, #0x18]
- ldr r7, [r0, #0x1c]
- ldr r8, [r0, #0x20]
- ldr r9, [r0, #0x24]
- ldr r10, [r0, #0x28]
- ldr r11, [r0, #0x2c]
- // Preserve system stack
- ldr r1, =msp
- str sp, [r1]
- // Setup task's stack
- ldr sp, [r0, #0x34]
- // Switch to task
- ldr lr, [r0, #0x3c]
- blx lr
- // Restore system stack
- ldr r1, =msp
- ldr sp, [r1]
- pop {r0, r4, r5, r6, r7, r8, r9, r10, r11, pc}
+// Save Context
+// reg = struct cpu_context*
+.macro save_context reg
+ str r4, [\reg, #0x00]
+ str r5, [\reg, #0x04]
+ str r6, [\reg, #0x08]
+ str r7, [\reg, #0x0c]
+ str r8, [\reg, #0x10]
+ str r9, [\reg, #0x14]
+ str r10, [\reg, #0x18]
+ str r11, [\reg, #0x1c]
+ str r12, [\reg, #0x20]
+ str lr, [\reg, #0x24]
+.endm
+// Restore Context
+// reg = struct cpu_context*
+.macro restore_context reg
+ ldr r4, [\reg, #0x00]
+ ldr r5, [\reg, #0x04]
+ ldr r6, [\reg, #0x08]
+ ldr r7, [\reg, #0x0c]
+ ldr r8, [\reg, #0x10]
+ ldr r9, [\reg, #0x14]
+ ldr r10, [\reg, #0x18]
+ ldr r11, [\reg, #0x1c]
+ ldr r12, [\reg, #0x20]
+ ldr lr , [\reg, #0x24]
+.endm
-.section .data
-msp:
- .space 4
-.section .data.stacks
-.globl stacks
-stacks:
- .space 0x100000
+// The scheduler is implemented in assembly because the C version was modifying the program stacks.
+// This way, I can be confident that the stacks are left unchanged.
+schedule:
+ ldr r3, =scheduler
+ // Preserve context
+ ldr r0, [r3, #4]
+ // r0 = struct cpu_context*
+ save_context r0
+ // Get the next available thread
+ push {r1-r3, lr}
+ bl get_next_thread
+ pop {r1-r3, lr}
+ ldr r1, [r3, #0]
+ // r3 = struct Scheduler*
+ // r0 = struct LL* next_thread_ll
+ // r1 = struct LL* current_thread_ll
+ // Check if there is a valid currently running thread
+ cmp r1, #0
+ beq schedule.current_thread_nexists
+schedule.current_thread_exists:
+ cmp r0, r1
+ beq schedule.run_current
+ cmp r0, #0
+ moveq r0, r1 // Make the current running thread the next running thread if no next running thread
+ // Next is not the same as the current
+ // Preserve stack of current
+ ldr r2, [r1, #0x8] // struct Thread* current
+ ldrh r1, [r2, #0x0e]
+ cmp r1, #2 // THREAD_WAITING
+ beq schedule.temp_status
+ cmp r1, #1 // THREAD_RUNNING
+ bne schedule.dont_modify_status
+schedule.temp_status:
+ mov r1, #0 // THREAD_READY
+ strh r1, [r2, #0x0e]
+schedule.dont_modify_status:
+ str sp, [r2, #0x4] // void* stack // Preserve stack
+ // Preserve program counter of current
+ str lr, [r2, #0x0] // void* thread // Preserve pc
+ ldr r2, [r0, #0x8] // struct Thread* next
+ // Set new stack pointer
+ ldr sp, [r2, #0x4]
+ // Set the thread as running
+ mov r1, #1 // THREAD_RUNNING
+ strh r1, [r2, #0x0e] // unsigned short status
+ add r2, r2, #0x18
+ // Set new running thread
+ str r0, [r3, #0x0] // struct LL* next_thread_ll // Set new running thread
+ // Set new context
+ str r2, [r3, #0x4] // struct cpu_context* ctx // Set new context
+ b schedule.run_current
+schedule.current_thread_nexists:
+ // r0 = struct LL* next_thread_ll
+ // r1 = 0 = struct LL* current_thread_ll
+ cmp r0, #0
+ beq schedule.no_next_thread
+ ldr r1, [r0, #0x8]
+ // r1 = struct Thread* next_thread
+ // Store system stack pointer
+ ldr r2, =svcsp
+ push {r1}
+ ldr r1, [r2]
+ cmp r1, #0
+ pop {r1}
+ bne schedule.dont_overwrite_sys_stack
+ // Store if zero system stack
+ str sp, [r2]
+schedule.dont_overwrite_sys_stack:
+ // Move stack to next thread's stack pointer
+ ldr sp, [r1, #0x4] // void* stack
+ // Store the running thread ll entry
+ str r0, [r3, #0x0] // struct LL* rthread_ll
+ ldr r2, [r0, #0x8] // struct Thread* thread
+ mov r0, #1 // THREAD_RUNNING
+ strh r0, [r2, #0x0e]
+ // Set context
+ add r1, r1, #0x18 // struct cpu_context*
+ str r1, [r3, #0x4] // store to scheduler.ctx
+schedule.run_current:
+ // Restore context
+ ldr r2, [r3, #0x4] // struct cpu_context* ctx // Set new context
+ restore_context r2
+ // Run
+ ldr r1, [r3, #0]
+ // r1 = struct LL* rthread_ll
+ ldr r1, [r1, #0x8]
+ // r1 = struct Thread* rthread
+ ldr r0, [r1, #0x0]
+ // r0 = void* thread
+ bx r0
+schedule.no_next_thread:
+ // r0 = 0 = struct LL* next_thread_ll
+ // r1 = 0 = struct LL* current_thread_ll
+ // No thread to run
+ // Restore sys context
+ ldr r0, =svccpu
+ str r0, [r3, #0x4] // Store context
+ ldr r0, =svcsp
+ ldr r1, [r0]
+ cmp r1, #0
+ beq schedule.exit
+ mov sp, r1 // Restore stack pointer
+ mov r1, #0
+ str r1, [r0] // Clear stack pointer
+schedule.exit:
+ // Restore register context
+ ldr r2, [r3, #0x4] // struct cpu_context* ctx // Set new context
+ restore_context r2
+ bx lr
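
This assembly addresses struct fields through hand-computed offsets: the thread's status halfword at #0x0e, its embedded cpu_context at #0x18, and the payload pointer of a struct LL node at #0x8. The compile-time sketch below records those assumptions, with the Thread layouts copied from later/schedule.h and struct LL assumed to be { prev, next, data }; the offsets only hold on the 32-bit ARM EABI this kernel targets (4-byte pointers and longs).

#include <stddef.h>

// Layouts copied from later/schedule.h; struct LL is an assumption.
struct cpu_context {
	unsigned long r4, r5, r6, r7, r8, r9, r10, r11, r12, lr;
};
struct ThreadData {
	unsigned char priority;
	unsigned char preempt_count;
	unsigned short status;
	void* mutex_waiting;
	unsigned long pid;
	struct cpu_context cpu_context;
};
struct Thread {
	void* thread;
	void* stack;
	void* stack_base;
	struct ThreadData data;
};
struct LL {
	struct LL* prev;
	struct LL* next;
	void* data;
};

_Static_assert(offsetof(struct Thread, thread) == 0x00,
               "schedule saves lr (pc) to [thread, #0x0]");
_Static_assert(offsetof(struct Thread, stack) == 0x04,
               "schedule saves/loads sp at [thread, #0x4]");
_Static_assert(offsetof(struct Thread, data) + offsetof(struct ThreadData, status) == 0x0e,
               "status halfword accessed at [thread, #0x0e]");
_Static_assert(offsetof(struct Thread, data) + offsetof(struct ThreadData, cpu_context) == 0x18,
               "cpu_context reached via add r2, r2, #0x18");
_Static_assert(offsetof(struct LL, data) == 0x08,
               "thread pointer loaded from [ll, #0x8]");
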
diff --git a/later/schedule.c b/later/schedule.c
index 8cf2780..c300ae0 100644
--- a/later/schedule.c
+++ b/later/schedule.c
@@ -1,83 +1,167 @@
-#include "../sys/schedule.h"
-#include "../lib/ll.h"
-#include "../lib/q.h"
+#include <cpu/irq.h>
+#include <drivers/uart.h>
+#include <globals.h>
+#include <sys/core.h>
+#include <sys/schedule.h>
+#include <util/mutex.h>
-#ifdef IGNORE
-#ifdef FLAT
-static struct Task* task_list[256];
-
-static struct Scheduler scheduler = {
- .tasks = task_list,
-};
-
-static unsigned int ntask_i = 0;
-
-void add_task(struct Task* t)
+void init_scheduler(void)
{
- scheduler.tasks[ntask_i] = t;
- ntask_i += 1;
- if (ntask_i > 256) {
- ntask_i = 0;
+ for(int i = 0; i < PRIORITIES; i++) {
+ scheduler.tlist[i].prev = &scheduler.tlist[i];
+ scheduler.tlist[i].next = &scheduler.tlist[i];
+ scheduler.tlist[i].data = 0;
}
+ scheduler.rthread_ll = 0;
+ scheduler.ctx = &svccpu;
}
-unsigned int get_task_length(void)
+void* get_stack(void)
{
- return ntask_i;
+ for (int i = 0; i < MAX_THREADS; i++) {
+ if (stacks_table[i] == 0) {
+ stacks_table[i] = 1;
+ return (void*)heap_end() - STACK_SIZE*i;
+ }
+ }
+ return 0;
}
-void execute_task(void)
+void add_thread(void (*thread_fxn)(void), unsigned char priority)
{
- if (scheduler.tasks[ntask_i-1] != 0)
- scheduler.tasks[ntask_i-1]->task();
+ struct Thread* thread = (struct Thread*)malloca(sizeof(struct Thread), 4);
+ // Set the program counter to the entry
+ thread->thread = thread_fxn;
+ // Get a stack frame
+ thread->stack_base = get_stack();
+ thread->stack = thread->stack_base;
+ // Put in error state for no stack
+ if(thread->stack == 0)
+ thread->data.status = THREAD_STACK_ERROR;
+ else
+ thread->data.status = THREAD_READY;
+ // Doesn't wait for mutex at start
+ thread->data.mutex_waiting = 0;
+ // Set PID
+ thread->data.pid = nextpid++;
+ thread->data.preempt_count = 0;
+ thread->data.cpu_context.lr = (unsigned long)cleanup;
+ unsigned char p = priority;
+ if (p >= PRIORITIES) {
+ p = PRIORITIES - 1;
+ }
+ thread->data.priority = p;
+ push_ll(&scheduler.tlist[p], thread);
}
-#elseif LL
-static struct LL bl = {
- .prev = 0,
- .next = 0,
-};
-static struct Scheduler scheduler = {
- .tasks = &bl,
-};
-#else
-static struct Q_base bq = {
- .next = 0,
- .last = 0,
-};
-static struct Scheduler scheduler = {
- .tasks = &bq,
-};
-void add_task(struct Task* t)
+struct LL* get_next_thread(void)
{
- pushq(scheduler.tasks, t);
+ for(unsigned long i = 0; i < PRIORITIES; i++) {
+ struct LL* thread_ll = scheduler.tlist[i].next;
+ if (thread_ll == &scheduler.tlist[i])
+ continue;
+ do {
+ struct Thread* thread = thread_ll->data;
+ if((thread->data.status == THREAD_RUNNING) || (thread->data.status == THREAD_READY))
+ return thread_ll;
+ thread_ll = thread_ll->next;
+ } while(thread_ll != &scheduler.tlist[i]);
+ }
+ return 0;
}
-unsigned int get_task_length(void)
+void schedule_c(void)
{
- unsigned int length = 0;
- if (scheduler.tasks->last == 0)
- return length;
- else if (scheduler.tasks->next == scheduler.tasks->last)
- return 1;
- else {
- struct Q* q = scheduler.tasks->next;
- length += 1;
- while (q->next != 0) {
- q = q->next;
- length += 1;
+ // Preserve registers in current context
+ preserve_ctx(scheduler.ctx);
+
+ // Get current thread
+ struct LL* current_thread_ll = scheduler.rthread_ll;
+ // Get next thread
+ struct LL* next_thread_ll = get_next_thread();
+
+ // If there is a current thread
+ if (current_thread_ll != 0) {
+ // If we are switching the thread
+ if (current_thread_ll != next_thread_ll) {
+ // Context switch
+ struct Thread* current_thread = current_thread_ll->data;
+ struct Thread* next_thread = next_thread_ll->data;
+ preserve_stack(current_thread);
+ //preserve_pc(current_thread);
+ current_thread->thread = (void*)current_thread->data.cpu_context.lr;
+ restore_stack(next_thread);
+ scheduler.rthread_ll = next_thread_ll;
+ scheduler.ctx = &next_thread->data.cpu_context;
}
- return length;
}
+ else if (next_thread_ll != 0) {
+ struct Thread* next_thread = next_thread_ll->data;
+ preserve_sys_stack(&svcsp);
+ restore_stack(next_thread);
+ scheduler.rthread_ll = next_thread_ll;
+ scheduler.ctx = &next_thread->data.cpu_context;
+ }
+ if (scheduler.rthread_ll) {
+ struct Thread* rthread = scheduler.rthread_ll->data;
+ restore_ctx(scheduler.ctx);
+ asm volatile ("bx %0" :: "r"(rthread->thread));
+ } else {
+ scheduler.ctx = &svccpu;
+ restore_sys_stack(&svcsp);
+ restore_ctx(scheduler.ctx);
+ }
+}
+
+void cleanup(void)
+{
+ if (scheduler.rthread_ll != 0) {
+ // Mark the thread as finished
+ struct Thread* t = scheduler.rthread_ll->data;
+ //uart_string("Cleaning up thread ");
+ //uart_10(t->data.pid);
+ //uart_char('\n');
+ t->data.status = THREAD_FINISHED;
+ // Mark the stack space as free
+ unsigned long sidx = (unsigned long)(heap_end() - t->stack_base)/STACK_SIZE;
+ stacks_table[sidx] = 0;
+ // Remove the thread
+ struct LL* ll = scheduler.rthread_ll;
+ struct LL* prev = ll->prev;
+ struct LL* next = ll->next;
+ prev->next = ll->next;
+ next->prev = ll->prev;
+ free(ll->data);
+ free(ll);
+ scheduler.rthread_ll = 0;
+ }
+ // Schedule next thread
+ schedule();
}
-void execute_task(void)
+void sched_info(void)
{
- if (scheduler.tasks->last != 0) {
- struct Task* tsk = (struct Task*)scheduler.tasks->next->data;
- (tsk->task)();
- popq(scheduler.tasks);
+ uart_string("Scheduler Information\n");
+ for(unsigned long i = 0; i < PRIORITIES; i++) {
+ struct LL* ll = scheduler.tlist[i].next;
+ uart_string("Queue ");
+ uart_10(i);
+ while (ll != &scheduler.tlist[i]) {
+ uart_string("\nThread ");
+ struct Thread* t = ll->data;
+ uart_hex((unsigned long)t->thread);uart_char(' ');
+ uart_hex((unsigned long)t->stack);uart_char(' ');
+ uart_hex((unsigned long)t->stack_base);uart_char(' ');
+ uart_10(t->data.priority);uart_char(' ');
+ uart_10(t->data.preempt_count);uart_char(' ');
+ uart_10(t->data.status);uart_char(' ');
+ uart_hex((unsigned long)t->data.mutex_waiting);uart_char(' ');
+ uart_10(t->data.pid);uart_char('\n');
+ memshow32((unsigned long*)&t->data.cpu_context, 10);
+ ll = ll->next;
+ }
+ uart_char('\n');
}
+ uart_string("Stacks:\n");
+ memshow32((unsigned long*)stacks_table, 6);
}
-#endif
-#endif
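
In this archived C scheduler, cleanup() recovers a thread's stack slot by inverting the arithmetic get_stack() used to hand it out. A small, self-contained sketch of that round trip follows; heap_end_addr stands in for the kernel's heap_end().

#include <assert.h>

#define STACK_SIZE 0x1000
#define MAX_THREADS 0x100

// Stand-in for the kernel's heap_end(); any fixed top-of-region address
// works for demonstrating the slot arithmetic.
static unsigned long heap_end_addr = 0x00400000;

// get_stack() hands out slot i as heap_end() - STACK_SIZE*i ...
static unsigned long slot_base(int i)
{
	return heap_end_addr - (unsigned long)STACK_SIZE * i;
}

// ... and cleanup() recovers the slot index from the stack base address.
static int slot_index(unsigned long base)
{
	return (int)((heap_end_addr - base) / STACK_SIZE);
}

int main(void)
{
	for (int i = 0; i < MAX_THREADS; i++)
		assert(slot_index(slot_base(i)) == i);
	return 0;
}
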
diff --git a/later/schedule.flat.c b/later/schedule.flat.c
deleted file mode 100644
index 6eb0d14..0000000
--- a/later/schedule.flat.c
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifdef FLAT
-
-#include "../sys/schedule.h"
-static struct Task* task_list[256];
-
-static struct Scheduler scheduler = {
- .tasks = task_list,
-};
-
-static unsigned int ntask_i = 0;
-
-void add_task(struct Task* t)
-{
- scheduler.tasks[ntask_i] = t;
- ntask_i += 1;
- if (ntask_i > 256) {
- ntask_i = 0;
- }
-}
-
-unsigned int get_task_length(void)
-{
- return ntask_i;
-}
-
-void execute_task(void)
-{
- if (scheduler.tasks[ntask_i-1] != 0)
- scheduler.tasks[ntask_i-1]->task();
-}
-
-#endif
diff --git a/later/schedule.h b/later/schedule.h
index 77da7eb..e1cde57 100644
--- a/later/schedule.h
+++ b/later/schedule.h
@@ -1,43 +1,144 @@
#ifndef SYS_SCHEDULE_H
#define SYS_SCHEDULE_H
+#include <cpu.h>
+#include <lib/mem.h>
+#include <lib/ll.h>
+#include <sys/core.h>
-#define S_WAITING 0
-#define S_READY 1
+enum ThreadStatus {
+ THREAD_READY = 0,
+ THREAD_RUNNING = 1,
+ THREAD_WAITING = 2,
+ THREAD_WAITING_FOR_MUTEX = 3,
+ THREAD_FINISHED = 4,
+ THREAD_STACK_ERROR = 5,
+};
+
+struct cpu_context {
+ unsigned long r4;
+ unsigned long r5;
+ unsigned long r6;
+ unsigned long r7;
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long lr;
+};
-#define STACK_SIZE 0x800
-struct Task {
+struct ThreadData {
unsigned char priority;
- unsigned char status;
+ unsigned char preempt_count;
+ unsigned short status;
+ void* mutex_waiting;
unsigned long pid;
- void (*task)(void);
- unsigned long reg[16];
- unsigned long stacki;
- void* stack;
+ struct cpu_context cpu_context;
};
-#ifdef FLAT
-struct Scheduler {
- unsigned long running_pid;
- struct Task** tasks;
-};
-#elseif LL
-#include "../lib/ll.h"
-struct Scheduler {
- unsigned long running_pid;
- struct LL* tasks;
+struct Thread {
+ //void (*thread)(void);
+ void* thread;
+ void* stack;
+ void* stack_base;
+ struct ThreadData data;
};
-#else
-#include "../lib/q.h"
+
+#define MAX_THREADS 0x100
+#define STACK_SIZE 0x1000
+#define PRIORITIES 6
struct Scheduler {
- unsigned long running_pid;
- struct Q_base* tasks;
+ struct LL* rthread_ll;
+ struct cpu_context* ctx;
+ struct LL tlist[PRIORITIES];
};
-#endif
-void add_fxn(void (*task)(void), unsigned char priority);
-unsigned char exists(void (*task)(void));
-void add_no_repeat(void (*task)(void), unsigned char priority);
-unsigned int get_task_length(void);
-void execute_task(void);
+void init_scheduler(void);
+void add_thread(void (*thread_fxn)(void), unsigned char priority);
+extern void schedule(void);
+void schedule_c(void);
+void schedule_irq(void);
+void cleanup(void);
+void sched_info(void);
+struct LL* get_next_thread(void);
+
+static inline void preserve_stack(struct Thread* thread)
+{
+ // Get current mode
+ unsigned long mode = getmode();
+ // Set supervisor mode - "User mode"
+ setsvc();
+ // Store the stack pointer
+ void* ssp = getsp() + 4*4; // Ignore 4 words pushed on by (schedule)
+ thread->stack = ssp;
+ // Restore mode
+ setmode(mode);
+}
+
+static inline void restore_stack(struct Thread* thread)
+{
+ // Get current mode
+ unsigned long mode = getmode();
+ // Set supervisor mode - "User mode"
+ setsvc();
+ // Set stack pointer to thread's stack pointer
+ asm volatile("mov sp, %0" :: "r"(thread->stack));
+ // Restore mode
+ setmode(mode);
+}
+
+static inline void preserve_sys_stack(unsigned long* sp)
+{
+ if (*sp == 0) {
+ unsigned long mode = getmode();
+ setsvc();
+ *sp = (unsigned long)getsp();
+ setmode(mode);
+ }
+}
+
+static inline void restore_sys_stack(unsigned long* sp)
+{
+ if (*sp) {
+ unsigned long mode = getmode();
+ setsvc();
+ setsp((void*)*sp);
+ setmode(mode);
+ *sp = 0;
+ }
+}
+
+static inline void preserve_pc(struct Thread* t)
+{
+ t->thread = (void*)t->data.cpu_context.lr;
+}
+
+static inline void preserve_ctx(struct cpu_context* cpuctx)
+{
+ asm volatile ("mov %0, r4" : "=r"(cpuctx->r4));
+ asm volatile ("mov %0, r5" : "=r"(cpuctx->r5));
+ asm volatile ("mov %0, r6" : "=r"(cpuctx->r6));
+ asm volatile ("mov %0, r7" : "=r"(cpuctx->r7));
+ asm volatile ("mov %0, r8" : "=r"(cpuctx->r8));
+ asm volatile ("mov %0, r9" : "=r"(cpuctx->r9));
+ asm volatile ("mov %0, r10" : "=r"(cpuctx->r10));
+ asm volatile ("mov %0, r11" : "=r"(cpuctx->r11));
+ asm volatile ("mov %0, r12" : "=r"(cpuctx->r12));
+ asm volatile ("mov %0, lr" : "=r"(cpuctx->lr));
+}
+
+static inline void restore_ctx(struct cpu_context* cpuctx)
+{
+ asm volatile ("mov r4, %0" :: "r"(cpuctx->r4));
+ asm volatile ("mov r5, %0" :: "r"(cpuctx->r5));
+ asm volatile ("mov r6, %0" :: "r"(cpuctx->r6));
+ asm volatile ("mov r7, %0" :: "r"(cpuctx->r7));
+ asm volatile ("mov r8, %0" :: "r"(cpuctx->r8));
+ asm volatile ("mov r9, %0" :: "r"(cpuctx->r9));
+ asm volatile ("mov r10, %0" :: "r"(cpuctx->r10));
+ asm volatile ("mov r11, %0" :: "r"(cpuctx->r11));
+ asm volatile ("mov r12, %0" :: "r"(cpuctx->r12));
+ asm volatile ("mov lr, %0" :: "r"(cpuctx->lr));
+}
#endif
diff --git a/later/schedule.ll.c b/later/schedule.ll.c
deleted file mode 100644
index d9ab954..0000000
--- a/later/schedule.ll.c
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifdef LL
-
-#include "../sys/schedule.h"
-#include "../lib/ll.h"
-static struct LL bl = {
- .prev = 0,
- .next = 0,
-};
-static struct Scheduler scheduler = {
- .tasks = &bl,
-};
-
-#endif
diff --git a/later/schedule.q.c b/later/schedule.q.c
deleted file mode 100644
index 4a5ff85..0000000
--- a/later/schedule.q.c
+++ /dev/null
@@ -1,118 +0,0 @@
-#if !(defined(LL) || defined(FLAT))
-#include "../sys/schedule.h"
-#include "../lib/q.h"
-#include "../lib/mem.h"
-#include "../drivers/uart.h"
-
-extern void preserveregs(void*);
-extern void restoreregs(void*);
-extern void exetask(void*);
-
-static unsigned long next_pid = 3;
-unsigned char table[256] = {0, };
-
-static struct __attribute__((packed,align(4))) Q_base bq = {
- .next = 0,
- .last = 0,
-};
-struct __attribute__((packed,align(4))) Scheduler scheduler = {
- .running_pid = 0,
- .tasks = &bq,
-};
-
-extern unsigned long __stacks_start;
-
-unsigned long getstack(void)
-{
- for(unsigned int i = 0; i < 256; i++) {
- if (table[i] == 0) {
- table[i] = 1;
- return i;
- }
- }
- return -1;
-}
-
-void add_fxn(void (*task)(void), unsigned char priority)
-{
- struct Task* t = (struct Task*)malloc(sizeof(struct Task));
- // Allocate a stack frame and space for registers to be preserved on context switches
- t->priority = priority;
- t->task = task;
- t->stacki = getstack();
- t->pid = next_pid;
- next_pid += 1;
- if(t->stacki > 256)
- t->stack = 0;
- else
- t->stack = (void*)(__stacks_start + STACK_SIZE*t->stacki);
- t->status = S_READY;
- for(unsigned char i = 0; i < 13; i++)
- t->reg[i] = 0;
- t->reg[13] = (unsigned long)t->stack;
- t->reg[14] = 0; // LR
- t->reg[15] = (unsigned long)task; // PC
- pushq(scheduler.tasks, t);
-}
-
-unsigned char exists(void (*task)(void))
-{
- if (scheduler.tasks->next == 0)
- return 0;
- struct Q* q = scheduler.tasks->next;
- while (q != 0) {
- struct Task* t = q->data;
- if (t->task == task)
- return 1;
- q = q->next;
- }
- return 0;
-}
-
-void add_no_repeat(void (*task)(void), unsigned char priority)
-{
- if (!(exists(task)))
- add_fxn(task, priority);
-}
-
-unsigned int get_task_length(void)
-{
- unsigned int length = 0;
- if (scheduler.tasks->last == 0)
- return length;
- else if (scheduler.tasks->next == scheduler.tasks->last)
- return 1;
- else {
- struct Q* q = scheduler.tasks->next;
- length += 1;
- while (q->next != 0) {
- q = q->next;
- length += 1;
- }
- return length;
- }
-}
-void execute_task(void)
-{
- // Preserve Current Running PID's Data
- if (scheduler.tasks->last != 0) {
- struct Q* q = (struct Q*)scheduler.tasks->next;
- while (((struct Task*)q->data)->status == S_WAITING) {
- q = q->next;
- }
- struct Task* tsk = (struct Task*)q->data;
- // Restore registers
- // Including program counter as the entry point
- // Including allocated stack position
- // Set lr to the return address to restore system stack
- //preserveregs(tsk->reg);
- //restoreregs(tsk->reg);
- scheduler.running_pid = tsk->pid;
- exetask(tsk->reg);
- scheduler.running_pid = 0;
- table[tsk->stacki] = 0;
- popq(scheduler.tasks);
- }
-}
-
-#endif
diff --git a/src/cpu/irq.c b/src/cpu/irq.c
index c966935..7a9bcaa 100644
--- a/src/cpu/irq.c
+++ b/src/cpu/irq.c
@@ -1,3 +1,4 @@
+#include <cpu.h>
#include <cpu/irq.h>
#include <drivers/uart.h>
#include <globals.h>
@@ -217,12 +218,12 @@ void c_fiq_handler(void)
void localtest(void)
{
- struct Thread* t = scheduler.rthread_ll->data;
- uart_string("Running IRQ Task... ");
- uart_10(t->data.pid);
- uart_char('\n');
- uart_string("Finished! ");
- uart_10(t->data.pid);
- uart_char('\n');
- sched_info();
+ //struct Thread* t = scheduler.rthread_ll->data;
+ //uart_string("Running IRQ Task... ");
+ //uart_10(t->data.pid);
+ //uart_char('\n');
+ //uart_string("Finished! ");
+ //uart_10(t->data.pid);
+ //uart_char('\n');
+ //sched_info();
}
diff --git a/src/sys/core.c b/src/sys/core.c
index cb9b765..1329c12 100644
--- a/src/sys/core.c
+++ b/src/sys/core.c
@@ -71,43 +71,43 @@ void sysinit(void)
void testlocal1(void)
{
- unsigned long a = 5;
- struct Thread* t = scheduler.rthread_ll->data;
- uart_string("vRan Thread ");
- uart_10(t->data.pid);
- uart_string(" Pri. ");
- uart_10(t->data.priority);
- uart_string(" ...\n");
- add_thread(testlocal, 0);
- schedule();
- a += t->data.pid;
- uart_10(a);
- uart_string(" Done!\n");
+ //unsigned long a = 5;
+ //struct Thread* t = scheduler.rthread_ll->data;
+ //uart_string("vRan Thread ");
+ //uart_10(t->data.pid);
+ //uart_string(" Pri. ");
+ //uart_10(t->data.priority);
+ //uart_string(" ...\n");
+ //add_thread(testlocal, 0);
+ //schedule();
+ //a += t->data.pid;
+ //uart_10(a);
+ //uart_string(" Done!\n");
}
void testlocal(void)
{
- struct Thread* t = scheduler.rthread_ll->data;
- uart_string("Ran Thread ");
- uart_10(t->data.pid);
- uart_string(" Pri. ");
- uart_10(t->data.priority);
- uart_string(" ...\n");
- //delay(0x80000000);
- if (t->data.pid == 5) {
- add_thread(testlocal1, 1);
- schedule();
- }
- if (t->data.pid == 3) {
- // Example
- /*
- while (uart_tx_full) {
- t->data.status = THREAD_WAITING;
- schedule();
- } // Will wait until uart_tx is not full
- */
- }
- uart_string("Done! ");
- uart_10(t->data.pid);
- uart_char('\n');
+ //struct Thread* t = scheduler.rthread_ll->data;
+ //uart_string("Ran Thread ");
+ //uart_10(t->data.pid);
+ //uart_string(" Pri. ");
+ //uart_10(t->data.priority);
+ //uart_string(" ...\n");
+ ////delay(0x80000000);
+ //if (t->data.pid == 5) {
+ // add_thread(testlocal1, 1);
+ // schedule();
+ //}
+ //if (t->data.pid == 3) {
+ // // Example
+ // /*
+ // while (uart_tx_full) {
+ // t->data.status = THREAD_WAITING;
+ // schedule();
+ // } // Will wait until uart_tx is not full
+ // */
+ //}
+ //uart_string("Done! ");
+ //uart_10(t->data.pid);
+ //uart_char('\n');
}
diff --git a/src/sys/schedule.S b/src/sys/schedule.S
index a46654c..298646a 100644
--- a/src/sys/schedule.S
+++ b/src/sys/schedule.S
@@ -1,143 +1,26 @@
.section ".text"
.globl schedule
-// TODO: Implement Scheduler for IRQ
-
-// Save Context
-// reg = struct cpu_context*
-.macro save_context reg
- str r4, [\reg, #0x00]
- str r5, [\reg, #0x04]
- str r6, [\reg, #0x08]
- str r7, [\reg, #0x0c]
- str r8, [\reg, #0x10]
- str r9, [\reg, #0x14]
- str r10, [\reg, #0x18]
- str r11, [\reg, #0x1c]
- str r12, [\reg, #0x20]
- str lr, [\reg, #0x24]
+// Implicit: SVC Mode
+.macro preserve_ctx
+ cps #0x1f // Sys mode
+	push {r0-r12, lr} // Save usr regs
+ cps #0x10 // Svc mode
+ ldr r3, =scheduler // struct Scheduler
+ ldr r3, [r3, #4] // struct Thread* rthread
+ str lr, [r3, #0] // svc_lr -> void* pc
.endm
-// Restore Context
-// reg = struct cpu_context*
-.macro restore_context reg
- ldr r4, [\reg, #0x00]
- ldr r5, [\reg, #0x04]
- ldr r6, [\reg, #0x08]
- ldr r7, [\reg, #0x0c]
- ldr r8, [\reg, #0x10]
- ldr r9, [\reg, #0x14]
- ldr r10, [\reg, #0x18]
- ldr r11, [\reg, #0x1c]
- ldr r12, [\reg, #0x20]
- ldr lr , [\reg, #0x24]
+
+.macro restore_ctx
+ ldr r3, =scheduler // struct Scheduler
+ ldr r3, [r3, #4] // struct Thread* rthread
+ ldr lr, [r3, #0] // void* pc -> lr_svc
+ cps #0x1f // Sys mode
+ pop {r0-r12,lr} // Restore usr regs
+ cps #0x10 // Svc mode
.endm
-// Implemented the scheduler in Assembly since the C defined was messing around with the program stacks
-// This way, I can be confident that the stacks will be unchanged
schedule:
- ldr r3, =scheduler
- // Preserve context
- ldr r0, [r3, #4]
- // r0 = struct cpu_context*
- save_context r0
- // Get the next available thread
- push {r1-r3, lr}
- bl get_next_thread
- pop {r1-r3, lr}
- ldr r1, [r3, #0]
- // r3 = struct Scheduler*
- // r0 = struct LL* next_thread_ll
- // r1 = struct LL* current_thread_ll
- // Check if there is a valid currently running thread
- cmp r1, #0
- beq schedule.current_thread_nexists
-schedule.current_thread_exists:
- cmp r0, r1
- beq schedule.run_current
- cmp r0, #0
- moveq r0, r1 // Make the current running thread the next running thread if no next running thread
- // Next is not the same as the current
- // Preserve stack of current
- ldr r2, [r1, #0x8] // struct Thread* current
- ldrh r1, [r2, #0x0e]
- cmp r1, #2 // THREAD_WAITING
- beq schedule.temp_status
- cmp r1, #1 // THREAD_RUNNING
- bne schedule.dont_modify_status
-schedule.temp_status:
- mov r1, #0 // THREAD_READY
- strh r1, [r2, #0x0e]
-schedule.dont_modify_status:
- str sp, [r2, #0x4] // void* stack // Preserve stack
- // Preserve program counter of current
- str lr, [r2, #0x0] // void* thread // Preserve pc
- ldr r2, [r0, #0x8] // struct Thread* next
- // Set new stack pointer
- ldr sp, [r2, #0x4]
- // Set the thread as running
- mov r1, #1 // THREAD_RUNNING
- strh r1, [r2, #0x0e] // unsigned short status
- add r2, r2, #0x18
- // Set new running thread
- str r0, [r3, #0x0] // struct LL* next_thread_ll // Set new running thread
- // Set new context
- str r2, [r3, #0x4] // struct cpu_context* ctx // Set new context
- b schedule.run_current
-schedule.current_thread_nexists:
- // r0 = struct LL* next_thread_ll
- // r1 = 0 = struct LL* current_thread_ll
- cmp r0, #0
- beq schedule.no_next_thread
- ldr r1, [r0, #0x8]
- // r1 = struct Thread* next_thread
- // Store system stack pointer
- ldr r2, =svcsp
- push {r1}
- ldr r1, [r2]
- cmp r1, #0
- pop {r1}
- bne schedule.dont_overwrite_sys_stack
- // Store if zero system stack
- str sp, [r2]
-schedule.dont_overwrite_sys_stack:
- // Move stack to next thread's stack pointer
- ldr sp, [r1, #0x4] // void* stack
- // Store the running thread ll entry
- str r0, [r3, #0x0] // struct LL* rthread_ll
- ldr r2, [r0, #0x8] // struct Thread* thread
- mov r0, #1 // THREAD_RUNNING
- strh r0, [r2, #0x0e]
- // Set context
- add r1, r1, #0x18 // struct cpu_context*
- str r1, [r3, #0x4] // store to scheduler.ctx
-schedule.run_current:
- // Restore context
- ldr r2, [r3, #0x4] // struct cpu_context* ctx // Set new context
- restore_context r2
- // Run
- ldr r1, [r3, #0]
- // r1 = struct LL* rthread_ll
- ldr r1, [r1, #0x8]
- // r1 = struct Thread* rthread
- ldr r0, [r1, #0x0]
- // r0 = void* thread
- bx r0
-schedule.no_next_thread:
- // r0 = 0 = struct LL* next_thread_ll
- // r1 = 0 = struct LL* current_thread_ll
- // No thread to run
- // Restore sys context
- ldr r0, =svccpu
- str r0, [r3, #0x4] // Store context
- ldr r0, =svcsp
- ldr r1, [r0]
- cmp r1, #0
- beq schedule.exit
- mov sp, r1 // Restore stack pointer
- mov r1, #0
- str r1, [r0] // Clear stack pointer
-schedule.exit:
- // Restore register context
- ldr r2, [r3, #0x4] // struct cpu_context* ctx // Set new context
- restore_context r2
- bx lr
+ //preserve_ctx
+ //restore_ctx
+ //bx lr
diff --git a/src/sys/schedule.c b/src/sys/schedule.c
index c300ae0..96dc678 100644
--- a/src/sys/schedule.c
+++ b/src/sys/schedule.c
@@ -1,167 +1,3 @@
-#include <cpu/irq.h>
-#include <drivers/uart.h>
-#include <globals.h>
-#include <sys/core.h>
-#include <sys/schedule.h>
-#include <util/mutex.h>
-
void init_scheduler(void)
{
- for(int i = 0; i < PRIORITIES; i++) {
- scheduler.tlist[i].prev = &scheduler.tlist[i];
- scheduler.tlist[i].next = &scheduler.tlist[i];
- scheduler.tlist[i].data = 0;
- }
- scheduler.rthread_ll = 0;
- scheduler.ctx = &svccpu;
-}
-
-void* get_stack(void)
-{
- for (int i = 0; i < MAX_THREADS; i++) {
- if (stacks_table[i] == 0) {
- stacks_table[i] = 1;
- return (void*)heap_end() - STACK_SIZE*i;
- }
- }
- return 0;
-}
-
-void add_thread(void (*thread_fxn)(void), unsigned char priority)
-{
- struct Thread* thread = (struct Thread*)malloca(sizeof(struct Thread), 4);
- // Set the program counter to the entry
- thread->thread = thread_fxn;
- // Get a stack frame
- thread->stack_base = get_stack();
- thread->stack = thread->stack_base;
- // Put in error state for no stack
- if(thread->stack == 0)
- thread->data.status = THREAD_STACK_ERROR;
- else
- thread->data.status = THREAD_READY;
- // Doesn't wait for mutex at start
- thread->data.mutex_waiting = 0;
- // Set PID
- thread->data.pid = nextpid++;
- thread->data.preempt_count = 0;
- thread->data.cpu_context.lr = (unsigned long)cleanup;
- unsigned char p = priority;
- if (p >= PRIORITIES) {
- p = PRIORITIES - 1;
- }
- thread->data.priority = p;
- push_ll(&scheduler.tlist[p], thread);
-}
-
-struct LL* get_next_thread(void)
-{
- for(unsigned long i = 0; i < PRIORITIES; i++) {
- struct LL* thread_ll = scheduler.tlist[i].next;
- if (thread_ll == &scheduler.tlist[i])
- continue;
- do {
- struct Thread* thread = thread_ll->data;
- if((thread->data.status == THREAD_RUNNING) || (thread->data.status == THREAD_READY))
- return thread_ll;
- thread_ll = thread_ll->next;
- } while(thread_ll != &scheduler.tlist[i]);
- }
- return 0;
-}
-
-void schedule_c(void)
-{
- // Preserve registers in current context
- preserve_ctx(scheduler.ctx);
-
- // Get current thread
- struct LL* current_thread_ll = scheduler.rthread_ll;
- // Get next thread
- struct LL* next_thread_ll = get_next_thread();
-
- // If there is a current thread
- if (current_thread_ll != 0) {
- // If we are switching the thread
- if (current_thread_ll != next_thread_ll) {
- // Context switch
- struct Thread* current_thread = current_thread_ll->data;
- struct Thread* next_thread = next_thread_ll->data;
- preserve_stack(current_thread);
- //preserve_pc(current_thread);
- current_thread->thread = (void*)current_thread->data.cpu_context.lr;
- restore_stack(next_thread);
- scheduler.rthread_ll = next_thread_ll;
- scheduler.ctx = &next_thread->data.cpu_context;
- }
- }
- else if (next_thread_ll != 0) {
- struct Thread* next_thread = next_thread_ll->data;
- preserve_sys_stack(&svcsp);
- restore_stack(next_thread);
- scheduler.rthread_ll = next_thread_ll;
- scheduler.ctx = &next_thread->data.cpu_context;
- }
- if (scheduler.rthread_ll) {
- struct Thread* rthread = scheduler.rthread_ll->data;
- restore_ctx(scheduler.ctx);
- asm volatile ("bx %0" :: "r"(rthread->thread));
- } else {
- scheduler.ctx = &svccpu;
- restore_sys_stack(&svcsp);
- restore_ctx(scheduler.ctx);
- }
-}
-
-void cleanup(void)
-{
- if (scheduler.rthread_ll != 0) {
- // Mark the thread as finished
- struct Thread* t = scheduler.rthread_ll->data;
- //uart_string("Cleaning up thread ");
- //uart_10(t->data.pid);
- //uart_char('\n');
- t->data.status = THREAD_FINISHED;
- // Mark the stack space as free
- unsigned long sidx = (unsigned long)(heap_end() - t->stack_base)/STACK_SIZE;
- stacks_table[sidx] = 0;
- // Remove the thread
- struct LL* ll = scheduler.rthread_ll;
- struct LL* prev = ll->prev;
- struct LL* next = ll->next;
- prev->next = ll->next;
- next->prev = ll->prev;
- free(ll->data);
- free(ll);
- scheduler.rthread_ll = 0;
- }
- // Schedule next thread
- schedule();
-}
-
-void sched_info(void)
-{
- uart_string("Scheduler Information\n");
- for(unsigned long i = 0; i < PRIORITIES; i++) {
- struct LL* ll = scheduler.tlist[i].next;
- uart_string("Queue ");
- uart_10(i);
- while (ll != &scheduler.tlist[i]) {
- uart_string("\nThread ");
- struct Thread* t = ll->data;
- uart_hex((unsigned long)t->thread);uart_char(' ');
- uart_hex((unsigned long)t->stack);uart_char(' ');
- uart_hex((unsigned long)t->stack_base);uart_char(' ');
- uart_10(t->data.priority);uart_char(' ');
- uart_10(t->data.preempt_count);uart_char(' ');
- uart_10(t->data.status);uart_char(' ');
- uart_hex((unsigned long)t->data.mutex_waiting);uart_char(' ');
- uart_10(t->data.pid);uart_char('\n');
- memshow32((unsigned long*)&t->data.cpu_context, 10);
- ll = ll->next;
- }
- uart_char('\n');
- }
- uart_string("Stacks:\n");
- memshow32((unsigned long*)stacks_table, 6);
}
diff --git a/src/util/mutex.c b/src/util/mutex.c
index f97760b..0e1c816 100644
--- a/src/util/mutex.c
+++ b/src/util/mutex.c
@@ -9,16 +9,16 @@ unsigned char lock_mutex(struct Mutex* m, unsigned long pid)
if (m->pid == NULL_PID) {
// Use currently running thread's PID if no pid given
if (pid == 0) {
- struct Thread* thread = scheduler.rthread_ll->data;
- atm_lock(thread->data.pid, &m->pid);
+ struct Thread* thread = scheduler.rthread;
+ atm_lock(thread->pid, &m->pid);
} else {
atm_lock(pid, &m->pid);
}
return 0;
}
- struct Thread* thread = scheduler.rthread_ll->data;
- thread->data.status = THREAD_WAITING_FOR_MUTEX;
- thread->data.mutex_waiting = m;
+ struct Thread* thread = scheduler.rthread;
+ thread->status = THREAD_MWAIT;
+ thread->mptr = m;
return 1;
}
@@ -29,8 +29,8 @@ unsigned char release_mutex(struct Mutex* m, unsigned long pid)
{
// Use current thread's PID if no pid
if (pid == 0) {
- struct Thread* thread = scheduler.rthread_ll->data;
- if (m->pid == thread->data.pid) {
+ struct Thread* thread = scheduler.rthread;
+ if (m->pid == thread->pid) {
atm_release(&m->pid);
return 0;
}
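
With this change, lock_mutex() parks the calling thread (THREAD_MWAIT, mptr pointing at the mutex) and returns 1 instead of spinning, so callers are expected to retry once they are rescheduled. A hedged caller-side sketch: yield() is only a planned, commented-out declaration in the new header, and my_mutex is a hypothetical mutex defined elsewhere.

#include <sys/schedule.h>
#include <util/mutex.h>

extern void yield(void);        // planned API, commented out in sys/schedule.h
extern struct Mutex my_mutex;   // hypothetical mutex owned by another module

void with_lock(void)
{
	// lock_mutex() returns 0 once this thread owns the mutex; 1 means it
	// was marked THREAD_MWAIT on &my_mutex and should let the scheduler run.
	while (lock_mutex(&my_mutex, 0))
		yield();
	// ... critical section ...
	release_mutex(&my_mutex, 0);
}
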