blob: 119ad6b5e168d8486ee238828d7a4ead12026f95 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
|
#include <globals.h>
#include <drivers/uart.h>
#include <sys/schedule.h>
#include <util/mutex.h>
void loop(void);
void cleanup(void);
/*
 * Initialize global scheduler state.
 * Sets up usrloopthread (an always-runnable idle thread) as the current
 * thread so scheduler.rthread is never null, zeroes every rotating thread
 * buffer for every priority level, and seeds the PID counter.
 */
void init_scheduler(void)
{
	// Set rthread to usrloopthread - an infinitely running thread so that the pointer will never be null
	usrloopthread.pc = (void*)loop;
	usrloopthread.sp = (void*)0x5FC8;
	// Pre-seed the saved return slot on the idle stack so a context switch
	// into this thread resumes at loop()
	*(unsigned long**)usrloopthread.sp = (unsigned long*)loop;
	usrloopthread.sp_base = (void*)0x6000;
	usrloopthread.mptr = 0;
	usrloopthread.pid = -1;
	usrloopthread.priority = -1;
	usrloopthread.status = THREAD_READY;
	scheduler.rthread = &usrloopthread;
	// Initialize Rotating Buffers: clear the TQUEUE_CNT buffers that start
	// at &tq->ready for each priority level.
	struct ThreadQueues* tq;
	for (int i = 0; i < PRIORITIES; i++) {
		tq = &scheduler.thread_queues[i];
		struct ThreadRotBuffer* trb = &tq->ready;
		// Loop counter renamed from 'i' to 'q': the original shadowed the
		// outer priority-loop counter.
		for (int q = 0; q < TQUEUE_CNT; q++) {
			trb->roffset = 0;
			trb->woffset = 0;
			for (int j = 0; j < TQUEUE_MAX; j++)
				trb->queue[j] = 0;
			trb += 1;
		}
	}
	// Initialize nextpid: first assignable PID follows the scheduler's own
	nextpid = SCHED_PID + 1;
}
/*
 * Idle thread body: parks the core in a low-power wait-for-event state
 * forever. Installed as the scheduler's fallback thread in init_scheduler.
 */
void loop(void)
{
	for (;;)
		asm volatile ("wfe");
}
/*
 * Claim a free thread stack slot.
 * Stack tops are laid out downward from 0x20000000, STACK_SIZE bytes apart;
 * slot i is marked in-use in stacks_table[i].
 * Returns the stack top address, or 0 when all MAX_THREADS slots are taken.
 */
void* get_stack(void)
{
	for (int i = 0; i < MAX_THREADS; i++) {
		if (stacks_table[i] == 0) {
			stacks_table[i] = 1;
			// Do the arithmetic on an integer, then cast: pointer
			// arithmetic on void* is a GNU extension, not standard C.
			return (void*)(0x20000000UL - (unsigned long)STACK_SIZE * (unsigned long)i);
		}
	}
	// All stack slots exhausted
	return 0;
}
/*
 * Create a new thread that will run pc(arg) at the given priority and
 * enqueue it on the scheduler.
 * - pc:       thread entry point
 * - arg:      single argument, delivered in r0
 * - priority: folded into [0, PRIORITIES) via modulo
 * If no stack slot is available, the thread is queued on the priority's
 * serror buffer with status THREAD_SERROR instead of the ready buffer.
 * NOTE(review): the return of malloca() is not checked; presumably it
 * cannot fail here - confirm.
 */
void add_thread(void* pc, void* arg, unsigned char priority)
{
void* sp = get_stack();
struct Thread* thread = (struct Thread*)malloca(sizeof(struct Thread), 4);
thread->pc = pc;
if (sp) {
thread->sp_base = sp;
// Build the initial saved-register frame the context switch will pop:
// one slot for r0, then 13 slots down to the saved-lr position.
// NOTE(review): this layout must match the context-switch assembly
// elsewhere in the project - cannot be confirmed from this file.
unsigned long* argp = sp;
argp -= 1;
*argp = (unsigned long)arg; // Set r0 to the argument
argp -= 13;
*(unsigned long**)argp = (unsigned long*)cleanup; // Set lr to the cleanup function
thread->sp = (void*)argp;
thread->status = THREAD_READY;
} else {
// No free stack slot: record a stack error on this thread
thread->sp_base = 0;
thread->sp = 0;
thread->status = THREAD_SERROR;
}
thread->mptr = (void*)0;
thread->pid = nextpid++;
thread->priority = priority % PRIORITIES;
thread->preempt = 0;
// Add Thread* to scheduler's appropriate buffer
struct ThreadQueues* tq = &scheduler.thread_queues[thread->priority];
struct ThreadRotBuffer* trb;
if (thread->status == THREAD_SERROR) {
trb = &tq->serror;
} else {
trb = &tq->ready;
}
// NOTE(review): no fullness check - if woffset wraps onto roffset, unread
// entries are overwritten. Confirm TQUEUE_MAX bounds the thread count.
trb->queue[trb->woffset++] = thread;
trb->woffset %= TQUEUE_MAX;
}
void uart_scheduler(void)
{
uart_string("Scheduler Info\n==============\nCurrent\n");
uart_hex((unsigned long)scheduler.rthread);
uart_char(' ');
memshow32((void*)scheduler.rthread, 6);
struct ThreadQueues* tq;
for(int p = 0; p < PRIORITIES; p++) {
uart_string("Priority ");
uart_10(p);
uart_char('\n');
tq = &scheduler.thread_queues[p];
struct ThreadRotBuffer* trb;
trb = &tq->ready;
for(int i = 0; i < TQUEUE_CNT; i++) {
if (trb->roffset == trb->woffset) {
trb += 1;
continue;
}
uart_string("Queue ");
uart_10(i);
uart_char('\n');
unsigned long roffset = trb->roffset;
while (roffset != trb->woffset) {
uart_hex((unsigned long)&trb->queue[roffset]);
uart_char(' ');
memshow32((void*)trb->queue[roffset], 6);
memshow32((void*)trb->queue[roffset]->sp, 14);
roffset++;
roffset %= TQUEUE_MAX;
}
trb += 1;
}
}
uart_string("==============\n");
}
/*
 * Thread-exit trampoline: installed as the saved lr in add_thread, so a
 * thread whose entry function returns lands here.
 * NOTE(review): currently a no-op - the finished thread has nowhere to
 * return to afterwards; presumably reclamation happens elsewhere or this
 * is still a TODO. Confirm before relying on thread exit.
 */
void cleanup(void)
{
}
|