// -*- Mode: CFA -*-
//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.c --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2016
// Last Modified By : Thierry Delisle
// Last Modified On : --
// Update Count     : 0
//

//Start and stop routines for the kernel, declared first to make sure they run first
void kernel_startup(void) __attribute__((constructor(101)));
void kernel_shutdown(void) __attribute__((destructor(101)));

//Header
#include "kernel"

//C Includes
#include <stddef.h>
extern "C" {
#include <sys/resource.h>
}

//CFA Includes
#include "libhdr.h"
#include "threads"

//Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

//-----------------------------------------------------------------------------
// Kernel storage
struct processorCtx_t {
	processor * proc;
	coroutine c;
};

DECL_COROUTINE(processorCtx_t)

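// Reserve raw static storage for each kernel object needed at boot, so the
// objects can be constructed in place during kernel_startup before any
// dynamic allocation is available.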
#define KERNEL_STORAGE(T,X) static char X##_storage[sizeof(T)]

KERNEL_STORAGE(processorCtx_t, systemProcessorCtx);
KERNEL_STORAGE(cluster, systemCluster);
KERNEL_STORAGE(processor, systemProcessor);
KERNEL_STORAGE(thread, mainThread);
KERNEL_STORAGE(machine_context_t, mainThread_context);

cluster * systemCluster;
processor * systemProcessor;
thread * mainThread;

//-----------------------------------------------------------------------------
// Global state

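// Per kernel-thread pointer to the processor currently running on it;
// set in CtxInvokeProcessor for spawned processors and in kernel_startup
// for the system processor.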
thread_local processor * this_processor;

processor * get_this_processor() {
	return this_processor;
}

coroutine * this_coroutine(void) {
	return this_processor->current_coroutine;
}

thread * this_thread(void) {
	return this_processor->current_thread;
}

//-----------------------------------------------------------------------------
// Main thread construction
struct current_stack_info_t {
	machine_context_t ctx;
	unsigned int size;		// size of stack
	void *base;			// base of stack
	void *storage;			// pointer to stack
	void *limit;			// stack grows towards stack limit
	void *context;			// address of cfa_context_t
	void *top;			// address of top of storage
};

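// Capture the bounds of the stack currently in use (the UNIX main stack or a
// pthread stack) so a coroutine can adopt it instead of allocating its own storage.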
void ?{}( current_stack_info_t * this ) {
	CtxGet( &this->ctx );
	this->base = this->ctx.FP;
	this->storage = this->ctx.SP;

	rlimit r;
	int ret = getrlimit( RLIMIT_STACK, &r);
	this->size = r.rlim_cur;

	this->limit = (void *)(((intptr_t)this->base) - this->size);
	this->context = &mainThread_context_storage;
	this->top = this->base;
}

void ?{}( coStack_t * this, current_stack_info_t * info) {
	this->size = info->size;
	this->storage = info->storage;
	this->limit = info->limit;
	this->base = info->base;
	this->context = info->context;
	this->top = info->top;
	this->userStack = true;
}

void ?{}( coroutine * this, current_stack_info_t * info) {
	(&this->stack){ info };
	this->name = "Main Thread";
	this->errno_ = 0;
	this->state = Inactive;
	this->notHalted = true;
}

void ?{}( thread * this, current_stack_info_t * info) {
	(&this->c){ info };
}

//-----------------------------------------------------------------------------
// Processor coroutine
void ?{}(processorCtx_t * this, processor * proc) {
	(&this->c){};
	this->proc = proc;
	proc->ctx = this;
}

void ?{}(processorCtx_t * this, processor * proc, current_stack_info_t * info) {
	(&this->c){ info };
	this->proc = proc;
	proc->ctx = this;
}

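// Forward declaration : defined below, spawns the kernel thread backing a processor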
void start(processor * this);

void ?{}(processor * this) {
	this{ systemCluster };
}

void ?{}(processor * this, cluster * cltr) {
	this->cltr = cltr;
	this->current_coroutine = NULL;
	this->current_thread = NULL;
	(&this->lock){};
	this->terminated = false;

	start( this );
}

void ?{}(processor * this, cluster * cltr, processorCtx_t * ctx) {
	this->cltr = cltr;
	this->current_coroutine = NULL;
	this->current_thread = NULL;
	(&this->lock){};
	this->terminated = false;

	this->ctx = ctx;
	LIB_DEBUG_PRINTF("Kernel : constructing processor context %p\n", ctx);
	ctx{ this };
}

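// Request termination of the processor and wait for it : lock() blocks the caller
// until the processor's main loop releases the lock on its way out.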
void ^?{}(processor * this) {
	if( ! this->terminated ) {
		LIB_DEBUG_PRINTF("Kernel : core %p signaling termination\n", this);
		this->terminated = true;
		lock( &this->lock );
	}
}

void ?{}(cluster * this) {
	( &this->ready_queue ){};
	pthread_spin_init( &this->lock, PTHREAD_PROCESS_PRIVATE );
}

void ^?{}(cluster * this) {
	pthread_spin_destroy( &this->lock );
}

//-----------------------------------------------------------------------------
// Processor running routines
void main(processorCtx_t * ctx);
thread * nextThread(cluster * this);
void scheduleInternal(processor * this, thread * dst);
void spin(processor * this, unsigned int * spin_count);

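// Scheduling loop of a processor : repeatedly take the next ready thread from the
// cluster and run it, spinning while the ready queue is empty, until termination.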
void main(processorCtx_t * ctx) {
	processor * this = ctx->proc;
	LIB_DEBUG_PRINTF("Kernel : core %p starting\n", this);

	thread * readyThread = NULL;
	for( unsigned int spin_count = 0; ! this->terminated; spin_count++ ) {

		readyThread = nextThread( this->cltr );

		if(readyThread) {
			scheduleInternal(this, readyThread);
			spin_count = 0;
		} else {
			spin(this, &spin_count);
		}
	}

	LIB_DEBUG_PRINTF("Kernel : core %p unlocking thread\n", this);
	unlock( &this->lock );
	LIB_DEBUG_PRINTF("Kernel : core %p terminated\n", this);
}

//Declarations for scheduleInternal
extern void ThreadCtxSwitch(coroutine * src, coroutine * dst);

// scheduleInternal runs a thread by context switching
// from the processor coroutine to the target thread
void scheduleInternal(processor * this, thread * dst) {
	// coroutine * proc_ctx = get_coroutine(this->ctx);
	// coroutine * thrd_ctx = get_coroutine(dst);

	// //Update global state
	// this->current_thread = dst;

	// // Context Switch to the thread
	// ThreadCtxSwitch(proc_ctx, thrd_ctx);
	// // when ThreadCtxSwitch returns we are back in the processor coroutine

	coroutine * proc_ctx = get_coroutine(this->ctx);
	coroutine * thrd_ctx = get_coroutine(dst);
	thrd_ctx->last = proc_ctx;

	// context switch to the specified coroutine,
	// which becomes the current_coroutine
	LIB_DEBUG_PRINTF("Kernel : switching to ctx %p (from %p, current %p)\n", thrd_ctx, proc_ctx, this->current_coroutine);
	this->current_thread = dst;
	this->current_coroutine = thrd_ctx;
	CtxSwitch( proc_ctx->stack.context, thrd_ctx->stack.context );
	this->current_coroutine = proc_ctx;
	LIB_DEBUG_PRINTF("Kernel : returned from ctx %p (to %p, current %p)\n", thrd_ctx, proc_ctx, this->current_coroutine);

	// when CtxSwitch returns we are back in the processor coroutine
}

// Handles spinning logic
// TODO : find some strategy to put cores to sleep after some time
void spin(processor * this, unsigned int * spin_count) {
	(*spin_count)++;
}

// Context invoker for processors
// This is the entry point for processors (kernel threads)
// It effectively constructs a coroutine by stealing the pthread stack
void * CtxInvokeProcessor(void * arg) {
	processor * proc = (processor *) arg;
	this_processor = proc;
	// SKULLDUGGERY: We want to create a context for the processor coroutine,
	// which is needed for the 2-step context switch. However, there is no reason
	// to waste the perfectly valid stack created by pthread.
	current_stack_info_t info;
	machine_context_t ctx;
	info.context = &ctx;
	processorCtx_t proc_cor_storage = { proc, &info };

	//Set global state
	proc->current_coroutine = &proc->ctx->c;
	proc->current_thread = NULL;

	//We now have a proper context from which to schedule threads
	LIB_DEBUG_PRINTF("Kernel : core %p created (%p)\n", proc, proc->ctx);

	// SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
	// resume it to start it as we normally would; that would just context switch
	// back to here. Instead, call main directly since we are already on the
	// appropriate stack.
	proc_cor_storage.c.state = Active;
	main( &proc_cor_storage );
	proc_cor_storage.c.state = Halt;
	proc_cor_storage.c.notHalted = false;

	// Main routine of the core returned, the core is now fully terminated
	LIB_DEBUG_PRINTF("Kernel : core %p main ended (%p)\n", proc, proc->ctx);

	return NULL;
}

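// Create the kernel thread (pthread) backing this processor; it begins execution
// in CtxInvokeProcessor.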
void start(processor * this) {
	LIB_DEBUG_PRINTF("Kernel : Starting core %p\n", this);

	pthread_attr_t attributes;
	pthread_attr_init( &attributes );

	pthread_create( &this->kernel_thread, &attributes, CtxInvokeProcessor, (void*)this );

	pthread_attr_destroy( &attributes );

	LIB_DEBUG_PRINTF("Kernel : core %p started\n", this);
}

//-----------------------------------------------------------------------------
// Scheduler routines
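// Make a thread ready to run by appending it to the system cluster's ready queue,
// protected by the cluster spinlock.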
void thread_schedule( thread * thrd ) {
	assertf( thrd->next == NULL, "Expected null got %p", thrd->next );

	pthread_spinlock_guard guard = { &systemProcessor->cltr->lock };
	append( &systemProcessor->cltr->ready_queue, thrd );
}

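// Take the next ready thread from the cluster, or NULL if the ready queue is empty.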
thread * nextThread(cluster * this) {
	pthread_spinlock_guard guard = { &this->lock };
	return pop_head( &this->ready_queue );
}

//-----------------------------------------------------------------------------
// Kernel boot procedures
void kernel_startup(void) {

	// SKULLDUGGERY: the mainThread steals the process's main thread,
	// which will then be scheduled by the systemProcessor normally
	LIB_DEBUG_PRINTF("Kernel : Starting\n");

	current_stack_info_t info;

	// LIB_DEBUG_PRINTF("Kernel : core base : %p \n", info.base );
	// LIB_DEBUG_PRINTF("Kernel : core storage : %p \n", info.storage );
	// LIB_DEBUG_PRINTF("Kernel : core size : %x \n", info.size );
	// LIB_DEBUG_PRINTF("Kernel : core limit : %p \n", info.limit );
	// LIB_DEBUG_PRINTF("Kernel : core context : %p \n", info.context );
	// LIB_DEBUG_PRINTF("Kernel : core top : %p \n", info.top );

	// Start by initializing the main thread
	mainThread = (thread *)&mainThread_storage;
	mainThread{ &info };

	// Initialize the system cluster
	systemCluster = (cluster *)&systemCluster_storage;
	systemCluster{};

	// Initialize the system processor and the system processor ctx
	// (the coroutine that contains the processing control flow)
	systemProcessor = (processor *)&systemProcessor_storage;
	systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtx_storage };

	// Add the main thread to the ready queue
	// once resume is called on systemProcessor->ctx, the mainThread needs to be scheduled like any normal thread
	thread_schedule(mainThread);

	//initialize the global state variables
	this_processor = systemProcessor;
	this_processor->current_thread = mainThread;
	this_processor->current_coroutine = &mainThread->c;

	// SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX
	// context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
	// mainThread is on the ready queue when this call is made.
	resume(systemProcessor->ctx);



	// THE SYSTEM IS NOW COMPLETELY RUNNING



	LIB_DEBUG_PRINTF("Kernel : Started\n--------------------------------------------------\n\n");
}

void kernel_shutdown(void) {
	LIB_DEBUG_PRINTF("\n--------------------------------------------------\nKernel : Shutting down\n");

	// SKULLDUGGERY: Notify the systemProcessor it needs to terminate.
	// When its coroutine terminates, it returns control to the mainThread,
	// which is currently here
	systemProcessor->terminated = true;
	suspend();

	// THE SYSTEM IS NOW COMPLETELY STOPPED

	// Destroy the system processor and its context in reverse order of construction
	// These were constructed manually, so they must be destroyed manually
	^(systemProcessor->ctx){};
	^(systemProcessor){};

	// Final step : destroy the main thread since it is no longer needed
	// Since we provided a stack to this task, it will not destroy anything
	^(mainThread){};

	LIB_DEBUG_PRINTF("Kernel : Shutdown complete\n");
}

//-----------------------------------------------------------------------------
// Locks
void ?{}( simple_lock * this ) {
	( &this->blocked ){};
}

void ^?{}( simple_lock * this ) {

}

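// Block the calling thread on this lock : enqueue it on the blocked list, then
// suspend back to the processor coroutine.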
void lock( simple_lock * this ) {
	{
		pthread_spinlock_guard guard = { &systemCluster->lock };	//HUGE TEMP HACK which only works if we have a single cluster and is stupid
		append( &this->blocked, this_thread() );
	}
	suspend();
}

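// Wake every thread blocked on this lock by putting it back on the ready queue.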
void unlock( simple_lock * this ) {
	thread * it;
	while( (it = pop_head( &this->blocked )) ) {
		thread_schedule( it );
	}
}

//-----------------------------------------------------------------------------
// Queues
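// simple_thread_list is an intrusive singly-linked FIFO : threads are chained
// through their next field and tail points at the last next field (or at head
// when the list is empty).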
void ?{}( simple_thread_list * this ) {
	this->head = NULL;
	this->tail = &this->head;
}

void append( simple_thread_list * this, thread * t ) {
	assert( t->next == NULL );
	*this->tail = t;
	this->tail = &t->next;
}

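// Remove and return the head thread, resetting tail when the list drains;
// returns NULL when the list is empty.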
thread * pop_head( simple_thread_list * this ) {
	thread * head = this->head;
	if( head ) {
		this->head = head->next;
		if( !head->next ) {
			this->tail = &this->head;
		}
		head->next = NULL;
	}

	return head;
}
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //