source: src/libcfa/concurrency/kernel.c@ ee897e4b

ADT aaron-thesis arm-eh ast-experimental cleanup-dtors deferred_resn demangler enum forall-pointer-decay jacob/cs343-translation jenkins-sandbox new-ast new-ast-unique-expr new-env no_list persistent-indexer pthread-emulation qualifiedEnum resolv-new with_gc
Last change on this file since ee897e4b was ee897e4b, checked in by Thierry Delisle <tdelisle@…>, 9 years ago

Made some clean-up and removed redundant coroutine state

  • Property mode set to 100644
File size: 13.8 KB
Line 
1// -*- Mode: CFA -*-
2//
3// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
4//
5// The contents of this file are covered under the licence agreement in the
6// file "LICENCE" distributed with Cforall.
7//
8// kernel.c --
9//
10// Author : Thierry Delisle
11// Created On : Tue Jan 17 12:27:26 2017
12// Last Modified By : Thierry Delisle
13// Last Modified On : --
14// Update Count : 0
15//
16
17//Start and stop routine for the kernel, declared first to make sure they run first
18void kernel_startup(void) __attribute__((constructor(101)));
19void kernel_shutdown(void) __attribute__((destructor(101)));
20
21//Header
22#include "kernel_private.h"
23
24//C Includes
25#include <stddef.h>
26extern "C" {
27#include <fenv.h>
28#include <sys/resource.h>
29}
30
31//CFA Includes
32#include "libhdr.h"
33
34//Private includes
35#define __CFA_INVOKE_PRIVATE__
36#include "invoke.h"
37
38static volatile int lock;
39
// Busy-wait until *lock is acquired (0 = free, 1 = held).
// Test-and-test-and-set: spin on a plain (cheap, cache-local) read and only
// attempt the atomic exchange once the lock appears free. The original
// carried an unused loop counter 'i'; it is removed here.
void spin_lock( volatile int *lock ) {
	for ( ;; ) {
		if ( *lock == 0 && __sync_lock_test_and_set_4( lock, 1 ) == 0 ) break;
	}
}
45
// Release the lock with a release-barrier store (GCC atomic builtin);
// pairs with the acquire semantics of the exchange in spin_lock.
void spin_unlock( volatile int *lock ) {
	__sync_lock_release_4( lock );
}
49
//-----------------------------------------------------------------------------
// Kernel storage
// Statically-reserved raw storage for the kernel's bootstrap objects; they are
// constructed in-place during kernel_startup so boot needs no heap allocation.
#define KERNEL_STORAGE(T,X) static char X##_storage[sizeof(T)]

KERNEL_STORAGE(processorCtx_t, systemProcessorCtx);
KERNEL_STORAGE(cluster, systemCluster);
KERNEL_STORAGE(processor, systemProcessor);
KERNEL_STORAGE(thread, mainThread);
KERNEL_STORAGE(machine_context_t, mainThread_context);

// Pointers into the storage blocks above, set during kernel_startup.
cluster * systemCluster;
processor * systemProcessor;
thread * mainThread;
63
//-----------------------------------------------------------------------------
// Global state

// Kernel-thread-local: the processor the current kernel thread is driving.
// Set in CtxInvokeProcessor for worker threads and in kernel_startup for the
// bootstrap (system) processor.
thread_local processor * this_processor;

processor * get_this_processor() {
	return this_processor;
}

// Coroutine currently executing on this kernel thread's processor.
coroutine * this_coroutine(void) {
	return this_processor->current_coroutine;
}

// User thread currently executing on this kernel thread's processor;
// may be NULL while the kernel itself is running (see runThread).
thread * this_thread(void) {
	return this_processor->current_thread;
}
80
//-----------------------------------------------------------------------------
// Main thread construction
// Snapshot of the stack of the currently executing UNIX thread, used to adopt
// that stack as a coroutine stack (for the main thread and for processors).
struct current_stack_info_t {
	machine_context_t ctx;
	unsigned int size;		// size of stack
	void *base;				// base of stack
	void *storage;			// pointer to stack
	void *limit;			// stack grows towards stack limit
	void *context;			// address of cfa_context_t
	void *top;				// address of top of storage
};
92
// Capture the currently executing (UNIX-provided) stack so it can be adopted
// as a coroutine stack. FP/SP come from the live machine context; the stack
// extent is derived from the process stack rlimit.
void ?{}( current_stack_info_t * this ) {
	CtxGet( &this->ctx );
	this->base = this->ctx.FP;
	this->storage = this->ctx.SP;

	// getrlimit can fail (e.g. EINVAL); fall back to a zero-sized stack
	// instead of reading an uninitialized rlim_cur (undefined behaviour).
	rlimit r;
	if( getrlimit( RLIMIT_STACK, &r) == 0 ) {
		this->size = r.rlim_cur;
	}
	else {
		this->size = 0;
	}
	// NOTE(review): rlim_cur may be RLIM_INFINITY, which truncates when stored
	// into an unsigned int — confirm callers tolerate a bogus 'limit' then.

	this->limit = (void *)(((intptr_t)this->base) - this->size);
	this->context = &mainThread_context_storage;
	this->top = this->base;
}
106
// Initialize a coroutine stack descriptor from a snapshot of an existing
// stack. The stack is marked user-owned so the kernel will not free it.
void ?{}( coStack_t * this, current_stack_info_t * info) {
	this->storage   = info->storage;
	this->size      = info->size;
	this->limit     = info->limit;
	this->base      = info->base;
	this->context   = info->context;
	this->top       = info->top;
	this->userStack = true;
}
116
// Build the main thread's coroutine on top of the current UNIX stack.
void ?{}( coroutine * this, current_stack_info_t * info) {
	this->name = "Main Thread";
	this->errno_ = 0;
	this->state = Start;
	(&this->stack){ info };
}
123
// Construct the main thread in-place from the current UNIX stack; simply
// forwards to the coroutine constructor above.
void ?{}( thread * this, current_stack_info_t * info) {
	(&this->c){ info };
}
127
//-----------------------------------------------------------------------------
// Processor coroutine

// Construct a processor context with its own freshly-allocated stack and link
// it both ways with its owning processor.
void ?{}(processorCtx_t * this, processor * proc) {
	(&this->c){};
	this->proc = proc;
	proc->runner = this;
}

// Construct a processor context on an EXISTING stack (used when stealing the
// pthread stack in CtxInvokeProcessor) and link it with its processor.
void ?{}(processorCtx_t * this, processor * proc, current_stack_info_t * info) {
	(&this->c){ info };
	this->proc = proc;
	proc->runner = this;
}
141
// Default: processors belong to the system cluster.
void ?{}(processor * this) {
	this{ systemCluster };
}
145
// Construct a processor on the given cluster and immediately launch its
// backing kernel thread; the processor's context coroutine is created by
// that new thread itself (see CtxInvokeProcessor).
void ?{}(processor * this, cluster * cltr) {
	this->cltr = cltr;
	this->current_coroutine = NULL;
	this->current_thread = NULL;
	(&this->terminated){};
	this->is_terminated = false;

	// Launches the kernel thread; must come after all fields are initialized.
	start( this );
}
155
// Construct a processor with a caller-supplied context storage (used for the
// system processor during boot); does NOT start a kernel thread — the boot
// sequence drives this processor from the process's main thread instead.
void ?{}(processor * this, cluster * cltr, processorCtx_t * runner) {
	this->cltr = cltr;
	this->current_coroutine = NULL;
	this->current_thread = NULL;
	(&this->terminated){};
	this->is_terminated = false;

	this->runner = runner;
	LIB_DEBUG_PRINTF("Kernel : constructing processor context %p\n", runner);
	runner{ this };
}
167
// Destructor: ask the processor's kernel thread to stop, then block until it
// signals termination (see the end of main(processorCtx_t*)).
// NOTE(review): is_terminated is a plain bool written here and polled by the
// scheduler loop on another kernel thread — this is a data race by the C
// memory model; an atomic flag would be safer. Confirm intended semantics.
void ^?{}(processor * this) {
	if( ! this->is_terminated ) {
		LIB_DEBUG_PRINTF("Kernel : core %p signaling termination\n", this);
		this->is_terminated = true;
		wait( &this->terminated );
	}
}
175
// Construct a cluster with an empty ready queue.
// NOTE(review): this also resets the file-scope 'lock' used by the (currently
// commented-out) simple_lock code; constructing a second cluster while that
// lock is held would corrupt it — verify the reset is intentional.
void ?{}(cluster * this) {
	( &this->ready_queue ){};
	lock = 0;
}

// Nothing to tear down: the ready queue owns no heap storage.
void ^?{}(cluster * this) {

}
184
//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
//Main of the processor contexts: run ready threads until asked to terminate.
//Each iteration pops one thread from the cluster's ready queue, runs it to
//its next suspension point, then performs any kernel-side finish action.
void main(processorCtx_t * runner) {
	processor * this = runner->proc;
	LIB_DEBUG_PRINTF("Kernel : core %p starting\n", this);

	thread * readyThread = NULL;
	// is_terminated is set by ^?{}(processor*) / kernel_shutdown from another
	// kernel thread (non-atomic — see note on the destructor).
	for( unsigned int spin_count = 0; ! this->is_terminated; spin_count++ )
	{
		readyThread = nextThread( this->cltr );

		if(readyThread)
		{
			runThread(this, readyThread);

			//Some actions need to be taken from the kernel
			finishRunning(this);

			// Reset the idle counter: we found work.
			spin_count = 0;
		}
		else
		{
			spin(this, &spin_count);
		}
	}

	LIB_DEBUG_PRINTF("Kernel : core %p unlocking thread\n", this);
	// Wake whoever is blocked in the processor destructor / shutdown.
	signal( &this->terminated );
	LIB_DEBUG_PRINTF("Kernel : core %p terminated\n", this);
}
217
// runThread runs a thread by context switching
// from the processor coroutine to the target thread
void runThread(processor * this, thread * dst) {
	coroutine * proc_cor = get_coroutine(this->runner);
	coroutine * thrd_cor = get_coroutine(dst);

	//Reset the terminating actions here; the thread may install a new action
	//(via a ScheduleInternal overload) before suspending back to the kernel.
	this->finish.action_code = No_Action;

	//Update global state
	this->current_thread = dst;

	// Context Switch to the thread
	ThreadCtxSwitch(proc_cor, thrd_cor);
	// when ThreadCtxSwitch returns we are back in the processor coroutine
}
234
// Once a thread has finished running, some of its final actions must be
// executed from the kernel: releasing a lock it held while suspending and/or
// scheduling another thread on its behalf (see the ScheduleInternal family).
void finishRunning(processor * this) {
	switch( this->finish.action_code ) {
		case Release:
			unlock( this->finish.lock );
			break;
		case Schedule:
			ScheduleThread( this->finish.thrd );
			break;
		case Release_Schedule:
			unlock( this->finish.lock );
			ScheduleThread( this->finish.thrd );
			break;
		default:
			// Anything else must be the explicit no-op action.
			assert(this->finish.action_code == No_Action);
			break;
	}
}
252
// Handles spinning logic when no thread is ready; currently just counts
// idle iterations.
// TODO : find some strategy to put cores to sleep after some time
void spin(processor * this, unsigned int * spin_count) {
	*spin_count += 1;
}
258
// Context invoker for processors
// This is the entry point for processors (kernel threads)
// It effectively constructs a coroutine by stealing the pthread stack
void * CtxInvokeProcessor(void * arg) {
	processor * proc = (processor *) arg;
	this_processor = proc;
	// SKULLDUGGERY: We want to create a context for the processor coroutine
	// which is needed for the 2-step context switch. However, there is no reason
	// to waste the perfectly valid stack created by pthread.
	current_stack_info_t info;
	machine_context_t ctx;		// lives on this pthread stack for the processor's lifetime
	info.context = &ctx;
	processorCtx_t proc_cor_storage = { proc, &info };

	LIB_DEBUG_PRINTF("Coroutine : created stack %p\n", proc_cor_storage.c.stack.base);

	//Set global state
	proc->current_coroutine = &proc->runner->c;
	proc->current_thread = NULL;

	//We now have a proper context from which to schedule threads
	LIB_DEBUG_PRINTF("Kernel : core %p created (%p, %p)\n", proc, proc->runner, &ctx);

	// SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
	// resume it to start it like it normally would, it will just context switch
	// back to here. Instead directly call the main since we already are on the
	// appropriate stack.
	proc_cor_storage.c.state = Active;
	main( &proc_cor_storage );
	proc_cor_storage.c.state = Halted;

	// Main routine of the core returned, the core is now fully terminated
	LIB_DEBUG_PRINTF("Kernel : core %p main ended (%p)\n", proc, proc->runner);

	return NULL;
}
295
// Launch the kernel thread backing this processor. The new pthread enters
// CtxInvokeProcessor, which adopts the pthread stack as the processor's
// coroutine context.
void start(processor * this) {
	LIB_DEBUG_PRINTF("Kernel : Starting core %p\n", this);

	// pthread_create can fail (e.g. EAGAIN on resource exhaustion); a
	// processor without a kernel thread would hang the system in wait(), so
	// fail loudly instead of ignoring the return value.
	int ret = pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this );
	assertf( ret == 0, "pthread_create failed with error %d", ret );

	LIB_DEBUG_PRINTF("Kernel : core %p started\n", this);
}
308
//-----------------------------------------------------------------------------
// Scheduler routines

// Make a thread eligible to run by appending it to a ready queue; the thread
// must not already be linked into a queue (next == NULL).
// NOTE(review): this always targets systemProcessor's cluster rather than the
// thread's own — fine while a single cluster exists; verify before adding
// more clusters.
void ScheduleThread( thread * thrd ) {
	assertf( thrd->next == NULL, "Expected null got %p", thrd->next );

	lock( &systemProcessor->cltr->lock );
	append( &systemProcessor->cltr->ready_queue, thrd );
	unlock( &systemProcessor->cltr->lock );
}
318
// Pop the next ready thread of the cluster; NULL when the queue is empty.
thread * nextThread(cluster * this) {
	lock( &this->lock );
	thread * head = pop_head( &this->ready_queue );
	unlock( &this->lock );
	return head;
}
325
// Yield back to the kernel with no pending finish action.
void ScheduleInternal() {
	suspend();
}
329
// Yield back to the kernel, asking it to release the given lock once this
// thread has fully stopped running (the kernel releases it in finishRunning,
// avoiding a release while still on this thread's stack).
void ScheduleInternal( spinlock * lock ) {
	processor * proc = get_this_processor();
	proc->finish.action_code = Release;
	proc->finish.lock = lock;
	suspend();
}
335
// Yield back to the kernel, asking it to schedule the given thread once this
// thread has fully stopped running (performed in finishRunning).
void ScheduleInternal( thread * thrd ) {
	processor * proc = get_this_processor();
	proc->finish.action_code = Schedule;
	proc->finish.thrd = thrd;
	suspend();
}
341
// Yield back to the kernel, asking it to both release the given lock and
// schedule the given thread once this thread has fully stopped running
// (both actions are performed in finishRunning).
void ScheduleInternal( spinlock * lock, thread * thrd ) {
	processor * proc = get_this_processor();
	proc->finish.action_code = Release_Schedule;
	proc->finish.lock = lock;
	proc->finish.thrd = thrd;
	suspend();
}
348
//-----------------------------------------------------------------------------
// Kernel boot procedures

// Runs before main() (constructor priority 101): bootstrap the main thread,
// the system cluster and the system processor in-place in their static
// storage, then context switch to the system processor so the main thread
// resumes as a regularly scheduled thread.
void kernel_startup(void) {
	LIB_DEBUG_PRINTF("Kernel : Starting\n");

	// Start by initializing the main thread
	// SKULLDUGGERY: the mainThread steals the process main thread
	// which will then be scheduled by the systemProcessor normally
	mainThread = (thread *)&mainThread_storage;
	current_stack_info_t info;
	mainThread{ &info };

	// Initialize the system cluster
	systemCluster = (cluster *)&systemCluster_storage;
	systemCluster{};

	// Initialize the system processor and the system processor ctx
	// (the coroutine that contains the processing control flow)
	systemProcessor = (processor *)&systemProcessor_storage;
	systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtx_storage };

	// Add the main thread to the ready queue
	// once resume is called on systemProcessor->ctx the mainThread needs to be scheduled like any normal thread
	ScheduleThread(mainThread);

	//initialize the global state variables
	this_processor = systemProcessor;
	this_processor->current_thread = mainThread;
	this_processor->current_coroutine = &mainThread->c;

	// SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX
	// context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
	// mainThread is on the ready queue when this call is made.
	resume(systemProcessor->runner);



	// THE SYSTEM IS NOW COMPLETELY RUNNING
	LIB_DEBUG_PRINTF("Kernel : Started\n--------------------------------------------------\n\n");
}
389
// Runs after main() (destructor priority 101): stop the system processor's
// scheduler loop, then tear down the bootstrap objects in reverse order of
// construction.
void kernel_shutdown(void) {
	LIB_DEBUG_PRINTF("\n--------------------------------------------------\nKernel : Shutting down\n");

	// SKULLDUGGERY: Notify the systemProcessor it needs to terminate.
	// When its coroutine terminates, it returns control to the mainThread
	// which is currently here
	systemProcessor->is_terminated = true;
	suspend();

	// THE SYSTEM IS NOW COMPLETELY STOPPED

	// Destroy the system processor and its context in reverse order of construction
	// These were manually constructed so we need to manually destroy them
	^(systemProcessor->runner){};
	^(systemProcessor){};

	// Final step, destroy the main thread since it is no longer needed
	// Since we provided a stack to this task it will not destroy anything
	^(mainThread){};

	LIB_DEBUG_PRINTF("Kernel : Shutdown complete\n");
}
412
413//-----------------------------------------------------------------------------
414// Locks
415// void ?{}( simple_lock * this ) {
416// ( &this->blocked ){};
417// }
418
419// void ^?{}( simple_lock * this ) {
420
421// }
422
423// void lock( simple_lock * this ) {
424// {
425// spin_lock( &lock );
426// append( &this->blocked, this_thread() );
427// spin_unlock( &lock );
428// }
429// ScheduleInternal();
430// }
431
432// void lock( simple_lock * this, spinlock * to_release ) {
433// {
434// spin_lock( &lock );
435// append( &this->blocked, this_thread() );
436// spin_unlock( &lock );
437// }
438// ScheduleInternal( to_release );
439// lock( to_release );
440// }
441
442// void unlock( simple_lock * this ) {
443// thread * it;
444// while( it = pop_head( &this->blocked) ) {
445// ScheduleThread( it );
446// }
447// }
448
// Spinlock: the lock word is 0 when free, 1 when held.
void ?{}( spinlock * this ) {
	this->lock = 0;
}
// Nothing to release: the lock owns no resources.
void ^?{}( spinlock * this ) {

}
455
// Busy-wait until the spinlock is acquired.
// Test-and-test-and-set: spin on a plain (cache-local) read and only attempt
// the atomic exchange once the lock appears free. The original carried an
// unused loop counter 'i'; it is removed here.
void lock( spinlock * this ) {
	for ( ;; ) {
		if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) break;
	}
}
461
// Release with a release barrier (GCC atomic builtin); pairs with lock().
void unlock( spinlock * this ) {
	__sync_lock_release_4( &this->lock );
}
465
// One-shot signal: once signalled, every past and future waiter proceeds.
// NOTE(review): the embedded 'lock' and 'blocked' members are not explicitly
// initialized here — confirm CFA implicitly constructs members before this
// body runs.
void ?{}( signal_once * this ) {
	this->condition = false;
}
void ^?{}( signal_once * this ) {

}
472
// Block the calling thread until signal() has been called on this object;
// returns immediately if the signal already fired.
void wait( signal_once * this ) {
	lock( &this->lock );
	if( !this->condition ) {
		append( &this->blocked, this_thread() );
		// Suspend and have the kernel release the lock only after this thread
		// has fully stopped running (see ScheduleInternal / finishRunning);
		// this closes the lost-wakeup window between append and suspend.
		ScheduleInternal( &this->lock );
		// Re-acquire so the unlock below is balanced.
		lock( &this->lock );
	}
	unlock( &this->lock );
}
482
// Fire the one-shot signal: latch the condition so future wait() calls return
// immediately, then move every blocked thread back onto the ready queue.
void signal( signal_once * this ) {
	lock( &this->lock );

	this->condition = true;

	thread * blocked_thread;
	while( (blocked_thread = pop_head( &this->blocked )) != NULL ) {
		ScheduleThread( blocked_thread );
	}

	unlock( &this->lock );
}
495
//-----------------------------------------------------------------------------
// Queues

// Intrusive singly-linked thread queue: 'tail' points at the pointer to
// update on the next append (the head slot itself when the list is empty).
void ?{}( simple_thread_list * this ) {
	this->head = NULL;
	this->tail = &this->head;
}
502
// Append a thread; it must not already be linked into any list (next == NULL,
// which ?{}(simple_thread_list*) and pop_head maintain).
void append( simple_thread_list * this, thread * t ) {
	assert( t->next == NULL );
	*this->tail = t;
	this->tail = &t->next;
}
508
// Detach and return the first thread of the list, or NULL when it is empty.
thread * pop_head( simple_thread_list * this ) {
	thread * head = this->head;
	if( !head ) return NULL;

	this->head = head->next;
	if( !head->next ) {
		// List drained: point tail back at the head slot.
		this->tail = &this->head;
	}
	// Unlink so the thread can be appended again (see assert in append).
	head->next = NULL;
	return head;
}
521// Local Variables: //
522// mode: c //
523// tab-width: 4 //
524// End: //
Note: See TracBrowser for help on using the repository browser.