source: src/libcfa/concurrency/kernel.c@ db6f06a

Last change on this file was db6f06a, checked in by Thierry Delisle <tdelisle@…>, 9 years ago

Implemented better condition lock to solve race condition on thread/processor termination

// -*- Mode: CFA -*-
//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.c --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Thierry Delisle
// Last Modified On : --
// Update Count     : 0
//

//Start and stop routines for the kernel, declared first to make sure they run first
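//Note: constructor priorities 0-100 are reserved by the implementation, so 101 is
//the earliest-running priority available to user code; the matching destructor
//priority makes kernel_shutdown run after every other user-level global destructor.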
void kernel_startup(void) __attribute__((constructor(101)));
void kernel_shutdown(void) __attribute__((destructor(101)));

//Header
#include "kernel_private.h"

//C Includes
#include <stddef.h>
extern "C" {
#include <fenv.h>
#include <sys/resource.h>
}

//CFA Includes
#include "libhdr.h"

//Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

static volatile int lock;

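// Test-and-test-and-set: spin on a plain read of the lock word and only attempt
// the atomic swap when the lock appears free, avoiding needless cache-line traffic.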
void spin_lock( volatile int *lock ) {
    for ( unsigned int i = 1;; i += 1 ) {
        if ( *lock == 0 && __sync_lock_test_and_set_4( lock, 1 ) == 0 ) break;
    }
}

void spin_unlock( volatile int *lock ) {
    __sync_lock_release_4( lock );
}

//-----------------------------------------------------------------------------
// Kernel storage
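// Raw, uninitialized static storage for the kernel objects that must exist before
// dynamic allocation is usable; they are constructed in place during kernel_startup.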
#define KERNEL_STORAGE(T,X) static char X##_storage[sizeof(T)]

KERNEL_STORAGE(processorCtx_t, systemProcessorCtx);
KERNEL_STORAGE(cluster, systemCluster);
KERNEL_STORAGE(processor, systemProcessor);
KERNEL_STORAGE(thread, mainThread);
KERNEL_STORAGE(machine_context_t, mainThread_context);

cluster * systemCluster;
processor * systemProcessor;
thread * mainThread;

//-----------------------------------------------------------------------------
// Global state

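// Each kernel thread (pthread) remembers which processor it runs; all
// per-processor state is reached through this thread-local pointer.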
thread_local processor * this_processor;

processor * get_this_processor() {
    return this_processor;
}

coroutine * this_coroutine(void) {
    return this_processor->current_coroutine;
}

thread * this_thread(void) {
    return this_processor->current_thread;
}

//-----------------------------------------------------------------------------
// Main thread construction
struct current_stack_info_t {
    machine_context_t ctx;
    unsigned int size;    // size of stack
    void *base;           // base of stack
    void *storage;        // pointer to stack
    void *limit;          // stack grows towards stack limit
    void *context;        // address of cfa_context_t
    void *top;            // address of top of storage
};

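// Capture the currently executing stack: the frame and stack pointers come from
// the saved machine context, the stack size from the RLIMIT_STACK soft limit.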
void ?{}( current_stack_info_t * this ) {
    CtxGet( &this->ctx );
    this->base = this->ctx.FP;
    this->storage = this->ctx.SP;

    rlimit r;
    getrlimit( RLIMIT_STACK, &r);
    this->size = r.rlim_cur;

    this->limit = (void *)(((intptr_t)this->base) - this->size);
    this->context = &mainThread_context_storage;
    this->top = this->base;
}

void ?{}( coStack_t * this, current_stack_info_t * info) {
    this->size = info->size;
    this->storage = info->storage;
    this->limit = info->limit;
    this->base = info->base;
    this->context = info->context;
    this->top = info->top;
    this->userStack = true;
}

void ?{}( coroutine * this, current_stack_info_t * info) {
    (&this->stack){ info };
    this->name = "Main Thread";
    this->errno_ = 0;
    this->state = Inactive;
    this->notHalted = true;
}

void ?{}( thread * this, current_stack_info_t * info) {
    (&this->c){ info };
}

//-----------------------------------------------------------------------------
// Processor coroutine
void ?{}(processorCtx_t * this, processor * proc) {
    (&this->c){};
    this->proc = proc;
    proc->runner = this;
}

void ?{}(processorCtx_t * this, processor * proc, current_stack_info_t * info) {
    (&this->c){ info };
    this->proc = proc;
    proc->runner = this;
}

void ?{}(processor * this) {
    this{ systemCluster };
}

void ?{}(processor * this, cluster * cltr) {
    this->cltr = cltr;
    this->current_coroutine = NULL;
    this->current_thread = NULL;
    (&this->terminated){};
    this->is_terminated = false;

    start( this );
}

void ?{}(processor * this, cluster * cltr, processorCtx_t * runner) {
    this->cltr = cltr;
    this->current_coroutine = NULL;
    this->current_thread = NULL;
    (&this->terminated){};
    this->is_terminated = false;

    this->runner = runner;
    LIB_DEBUG_PRINTF("Kernel : constructing processor context %p\n", runner);
    runner{ this };
}

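// Destruction is synchronous: flag the processor for termination, then block on
// the signal_once until its scheduling loop has actually exited.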
void ^?{}(processor * this) {
    if( ! this->is_terminated ) {
        LIB_DEBUG_PRINTF("Kernel : core %p signaling termination\n", this);
        this->is_terminated = true;
        wait( &this->terminated );
    }
}

void ?{}(cluster * this) {
    ( &this->ready_queue ){};
    lock = 0;
}

void ^?{}(cluster * this) {

}

//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
//Main of the processor contexts
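//Each processor repeatedly pulls a thread from its cluster's ready queue and runs
//it, applying any pending finish action in between; when the queue is empty it spins.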
void main(processorCtx_t * runner) {
    processor * this = runner->proc;
    LIB_DEBUG_PRINTF("Kernel : core %p starting\n", this);

    thread * readyThread = NULL;
    for( unsigned int spin_count = 0; ! this->is_terminated; spin_count++ )
    {
        readyThread = nextThread( this->cltr );

        if(readyThread)
        {
            runThread(this, readyThread);

            //Some actions need to be taken from the kernel
            finishRunning(this);

            spin_count = 0;
        }
        else
        {
            spin(this, &spin_count);
        }
    }

    LIB_DEBUG_PRINTF("Kernel : core %p unlocking thread\n", this);
    signal( &this->terminated );
    LIB_DEBUG_PRINTF("Kernel : core %p terminated\n", this);
}

// runThread runs a thread by context switching
// from the processor coroutine to the target thread
void runThread(processor * this, thread * dst) {
    coroutine * proc_cor = get_coroutine(this->runner);
    coroutine * thrd_cor = get_coroutine(dst);

    //Reset the terminating actions here
    this->finish.action_code = No_Action;

    //Update global state
    this->current_thread = dst;

    // Context Switch to the thread
    ThreadCtxSwitch(proc_cor, thrd_cor);
    // when ThreadCtxSwitch returns we are back in the processor coroutine
}

// Once a thread has finished running, some of
// its final actions must be executed from the kernel
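// (e.g., a thread that blocked must not have its lock released or be rescheduled
// until the context switch away from its stack has fully completed)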
void finishRunning(processor * this) {
    if( this->finish.action_code == Release ) {
        unlock( this->finish.lock );
    }
    else if( this->finish.action_code == Schedule ) {
        ScheduleThread( this->finish.thrd );
    }
    else if( this->finish.action_code == Release_Schedule ) {
        unlock( this->finish.lock );
        ScheduleThread( this->finish.thrd );
    }
    else {
        assert(this->finish.action_code == No_Action);
    }
}

// Handles spinning logic
// TODO : find some strategy to put cores to sleep after some time
void spin(processor * this, unsigned int * spin_count) {
    (*spin_count)++;
}

// Context invoker for processors
// This is the entry point for processors (kernel threads)
// It effectively constructs a coroutine by stealing the pthread stack
void * CtxInvokeProcessor(void * arg) {
    processor * proc = (processor *) arg;
    this_processor = proc;
    // SKULLDUGGERY: We want to create a context for the processor coroutine
    // which is needed for the 2-step context switch. However, there is no reason
    // to waste the perfectly valid stack created by pthread.
    current_stack_info_t info;
    machine_context_t ctx;
    info.context = &ctx;
    processorCtx_t proc_cor_storage = { proc, &info };

    LIB_DEBUG_PRINTF("Coroutine : created stack %p\n", proc_cor_storage.c.stack.base);

    //Set global state
    proc->current_coroutine = &proc->runner->c;
    proc->current_thread = NULL;

    //We now have a proper context from which to schedule threads
    LIB_DEBUG_PRINTF("Kernel : core %p created (%p, %p)\n", proc, proc->runner, &ctx);

    // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
    // resume it to start it like we normally would; it would just context switch
    // back to here. Instead, directly call main since we are already on the
    // appropriate stack.
    proc_cor_storage.c.state = Active;
    main( &proc_cor_storage );
    proc_cor_storage.c.state = Halt;
    proc_cor_storage.c.notHalted = false;

    // Main routine of the core returned, the core is now fully terminated
    LIB_DEBUG_PRINTF("Kernel : core %p main ended (%p)\n", proc, proc->runner);

    return NULL;
}

void start(processor * this) {
    LIB_DEBUG_PRINTF("Kernel : Starting core %p\n", this);

    // pthread_attr_t attributes;
    // pthread_attr_init( &attributes );

    pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this );

    // pthread_attr_destroy( &attributes );

    LIB_DEBUG_PRINTF("Kernel : core %p started\n", this);
}

//-----------------------------------------------------------------------------
// Scheduler routines
void ScheduleThread( thread * thrd ) {
    assertf( thrd->next == NULL, "Expected null got %p", thrd->next );

    lock( &systemProcessor->cltr->lock );
    append( &systemProcessor->cltr->ready_queue, thrd );
    unlock( &systemProcessor->cltr->lock );
}

thread * nextThread(cluster * this) {
    lock( &this->lock );
    thread * head = pop_head( &this->ready_queue );
    unlock( &this->lock );
    return head;
}

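// Internal blocking: the caller records a post-switch action in its processor's
// finish structure, then suspends back to the processor coroutine, which applies
// the action in finishRunning once the caller's stack is no longer in use.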
void ScheduleInternal() {
    suspend();
}

void ScheduleInternal( spinlock * lock ) {
    get_this_processor()->finish.action_code = Release;
    get_this_processor()->finish.lock = lock;
    suspend();
}

void ScheduleInternal( thread * thrd ) {
    get_this_processor()->finish.action_code = Schedule;
    get_this_processor()->finish.thrd = thrd;
    suspend();
}

void ScheduleInternal( spinlock * lock, thread * thrd ) {
    get_this_processor()->finish.action_code = Release_Schedule;
    get_this_processor()->finish.lock = lock;
    get_this_processor()->finish.thrd = thrd;
    suspend();
}

//-----------------------------------------------------------------------------
// Kernel boot procedures
void kernel_startup(void) {
    LIB_DEBUG_PRINTF("Kernel : Starting\n");

    // Start by initializing the main thread
    // SKULLDUGGERY: the mainThread steals the process main thread
    // which will then be scheduled by the systemProcessor normally
    mainThread = (thread *)&mainThread_storage;
    current_stack_info_t info;
    mainThread{ &info };

    // Initialize the system cluster
    systemCluster = (cluster *)&systemCluster_storage;
    systemCluster{};

    // Initialize the system processor and the system processor ctx
    // (the coroutine that contains the processing control flow)
    systemProcessor = (processor *)&systemProcessor_storage;
    systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtx_storage };

    // Add the main thread to the ready queue
    // once resume is called on systemProcessor->runner the mainThread needs to be scheduled like any normal thread
    ScheduleThread(mainThread);

    // Initialize the global state variables
    this_processor = systemProcessor;
    this_processor->current_thread = mainThread;
    this_processor->current_coroutine = &mainThread->c;

    // SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX
    // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
    // mainThread is on the ready queue when this call is made.
    resume(systemProcessor->runner);

    // THE SYSTEM IS NOW COMPLETELY RUNNING
    LIB_DEBUG_PRINTF("Kernel : Started\n--------------------------------------------------\n\n");
}

void kernel_shutdown(void) {
    LIB_DEBUG_PRINTF("\n--------------------------------------------------\nKernel : Shutting down\n");

    // SKULLDUGGERY: Notify the systemProcessor it needs to terminate.
    // When its coroutine terminates, it returns control to the mainThread,
    // which is currently here
    systemProcessor->is_terminated = true;
    suspend();

    // THE SYSTEM IS NOW COMPLETELY STOPPED

    // Destroy the system processor and its context in reverse order of construction
    // These were manually constructed so we need to destroy them manually
    ^(systemProcessor->runner){};
    ^(systemProcessor){};

    // Final step, destroy the main thread since it is no longer needed
    // Since we provided a stack to this task it will not destroy anything
    ^(mainThread){};

    LIB_DEBUG_PRINTF("Kernel : Shutdown complete\n");
}

//-----------------------------------------------------------------------------
// Locks
// void ?{}( simple_lock * this ) {
//     ( &this->blocked ){};
// }

// void ^?{}( simple_lock * this ) {

// }

// void lock( simple_lock * this ) {
//     {
//         spin_lock( &lock );
//         append( &this->blocked, this_thread() );
//         spin_unlock( &lock );
//     }
//     ScheduleInternal();
// }

// void lock( simple_lock * this, spinlock * to_release ) {
//     {
//         spin_lock( &lock );
//         append( &this->blocked, this_thread() );
//         spin_unlock( &lock );
//     }
//     ScheduleInternal( to_release );
//     lock( to_release );
// }

// void unlock( simple_lock * this ) {
//     thread * it;
//     while( it = pop_head( &this->blocked) ) {
//         ScheduleThread( it );
//     }
// }

void ?{}( spinlock * this ) {
    this->lock = 0;
}
void ^?{}( spinlock * this ) {

}

void lock( spinlock * this ) {
    for ( unsigned int i = 1;; i += 1 ) {
        if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) break;
    }
}

void unlock( spinlock * this ) {
    __sync_lock_release_4( &this->lock );
}

void ?{}( signal_once * this ) {
    this->condition = false;
}
void ^?{}( signal_once * this ) {

}

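// Block until signal has been called. If the condition is already set, return
// immediately; otherwise ScheduleInternal releases the lock only once this thread
// is safely suspended, so a concurrent signal cannot be missed.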
void wait( signal_once * this ) {
    lock( &this->lock );
    if( !this->condition ) {
        append( &this->blocked, this_thread() );
        ScheduleInternal( &this->lock );
        lock( &this->lock );
    }
    unlock( &this->lock );
}

void signal( signal_once * this ) {
    lock( &this->lock );
    {
        this->condition = true;

        thread * it;
        while( (it = pop_head( &this->blocked )) != NULL ) {
            ScheduleThread( it );
        }
    }
    unlock( &this->lock );
}

//-----------------------------------------------------------------------------
// Queues
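// Intrusive singly-linked queue: tail points at the last node's next field (or at
// head when empty), giving O(1) append with no special case for the empty list.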
void ?{}( simple_thread_list * this ) {
    this->head = NULL;
    this->tail = &this->head;
}

void append( simple_thread_list * this, thread * t ) {
    assert( t->next == NULL );
    *this->tail = t;
    this->tail = &t->next;
}

thread * pop_head( simple_thread_list * this ) {
    thread * head = this->head;
    if( head ) {
        this->head = head->next;
        if( !head->next ) {
            this->tail = &this->head;
        }
        head->next = NULL;
    }

    return head;
}
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //