source: src/libcfa/concurrency/kernel.c@ 8d4f7fe

Last change on this file since 8d4f7fe was 8fcbb4c, checked in by Thierry Delisle <tdelisle@…>, 9 years ago

removed pthread_spinlock_t and fixed race condition in yield

// -*- Mode: CFA -*-
//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.c --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2016
// Last Modified By : Thierry Delisle
// Last Modified On : --
// Update Count     : 0
//

//Start and stop routines for the kernel, declared first to make sure they run first
void kernel_startup(void) __attribute__((constructor(101)));
void kernel_shutdown(void) __attribute__((destructor(101)));

//Header
#include "kernel"

//C Includes
#include <stddef.h>
extern "C" {
#include <fenv.h>
#include <sys/resource.h>
}

//CFA Includes
#include "libhdr.h"
#include "threads"

//Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

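// Global kernel lock: a single spin lock protecting the ready queue and the
// simple_lock blocked lists below.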
static volatile int lock;

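// Test-and-test-and-set spin lock built on the gcc __sync atomic builtins:
// only attempt the atomic swap once the lock reads as free.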
void spin_lock( volatile int *lock ) {
	for ( unsigned int i = 1;; i += 1 ) {
		if ( *lock == 0 && __sync_lock_test_and_set_4( lock, 1 ) == 0 ) break;
	}
}

void spin_unlock( volatile int *lock ) {
	__sync_lock_release_4( lock );
}

//-----------------------------------------------------------------------------
// Kernel storage
struct processorCtx_t {
	processor * proc;
	coroutine c;
};

DECL_COROUTINE(processorCtx_t);

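// Raw static storage for the core kernel objects; they are constructed in
// place during kernel_startup and destroyed manually in kernel_shutdown.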
#define KERNEL_STORAGE(T,X) static char X##_storage[sizeof(T)]

KERNEL_STORAGE(processorCtx_t, systemProcessorCtx);
KERNEL_STORAGE(cluster, systemCluster);
KERNEL_STORAGE(processor, systemProcessor);
KERNEL_STORAGE(thread, mainThread);
KERNEL_STORAGE(machine_context_t, mainThread_context);

cluster * systemCluster;
processor * systemProcessor;
thread * mainThread;

//-----------------------------------------------------------------------------
// Global state

thread_local processor * this_processor;

processor * get_this_processor() {
	return this_processor;
}

coroutine * this_coroutine(void) {
	return this_processor->current_coroutine;
}

thread * this_thread(void) {
	return this_processor->current_thread;
}

//-----------------------------------------------------------------------------
// Main thread construction
struct current_stack_info_t {
	machine_context_t ctx;
	unsigned int size;	// size of stack
	void *base;		// base of stack
	void *storage;		// pointer to stack
	void *limit;		// stack grows towards stack limit
	void *context;		// address of cfa_context_t
	void *top;		// address of top of storage
};

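// Capture the current machine context and approximate the bounds of the stack
// currently in use: base from the saved FP, size from RLIMIT_STACK.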
void ?{}( current_stack_info_t * this ) {
	CtxGet( &this->ctx );
	this->base = this->ctx.FP;
	this->storage = this->ctx.SP;

	rlimit r;
	int ret = getrlimit( RLIMIT_STACK, &r);
	this->size = r.rlim_cur;

	this->limit = (void *)(((intptr_t)this->base) - this->size);
	this->context = &mainThread_context_storage;
	this->top = this->base;
}

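// Adopt an existing stack; userStack marks the storage as externally owned,
// presumably so the coroutine destructor will not free it.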
void ?{}( coStack_t * this, current_stack_info_t * info) {
	this->size = info->size;
	this->storage = info->storage;
	this->limit = info->limit;
	this->base = info->base;
	this->context = info->context;
	this->top = info->top;
	this->userStack = true;
}

void ?{}( coroutine * this, current_stack_info_t * info) {
	(&this->stack){ info };
	this->name = "Main Thread";
	this->errno_ = 0;
	this->state = Inactive;
	this->notHalted = true;
}

void ?{}( thread * this, current_stack_info_t * info) {
	(&this->c){ info };
}

//-----------------------------------------------------------------------------
// Processor coroutine
void ?{}(processorCtx_t * this, processor * proc) {
	(&this->c){};
	this->proc = proc;
	proc->runner = this;
}

void ?{}(processorCtx_t * this, processor * proc, current_stack_info_t * info) {
	(&this->c){ info };
	this->proc = proc;
	proc->runner = this;
}

void start(processor * this);

void ?{}(processor * this) {
	this{ systemCluster };
}

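// Regular processors spawn their own kernel thread through start().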
void ?{}(processor * this, cluster * cltr) {
	this->cltr = cltr;
	this->current_coroutine = NULL;
	this->current_thread = NULL;
	(&this->lock){};
	this->terminated = false;

	start( this );
}

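// The system processor is constructed over caller-provided storage for its
// runner context and does not spawn its own kernel thread (see kernel_startup).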
void ?{}(processor * this, cluster * cltr, processorCtx_t * runner) {
	this->cltr = cltr;
	this->current_coroutine = NULL;
	this->current_thread = NULL;
	(&this->lock){};
	this->terminated = false;

	this->runner = runner;
	LIB_DEBUG_PRINTF("Kernel : constructing processor context %p\n", runner);
	runner{ this };
}

void ^?{}(processor * this) {
	if( ! this->terminated ) {
		LIB_DEBUG_PRINTF("Kernel : core %p signaling termination\n", this);
		this->terminated = true;
		lock( &this->lock );
	}
}

void ?{}(cluster * this) {
	( &this->ready_queue ){};
	lock = 0;
}

void ^?{}(cluster * this) {

}

//-----------------------------------------------------------------------------
// Processor running routines
void main(processorCtx_t *);
thread * nextThread(cluster * this);
void scheduleInternal(processor * this, thread * dst);
void spin(processor * this, unsigned int * spin_count);
void thread_schedule( thread * thrd );

//Main of the processor contexts
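// The scheduler loop: repeatedly poll the cluster ready queue and context
// switch to any ready thread; otherwise spin. The loop exits once the
// processor is flagged as terminated.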
void main(processorCtx_t * runner) {
	processor * this = runner->proc;
	LIB_DEBUG_PRINTF("Kernel : core %p starting\n", this);

	fenv_t envp;
	fegetenv( &envp );
	LIB_DEBUG_PRINTF("Kernel : mxcsr %x\n", envp.__mxcsr);

	thread * readyThread = NULL;
	for( unsigned int spin_count = 0; ! this->terminated; spin_count++ ) {

		readyThread = nextThread( this->cltr );

		if(readyThread) {
			scheduleInternal(this, readyThread);
			spin_count = 0;
		} else {
			spin(this, &spin_count);
		}
	}

	LIB_DEBUG_PRINTF("Kernel : core %p unlocking thread\n", this);
	unlock( &this->lock );
	LIB_DEBUG_PRINTF("Kernel : core %p terminated\n", this);
}

//Declarations for scheduleInternal
extern void ThreadCtxSwitch(coroutine * src, coroutine * dst);

// scheduleInternal runs a thread by context switching
// from the processor coroutine to the target thread
void scheduleInternal(processor * this, thread * dst) {
	this->thread_action = NoAction;

	// coroutine * proc_ctx = get_coroutine(this->ctx);
	// coroutine * thrd_ctx = get_coroutine(dst);

	// //Update global state
	// this->current_thread = dst;

	// // Context Switch to the thread
	// ThreadCtxSwitch(proc_ctx, thrd_ctx);
	// // when ThreadCtxSwitch returns we are back in the processor coroutine

	coroutine * proc_ctx = get_coroutine(this->runner);
	coroutine * thrd_ctx = get_coroutine(dst);
	thrd_ctx->last = proc_ctx;

	// context switch to specified coroutine
	// which is now the current_coroutine
	// LIB_DEBUG_PRINTF("Kernel : switching to ctx %p (from %p, current %p)\n", thrd_ctx, proc_ctx, this->current_coroutine);
	this->current_thread = dst;
	this->current_coroutine = thrd_ctx;
	CtxSwitch( proc_ctx->stack.context, thrd_ctx->stack.context );
	this->current_coroutine = proc_ctx;
	// LIB_DEBUG_PRINTF("Kernel : returned from ctx %p (to %p, current %p)\n", thrd_ctx, proc_ctx, this->current_coroutine);

	// when CtxSwitch returns we are back in the processor coroutine
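	// thread_action is presumably set to Reschedule by a yielding thread; doing
	// the enqueue here, after switching back to the processor, avoids the yield
	// race mentioned in the 8fcbb4c commit message.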
	if(this->thread_action == Reschedule) {
		thread_schedule( dst );
	}
}

// Handles spinning logic
// TODO : find some strategy to put cores to sleep after some time
void spin(processor * this, unsigned int * spin_count) {
	(*spin_count)++;
}

// Context invoker for processors
// This is the entry point for processors (kernel threads)
// It effectively constructs a coroutine by stealing the pthread stack
void * CtxInvokeProcessor(void * arg) {
	processor * proc = (processor *) arg;
	this_processor = proc;
	// SKULLDUGGERY: We want to create a context for the processor coroutine,
	// which is needed for the 2-step context switch. However, there is no reason
	// to waste the perfectly valid stack created by pthread.
	current_stack_info_t info;
	machine_context_t ctx;
	info.context = &ctx;
	processorCtx_t proc_cor_storage = { proc, &info };

	LIB_DEBUG_PRINTF("Coroutine : created stack %p\n", proc_cor_storage.c.stack.base);

	//Set global state
	proc->current_coroutine = &proc->runner->c;
	proc->current_thread = NULL;

	//We now have a proper context from which to schedule threads
	LIB_DEBUG_PRINTF("Kernel : core %p created (%p, %p)\n", proc, proc->runner, &ctx);

	// SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
	// resume it to start it like we normally would; it would just context switch
	// back to here. Instead, call main directly since we are already on the
	// appropriate stack.
	proc_cor_storage.c.state = Active;
	main( &proc_cor_storage );
	proc_cor_storage.c.state = Halt;
	proc_cor_storage.c.notHalted = false;

	// Main routine of the core returned, the core is now fully terminated
	LIB_DEBUG_PRINTF("Kernel : core %p main ended (%p)\n", proc, proc->runner);

	return NULL;
}

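// Spawn the underlying kernel thread (pthread) that will run this processor.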
void start(processor * this) {
	LIB_DEBUG_PRINTF("Kernel : Starting core %p\n", this);

	// pthread_attr_t attributes;
	// pthread_attr_init( &attributes );

	pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this );

	// pthread_attr_destroy( &attributes );

	LIB_DEBUG_PRINTF("Kernel : core %p started\n", this);
}

//-----------------------------------------------------------------------------
// Scheduler routines
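// Append a thread to the system cluster's ready queue under the global lock.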
void thread_schedule( thread * thrd ) {
	assertf( thrd->next == NULL, "Expected null got %p", thrd->next );

	spin_lock( &lock );
	append( &systemProcessor->cltr->ready_queue, thrd );
	spin_unlock( &lock );
}

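// Pop the next ready thread from the cluster, or NULL if the queue is empty.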
thread * nextThread(cluster * this) {
	spin_lock( &lock );
	thread * head = pop_head( &this->ready_queue );
	spin_unlock( &lock );
	return head;
}

//-----------------------------------------------------------------------------
// Kernel boot procedures
void kernel_startup(void) {
	LIB_DEBUG_PRINTF("Kernel : Starting\n");

	// Start by initializing the main thread
	// SKULLDUGGERY: the mainThread steals the process main thread,
	// which will then be scheduled by the systemProcessor normally
	mainThread = (thread *)&mainThread_storage;
	current_stack_info_t info;
	mainThread{ &info };

	// Initialize the system cluster
	systemCluster = (cluster *)&systemCluster_storage;
	systemCluster{};

	// Initialize the system processor and the system processor ctx
	// (the coroutine that contains the processing control flow)
	systemProcessor = (processor *)&systemProcessor_storage;
	systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtx_storage };

	// Add the main thread to the ready queue
	// once resume is called on systemProcessor->runner the mainThread needs to be scheduled like any normal thread
	thread_schedule(mainThread);

	//initialize the global state variables
	this_processor = systemProcessor;
	this_processor->current_thread = mainThread;
	this_processor->current_coroutine = &mainThread->c;

	// SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX
	// context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
	// mainThread is on the ready queue when this call is made.
	resume(systemProcessor->runner);

	// THE SYSTEM IS NOW COMPLETELY RUNNING
	LIB_DEBUG_PRINTF("Kernel : Started\n--------------------------------------------------\n\n");
}

void kernel_shutdown(void) {
	LIB_DEBUG_PRINTF("\n--------------------------------------------------\nKernel : Shutting down\n");

	// SKULLDUGGERY: Notify the systemProcessor that it needs to terminate.
	// When its coroutine terminates, it returns control to the mainThread,
	// which is currently here.
	systemProcessor->terminated = true;
	suspend();

	// THE SYSTEM IS NOW COMPLETELY STOPPED

	// Destroy the system processor and its context in reverse order of construction
	// These were manually constructed so we need to destroy them manually
	^(systemProcessor->runner){};
	^(systemProcessor){};

	// Final step, destroy the main thread since it is no longer needed
	// Since we provided a stack to this task it will not destroy anything
	^(mainThread){};

	LIB_DEBUG_PRINTF("Kernel : Shutdown complete\n");
}

//-----------------------------------------------------------------------------
// Locks
void ?{}( simple_lock * this ) {
	( &this->blocked ){};
}

void ^?{}( simple_lock * this ) {

}

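// Enqueue the current thread on the lock's blocked list and suspend;
// unlock below wakes every blocked thread by rescheduling it.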
void lock( simple_lock * this ) {
	{
		spin_lock( &lock );
		append( &this->blocked, this_thread() );
		spin_unlock( &lock );
	}
	suspend();
}

void unlock( simple_lock * this ) {
	thread * it;
	while( it = pop_head( &this->blocked) ) {
		thread_schedule( it );
	}
}

//-----------------------------------------------------------------------------
// Queues
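// Intrusive singly-linked FIFO of threads: tail points at the last thread's
// next field (or at head when the list is empty).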
void ?{}( simple_thread_list * this ) {
	this->head = NULL;
	this->tail = &this->head;
}

void append( simple_thread_list * this, thread * t ) {
	assert( t->next == NULL );
	*this->tail = t;
	this->tail = &t->next;
}

thread * pop_head( simple_thread_list * this ) {
	thread * head = this->head;
	if( head ) {
		this->head = head->next;
		if( !head->next ) {
			this->tail = &this->head;
		}
		head->next = NULL;
	}

	return head;
}
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //