source: src/libcfa/concurrency/kernel.c@ 695e00d

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.c --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Fri Jul 21 22:33:18 2017
// Update Count     : 2
//

#include "libhdr.h"

//C Includes
#include <stddef.h>
extern "C" {
#include <stdio.h>
#include <fenv.h>
#include <sys/resource.h>
#include <signal.h>
#include <unistd.h>
}

//CFA Includes
#include "kernel_private.h"
#include "preemption.h"
#include "startup.h"

//Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

//Start and stop routines for the kernel, declared first to make sure they run first
void kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));

//-----------------------------------------------------------------------------
// Kernel storage
KERNEL_STORAGE(cluster, mainCluster);
KERNEL_STORAGE(processor, mainProcessor);
KERNEL_STORAGE(processorCtx_t, mainProcessorCtx);
KERNEL_STORAGE(thread_desc, mainThread);
KERNEL_STORAGE(machine_context_t, mainThreadCtx);

cluster * mainCluster;
processor * mainProcessor;
thread_desc * mainThread;

//-----------------------------------------------------------------------------
// Global state

thread_local coroutine_desc * volatile this_coroutine;
thread_local thread_desc * volatile this_thread;
thread_local processor * volatile this_processor;

volatile thread_local bool preemption_in_progress = 0;
volatile thread_local unsigned short disable_preempt_count = 1;

//-----------------------------------------------------------------------------
// Main thread construction
struct current_stack_info_t {
	machine_context_t ctx;
	unsigned int size;		// size of stack
	void *base;			// base of stack
	void *storage;			// pointer to stack
	void *limit;			// stack grows towards stack limit
	void *context;			// address of cfa_context_t
	void *top;			// address of top of storage
};

void ?{}( current_stack_info_t & this ) {
	CtxGet( this.ctx );
	this.base = this.ctx.FP;
	this.storage = this.ctx.SP;

	rlimit r;
	getrlimit( RLIMIT_STACK, &r);
	this.size = r.rlim_cur;

	this.limit = (void *)(((intptr_t)this.base) - this.size);
	this.context = &storage_mainThreadCtx;
	this.top = this.base;
}

void ?{}( coStack_t & this, current_stack_info_t * info) {
	this.size = info->size;
	this.storage = info->storage;
	this.limit = info->limit;
	this.base = info->base;
	this.context = info->context;
	this.top = info->top;
	this.userStack = true;
}

void ?{}( coroutine_desc & this, current_stack_info_t * info) {
	(this.stack){ info };
	this.name = "Main Thread";
	this.errno_ = 0;
	this.state = Start;
}

void ?{}( thread_desc & this, current_stack_info_t * info) {
	(this.cor){ info };
}

//-----------------------------------------------------------------------------
// Processor coroutine
void ?{}(processorCtx_t & this, processor * proc) {
	(this.__cor){ "Processor" };
	this.proc = proc;
	proc->runner = &this;
}

void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
	(this.__cor){ info };
	this.proc = proc;
	proc->runner = &this;
}

void ?{}(processor & this) {
	this{ mainCluster };
}

void ?{}(processor & this, cluster * cltr) {
	this.cltr = cltr;
	(this.terminated){ 0 };
	this.do_terminate = false;
	this.preemption_alarm = NULL;
	this.pending_preemption = false;

	start( &this );
}

void ?{}(processor & this, cluster * cltr, processorCtx_t & runner) {
	this.cltr = cltr;
	(this.terminated){ 0 };
	this.do_terminate = false;
	this.preemption_alarm = NULL;
	this.pending_preemption = false;
	this.kernel_thread = pthread_self();

	this.runner = &runner;
	LIB_DEBUG_PRINT_SAFE("Kernel : constructing main processor context %p\n", &runner);
	runner{ &this };
}

void ^?{}(processor & this) {
	if( ! this.do_terminate ) {
		LIB_DEBUG_PRINT_SAFE("Kernel : core %p signaling termination\n", &this);
		this.do_terminate = true;
		P( &this.terminated );
		pthread_join( this.kernel_thread, NULL );
	}
}

void ?{}(cluster & this) {
	( this.ready_queue ){};
	( this.ready_queue_lock ){};

	this.preemption = default_preemption();
}

void ^?{}(cluster & this) {
}

//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
//Main of the processor contexts
void main(processorCtx_t & runner) {
	processor * this = runner.proc;

	LIB_DEBUG_PRINT_SAFE("Kernel : core %p starting\n", this);

	{
		// Setup preemption data
		preemption_scope scope = { this };

		LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this);

		thread_desc * readyThread = NULL;
		for( unsigned int spin_count = 0; ! this->do_terminate; spin_count++ )
		{
			readyThread = nextThread( this->cltr );

			if(readyThread)
			{
				verify( disable_preempt_count > 0 );

				runThread(this, readyThread);

				verify( disable_preempt_count > 0 );

				//Some actions need to be taken from the kernel
				finishRunning(this);

				spin_count = 0;
			}
			else
			{
				spin(this, &spin_count);
			}
		}

		LIB_DEBUG_PRINT_SAFE("Kernel : core %p stopping\n", this);
	}

	V( &this->terminated );

	LIB_DEBUG_PRINT_SAFE("Kernel : core %p terminated\n", this);
}

// runThread runs a thread by context switching
// from the processor coroutine to the target thread
void runThread(processor * this, thread_desc * dst) {
	coroutine_desc * proc_cor = get_coroutine(*this->runner);
	coroutine_desc * thrd_cor = get_coroutine(dst);

	//Reset the terminating actions here
	this->finish.action_code = No_Action;

	//Update global state
	this_thread = dst;

	// Context Switch to the thread
	ThreadCtxSwitch(proc_cor, thrd_cor);
	// when ThreadCtxSwitch returns we are back in the processor coroutine
}

// Once a thread has finished running, some of
// its final actions must be executed from the kernel
void finishRunning(processor * this) {
	if( this->finish.action_code == Release ) {
		unlock( this->finish.lock );
	}
	else if( this->finish.action_code == Schedule ) {
		ScheduleThread( this->finish.thrd );
	}
	else if( this->finish.action_code == Release_Schedule ) {
		unlock( this->finish.lock );
		ScheduleThread( this->finish.thrd );
	}
	else if( this->finish.action_code == Release_Multi ) {
		for(int i = 0; i < this->finish.lock_count; i++) {
			unlock( this->finish.locks[i] );
		}
	}
	else if( this->finish.action_code == Release_Multi_Schedule ) {
		for(int i = 0; i < this->finish.lock_count; i++) {
			unlock( this->finish.locks[i] );
		}
		for(int i = 0; i < this->finish.thrd_count; i++) {
			ScheduleThread( this->finish.thrds[i] );
		}
	}
	else {
		assert(this->finish.action_code == No_Action);
	}
}

// Handles spinning logic
// TODO : find some strategy to put cores to sleep after some time
void spin(processor * this, unsigned int * spin_count) {
	(*spin_count)++;
}
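
// Illustrative sketch only, not part of the original kernel: one possible answer to the
// TODO above is to busy-wait briefly, then yield the underlying kernel thread, and finally
// sleep for short periods once spinning has clearly become futile. The thresholds below are
// arbitrary, and sched_yield() would additionally require <sched.h>.
#if 0
void spin(processor * this, unsigned int * spin_count) {
	(*spin_count)++;
	if( *spin_count < 1000 ) {
		// keep spinning: another processor is likely to enqueue work soon
	}
	else if( *spin_count < 10000 ) {
		sched_yield();			// give the OS a chance to run something else
	}
	else {
		usleep( 1000 );			// sleep 1ms; this core is effectively idle
	}
}
#endif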

// Context invoker for processors
// This is the entry point for processors (kernel threads)
// It effectively constructs a coroutine by stealing the pthread stack
void * CtxInvokeProcessor(void * arg) {
	processor * proc = (processor *) arg;
	this_processor = proc;
	this_coroutine = NULL;
	this_thread = NULL;
	disable_preempt_count = 1;
	// SKULLDUGGERY: We want to create a context for the processor coroutine
	// which is needed for the 2-step context switch. However, there is no reason
	// to waste the perfectly valid stack created by pthread.
	current_stack_info_t info;
	machine_context_t ctx;
	info.context = &ctx;
	processorCtx_t proc_cor_storage = { proc, &info };

	LIB_DEBUG_PRINT_SAFE("Coroutine : created stack %p\n", proc_cor_storage.__cor.stack.base);

	//Set global state
	this_coroutine = &proc->runner->__cor;
	this_thread = NULL;

	//We now have a proper context from which to schedule threads
	LIB_DEBUG_PRINT_SAFE("Kernel : core %p created (%p, %p)\n", proc, proc->runner, &ctx);

	// SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
	// resume it to start it as we normally would; it would just context switch
	// back to here. Instead, call main directly since we are already on the
	// appropriate stack.
	proc_cor_storage.__cor.state = Active;
	main( proc_cor_storage );
	proc_cor_storage.__cor.state = Halted;

	// Main routine of the core returned, the core is now fully terminated
	LIB_DEBUG_PRINT_SAFE("Kernel : core %p main ended (%p)\n", proc, proc->runner);

	return NULL;
}

void start(processor * this) {
	LIB_DEBUG_PRINT_SAFE("Kernel : Starting core %p\n", this);

	pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this );

	LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this);
}

//-----------------------------------------------------------------------------
// Scheduler routines
void ScheduleThread( thread_desc * thrd ) {
	// if( !thrd ) return;
	verify( thrd );
	verify( thrd->cor.state != Halted );

	verify( disable_preempt_count > 0 );

	verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );

	lock( &this_processor->cltr->ready_queue_lock DEBUG_CTX2 );
	append( &this_processor->cltr->ready_queue, thrd );
	unlock( &this_processor->cltr->ready_queue_lock );

	verify( disable_preempt_count > 0 );
}

thread_desc * nextThread(cluster * this) {
	verify( disable_preempt_count > 0 );
	lock( &this->ready_queue_lock DEBUG_CTX2 );
	thread_desc * head = pop_head( &this->ready_queue );
	unlock( &this->ready_queue_lock );
	verify( disable_preempt_count > 0 );
	return head;
}

void BlockInternal() {
	disable_interrupts();
	verify( disable_preempt_count > 0 );
	suspend();
	verify( disable_preempt_count > 0 );
	enable_interrupts( DEBUG_CTX );
}

void BlockInternal( spinlock * lock ) {
	disable_interrupts();
	this_processor->finish.action_code = Release;
	this_processor->finish.lock = lock;

	verify( disable_preempt_count > 0 );
	suspend();
	verify( disable_preempt_count > 0 );

	enable_interrupts( DEBUG_CTX );
}

void BlockInternal( thread_desc * thrd ) {
	assert(thrd);
	disable_interrupts();
	assert( thrd->cor.state != Halted );
	this_processor->finish.action_code = Schedule;
	this_processor->finish.thrd = thrd;

	verify( disable_preempt_count > 0 );
	suspend();
	verify( disable_preempt_count > 0 );

	enable_interrupts( DEBUG_CTX );
}

void BlockInternal( spinlock * lock, thread_desc * thrd ) {
	assert(thrd);
	disable_interrupts();
	this_processor->finish.action_code = Release_Schedule;
	this_processor->finish.lock = lock;
	this_processor->finish.thrd = thrd;

	verify( disable_preempt_count > 0 );
	suspend();
	verify( disable_preempt_count > 0 );

	enable_interrupts( DEBUG_CTX );
}

void BlockInternal(spinlock ** locks, unsigned short count) {
	disable_interrupts();
	this_processor->finish.action_code = Release_Multi;
	this_processor->finish.locks = locks;
	this_processor->finish.lock_count = count;

	verify( disable_preempt_count > 0 );
	suspend();
	verify( disable_preempt_count > 0 );

	enable_interrupts( DEBUG_CTX );
}

void BlockInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) {
	disable_interrupts();
	this_processor->finish.action_code = Release_Multi_Schedule;
	this_processor->finish.locks = locks;
	this_processor->finish.lock_count = lock_count;
	this_processor->finish.thrds = thrds;
	this_processor->finish.thrd_count = thrd_count;

	verify( disable_preempt_count > 0 );
	suspend();
	verify( disable_preempt_count > 0 );

	enable_interrupts( DEBUG_CTX );
}
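
// Illustrative usage sketch (hypothetical, not part of this file): a blocking primitive
// acquires its spinlock, queues the current thread, then passes the lock to BlockInternal
// so it is released from the processor coroutine, i.e. only after the caller has context
// switched off its own stack. This is the same pattern used by the semaphore P routine
// further down in this file; some_blocking_object is an invented type for illustration.
#if 0
void wait( some_blocking_object * this ) {
	lock( &this->lock DEBUG_CTX2 );
	append( &this->waiting, (thread_desc *)this_thread );	// register as a waiter
	BlockInternal( &this->lock );				// atomically release the lock and block
}
#endif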

void LeaveThread(spinlock * lock, thread_desc * thrd) {
	verify( disable_preempt_count > 0 );
	this_processor->finish.action_code = thrd ? Release_Schedule : Release;
	this_processor->finish.lock = lock;
	this_processor->finish.thrd = thrd;

	suspend();
}

//=============================================================================================
// Kernel Setup logic
//=============================================================================================
//-----------------------------------------------------------------------------
// Kernel boot procedures
void kernel_startup(void) {
	LIB_DEBUG_PRINT_SAFE("Kernel : Starting\n");

	// Start by initializing the main thread
	// SKULLDUGGERY: the mainThread steals the process main thread
	// which will then be scheduled by the mainProcessor normally
	mainThread = (thread_desc *)&storage_mainThread;
	current_stack_info_t info;
	(*mainThread){ &info };

	LIB_DEBUG_PRINT_SAFE("Kernel : Main thread ready\n");

	// Initialize the main cluster
	mainCluster = (cluster *)&storage_mainCluster;
	(*mainCluster){};

	LIB_DEBUG_PRINT_SAFE("Kernel : main cluster ready\n");

	// Initialize the main processor and the main processor ctx
	// (the coroutine that contains the processing control flow)
	mainProcessor = (processor *)&storage_mainProcessor;
	(*mainProcessor){ mainCluster, *(processorCtx_t *)&storage_mainProcessorCtx };

	//initialize the global state variables
	this_processor = mainProcessor;
	this_thread = mainThread;
	this_coroutine = &mainThread->cor;

	// Enable preemption
	kernel_start_preemption();

	// Add the main thread to the ready queue
	// once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
	ScheduleThread(mainThread);

	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
	// context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
	// mainThread is on the ready queue when this call is made.
	resume( *mainProcessor->runner );

	// THE SYSTEM IS NOW COMPLETELY RUNNING
	LIB_DEBUG_PRINT_SAFE("Kernel : Started\n--------------------------------------------------\n\n");

	enable_interrupts( DEBUG_CTX );
}

void kernel_shutdown(void) {
	LIB_DEBUG_PRINT_SAFE("\n--------------------------------------------------\nKernel : Shutting down\n");

	disable_interrupts();

	// SKULLDUGGERY: Notify the mainProcessor it needs to terminate.
	// When its coroutine terminates, it returns control to the mainThread,
	// which is currently here.
	mainProcessor->do_terminate = true;
	suspend();

	// THE SYSTEM IS NOW COMPLETELY STOPPED

	// Disable preemption
	kernel_stop_preemption();

	// Destroy the main processor and its context in reverse order of construction
	// These were manually constructed so we need to manually destroy them
	^(*mainProcessor->runner){};
	^(mainProcessor){};

	// Final step, destroy the main thread since it is no longer needed
	// Since we provided a stack to this task, it will not destroy anything
	^(mainThread){};

	LIB_DEBUG_PRINT_SAFE("Kernel : Shutdown complete\n");
}

static spinlock kernel_abort_lock;
static spinlock kernel_debug_lock;
static bool kernel_abort_called = false;

void * kernel_abort (void) __attribute__ ((__nothrow__)) {
	// abort cannot be recursively entered by the same or different processors because all signal handlers return when
	// the globalAbort flag is true.
	lock( &kernel_abort_lock DEBUG_CTX2 );

	// first task to abort ?
	if ( !kernel_abort_called ) {
		kernel_abort_called = true;
		unlock( &kernel_abort_lock );
	}
	else {
		unlock( &kernel_abort_lock );

		sigset_t mask;
		sigemptyset( &mask );
		sigaddset( &mask, SIGALRM );		// block SIGALRM signals
		sigaddset( &mask, SIGUSR1 );		// block SIGUSR1 signals
		sigsuspend( &mask );			// block the processor to prevent further damage during abort
		_exit( EXIT_FAILURE );			// if processor unblocks before it is killed, terminate it
	}

	return this_thread;
}

void kernel_abort_msg( void * kernel_data, char * abort_text, int abort_text_size ) {
	thread_desc * thrd = kernel_data;

	int len = snprintf( abort_text, abort_text_size, "Error occurred while executing task %.256s (%p)", thrd->cor.name, thrd );
	__lib_debug_write( STDERR_FILENO, abort_text, len );

	if ( thrd != this_coroutine ) {
		len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine->name, this_coroutine );
		__lib_debug_write( STDERR_FILENO, abort_text, len );
	}
	else {
		__lib_debug_write( STDERR_FILENO, ".\n", 2 );
	}
}

extern "C" {
	void __lib_debug_acquire() {
		lock( &kernel_debug_lock DEBUG_CTX2 );
	}

	void __lib_debug_release() {
		unlock( &kernel_debug_lock );
	}
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
//-----------------------------------------------------------------------------
// Locks
void ?{}( spinlock & this ) {
	this.lock = 0;
}
void ^?{}( spinlock & this ) {
}

bool try_lock( spinlock * this DEBUG_CTX_PARAM2 ) {
	return this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0;
}

void lock( spinlock * this DEBUG_CTX_PARAM2 ) {
	for ( unsigned int i = 1;; i += 1 ) {
		if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) { break; }
	}
	LIB_DEBUG_DO(
		this->prev_name = caller;
		this->prev_thrd = this_thread;
	)
}

void lock_yield( spinlock * this DEBUG_CTX_PARAM2 ) {
	for ( unsigned int i = 1;; i += 1 ) {
		if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) { break; }
		yield();
	}
	LIB_DEBUG_DO(
		this->prev_name = caller;
		this->prev_thrd = this_thread;
	)
}

void unlock( spinlock * this ) {
	__sync_lock_release_4( &this->lock );
}
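
// Illustrative usage sketch (not part of the original file): lock/unlock protect very short
// critical sections against other processors; lock_yield is preferable when the holder can
// itself be descheduled, since spinning callers then yield instead of burning a time slice.
// The lock and counter below are invented names for illustration only.
#if 0
static spinlock example_lock;		// hypothetical lock protecting a shared counter
static int example_counter;

void example_increment() {
	lock( &example_lock DEBUG_CTX2 );	// busy-wait until the lock is acquired
	example_counter += 1;			// short critical section
	unlock( &example_lock );
}
#endif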

void ?{}( semaphore & this, int count = 1 ) {
	(this.lock){};
	this.count = count;
	(this.waiting){};
}
void ^?{}(semaphore & this) {}

void P(semaphore * this) {
	lock( &this->lock DEBUG_CTX2 );
	this->count -= 1;
	if ( this->count < 0 ) {
		// queue current task
		append( &this->waiting, (thread_desc *)this_thread );

		// atomically release spin lock and block
		BlockInternal( &this->lock );
	}
	else {
		unlock( &this->lock );
	}
}

void V(semaphore * this) {
	thread_desc * thrd = NULL;
	lock( &this->lock DEBUG_CTX2 );
	this->count += 1;
	if ( this->count <= 0 ) {
		// remove task at head of waiting list
		thrd = pop_head( &this->waiting );
	}

	unlock( &this->lock );

	// make new owner
	WakeThread( thrd );
}
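
// Illustrative usage sketch (not part of the original file): this semaphore is used above as
// a termination handshake, where ^?{}(processor &) does P( &this.terminated ) and the
// processor's main does V( &this->terminated ) once its scheduling loop exits. The same
// primitive can express a simple rendezvous; the names below are invented for illustration.
#if 0
semaphore ready = { 0 };		// hypothetical: starts unavailable

void consumer() {
	P( &ready );			// block until the producer signals
}

void producer() {
	// ... produce something ...
	V( &ready );			// wake one waiting consumer
}
#endif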

//-----------------------------------------------------------------------------
// Queues
// Intrusive singly-linked queue of threads: tail points at the next field of the
// last element (or at head when the queue is empty), which is always NULL.
void ?{}( __thread_queue_t & this ) {
	this.head = NULL;
	this.tail = &this.head;
}

void append( __thread_queue_t * this, thread_desc * t ) {
	verify(this->tail != NULL);
	*this->tail = t;
	this->tail = &t->next;
}

thread_desc * pop_head( __thread_queue_t * this ) {
	thread_desc * head = this->head;
	if( head ) {
		this->head = head->next;
		if( !head->next ) {
			this->tail = &this->head;
		}
		head->next = NULL;
	}
	return head;
}

thread_desc * remove( __thread_queue_t * this, thread_desc ** it ) {
	thread_desc * thrd = *it;
	verify( thrd );

	(*it) = thrd->next;

	if( this->tail == &thrd->next ) {
		this->tail = it;
	}

	thrd->next = NULL;

	verify( (this->head == NULL) == (&this->head == this->tail) );
	verify( *this->tail == NULL );
	return thrd;
}

void ?{}( __condition_stack_t & this ) {
	this.top = NULL;
}

void push( __condition_stack_t * this, __condition_criterion_t * t ) {
	verify( !t->next );
	t->next = this->top;
	this->top = t;
}

__condition_criterion_t * pop( __condition_stack_t * this ) {
	__condition_criterion_t * top = this->top;
	if( top ) {
		this->top = top->next;
		top->next = NULL;
	}
	return top;
}

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //