Changeset 3d560060 for src/libcfa/concurrency
- Timestamp: Nov 30, 2017, 3:05:25 PM
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children: 557435e, 5da9d6a
- Parents: dd9b59e (diff), c2b9f21 (diff)
- Location: src/libcfa/concurrency
- Files: 8 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links next to each parent to see all the changes relative to that parent.
Legend:
- Unmodified lines have no prefix
- Removed lines are prefixed with -
- Added lines are prefixed with +
- … marks unchanged lines omitted from the diff
src/libcfa/concurrency/alarm.c
  }

- #include "libhdr.h"
-
  #include "alarm.h"
  #include "kernel_private.h"
…
  }

- LIB_DEBUG_DO( bool validate( alarm_list_t * this ) {
+ __cfaabi_dbg_debug_do( bool validate( alarm_list_t * this ) {
        alarm_node_t ** it = &this->head;
        while( (*it) ) {
…

  disable_interrupts();
- lock( event_kernel->lock DEBUG_CTX2 );
+ lock( event_kernel->lock __cfaabi_dbg_ctx2 );
  {
        verify( validate( alarms ) );
…
  unlock( event_kernel->lock );
  this->set = true;
- enable_interrupts( DEBUG_CTX );
+ enable_interrupts( __cfaabi_dbg_ctx );
  }

  void unregister_self( alarm_node_t * this ) {
        disable_interrupts();
-       lock( event_kernel->lock DEBUG_CTX2 );
+       lock( event_kernel->lock __cfaabi_dbg_ctx2 );
        {
              verify( validate( &event_kernel->alarms ) );
…
        }
        unlock( event_kernel->lock );
-       enable_interrupts( DEBUG_CTX );
+       enable_interrupts( __cfaabi_dbg_ctx );
        this->set = false;
  }
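Most of this merge is a mechanical rename of the old LIB_DEBUG_* / DEBUG_CTX* helpers to the __cfaabi_dbg_* family, visible above in alarm.c and repeated in every file below. The macro definitions themselves are not part of this diff, so the following is only a rough sketch, under assumed names and expansions, of how such a convention can be wired so that the debug bookkeeping, including the extra caller argument threaded through lock() and enable_interrupts(), compiles away entirely in non-debug builds:

    /* sketch.c : minimal, hypothetical sketch of the __cfaabi_dbg_* convention,
     * inferred from the call sites in this changeset; the real definitions live
     * elsewhere in libcfa and may differ (the __CFA_DEBUG__ flag name is assumed). */
    #include <stdio.h>

    #define __CFA_DEBUG__

    #ifdef __CFA_DEBUG__
        #define __cfaabi_dbg_debug_do(...)     __VA_ARGS__            // kept only in debug builds
        #define __cfaabi_dbg_no_debug_do(...)                         // dropped in debug builds
        #define __cfaabi_dbg_ctx_param         const char * caller    // declares the extra parameter
        #define __cfaabi_dbg_ctx_param2        , const char * caller  // same, after an existing parameter
        #define __cfaabi_dbg_ctx               __func__               // passes the call site
        #define __cfaabi_dbg_ctx2              , __func__
    #else
        #define __cfaabi_dbg_debug_do(...)
        #define __cfaabi_dbg_no_debug_do(...)  __VA_ARGS__
        #define __cfaabi_dbg_ctx_param         void
        #define __cfaabi_dbg_ctx_param2
        #define __cfaabi_dbg_ctx
        #define __cfaabi_dbg_ctx2
    #endif

    struct spinlock { int held; const char * last_acquirer; };

    // Mirrors the "lock( l __cfaabi_dbg_ctx2 )" call sites: the caller string only exists in debug builds.
    void do_lock( struct spinlock * this __cfaabi_dbg_ctx_param2 ) {
        this->held = 1;
        __cfaabi_dbg_debug_do( this->last_acquirer = caller; )
    }

    // Mirrors "enable_interrupts( __cfaabi_dbg_ctx )".
    void enable_interrupts( __cfaabi_dbg_ctx_param ) {
        __cfaabi_dbg_debug_do( printf( "interrupts enabled from %s\n", caller ); )
    }

    int main() {
        struct spinlock l = { 0 };
        do_lock( &l __cfaabi_dbg_ctx2 );
        enable_interrupts( __cfaabi_dbg_ctx );
        return 0;
    }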
src/libcfa/concurrency/coroutine.c
  #define __CFA_INVOKE_PRIVATE__
  #include "invoke.h"
-

  //-----------------------------------------------------------------------------
…
  void ^?{}(coStack_t & this) {
        if ( ! this.userStack && this.storage ) {
-             LIB_DEBUG_DO(
+             __cfaabi_dbg_debug_do(
                    if ( mprotect( this.storage, pageSize, PROT_READ | PROT_WRITE ) == -1 ) {
                          abortf( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
…

        // assume malloc has 8 byte alignment so add 8 to allow rounding up to 16 byte alignment
-       LIB_DEBUG_DO( this->storage = memalign( pageSize, cxtSize + this->size + pageSize ) );
-       LIB_NO_DEBUG_DO( this->storage = malloc( cxtSize + this->size + 8 ) );
+       __cfaabi_dbg_debug_do( this->storage = memalign( pageSize, cxtSize + this->size + pageSize ) );
+       __cfaabi_dbg_no_debug_do( this->storage = malloc( cxtSize + this->size + 8 ) );

-       LIB_DEBUG_DO(
+       __cfaabi_dbg_debug_do(
              if ( mprotect( this->storage, pageSize, PROT_NONE ) == -1 ) {
                    abortf( "(uMachContext &)%p.createContext() : internal error, mprotect failure, error(%d) %s.", this, (int)errno, strerror( (int)errno ) );
…
        } // if

-       LIB_DEBUG_DO( this->limit = (char *)this->storage + pageSize );
-       LIB_NO_DEBUG_DO( this->limit = (char *)libCeiling( (unsigned long)this->storage, 16 ) ); // minimum alignment
+       __cfaabi_dbg_debug_do( this->limit = (char *)this->storage + pageSize );
+       __cfaabi_dbg_no_debug_do( this->limit = (char *)libCeiling( (unsigned long)this->storage, 16 ) ); // minimum alignment

  } else {
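In coroutine.c above, the debug path page-aligns the coroutine stack and write-protects its lowest page, so running off the end of the stack faults immediately instead of silently corrupting memory, while the non-debug path falls back to a plain malloc. A stand-alone sketch of that guard-page idea on Linux/glibc (simplified; the real code also reserves the context-switch area and uses helpers such as libCeiling) could look like:

    /* guard_page_sketch.c : simplified illustration of the debug-build stack
     * guard page used above; not the actual libcfa allocation code. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <errno.h>
    #include <malloc.h>      // memalign
    #include <sys/mman.h>    // mprotect
    #include <unistd.h>      // sysconf

    int main() {
        size_t pageSize  = (size_t)sysconf( _SC_PAGESIZE );
        size_t stackSize = 64 * 1024;

        // Allocate one extra page and page-align the block so the first page can be protected.
        char * storage = memalign( pageSize, stackSize + pageSize );
        if ( ! storage ) { perror( "memalign" ); return 1; }

        // Make the lowest page inaccessible: a stack overflow now faults instead of scribbling on memory.
        if ( mprotect( storage, pageSize, PROT_NONE ) == -1 ) {
            fprintf( stderr, "mprotect failure, error(%d) %s.\n", errno, strerror( errno ) );
            return 1;
        }

        char * limit = storage + pageSize;   // usable stack starts past the guard page
        char * base  = limit + stackSize;    // stacks grow down from base toward limit
        printf( "guard page %p, usable stack [%p, %p)\n", (void *)storage, (void *)limit, (void *)base );

        // Restore access before releasing the block, as the destructor above does.
        mprotect( storage, pageSize, PROT_READ | PROT_WRITE );
        free( storage );
        return 0;
    }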
src/libcfa/concurrency/invoke.c
  #include <stdio.h>

- #include "libhdr.h"
  #include "invoke.h"

…
  extern void __leave_thread_monitor( struct thread_desc * this );
  extern void disable_interrupts();
- extern void enable_interrupts( DEBUG_CTX_PARAM );
+ extern void enable_interrupts( __cfaabi_dbg_ctx_param );

(The bodies of CtxInvokeCoroutine, CtxInvokeThread and CtxStart were also re-indented in this changeset; only the content changes are marked below.)

  void CtxInvokeCoroutine(
        void (*main)(void *),
        struct coroutine_desc *(*get_coroutine)(void *),
        void *this
  ) {
-       // LIB_DEBUG_PRINTF("Invoke Coroutine : Received %p (main %p, get_c %p)\n", this, main, get_coroutine);
        struct coroutine_desc* cor = get_coroutine( this );

        if(cor->state == Primed) {
              __suspend_internal();
        }

        cor->state = Active;

        main( this );

        cor->state = Halted;

        //Final suspend, should never return
        __leave_coroutine();
        abortf("Resumed dead coroutine");
  }

  void CtxInvokeThread(
        void (*dtor)(void *),
        void (*main)(void *),
        struct thread_desc *(*get_thread)(void *),
        void *this
  ) {
        // First suspend, once the thread arrives here,
        // the function pointer to main can be invalidated without risk
        __suspend_internal();

        // Fetch the thread handle from the user defined thread structure
        struct thread_desc* thrd = get_thread( this );

        // Officially start the thread by enabling preemption
-       enable_interrupts( DEBUG_CTX );
+       enable_interrupts( __cfaabi_dbg_ctx );

        // Call the main of the thread
        main( this );

        // To exit a thread we must :
        // 1 - Mark it as halted
        // 2 - Leave its monitor
        // 3 - Disable the interupts
        // 4 - Final suspend
        // The order of these 4 operations is very important
        //Final suspend, should never return
        __leave_thread_monitor( thrd );
        abortf("Resumed dead thread");
  }


  void CtxStart(
        void (*main)(void *),
        struct coroutine_desc *(*get_coroutine)(void *),
        void *this,
        void (*invoke)(void *)
  ) {
-       // LIB_DEBUG_PRINTF("StartCoroutine : Passing in %p (main %p) to invoke (%p) from start (%p)\n", this, main, invoke, CtxStart);
        struct coStack_t* stack = &get_coroutine( this )->stack;

  #if defined( __i386__ )
…
        void *fixedRegisters[3];   // fixed registers ebx, edi, esi (popped on 1st uSwitch, values unimportant)
        uint32_t mxcr;             // SSE Status and Control bits (control bits are preserved across function calls)
        uint16_t fcw;              // X97 FPU control word (preserved across function calls)
        void *rturn;               // where to go on return from uSwitch
        void *dummyReturn;         // fake return compiler would have pushed on call to uInvoke
…
        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->argument[0] = this; // argument to invoke
        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = invoke;
        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520
        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7

  #elif defined( __x86_64__ )

        struct FakeStack {
              void *fixedRegisters[5];   // fixed registers rbx, r12, r13, r14, r15
              uint32_t mxcr;             // SSE Status and Control bits (control bits are preserved across function calls)
              uint16_t fcw;              // X97 FPU control word (preserved across function calls)
              void *rturn;               // where to go on return from uSwitch
              void *dummyReturn;         // NULL return address to provide proper alignment
        };

        ((struct machine_context_t *)stack->context)->SP = (char *)stack->base - sizeof( struct FakeStack );
        ((struct machine_context_t *)stack->context)->FP = NULL; // terminate stack with NULL fp

        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->dummyReturn = NULL;
        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = CtxInvokeStub;
        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fixedRegisters[0] = this;
        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fixedRegisters[1] = invoke;
        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520
        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7
  #else
        #error Only __i386__ and __x86_64__ is supported for threads in cfa
  #endif
  }
src/libcfa/concurrency/kernel.c
  //

- #include "libhdr.h"
-
  //C Includes
  #include <stddef.h>
…

  this.runner = &runner;
- LIB_DEBUG_PRINT_SAFE("Kernel : constructing main processor context %p\n", &runner);
+ __cfaabi_dbg_print_safe("Kernel : constructing main processor context %p\n", &runner);
  runner{ &this };
  }
…
  void ^?{}(processor & this) {
        if( ! this.do_terminate ) {
-             LIB_DEBUG_PRINT_SAFE("Kernel : core %p signaling termination\n", &this);
+             __cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);
              this.do_terminate = true;
              P( this.terminated );
…
  processor * this = runner.proc;

- LIB_DEBUG_PRINT_SAFE("Kernel : core %p starting\n", this);
+ __cfaabi_dbg_print_safe("Kernel : core %p starting\n", this);

  {
…
        preemption_scope scope = { this };

-       LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this);
+       __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);

        thread_desc * readyThread = NULL;
…
        }

-       LIB_DEBUG_PRINT_SAFE("Kernel : core %p stopping\n", this);
+       __cfaabi_dbg_print_safe("Kernel : core %p stopping\n", this);
  }

  V( this->terminated );

- LIB_DEBUG_PRINT_SAFE("Kernel : core %p terminated\n", this);
+ __cfaabi_dbg_print_safe("Kernel : core %p terminated\n", this);
  }
…
  processorCtx_t proc_cor_storage = { proc, &info };

- LIB_DEBUG_PRINT_SAFE("Coroutine : created stack %p\n", proc_cor_storage.__cor.stack.base);
+ __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", proc_cor_storage.__cor.stack.base);

  //Set global state
…

  //We now have a proper context from which to schedule threads
- LIB_DEBUG_PRINT_SAFE("Kernel : core %p created (%p, %p)\n", proc, proc->runner, &ctx);
+ __cfaabi_dbg_print_safe("Kernel : core %p created (%p, %p)\n", proc, proc->runner, &ctx);

  // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
…

  // Main routine of the core returned, the core is now fully terminated
- LIB_DEBUG_PRINT_SAFE("Kernel : core %p main ended (%p)\n", proc, proc->runner);
+ __cfaabi_dbg_print_safe("Kernel : core %p main ended (%p)\n", proc, proc->runner);

  return NULL;
…

  void start(processor * this) {
-       LIB_DEBUG_PRINT_SAFE("Kernel : Starting core %p\n", this);
+       __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this);

        pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this );

-       LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this);
+       __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);
  }

…
  verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );

- lock( this_processor->cltr->ready_queue_lock DEBUG_CTX2 );
+ lock( this_processor->cltr->ready_queue_lock __cfaabi_dbg_ctx2 );
  append( this_processor->cltr->ready_queue, thrd );
  unlock( this_processor->cltr->ready_queue_lock );
…
  thread_desc * nextThread(cluster * this) {
        verify( disable_preempt_count > 0 );
-       lock( this->ready_queue_lock DEBUG_CTX2 );
+       lock( this->ready_queue_lock __cfaabi_dbg_ctx2 );
        thread_desc * head = pop_head( this->ready_queue );
        unlock( this->ready_queue_lock );
…
  suspend();
  verify( disable_preempt_count > 0 );
- enable_interrupts( DEBUG_CTX );
+ enable_interrupts( __cfaabi_dbg_ctx );
  }
…
  verify( disable_preempt_count > 0 );

- enable_interrupts( DEBUG_CTX );
+ enable_interrupts( __cfaabi_dbg_ctx );
  }
…
  verify( disable_preempt_count > 0 );

- enable_interrupts( DEBUG_CTX );
+ enable_interrupts( __cfaabi_dbg_ctx );
  }
…
  verify( disable_preempt_count > 0 );

- enable_interrupts( DEBUG_CTX );
+ enable_interrupts( __cfaabi_dbg_ctx );
  }
…
  verify( disable_preempt_count > 0 );

- enable_interrupts( DEBUG_CTX );
+ enable_interrupts( __cfaabi_dbg_ctx );
  }
…
  verify( disable_preempt_count > 0 );

- enable_interrupts( DEBUG_CTX );
+ enable_interrupts( __cfaabi_dbg_ctx );
  }
…
  // Kernel boot procedures
  void kernel_startup(void) {
-       LIB_DEBUG_PRINT_SAFE("Kernel : Starting\n");
+       __cfaabi_dbg_print_safe("Kernel : Starting\n");

        // Start by initializing the main thread
…
        (*mainThread){ &info };

-       LIB_DEBUG_PRINT_SAFE("Kernel : Main thread ready\n");
+       __cfaabi_dbg_print_safe("Kernel : Main thread ready\n");

        // Initialize the main cluster
…
        (*mainCluster){};

-       LIB_DEBUG_PRINT_SAFE("Kernel : main cluster ready\n");
+       __cfaabi_dbg_print_safe("Kernel : main cluster ready\n");

        // Initialize the main processor and the main processor ctx
…

        // THE SYSTEM IS NOW COMPLETELY RUNNING
-       LIB_DEBUG_PRINT_SAFE("Kernel : Started\n--------------------------------------------------\n\n");
-
-       enable_interrupts( DEBUG_CTX );
+       __cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n");
+
+       enable_interrupts( __cfaabi_dbg_ctx );
  }

  void kernel_shutdown(void) {
-       LIB_DEBUG_PRINT_SAFE("\n--------------------------------------------------\nKernel : Shutting down\n");
+       __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");

        disable_interrupts();
…
        ^(mainThread){};

-       LIB_DEBUG_PRINT_SAFE("Kernel : Shutdown complete\n");
+       __cfaabi_dbg_print_safe("Kernel : Shutdown complete\n");
  }

…
  // abort cannot be recursively entered by the same or different processors because all signal handlers return when
  // the globalAbort flag is true.
- lock( kernel_abort_lock DEBUG_CTX2 );
+ lock( kernel_abort_lock __cfaabi_dbg_ctx2 );

  // first task to abort ?
…

  int len = snprintf( abort_text, abort_text_size, "Error occurred while executing task %.256s (%p)", thrd->self_cor.name, thrd );
- __lib_debug_write( abort_text, len );
+ __cfaabi_dbg_bits_write( abort_text, len );

  if ( thrd != this_coroutine ) {
        len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine->name, this_coroutine );
-       __lib_debug_write( abort_text, len );
+       __cfaabi_dbg_bits_write( abort_text, len );
  }
  else {
-       __lib_debug_write( ".\n", 2 );
+       __cfaabi_dbg_bits_write( ".\n", 2 );
  }
  }

  extern "C" {
-       void __lib_debug_acquire() {
-             lock( kernel_debug_lock DEBUG_CTX2 );
-       }
-
-       void __lib_debug_release() {
+       void __cfaabi_dbg_bits_acquire() {
+             lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
+       }
+
+       void __cfaabi_dbg_bits_release() {
              unlock( kernel_debug_lock );
        }
…

  void P(semaphore & this) {
-       lock( this.lock DEBUG_CTX2 );
+       lock( this.lock __cfaabi_dbg_ctx2 );
        this.count -= 1;
        if ( this.count < 0 ) {
…
  void V(semaphore & this) {
        thread_desc * thrd = NULL;
-       lock( this.lock DEBUG_CTX2 );
+       lock( this.lock __cfaabi_dbg_ctx2 );
        this.count += 1;
        if ( this.count <= 0 ) {
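kernel.c above also renames the low-level debug output helpers (__lib_debug_acquire/release/write become __cfaabi_dbg_bits_acquire/release/write), which serialize raw abort and debug text across processors. Their implementation is not shown in this diff, so the following is only a hypothetical stand-alone analogue of the same pattern, using a pthread mutex and write(2) in place of the libcfa spinlock and output primitive:

    /* bits_write_sketch.c : rough, hypothetical analogue of the
     * __cfaabi_dbg_bits_acquire/write/release pattern; not the libcfa code. */
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t debug_lock = PTHREAD_MUTEX_INITIALIZER;
    static char abort_text[512];

    static void bits_acquire( void ) { pthread_mutex_lock( &debug_lock ); }
    static void bits_release( void ) { pthread_mutex_unlock( &debug_lock ); }

    // Write exactly len bytes of pre-formatted text to stderr, unbuffered.
    static void bits_write( const char * text, int len ) {
        ssize_t rc = write( STDERR_FILENO, text, (size_t)len );
        (void)rc;
    }

    int main() {
        bits_acquire();   // keep output from concurrent processors from interleaving
        int len = snprintf( abort_text, sizeof(abort_text),
                            "Error occurred while executing task %.256s (%p)", "main", (void *)0 );
        bits_write( abort_text, len );
        bits_write( ".\n", 2 );
        bits_release();
        return 0;
    }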
src/libcfa/concurrency/kernel_private.h
  #pragma once

- #include "libhdr.h"
-
  #include "kernel"
  #include "thread"
…
  void disable_interrupts();
  void enable_interrupts_noPoll();
- void enable_interrupts( DEBUG_CTX_PARAM );
+ void enable_interrupts( __cfaabi_dbg_ctx_param );
  }
…
  disable_interrupts();
  ScheduleThread( thrd );
- enable_interrupts( DEBUG_CTX );
+ enable_interrupts( __cfaabi_dbg_ctx );
  }
  thread_desc * nextThread(cluster * this);
src/libcfa/concurrency/monitor.c
  #include <inttypes.h>

- #include "libhdr.h"
  #include "kernel_private.h"
…
  static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
        // Lock the monitor spinlock
-       DO_LOCK( this->lock DEBUG_CTX2 );
+       DO_LOCK( this->lock __cfaabi_dbg_ctx2 );
        thread_desc * thrd = this_thread;

-       LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
+       __cfaabi_dbg_print_safe("Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);

        if( !this->owner ) {
…
              set_owner( this, thrd );

-             LIB_DEBUG_PRINT_SAFE("Kernel : mon is free \n");
+             __cfaabi_dbg_print_safe("Kernel : mon is free \n");
        }
        else if( this->owner == thrd) {
…
              this->recursion += 1;

-             LIB_DEBUG_PRINT_SAFE("Kernel : mon already owned \n");
+             __cfaabi_dbg_print_safe("Kernel : mon already owned \n");
        }
        else if( is_accepted( this, group) ) {
…
              reset_mask( this );

-             LIB_DEBUG_PRINT_SAFE("Kernel : mon accepts \n");
+             __cfaabi_dbg_print_safe("Kernel : mon accepts \n");
        }
        else {
-             LIB_DEBUG_PRINT_SAFE("Kernel : blocking \n");
+             __cfaabi_dbg_print_safe("Kernel : blocking \n");

              // Some one else has the monitor, wait in line for it
…
              BlockInternal( &this->lock );

-             LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entered mon %p\n", thrd, this);
+             __cfaabi_dbg_print_safe("Kernel : %10p Entered mon %p\n", thrd, this);

              // BlockInternal will unlock spinlock, no need to unlock ourselves
…
        }

-       LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entered mon %p\n", thrd, this);
+       __cfaabi_dbg_print_safe("Kernel : %10p Entered mon %p\n", thrd, this);

        // Release the lock and leave
…
  static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) {
        // Lock the monitor spinlock
-       DO_LOCK( this->lock DEBUG_CTX2 );
+       DO_LOCK( this->lock __cfaabi_dbg_ctx2 );
        thread_desc * thrd = this_thread;

-       LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
+       __cfaabi_dbg_print_safe("Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);


        if( !this->owner ) {
-             LIB_DEBUG_PRINT_SAFE("Kernel : Destroying free mon %p\n", this);
+             __cfaabi_dbg_print_safe("Kernel : Destroying free mon %p\n", this);

              // No one has the monitor, just take it
…
        __monitor_group_t group = { &this, 1, func };
        if( is_accepted( this, group) ) {
-             LIB_DEBUG_PRINT_SAFE("Kernel : mon accepts dtor, block and signal it \n");
+             __cfaabi_dbg_print_safe("Kernel : mon accepts dtor, block and signal it \n");

              // Wake the thread that is waiting for this
…
        }
        else {
-             LIB_DEBUG_PRINT_SAFE("Kernel : blocking \n");
+             __cfaabi_dbg_print_safe("Kernel : blocking \n");

              wait_ctx( this_thread, 0 )
…
        }

-       LIB_DEBUG_PRINT_SAFE("Kernel : Destroying %p\n", this);
+       __cfaabi_dbg_print_safe("Kernel : Destroying %p\n", this);

  }
…
  void __leave_monitor_desc( monitor_desc * this ) {
        // Lock the monitor spinlock, DO_LOCK to reduce contention
-       DO_LOCK( this->lock DEBUG_CTX2 );
-
-       LIB_DEBUG_PRINT_SAFE("Kernel : %10p Leaving mon %p (%p)\n", this_thread, this, this->owner);
+       DO_LOCK( this->lock __cfaabi_dbg_ctx2 );
+
+       __cfaabi_dbg_print_safe("Kernel : %10p Leaving mon %p (%p)\n", this_thread, this, this->owner);

        verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", this_thread, this->owner, this->recursion, this );
…
        // it means we don't need to do anything
        if( this->recursion != 0) {
-             LIB_DEBUG_PRINT_SAFE("Kernel : recursion still %d\n", this->recursion);
+             __cfaabi_dbg_print_safe("Kernel : recursion still %d\n", this->recursion);
              unlock( this->lock );
              return;
…
  // Leave single monitor for the last time
  void __leave_dtor_monitor_desc( monitor_desc * this ) {
-       LIB_DEBUG_DO(
+       __cfaabi_dbg_debug_do(
              if( this_thread != this->owner ) {
                    abortf("Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, this_thread, this->owner);
…

  // Lock the monitor now
- DO_LOCK( this->lock DEBUG_CTX2 );
+ DO_LOCK( this->lock __cfaabi_dbg_ctx2 );

  disable_interrupts();
…
  (this_thread->monitors){m, count, func};

- // LIB_DEBUG_PRINT_SAFE("MGUARD : enter %d\n", count);
+ // __cfaabi_dbg_print_safe("MGUARD : enter %d\n", count);

  // Enter the monitors in order
…
  enter( group );

- // LIB_DEBUG_PRINT_SAFE("MGUARD : entered\n");
+ // __cfaabi_dbg_print_safe("MGUARD : entered\n");
  }

…
  // Dtor for monitor guard
  void ^?{}( monitor_guard_t & this ) {
-       // LIB_DEBUG_PRINT_SAFE("MGUARD : leaving %d\n", this.count);
+       // __cfaabi_dbg_print_safe("MGUARD : leaving %d\n", this.count);

        // Leave the monitors in order
        leave( this.m, this.count );

-       // LIB_DEBUG_PRINT_SAFE("MGUARD : left\n");
+       // __cfaabi_dbg_print_safe("MGUARD : left\n");

        // Restore thread context
…

  //Some more checking in debug
- LIB_DEBUG_DO(
+ __cfaabi_dbg_debug_do(
        thread_desc * this_thrd = this_thread;
        if ( this.monitor_count != this_thrd->monitors.size ) {
…
  set_owner( monitors, count, signallee );

- LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee );
+ __cfaabi_dbg_print_buffer_decl( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee );

  //Everything is ready to go to sleep
…


- LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : signal_block returned\n" );
+ __cfaabi_dbg_print_buffer_local( "Kernel : signal_block returned\n" );

  //We are back, restore the masks and recursions
…
  __lock_size_t actual_count = aggregate( mon_storage, mask );

- LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : waitfor %d (s: %d, m: %d)\n", actual_count, mask.size, (__lock_size_t)max);
+ __cfaabi_dbg_print_buffer_decl( "Kernel : waitfor %d (s: %d, m: %d)\n", actual_count, mask.size, (__lock_size_t)max);

  if(actual_count == 0) return;

- LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : waitfor internal proceeding\n");
+ __cfaabi_dbg_print_buffer_local( "Kernel : waitfor internal proceeding\n");

  // Create storage for monitor context
…
        __acceptable_t& accepted = mask[index];
        if( accepted.is_dtor ) {
-             LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : dtor already there\n");
+             __cfaabi_dbg_print_buffer_local( "Kernel : dtor already there\n");
              verifyf( accepted.size == 1, "ERROR: Accepted dtor has more than 1 mutex parameter." );

…
        }
        else {
-             LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : thread present, baton-passing\n");
+             __cfaabi_dbg_print_buffer_local( "Kernel : thread present, baton-passing\n");

              // Create the node specific to this wait operation
…
              monitor_save;

-             LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : baton of %d monitors : ", count );
+             __cfaabi_dbg_print_buffer_local( "Kernel : baton of %d monitors : ", count );
              #ifdef __CFA_DEBUG_PRINT__
                    for( int i = 0; i < count; i++) {
-                         LIB_DEBUG_PRINT_BUFFER_LOCAL( "%p %p ", monitors[i], monitors[i]->signal_stack.top );
+                         __cfaabi_dbg_print_buffer_local( "%p %p ", monitors[i], monitors[i]->signal_stack.top );
                    }
              #endif
-             LIB_DEBUG_PRINT_BUFFER_LOCAL( "\n");
+             __cfaabi_dbg_print_buffer_local( "\n");

              // Set the owners to be the next thread
…
              monitor_restore;

-             LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : thread present, returned\n");
+             __cfaabi_dbg_print_buffer_local( "Kernel : thread present, returned\n");
        }

-       LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : accepted %d\n", *mask.accepted);
+       __cfaabi_dbg_print_buffer_local( "Kernel : accepted %d\n", *mask.accepted);
        return;
  }
…

  if( duration == 0 ) {
-       LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : non-blocking, exiting\n");
+       __cfaabi_dbg_print_buffer_local( "Kernel : non-blocking, exiting\n");

        unlock_all( locks, count );

-       LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : accepted %d\n", *mask.accepted);
+       __cfaabi_dbg_print_buffer_local( "Kernel : accepted %d\n", *mask.accepted);
        return;
  }
…
  verifyf( duration < 0, "Timeout on waitfor statments not supported yet.");

- LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : blocking waitfor\n");
+ __cfaabi_dbg_print_buffer_local( "Kernel : blocking waitfor\n");

  // Create the node specific to this wait operation
…
  monitor_restore;

- LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : exiting\n");
-
- LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : accepted %d\n", *mask.accepted);
+ __cfaabi_dbg_print_buffer_local( "Kernel : exiting\n");
+
+ __cfaabi_dbg_print_buffer_local( "Kernel : accepted %d\n", *mask.accepted);
  }
…

  static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
-       // LIB_DEBUG_PRINT_SAFE("Kernal : Setting owner of %p to %p ( was %p)\n", this, owner, this->owner );
+       // __cfaabi_dbg_print_safe("Kernal : Setting owner of %p to %p ( was %p)\n", this, owner, this->owner );

        //Pass the monitor appropriately
…
  static inline thread_desc * next_thread( monitor_desc * this ) {
        //Check the signaller stack
-       LIB_DEBUG_PRINT_SAFE("Kernel : mon %p AS-stack top %p\n", this, this->signal_stack.top);
+       __cfaabi_dbg_print_safe("Kernel : mon %p AS-stack top %p\n", this, this->signal_stack.top);
        __condition_criterion_t * urgent = pop( this->signal_stack );
        if( urgent ) {
…
  for( __lock_size_t i = 0; i < count; i++) {
        (criteria[i]){ monitors[i], waiter };
-       LIB_DEBUG_PRINT_SAFE( "Kernel : target %p = %p\n", criteria[i].target, &criteria[i] );
+       __cfaabi_dbg_print_safe( "Kernel : target %p = %p\n", criteria[i].target, &criteria[i] );
        push( criteria[i].target->signal_stack, &criteria[i] );
  }
…
  static inline void lock_all( __spinlock_t * locks [], __lock_size_t count ) {
        for( __lock_size_t i = 0; i < count; i++ ) {
-             DO_LOCK( *locks[i] DEBUG_CTX2 );
+             DO_LOCK( *locks[i] __cfaabi_dbg_ctx2 );
        }
  }
…
  for( __lock_size_t i = 0; i < count; i++ ) {
        __spinlock_t * l = &source[i]->lock;
-       DO_LOCK( *l DEBUG_CTX2 );
+       DO_LOCK( *l __cfaabi_dbg_ctx2 );
        if(locks) locks[i] = l;
  }
…
  for( int i = 0; i < count; i++ ) {

-       // LIB_DEBUG_PRINT_SAFE( "Checking %p for %p\n", &criteria[i], target );
+       // __cfaabi_dbg_print_safe( "Checking %p for %p\n", &criteria[i], target );
        if( &criteria[i] == target ) {
              criteria[i].ready = true;
-             // LIB_DEBUG_PRINT_SAFE( "True\n" );
+             // __cfaabi_dbg_print_safe( "True\n" );

…
  }

- LIB_DEBUG_PRINT_SAFE( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : NULL );
+ __cfaabi_dbg_print_safe( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : NULL );
  return ready2run ? node->waiting_thread : NULL;
  }
…
  thread_desc * thrd = this_thread;
  if( !this.monitors ) {
-       // LIB_DEBUG_PRINT_SAFE("Branding\n");
+       // __cfaabi_dbg_print_safe("Branding\n");
        assertf( thrd->monitors.data != NULL, "No current monitor to brand condition %p", thrd->monitors.data );
        this.monitor_count = thrd->monitors.size;
src/libcfa/concurrency/preemption.c
  //

- #include "libhdr.h"
  #include "preemption.h"
…
  //=============================================================================================

- LIB_DEBUG_DO( static thread_local void * last_interrupt = 0; )
+ __cfaabi_dbg_debug_do( static thread_local void * last_interrupt = 0; )

  extern "C" {
…
  // Enable interrupts by decrementing the counter
  // If counter reaches 0, execute any pending CtxSwitch
- void enable_interrupts( DEBUG_CTX_PARAM ) {
+ void enable_interrupts( __cfaabi_dbg_ctx_param ) {
        processor * proc = this_processor; // Cache the processor now since interrupts can start happening after the atomic add
        thread_desc * thrd = this_thread;  // Cache the thread now since interrupts can start happening after the atomic add
…

        // For debugging purposes : keep track of the last person to enable the interrupts
-       LIB_DEBUG_DO( proc->last_enable = caller; )
+       __cfaabi_dbg_debug_do( proc->last_enable = caller; )
  }

…
  // Called from kernel_startup
  void kernel_start_preemption() {
-       LIB_DEBUG_PRINT_SAFE("Kernel : Starting preemption\n");
+       __cfaabi_dbg_print_safe("Kernel : Starting preemption\n");

        // Start with preemption disabled until ready
…
  // Called from kernel_shutdown
  void kernel_stop_preemption() {
-       LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopping\n");
+       __cfaabi_dbg_print_safe("Kernel : Preemption stopping\n");

        // Block all signals since we are already shutting down
…
        // Preemption is now fully stopped

-       LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopped\n");
+       __cfaabi_dbg_print_safe("Kernel : Preemption stopped\n");
  }

…
  // Receives SIGUSR1 signal and causes the current thread to yield
  void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) {
-       LIB_DEBUG_DO( last_interrupt = (void *)(cxt->uc_mcontext.gregs[CFA_REG_IP]); )
+       __cfaabi_dbg_debug_do( last_interrupt = (void *)(cxt->uc_mcontext.gregs[CFA_REG_IP]); )

        // Check if it is safe to preempt here
…
  assertf(sig == SIGALRM, "Kernel Internal Error, sigwait: Unexpected signal %d (%d : %d)\n", sig, info.si_code, info.si_value.sival_int);

- // LIB_DEBUG_PRINT_SAFE("Kernel : Caught alarm from %d with %d\n", info.si_code, info.si_value.sival_int );
+ // __cfaabi_dbg_print_safe("Kernel : Caught alarm from %d with %d\n", info.si_code, info.si_value.sival_int );
  // Switch on the code (a.k.a. the sender) to
  switch( info.si_code )
…
  case SI_TIMER:
  case SI_KERNEL:
-       // LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread tick\n");
-       lock( event_kernel->lock DEBUG_CTX2 );
+       // __cfaabi_dbg_print_safe("Kernel : Preemption thread tick\n");
+       lock( event_kernel->lock __cfaabi_dbg_ctx2 );
        tick_preemption();
        unlock( event_kernel->lock );
…

  EXIT:
-       LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread stopping\n");
+       __cfaabi_dbg_print_safe("Kernel : Preemption thread stopping\n");
        return NULL;
  }
…

  if ( sigaction( sig, &act, NULL ) == -1 ) {
-       LIB_DEBUG_PRINT_BUFFER_DECL(
+       __cfaabi_dbg_print_buffer_decl(
              " __kernel_sigaction( sig:%d, handler:%p, flags:%d ), problem installing signal handler, error(%d) %s.\n",
              sig, handler, flags, errno, strerror( errno )
…

  if ( sigaction( sig, &act, NULL ) == -1 ) {
-       LIB_DEBUG_PRINT_BUFFER_DECL(
+       __cfaabi_dbg_print_buffer_decl(
              " __kernel_sigdefault( sig:%d ), problem reseting signal handler, error(%d) %s.\n",
              sig, errno, strerror( errno )
…
  //=============================================================================================

- LIB_DEBUG_DO(
+ __cfaabi_dbg_debug_do(
        static void __kernel_backtrace( int start ) {
              // skip first N stack frames
…

  // void sigHandler_segv( __CFA_SIGPARMS__ ) {
- //       LIB_DEBUG_DO(
+ //       __cfaabi_dbg_debug_do(
  //             #ifdef __USE_STREAM__
  //                   serr | "*CFA runtime error* program cfa-cpp terminated with"
…
  // void sigHandler_abort( __CFA_SIGPARMS__ ) {
  //       // skip first 6 stack frames
- //       LIB_DEBUG_DO( __kernel_backtrace( 6 ); )
+ //       __cfaabi_dbg_debug_do( __kernel_backtrace( 6 ); )

  //       // reset default signal handler
src/libcfa/concurrency/thread.c

  #include "kernel_private.h"
- #include "libhdr.h"

  #define __CFA_INVOKE_PRIVATE__
…
  thrd_c->last = this_coroutine;

- // LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
+ // __cfaabi_dbg_print_safe("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);

  disable_interrupts();
…

  ScheduleThread(thrd_h);
- enable_interrupts( DEBUG_CTX );
+ enable_interrupts( __cfaabi_dbg_ctx );