Changeset b826e6b for src/libcfa/concurrency/kernel.c
- Timestamp:
- Jul 19, 2017, 11:49:33 AM (8 years ago)
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children:
- 9cc0472
- Parents:
- fea3faa (diff), a57cb58 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
src/libcfa/concurrency/kernel.c
rfea3faa rb826e6b 15 15 // 16 16 17 #include "startup.h" 18 19 //Start and stop routine for the kernel, declared first to make sure they run first 20 void kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) )); 21 void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) )); 22 23 //Header 24 #include "kernel_private.h" 17 #include "libhdr.h" 25 18 26 19 //C Includes … … 35 28 36 29 //CFA Includes 37 #include " libhdr.h"30 #include "kernel_private.h" 38 31 #include "preemption.h" 32 #include "startup.h" 39 33 40 34 //Private includes … … 42 36 #include "invoke.h" 43 37 38 //Start and stop routine for the kernel, declared first to make sure they run first 39 void kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) )); 40 void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) )); 41 44 42 //----------------------------------------------------------------------------- 45 43 // Kernel storage 46 #define KERNEL_STORAGE(T,X) static char X##_storage[sizeof(T)] 47 48 KERNEL_STORAGE(processorCtx_t, systemProcessorCtx); 49 KERNEL_STORAGE(cluster, systemCluster); 50 KERNEL_STORAGE(system_proc_t, systemProcessor); 51 KERNEL_STORAGE(thread_desc, mainThread); 52 KERNEL_STORAGE(machine_context_t, mainThread_context); 53 54 cluster * systemCluster; 55 system_proc_t * systemProcessor; 44 KERNEL_STORAGE(cluster, mainCluster); 45 KERNEL_STORAGE(processor, mainProcessor); 46 KERNEL_STORAGE(processorCtx_t, mainProcessorCtx); 47 KERNEL_STORAGE(thread_desc, mainThread); 48 KERNEL_STORAGE(machine_context_t, mainThreadCtx); 49 50 cluster * mainCluster; 51 processor * mainProcessor; 56 52 thread_desc * mainThread; 57 53 … … 59 55 // Global state 60 56 61 thread_local processor * this_processor; 62 63 coroutine_desc * this_coroutine(void) { 64 return this_processor->current_coroutine; 65 } 66 67 thread_desc * this_thread(void) { 68 return this_processor->current_thread; 69 } 57 volatile 
thread_local coroutine_desc * this_coroutine; 58 volatile thread_local thread_desc * this_thread; 59 volatile thread_local processor * this_processor; 60 61 volatile thread_local bool preemption_in_progress = 0; 62 volatile thread_local unsigned short disable_preempt_count = 1; 70 63 71 64 //----------------------------------------------------------------------------- 72 65 // Main thread construction 73 66 struct current_stack_info_t { 74 machine_context_t ctx; 67 machine_context_t ctx; 75 68 unsigned int size; // size of stack 76 69 void *base; // base of stack … … 82 75 83 76 void ?{}( current_stack_info_t * this ) { 84 CtxGet( &this->ctx );77 CtxGet( this->ctx ); 85 78 this->base = this->ctx.FP; 86 79 this->storage = this->ctx.SP; … … 91 84 92 85 this->limit = (void *)(((intptr_t)this->base) - this->size); 93 this->context = & mainThread_context_storage;86 this->context = &storage_mainThreadCtx; 94 87 this->top = this->base; 95 88 } … … 106 99 107 100 void ?{}( coroutine_desc * this, current_stack_info_t * info) { 108 (&this->stack){ info }; 101 (&this->stack){ info }; 109 102 this->name = "Main Thread"; 110 103 this->errno_ = 0; … … 131 124 132 125 void ?{}(processor * this) { 133 this{ systemCluster };126 this{ mainCluster }; 134 127 } 135 128 136 129 void ?{}(processor * this, cluster * cltr) { 137 130 this->cltr = cltr; 138 this->current_coroutine = NULL; 139 this->current_thread = NULL; 140 (&this->terminated){}; 141 this->is_terminated = false; 131 (&this->terminated){ 0 }; 132 this->do_terminate = false; 142 133 this->preemption_alarm = NULL; 143 this->preemption = default_preemption();144 this->disable_preempt_count = 1; //Start with interrupts disabled145 134 this->pending_preemption = false; 146 135 … … 150 139 void ?{}(processor * this, cluster * cltr, processorCtx_t * runner) { 151 140 this->cltr = cltr; 152 this->current_coroutine = NULL; 153 this->current_thread = NULL; 154 (&this->terminated){}; 155 this->is_terminated = false; 156 
this->disable_preempt_count = 0; 141 (&this->terminated){ 0 }; 142 this->do_terminate = false; 143 this->preemption_alarm = NULL; 157 144 this->pending_preemption = false; 145 this->kernel_thread = pthread_self(); 158 146 159 147 this->runner = runner; 160 LIB_DEBUG_PRINT_SAFE("Kernel : constructing processor context %p\n", runner);148 LIB_DEBUG_PRINT_SAFE("Kernel : constructing main processor context %p\n", runner); 161 149 runner{ this }; 162 150 } 163 151 164 void ?{}(system_proc_t * this, cluster * cltr, processorCtx_t * runner) {165 (&this->alarms){};166 (&this->alarm_lock){};167 this->pending_alarm = false;168 169 (&this->proc){ cltr, runner };170 }171 172 152 void ^?{}(processor * this) { 173 if( ! this-> is_terminated) {153 if( ! this->do_terminate ) { 174 154 LIB_DEBUG_PRINT_SAFE("Kernel : core %p signaling termination\n", this); 175 this->is_terminated = true; 176 wait( &this->terminated ); 155 this->do_terminate = true; 156 P( &this->terminated ); 157 pthread_join( this->kernel_thread, NULL ); 177 158 } 178 159 } … … 180 161 void ?{}(cluster * this) { 181 162 ( &this->ready_queue ){}; 182 ( &this->lock ){}; 163 ( &this->ready_queue_lock ){}; 164 165 this->preemption = default_preemption(); 183 166 } 184 167 185 168 void ^?{}(cluster * this) { 186 169 187 170 } 188 171 … … 203 186 204 187 thread_desc * readyThread = NULL; 205 for( unsigned int spin_count = 0; ! this-> is_terminated; spin_count++ )188 for( unsigned int spin_count = 0; ! 
this->do_terminate; spin_count++ ) 206 189 { 207 190 readyThread = nextThread( this->cltr ); … … 209 192 if(readyThread) 210 193 { 194 verify( disable_preempt_count > 0 ); 195 211 196 runThread(this, readyThread); 197 198 verify( disable_preempt_count > 0 ); 212 199 213 200 //Some actions need to be taken from the kernel … … 225 212 } 226 213 227 signal( &this->terminated ); 214 V( &this->terminated ); 215 228 216 LIB_DEBUG_PRINT_SAFE("Kernel : core %p terminated\n", this); 229 217 } 230 218 231 // runThread runs a thread by context switching 232 // from the processor coroutine to the target thread 219 // runThread runs a thread by context switching 220 // from the processor coroutine to the target thread 233 221 void runThread(processor * this, thread_desc * dst) { 234 222 coroutine_desc * proc_cor = get_coroutine(this->runner); 235 223 coroutine_desc * thrd_cor = get_coroutine(dst); 236 224 237 225 //Reset the terminating actions here 238 226 this->finish.action_code = No_Action; 239 227 240 228 //Update global state 241 this ->current_thread = dst;229 this_thread = dst; 242 230 243 231 // Context Switch to the thread … … 246 234 } 247 235 248 // Once a thread has finished running, some of 236 // Once a thread has finished running, some of 249 237 // its final actions must be executed from the kernel 250 238 void finishRunning(processor * this) { … … 256 244 } 257 245 else if( this->finish.action_code == Release_Schedule ) { 258 unlock( this->finish.lock ); 246 unlock( this->finish.lock ); 259 247 ScheduleThread( this->finish.thrd ); 260 248 } … … 289 277 processor * proc = (processor *) arg; 290 278 this_processor = proc; 279 this_coroutine = NULL; 280 this_thread = NULL; 281 disable_preempt_count = 1; 291 282 // SKULLDUGGERY: We want to create a context for the processor coroutine 292 283 // which is needed for the 2-step context switch. However, there is no reason 293 // to waste the perfectly valid stack create by pthread. 
284 // to waste the perfectly valid stack create by pthread. 294 285 current_stack_info_t info; 295 286 machine_context_t ctx; … … 300 291 301 292 //Set global state 302 proc->current_coroutine = &proc->runner->__cor;303 proc->current_thread = NULL;293 this_coroutine = &proc->runner->__cor; 294 this_thread = NULL; 304 295 305 296 //We now have a proper context from which to schedule threads 306 297 LIB_DEBUG_PRINT_SAFE("Kernel : core %p created (%p, %p)\n", proc, proc->runner, &ctx); 307 298 308 // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't 309 // resume it to start it like it normally would, it will just context switch 310 // back to here. Instead directly call the main since we already are on the 299 // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't 300 // resume it to start it like it normally would, it will just context switch 301 // back to here. Instead directly call the main since we already are on the 311 302 // appropriate stack. 
312 303 proc_cor_storage.__cor.state = Active; … … 315 306 316 307 // Main routine of the core returned, the core is now fully terminated 317 LIB_DEBUG_PRINT_SAFE("Kernel : core %p main ended (%p)\n", proc, proc->runner); 308 LIB_DEBUG_PRINT_SAFE("Kernel : core %p main ended (%p)\n", proc, proc->runner); 318 309 319 310 return NULL; … … 322 313 void start(processor * this) { 323 314 LIB_DEBUG_PRINT_SAFE("Kernel : Starting core %p\n", this); 324 315 325 316 pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this ); 326 317 327 LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this); 318 LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this); 328 319 } 329 320 … … 331 322 // Scheduler routines 332 323 void ScheduleThread( thread_desc * thrd ) { 333 if( !thrd ) return; 324 // if( !thrd ) return; 325 assert( thrd ); 326 assert( thrd->cor.state != Halted ); 327 328 verify( disable_preempt_count > 0 ); 334 329 335 330 verifyf( thrd->next == NULL, "Expected null got %p", thrd->next ); 336 337 lock( &systemProcessor->proc.cltr->lock ); 338 append( &systemProcessor->proc.cltr->ready_queue, thrd ); 339 unlock( &systemProcessor->proc.cltr->lock ); 331 332 lock( &this_processor->cltr->ready_queue_lock DEBUG_CTX2 ); 333 append( &this_processor->cltr->ready_queue, thrd ); 334 unlock( &this_processor->cltr->ready_queue_lock ); 335 336 verify( disable_preempt_count > 0 ); 340 337 } 341 338 342 339 thread_desc * nextThread(cluster * this) { 343 lock( &this->lock ); 340 verify( disable_preempt_count > 0 ); 341 lock( &this->ready_queue_lock DEBUG_CTX2 ); 344 342 thread_desc * head = pop_head( &this->ready_queue ); 345 unlock( &this->lock ); 343 unlock( &this->ready_queue_lock ); 344 verify( disable_preempt_count > 0 ); 346 345 return head; 347 346 } 348 347 349 void ScheduleInternal() { 350 suspend(); 351 } 352 353 void ScheduleInternal( spinlock * lock ) { 348 void BlockInternal() { 349 disable_interrupts(); 350 verify( disable_preempt_count > 0 ); 351 
suspend(); 352 verify( disable_preempt_count > 0 ); 353 enable_interrupts( DEBUG_CTX ); 354 } 355 356 void BlockInternal( spinlock * lock ) { 357 disable_interrupts(); 354 358 this_processor->finish.action_code = Release; 355 359 this_processor->finish.lock = lock; 356 suspend(); 357 } 358 359 void ScheduleInternal( thread_desc * thrd ) { 360 361 verify( disable_preempt_count > 0 ); 362 suspend(); 363 verify( disable_preempt_count > 0 ); 364 365 enable_interrupts( DEBUG_CTX ); 366 } 367 368 void BlockInternal( thread_desc * thrd ) { 369 disable_interrupts(); 370 assert( thrd->cor.state != Halted ); 360 371 this_processor->finish.action_code = Schedule; 361 372 this_processor->finish.thrd = thrd; 362 suspend(); 363 } 364 365 void ScheduleInternal( spinlock * lock, thread_desc * thrd ) { 373 374 verify( disable_preempt_count > 0 ); 375 suspend(); 376 verify( disable_preempt_count > 0 ); 377 378 enable_interrupts( DEBUG_CTX ); 379 } 380 381 void BlockInternal( spinlock * lock, thread_desc * thrd ) { 382 disable_interrupts(); 366 383 this_processor->finish.action_code = Release_Schedule; 367 384 this_processor->finish.lock = lock; 368 385 this_processor->finish.thrd = thrd; 369 suspend(); 370 } 371 372 void ScheduleInternal(spinlock ** locks, unsigned short count) { 386 387 verify( disable_preempt_count > 0 ); 388 suspend(); 389 verify( disable_preempt_count > 0 ); 390 391 enable_interrupts( DEBUG_CTX ); 392 } 393 394 void BlockInternal(spinlock ** locks, unsigned short count) { 395 disable_interrupts(); 373 396 this_processor->finish.action_code = Release_Multi; 374 397 this_processor->finish.locks = locks; 375 398 this_processor->finish.lock_count = count; 376 suspend(); 377 } 378 379 void ScheduleInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) { 399 400 verify( disable_preempt_count > 0 ); 401 suspend(); 402 verify( disable_preempt_count > 0 ); 403 404 enable_interrupts( DEBUG_CTX ); 405 } 406 407 void 
BlockInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) { 408 disable_interrupts(); 380 409 this_processor->finish.action_code = Release_Multi_Schedule; 381 410 this_processor->finish.locks = locks; … … 383 412 this_processor->finish.thrds = thrds; 384 413 this_processor->finish.thrd_count = thrd_count; 414 415 verify( disable_preempt_count > 0 ); 416 suspend(); 417 verify( disable_preempt_count > 0 ); 418 419 enable_interrupts( DEBUG_CTX ); 420 } 421 422 void LeaveThread(spinlock * lock, thread_desc * thrd) { 423 verify( disable_preempt_count > 0 ); 424 this_processor->finish.action_code = thrd ? Release_Schedule : Release; 425 this_processor->finish.lock = lock; 426 this_processor->finish.thrd = thrd; 427 385 428 suspend(); 386 429 } … … 392 435 // Kernel boot procedures 393 436 void kernel_startup(void) { 394 LIB_DEBUG_PRINT_SAFE("Kernel : Starting\n"); 437 LIB_DEBUG_PRINT_SAFE("Kernel : Starting\n"); 395 438 396 439 // Start by initializing the main thread 397 // SKULLDUGGERY: the mainThread steals the process main thread 398 // which will then be scheduled by the systemProcessor normally399 mainThread = (thread_desc *)& mainThread_storage;440 // SKULLDUGGERY: the mainThread steals the process main thread 441 // which will then be scheduled by the mainProcessor normally 442 mainThread = (thread_desc *)&storage_mainThread; 400 443 current_stack_info_t info; 401 444 mainThread{ &info }; … … 403 446 LIB_DEBUG_PRINT_SAFE("Kernel : Main thread ready\n"); 404 447 448 // Initialize the main cluster 449 mainCluster = (cluster *)&storage_mainCluster; 450 mainCluster{}; 451 452 LIB_DEBUG_PRINT_SAFE("Kernel : main cluster ready\n"); 453 454 // Initialize the main processor and the main processor ctx 455 // (the coroutine that contains the processing control flow) 456 mainProcessor = (processor *)&storage_mainProcessor; 457 mainProcessor{ mainCluster, (processorCtx_t *)&storage_mainProcessorCtx }; 458 459 //initialize 
the global state variables 460 this_processor = mainProcessor; 461 this_thread = mainThread; 462 this_coroutine = &mainThread->cor; 463 405 464 // Enable preemption 406 465 kernel_start_preemption(); 407 466 408 // Initialize the system cluster 409 systemCluster = (cluster *)&systemCluster_storage; 410 systemCluster{}; 411 412 LIB_DEBUG_PRINT_SAFE("Kernel : System cluster ready\n"); 413 414 // Initialize the system processor and the system processor ctx 415 // (the coroutine that contains the processing control flow) 416 systemProcessor = (system_proc_t *)&systemProcessor_storage; 417 systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtx_storage }; 418 419 // Add the main thread to the ready queue 420 // once resume is called on systemProcessor->runner the mainThread needs to be scheduled like any normal thread 467 // Add the main thread to the ready queue 468 // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread 421 469 ScheduleThread(mainThread); 422 470 423 //initialize the global state variables 424 this_processor = &systemProcessor->proc; 425 this_processor->current_thread = mainThread; 426 this_processor->current_coroutine = &mainThread->cor; 427 428 // SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX 471 // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX 429 472 // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that 430 // mainThread is on the ready queue when this call is made. 431 resume( systemProcessor->proc.runner );473 // mainThread is on the ready queue when this call is made. 
474 resume( mainProcessor->runner ); 432 475 433 476 … … 435 478 // THE SYSTEM IS NOW COMPLETELY RUNNING 436 479 LIB_DEBUG_PRINT_SAFE("Kernel : Started\n--------------------------------------------------\n\n"); 480 481 enable_interrupts( DEBUG_CTX ); 437 482 } 438 483 … … 440 485 LIB_DEBUG_PRINT_SAFE("\n--------------------------------------------------\nKernel : Shutting down\n"); 441 486 442 // SKULLDUGGERY: Notify the systemProcessor it needs to terminates. 487 disable_interrupts(); 488 489 // SKULLDUGGERY: Notify the mainProcessor it needs to terminates. 443 490 // When its coroutine terminates, it return control to the mainThread 444 491 // which is currently here 445 systemProcessor->proc.is_terminated= true;492 mainProcessor->do_terminate = true; 446 493 suspend(); 447 494 448 495 // THE SYSTEM IS NOW COMPLETELY STOPPED 449 496 450 // Destroy the system processor and its context in reverse order of construction 497 // Disable preemption 498 kernel_stop_preemption(); 499 500 // Destroy the main processor and its context in reverse order of construction 451 501 // These were manually constructed so we need manually destroy them 452 ^( systemProcessor->proc.runner){};453 ^( systemProcessor){};502 ^(mainProcessor->runner){}; 503 ^(mainProcessor){}; 454 504 455 505 // Final step, destroy the main thread since it is no longer needed … … 457 507 ^(mainThread){}; 458 508 459 LIB_DEBUG_PRINT_SAFE("Kernel : Shutdown complete\n"); 509 LIB_DEBUG_PRINT_SAFE("Kernel : Shutdown complete\n"); 460 510 } 461 511 … … 467 517 // abort cannot be recursively entered by the same or different processors because all signal handlers return when 468 518 // the globalAbort flag is true. 469 lock( &kernel_abort_lock );519 lock( &kernel_abort_lock DEBUG_CTX2 ); 470 520 471 521 // first task to abort ? 
… … 473 523 kernel_abort_called = true; 474 524 unlock( &kernel_abort_lock ); 475 } 525 } 476 526 else { 477 527 unlock( &kernel_abort_lock ); 478 528 479 529 sigset_t mask; 480 530 sigemptyset( &mask ); … … 482 532 sigaddset( &mask, SIGUSR1 ); // block SIGUSR1 signals 483 533 sigsuspend( &mask ); // block the processor to prevent further damage during abort 484 _exit( EXIT_FAILURE ); // if processor unblocks before it is killed, terminate it 485 } 486 487 return this_thread ();534 _exit( EXIT_FAILURE ); // if processor unblocks before it is killed, terminate it 535 } 536 537 return this_thread; 488 538 } 489 539 … … 494 544 __lib_debug_write( STDERR_FILENO, abort_text, len ); 495 545 496 if ( thrd != this_coroutine ()) {497 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine ()->name, this_coroutine());546 if ( thrd != this_coroutine ) { 547 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine->name, this_coroutine ); 498 548 __lib_debug_write( STDERR_FILENO, abort_text, len ); 499 } 549 } 500 550 else { 501 551 __lib_debug_write( STDERR_FILENO, ".\n", 2 ); … … 505 555 extern "C" { 506 556 void __lib_debug_acquire() { 507 lock( &kernel_debug_lock);557 lock( &kernel_debug_lock DEBUG_CTX2 ); 508 558 } 509 559 510 560 void __lib_debug_release() { 511 unlock( &kernel_debug_lock);561 unlock( &kernel_debug_lock ); 512 562 } 513 563 } … … 525 575 } 526 576 527 bool try_lock( spinlock * this ) {577 bool try_lock( spinlock * this DEBUG_CTX_PARAM2 ) { 528 578 return this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0; 529 579 } 530 580 531 void lock( spinlock * this ) {581 void lock( spinlock * this DEBUG_CTX_PARAM2 ) { 532 582 for ( unsigned int i = 1;; i += 1 ) { 533 if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) break; 534 } 535 } 583 if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) { break; } 584 } 585 LIB_DEBUG_DO( 586 
this->prev_name = caller; 587 this->prev_thrd = this_thread; 588 ) 589 } 590 591 void lock_yield( spinlock * this DEBUG_CTX_PARAM2 ) { 592 for ( unsigned int i = 1;; i += 1 ) { 593 if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) { break; } 594 yield(); 595 } 596 LIB_DEBUG_DO( 597 this->prev_name = caller; 598 this->prev_thrd = this_thread; 599 ) 600 } 601 536 602 537 603 void unlock( spinlock * this ) { … … 539 605 } 540 606 541 void ?{}( signal_once * this ) { 542 this->cond = false; 543 } 544 void ^?{}( signal_once * this ) { 545 546 } 547 548 void wait( signal_once * this ) { 549 lock( &this->lock ); 550 if( !this->cond ) { 551 append( &this->blocked, this_thread() ); 552 ScheduleInternal( &this->lock ); 553 lock( &this->lock ); 554 } 607 void ?{}( semaphore * this, int count = 1 ) { 608 (&this->lock){}; 609 this->count = count; 610 (&this->waiting){}; 611 } 612 void ^?{}(semaphore * this) {} 613 614 void P(semaphore * this) { 615 lock( &this->lock DEBUG_CTX2 ); 616 this->count -= 1; 617 if ( this->count < 0 ) { 618 // queue current task 619 append( &this->waiting, (thread_desc *)this_thread ); 620 621 // atomically release spin lock and block 622 BlockInternal( &this->lock ); 623 } 624 else { 625 unlock( &this->lock ); 626 } 627 } 628 629 void V(semaphore * this) { 630 thread_desc * thrd = NULL; 631 lock( &this->lock DEBUG_CTX2 ); 632 this->count += 1; 633 if ( this->count <= 0 ) { 634 // remove task at head of waiting list 635 thrd = pop_head( &this->waiting ); 636 } 637 555 638 unlock( &this->lock ); 556 } 557 558 void signal( signal_once * this ) { 559 lock( &this->lock ); 560 { 561 this->cond = true; 562 563 thread_desc * it; 564 while( it = pop_head( &this->blocked) ) { 565 ScheduleThread( it ); 566 } 567 } 568 unlock( &this->lock ); 639 640 // make new owner 641 WakeThread( thrd ); 569 642 } 570 643 … … 590 663 } 591 664 head->next = NULL; 592 } 665 } 593 666 return head; 594 667 } … … 609 682 this->top = top->next; 610 683 
top->next = NULL; 611 } 684 } 612 685 return top; 613 686 }
Note:
See TracChangeset
for help on using the changeset viewer.