Changes in / [6e2b04e:720f2fe2]
Files:
- 12 deleted
- 7 edited
- libcfa/src/bits/locks.hfa (modified) (3 diffs)
- libcfa/src/concurrency/invoke.h (modified) (3 diffs)
- libcfa/src/concurrency/kernel.cfa (modified) (1 diff)
- libcfa/src/concurrency/locks.cfa (modified) (2 diffs)
- libcfa/src/concurrency/locks.hfa (modified) (9 diffs)
- libcfa/src/concurrency/thread.cfa (modified) (2 diffs)
- libcfa/src/startup.cfa (modified) (1 diff)
- tests/unified_locking/.expect/block_spin_lock.txt (deleted)
- tests/unified_locking/.expect/clh.txt (deleted)
- tests/unified_locking/.expect/mcs_block_spin_lock.txt (deleted)
- tests/unified_locking/.expect/pthread_locks.txt (deleted)
- tests/unified_locking/.expect/simple_owner_lock.txt (deleted)
- tests/unified_locking/.expect/spin_queue_lock.txt (deleted)
- tests/unified_locking/block_spin_lock.cfa (deleted)
- tests/unified_locking/clh.cfa (deleted)
- tests/unified_locking/mcs_block_spin_lock.cfa (deleted)
- tests/unified_locking/pthread_locks.cfa (deleted)
- tests/unified_locking/simple_owner_lock.cfa (deleted)
- tests/unified_locking/spin_queue_lock.cfa (deleted)
Legend (markers used in the diffs below):
  (space)  Unmodified
  +        Added (present only in r720f2fe2)
  -        Removed (present only in r6e2b04e)
libcfa/src/bits/locks.hfa
r6e2b04e → r720f2fe2

@@ -26 +26 @@
 	// Wrap in struct to prevent false sharing with debug info
 	volatile bool lock;
+	#ifdef __CFA_DEBUG__
+		// previous function to acquire the lock
+		const char * prev_name;
+		// previous thread to acquire the lock
+		void* prev_thrd;
+		// keep track of number of times we had to spin, just in case the number is unexpectedly huge
+		size_t spin_count;
+	#endif
 };
 
@@ -32 +40 @@
 	extern void disable_interrupts() OPTIONAL_THREAD;
 	extern void enable_interrupts( bool poll = true ) OPTIONAL_THREAD;
-	#define __cfaabi_dbg_record_lock(x, y)
+
+	#ifdef __CFA_DEBUG__
+		void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]);
+	#else
+		#define __cfaabi_dbg_record_lock(x, y)
+	#endif
 }
 
 static inline void ?{}( __spinlock_t & this ) {
 	this.lock = 0;
+	#ifdef __CFA_DEBUG__
+		this.spin_count = 0;
+	#endif
 }
 
@@ -61 +77 @@
 	for ( unsigned int i = 1;; i += 1 ) {
 		if ( (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0) ) break;
+		#ifdef __CFA_DEBUG__
+			this.spin_count++;
+		#endif
 		#ifndef NOEXPBACK
 			// exponential spin
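The new fields are only meaningful in debug builds. A minimal usage sketch of how runtime code takes this spinlock, assuming (based on the call sites shown in locks.cfa and locks.hfa below) that `__cfaabi_dbg_ctx2` expands to an extra caller-name argument under `__CFA_DEBUG__`; the function and variable names here are illustrative, not part of the changeset:

	// Sketch only: take the spinlock around a shared update.
	// Under __CFA_DEBUG__ the lock records prev_name/prev_thrd via
	// __cfaabi_dbg_record_lock and counts failed attempts in spin_count.
	static void update_shared( __spinlock_t & l, volatile int & counter ) {
		lock( l __cfaabi_dbg_ctx2 );   // extra caller argument only in debug builds (assumption)
		counter += 1;                  // critical section
		unlock( l );
	}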
libcfa/src/concurrency/invoke.h
r6e2b04e → r720f2fe2

@@ -195 +195 @@
 	struct __monitor_group_t monitors;
 
+	// used to put threads on user data structures
+	struct {
+		struct thread$ * next;
+		struct thread$ * back;
+	} seqable;
+
 	// used to put threads on dlist data structure
 	__cfa_dlink(thread$);
@@ -202 +208 @@
 		struct thread$ * prev;
 	} node;
-
-	// used to store state between clh lock/unlock
-	volatile bool * clh_prev;
-
-	// used to point to this thd's current clh node
-	volatile bool * clh_node;
 
 	struct processor * last_proc;
@@ -240 +240 @@
 }
 
+static inline thread$ * volatile & ?`next ( thread$ * this ) __attribute__((const)) {
+	return this->seqable.next;
+}
+
+static inline thread$ *& Back( thread$ * this ) __attribute__((const)) {
+	return this->seqable.back;
+}
+
+static inline thread$ *& Next( thread$ * this ) __attribute__((const)) {
+	return this->seqable.next;
+}
+
+static inline bool listed( thread$ * this ) {
+	return this->seqable.next != 0p;
+}
+
 static inline void ?{}(__monitor_group_t & this) {
 	(this.data){0p};
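The `seqable` links give every `thread$` an intrusive pair of pointers, so user-level structures can queue threads without allocating separate nodes, and `listed` reports whether a thread is currently on such a structure. A rough sketch of a FIFO built on these accessors; the `user_queue` type and helper names are hypothetical, for illustration only:

	// Hypothetical intrusive FIFO of threads using the seqable links.
	struct user_queue {
		thread$ * head;
		thread$ * tail;
	};

	static inline void push( user_queue & q, thread$ * t ) {
		Next( t ) = 0p;                            // t->seqable.next
		Back( t ) = q.tail;                        // t->seqable.back
		if ( q.tail ) Next( q.tail ) = t; else q.head = t;
		q.tail = t;
	}

	static inline thread$ * pop( user_queue & q ) {
		thread$ * t = q.head;
		if ( t ) {
			q.head = Next( t );
			if ( q.head == 0p ) q.tail = 0p; else Back( q.head ) = 0p;
			Next( t ) = 0p;                        // clear the link on removal
		}
		return t;
	}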
libcfa/src/concurrency/kernel.cfa
r6e2b04e → r720f2fe2

@@ -834 +834 @@
 #endif
 
+
+
+//-----------------------------------------------------------------------------
+// Debug
+__cfaabi_dbg_debug_do(
+	extern "C" {
+		void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]) {
+			this.prev_name = prev_name;
+			this.prev_thrd = kernelTLS().this_thread;
+		}
+	}
+)
+
 //-----------------------------------------------------------------------------
 // Debug
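`__cfaabi_dbg_debug_do` keeps its argument only in debug builds, which is why the strong definition of `__cfaabi_dbg_record_lock` exists solely under `__CFA_DEBUG__`. The macro's exact definition lives elsewhere in the runtime; as an assumption, it behaves roughly like this conditional-compilation idiom:

	// Assumed shape of the macro (illustrative, not the runtime's literal definition).
	#ifdef __CFA_DEBUG__
		#define __cfaabi_dbg_debug_do(...) __VA_ARGS__
	#else
		#define __cfaabi_dbg_debug_do(...)
	#endif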
libcfa/src/concurrency/locks.cfa
r6e2b04e → r720f2fe2

@@ -219 +219 @@
 	// this casts the alarm node to our wrapped type since we used type erasure
 	static void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }
-
-	struct pthread_alarm_node_wrap {
-		alarm_node_t alarm_node;
-		pthread_cond_var(L) * cond;
-		info_thread(L) * info_thd;
-	};
-
-	void ?{}( pthread_alarm_node_wrap(L) & this, Duration alarm, Duration period, Alarm_Callback callback, pthread_cond_var(L) * c, info_thread(L) * i ) {
-		this.alarm_node{ callback, alarm, period };
-		this.cond = c;
-		this.info_thd = i;
-	}
-
-	void ^?{}( pthread_alarm_node_wrap(L) & this ) { }
-
-	static void timeout_handler ( pthread_alarm_node_wrap(L) & this ) with( this ) {
-		// This pthread_cond_var member is called from the kernel, and therefore, cannot block, but it can spin.
-		lock( cond->lock __cfaabi_dbg_ctx2 );
-
-		// this check is necessary to avoid a race condition since this timeout handler
-		// may still be called after a thread has been removed from the queue but
-		// before the alarm is unregistered
-		if ( (*info_thd)`isListed ) {	// is thread on queue
-			info_thd->signalled = false;
-			// remove this thread O(1)
-			remove( *info_thd );
-			on_notify(*info_thd->lock, info_thd->t);
-		}
-		unlock( cond->lock );
-	}
-
-	// this casts the alarm node to our wrapped type since we used type erasure
-	static void pthread_alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (pthread_alarm_node_wrap(L) &)a ); }
-
 }
 
@@ -421 +388 @@
 	on_wakeup(*i.lock, recursion_count);
 }
-
-	//-----------------------------------------------------------------------------
-	// pthread_cond_var
-
-	void ?{}( pthread_cond_var(L) & this ) with(this) {
-		blocked_threads{};
-		lock{};
-	}
-
-	void ^?{}( pthread_cond_var(L) & this ) { }
-
-	bool notify_one( pthread_cond_var(L) & this ) with(this) {
-		lock( lock __cfaabi_dbg_ctx2 );
-		bool ret = ! blocked_threads`isEmpty;
-		if ( ret ) {
-			info_thread(L) & popped = try_pop_front( blocked_threads );
-			on_notify(*popped.lock, popped.t);
-		}
-		unlock( lock );
-		return ret;
-	}
-
-	bool notify_all( pthread_cond_var(L) & this ) with(this) {
-		lock( lock __cfaabi_dbg_ctx2 );
-		bool ret = ! blocked_threads`isEmpty;
-		while( ! blocked_threads`isEmpty ) {
-			info_thread(L) & popped = try_pop_front( blocked_threads );
-			on_notify(*popped.lock, popped.t);
-		}
-		unlock( lock );
-		return ret;
-	}
-
-	uintptr_t front( pthread_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty ? NULL : blocked_threads`first.info; }
-	bool empty ( pthread_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty; }
-
-	static size_t queue_and_get_recursion( pthread_cond_var(L) & this, info_thread(L) * i ) with(this) {
-		// add info_thread to waiting queue
-		insert_last( blocked_threads, *i );
-		size_t recursion_count = 0;
-		recursion_count = on_wait( *i->lock );
-		return recursion_count;
-	}
-
-	static void queue_info_thread_timeout( pthread_cond_var(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
-		lock( lock __cfaabi_dbg_ctx2 );
-		size_t recursion_count = queue_and_get_recursion(this, &info);
-		pthread_alarm_node_wrap(L) node_wrap = { t, 0`s, callback, &this, &info };
-		register_self( &node_wrap.alarm_node );
-		unlock( lock );
-
-		// blocks here
-		park();
-
-		// unregisters alarm so it doesn't go off if this happens first
-		unregister_self( &node_wrap.alarm_node );
-
-		// resets recursion count here after waking
-		if (info.lock) on_wakeup(*info.lock, recursion_count);
-	}
-
-	void wait( pthread_cond_var(L) & this, L & l ) with(this) {
-		wait( this, l, 0 );
-	}
-
-	void wait( pthread_cond_var(L) & this, L & l, uintptr_t info ) with(this) {
-		lock( lock __cfaabi_dbg_ctx2 );
-		info_thread( L ) i = { active_thread(), info, &l };
-		size_t recursion_count = queue_and_get_recursion(this, &i);
-		unlock( lock );
-		park( );
-		on_wakeup(*i.lock, recursion_count);
-	}
-
-	#define PTHREAD_WAIT_TIME( u, l, t ) \
-		info_thread( L ) i = { active_thread(), u, l }; \
-		queue_info_thread_timeout(this, i, t, pthread_alarm_node_wrap_cast ); \
-		return i.signalled;
-
-	bool wait( pthread_cond_var(L) & this, L & l, timespec t ) {
-		Duration d = { t };
-		WAIT_TIME( 0, &l , d )
-	}
-
-	bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t ) {
-		Duration d = { t };
-		WAIT_TIME( info, &l , d )
-	}
-}
+}
+
 //-----------------------------------------------------------------------------
 // Semaphore
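For reference, the removed condition variable was used in the usual monitor-style pattern: hold the lock, wait with a `timespec` timeout, and use the boolean result to distinguish a signal from a timeout (the timeout handler above clears `signalled` before waking the thread). A usage sketch only; the surrounding functions, the 1-second timeout, and the choice of `simple_owner_lock` as the lock type `L` are illustrative:

	// Usage sketch of the removed pthread_cond_var API (illustrative only).
	simple_owner_lock l;
	pthread_cond_var( simple_owner_lock ) cv;
	bool ready = false;

	void consumer() {
		lock( l );
		timespec deadline = { 1, 0 };            // timeout value (illustrative)
		while ( ! ready ) {
			if ( ! wait( cv, l, deadline ) ) {   // false => woken by timeout, not by a signal
				break;
			}
		}
		unlock( l );
	}

	void producer() {
		lock( l );
		ready = true;
		notify_one( cv );                        // returns true if a waiter was present
		unlock( l );
	}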
libcfa/src/concurrency/locks.hfa
r6e2b04e → r720f2fe2

@@ -98 +98 @@
 	mcs_node * next = advance(l.queue, &n);
 	if(next) post(next->sem);
 }
 
-//-----------------------------------------------------------------------------
-// MCS Spin Lock
-// - No recursive acquisition
-// - Needs to be released by owner
-
-struct mcs_spin_node {
-	mcs_spin_node * volatile next;
-	bool locked:1;
-};
-
-struct mcs_spin_queue {
-	mcs_spin_node * volatile tail;
-};
-
-static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }
-
-static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
-	return node->next;
-}
-
-struct mcs_spin_lock {
-	mcs_spin_queue queue;
-};
-
-static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
-	mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
-	if(prev != 0p) {
-		prev->next = &n;
-		while(n.locked) Pause();
-	}
-}
-
-static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
-	mcs_spin_node * n_ptr = &n;
-	if (!__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
-		while (n.next == 0p) {}
-		n.next->locked = false;
-	}
-}
-
-//-----------------------------------------------------------------------------
-// CLH Spinlock
-// - No recursive acquisition
-// - Needs to be released by owner
-
-struct clh_lock {
-	volatile bool * volatile tail;
-};
-
-static inline void ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
-static inline void ^?{}( clh_lock & this ) { free(this.tail); }
-
-static inline void lock(clh_lock & l) {
-	thread$ * curr_thd = active_thread();
-	*(curr_thd->clh_node) = false;
-	volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
-	while(!__atomic_load_n(prev, __ATOMIC_ACQUIRE)) Pause();
-	curr_thd->clh_prev = prev;
-}
-
-static inline void unlock(clh_lock & l) {
-	thread$ * curr_thd = active_thread();
-	__atomic_store_n(curr_thd->clh_node, true, __ATOMIC_RELEASE);
-	curr_thd->clh_node = curr_thd->clh_prev;
-}
-
@@ -271 +205 @@
 // Fast Block Lock
 
-// minimal blocking lock
+// High efficiency minimal blocking lock
 // - No reacquire for cond var
 // - No recursive acquisition
 // - No ownership
 struct fast_block_lock {
+	// Spin lock used for mutual exclusion
+	__spinlock_t lock;
+
 	// List of blocked threads
 	dlist( thread$ ) blocked_threads;
 
-	// Spin lock used for mutual exclusion
-	__spinlock_t lock;
-
-	// flag showing if lock is held
 	bool held:1;
-
-	#ifdef __CFA_DEBUG__
-	// for deadlock detection
-	struct thread$ * owner;
-	#endif
 
 };
@@ -303 +231 @@
 static inline void lock(fast_block_lock & this) with(this) {
 	lock( lock __cfaabi_dbg_ctx2 );
-
-	#ifdef __CFA_DEBUG__
-	assert(!(held && owner == active_thread()));
-	#endif
 	if (held) {
 		insert_last( blocked_threads, *active_thread() );
@@ -314 +238 @@
 	}
 	held = true;
-	#ifdef __CFA_DEBUG__
-	owner = active_thread();
-	#endif
 	unlock( lock );
 }
@@ -325 +246 @@
 	thread$ * t = &try_pop_front( blocked_threads );
 	held = ( t ? true : false );
-	#ifdef __CFA_DEBUG__
-	owner = ( t ? t : 0p );
-	#endif
 	unpark( t );
 	unlock( lock );
@@ -335 +253 @@
 static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
 static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
-
-//-----------------------------------------------------------------------------
-// simple_owner_lock
-
-// pthread owner lock
-// - reacquire for cond var
-// - recursive acquisition
-// - ownership
-struct simple_owner_lock {
-	// List of blocked threads
-	dlist( thread$ ) blocked_threads;
-
-	// Spin lock used for mutual exclusion
-	__spinlock_t lock;
-
-	// owner showing if lock is held
-	struct thread$ * owner;
-
-	size_t recursion_count;
-};
-
-static inline void ?{}( simple_owner_lock & this ) with(this) {
-	lock{};
-	blocked_threads{};
-	owner = 0p;
-	recursion_count = 0;
-}
-static inline void ^?{}( simple_owner_lock & this ) {}
-static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
-static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
-
-static inline void lock(simple_owner_lock & this) with(this) {
-	if (owner == active_thread()) {
-		recursion_count++;
-		return;
-	}
-	lock( lock __cfaabi_dbg_ctx2 );
-
-	if (owner != 0p) {
-		insert_last( blocked_threads, *active_thread() );
-		unlock( lock );
-		park( );
-		return;
-	}
-	owner = active_thread();
-	recursion_count = 1;
-	unlock( lock );
-}
-
-// TODO: fix duplicate def issue and bring this back
-// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
-// 	thread$ * t = &try_pop_front( blocked_threads );
-// 	owner = t;
-// 	recursion_count = ( t ? 1 : 0 );
-// 	unpark( t );
-// }
-
-static inline void unlock(simple_owner_lock & this) with(this) {
-	lock( lock __cfaabi_dbg_ctx2 );
-	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
-	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
-	// if recursion count is zero release lock and set new owner if one is waiting
-	recursion_count--;
-	if ( recursion_count == 0 ) {
-		// pop_and_set_new_owner( this );
-		thread$ * t = &try_pop_front( blocked_threads );
-		owner = t;
-		recursion_count = ( t ? 1 : 0 );
-		unpark( t );
-	}
-	unlock( lock );
-}
-
-static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
-	lock( lock __cfaabi_dbg_ctx2 );
-	// lock held
-	if ( owner != 0p ) {
-		insert_last( blocked_threads, *t );
-		unlock( lock );
-	}
-	// lock not held
-	else {
-		owner = t;
-		recursion_count = 1;
-		unpark( t );
-		unlock( lock );
-	}
-}
-
-static inline size_t on_wait(simple_owner_lock & this) with(this) {
-	lock( lock __cfaabi_dbg_ctx2 );
-	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
-	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
-
-	size_t ret = recursion_count;
-
-	// pop_and_set_new_owner( this );
-
-	thread$ * t = &try_pop_front( blocked_threads );
-	owner = t;
-	recursion_count = ( t ? 1 : 0 );
-	unpark( t );
-
-	unlock( lock );
-	return ret;
-}
-
-static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
-
-//-----------------------------------------------------------------------------
-// Spin Queue Lock
-
-// - No reacquire for cond var
-// - No recursive acquisition
-// - No ownership
-// - spin lock with no locking/atomics in unlock
-struct spin_queue_lock {
-	// Spin lock used for mutual exclusion
-	mcs_spin_lock lock;
-
-	// flag showing if lock is held
-	bool held:1;
-
-	#ifdef __CFA_DEBUG__
-	// for deadlock detection
-	struct thread$ * owner;
-	#endif
-};
-
-static inline void ?{}( spin_queue_lock & this ) with(this) {
-	lock{};
-	held = false;
-}
-static inline void ^?{}( spin_queue_lock & this ) {}
-static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
-static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;
-
-// if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock(spin_queue_lock & this) with(this) {
-	mcs_spin_node node;
-	#ifdef __CFA_DEBUG__
-	assert(!(held && owner == active_thread()));
-	#endif
-	lock( lock, node );
-	while(held) Pause();
-	held = true;
-	unlock( lock, node );
-	#ifdef __CFA_DEBUG__
-	owner = active_thread();
-	#endif
-}
-
-static inline void unlock(spin_queue_lock & this) with(this) {
-	#ifdef __CFA_DEBUG__
-	owner = 0p;
-	#endif
-	held = false;
-}
-
-static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) { unpark(t); }
-static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { }
-
-
-//-----------------------------------------------------------------------------
-// MCS Block Spin Lock
-
-// - No reacquire for cond var
-// - No recursive acquisition
-// - No ownership
-// - Blocks but first node spins (like spin queue but blocking for not first thd)
-struct mcs_block_spin_lock {
-	// Spin lock used for mutual exclusion
-	mcs_lock lock;
-
-	// flag showing if lock is held
-	bool held:1;
-
-	#ifdef __CFA_DEBUG__
-	// for deadlock detection
-	struct thread$ * owner;
-	#endif
-};
-
-static inline void ?{}( mcs_block_spin_lock & this ) with(this) {
-	lock{};
-	held = false;
-}
-static inline void ^?{}( mcs_block_spin_lock & this ) {}
-static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
-static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
-
-// if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock(mcs_block_spin_lock & this) with(this) {
-	mcs_node node;
-	#ifdef __CFA_DEBUG__
-	assert(!(held && owner == active_thread()));
-	#endif
-	lock( lock, node );
-	while(held) Pause();
-	held = true;
-	unlock( lock, node );
-	#ifdef __CFA_DEBUG__
-	owner = active_thread();
-	#endif
-}
-
-static inline void unlock(mcs_block_spin_lock & this) with(this) {
-	#ifdef __CFA_DEBUG__
-	owner = 0p;
-	#endif
-	held = false;
-}
-
-static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
-static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) { }
-
-//-----------------------------------------------------------------------------
-// Block Spin Lock
-
-// - No reacquire for cond var
-// - No recursive acquisition
-// - No ownership
-// - Blocks but first node spins (like spin queue but blocking for not first thd)
-struct block_spin_lock {
-	// Spin lock used for mutual exclusion
-	fast_block_lock lock;
-
-	// flag showing if lock is held
-	bool held:1;
-
-	#ifdef __CFA_DEBUG__
-	// for deadlock detection
-	struct thread$ * owner;
-	#endif
-};
-
-static inline void ?{}( block_spin_lock & this ) with(this) {
-	lock{};
-	held = false;
-}
-static inline void ^?{}( block_spin_lock & this ) {}
-static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
-static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;
-
-// if this is called recursively IT WILL DEADLOCK!!!!!
-static inline void lock(block_spin_lock & this) with(this) {
-	#ifdef __CFA_DEBUG__
-	assert(!(held && owner == active_thread()));
-	#endif
-	lock( lock );
-	while(held) Pause();
-	held = true;
-	unlock( lock );
-	#ifdef __CFA_DEBUG__
-	owner = active_thread();
-	#endif
-}
-
-static inline void unlock(block_spin_lock & this) with(this) {
-	#ifdef __CFA_DEBUG__
-	owner = 0p;
-	#endif
-	held = false;
-}
-
-static inline void on_notify(block_spin_lock & this, struct thread$ * t ) { unpark(t); }
-static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(block_spin_lock & this, size_t recursion ) { }
 
 //-----------------------------------------------------------------------------
@@ -684 +332 @@
 // - signalling without holding branded lock is UNSAFE!
 // - only allows usage of one lock, cond var is branded after usage
-
 struct fast_cond_var {
 	// List of blocked threads
 	dlist( info_thread(L) ) blocked_threads;
+
 	#ifdef __CFA_DEBUG__
 	L * lock_used;
@@ -693 +341 @@
 };
 
+
 void ?{}( fast_cond_var(L) & this );
 void ^?{}( fast_cond_var(L) & this );
@@ -700 +349 @@
 
 uintptr_t front( fast_cond_var(L) & this );
+
 bool empty ( fast_cond_var(L) & this );
 
 void wait( fast_cond_var(L) & this, L & l );
 void wait( fast_cond_var(L) & this, L & l, uintptr_t info );
-
-
-//-----------------------------------------------------------------------------
-// pthread_cond_var
-//
-// - cond var with minimal footprint
-// - supports operations needed for phthread cond
-
-struct pthread_cond_var {
-	dlist( info_thread(L) ) blocked_threads;
-	__spinlock_t lock;
-};
-
-void ?{}( pthread_cond_var(L) & this );
-void ^?{}( pthread_cond_var(L) & this );
-
-bool notify_one( pthread_cond_var(L) & this );
-bool notify_all( pthread_cond_var(L) & this );
-
-uintptr_t front( pthread_cond_var(L) & this );
-bool empty ( pthread_cond_var(L) & this );
-
-void wait( pthread_cond_var(L) & this, L & l );
-void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
-bool wait( pthread_cond_var(L) & this, L & l, timespec t );
-bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
 }
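Of the removed primitives, the MCS variants require each acquirer to supply its own queue node, whose lifetime must span the critical section; that is why both lock and unlock take the node as a second argument. A short usage sketch of the removed `mcs_spin_lock` (the surrounding function and variable names are illustrative):

	// Usage sketch: each acquisition supplies a stack-allocated node, so a
	// waiter spins on the flag in its own node rather than on the lock word.
	mcs_spin_lock l;
	volatile int shared = 0;

	void critical_update() {
		mcs_spin_node node;       // must stay alive until the matching unlock
		lock( l, node );
		shared += 1;              // critical section
		unlock( l, node );
	}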
libcfa/src/concurrency/thread.cfa
r6e2b04e → r720f2fe2

@@ -53 +53 @@
 #endif
 
+	seqable.next = 0p;
+	seqable.back = 0p;
+
 	node.next = 0p;
 	node.prev = 0p;
+	doregister(curr_cluster, this);
 
-	clh_node = malloc( );
-	*clh_node = false;
-
-	doregister(curr_cluster, this);
 	monitors{ &self_mon_p, 1, (fptr_t)0 };
 }
@@ -67 +67 @@
 	canary = 0xDEADDEADDEADDEADp;
 #endif
-	free(clh_node);
 	unregister(curr_cluster, this);
 	^self_cor{};
libcfa/src/startup.cfa
r6e2b04e → r720f2fe2

@@ -63 +63 @@
 
 struct __spinlock_t;
+extern "C" {
+	void __cfaabi_dbg_record_lock(struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) libcfa_public {}
+}
 
 // Local Variables: //
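The weak, empty definition added here pairs with the strong definition added in kernel.cfa: programs that link the threading kernel resolve `__cfaabi_dbg_record_lock` to the real recorder, while programs without it still link against a no-op. The same weak/strong linkage pattern in a generic sketch, with illustrative names:

	/* default.c : fallback used when no stronger definition is linked in */
	void report_event( const char * what ) __attribute__(( weak ));
	void report_event( const char * what ) {}   /* no-op placeholder */

	/* runtime.c : a strong definition overrides the weak one at link time */
	#include <stdio.h>
	void report_event( const char * what ) { fprintf( stderr, "event: %s\n", what ); }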