Changes in / [17b6fc9:dd53f75]
Files:
- 7 added
- 2 deleted
- 24 edited
Legend:
- Unmodified lines are shown as unprefixed context
- Added lines (new in dd53f75) are prefixed with '+'
- Removed lines (only in 17b6fc9) are prefixed with '-'
benchmark/io/http/main.cfa
@@ -125 +125 @@
 		workers[i].flags = 0;
 	}
-	unpark( workers[i] __cfaabi_dbg_ctx2);
+	unpark( workers[i] );
 }
 printf("%d workers started on %d processors\n", options.clopts.nworkers, options.clopts.nprocs);
benchmark/io/http/worker.cfa
@@ -22 +22 @@
 
 void main( Worker & this ) {
-	park( __cfaabi_dbg_ctx);
+	park();
 	/* paranoid */ assert( this.pipe[0] != -1 );
 	/* paranoid */ assert( this.pipe[1] != -1 );
benchmark/io/readv.cfa
@@ -54 +54 @@
 
 void main( Reader & ) {
-	park( __cfaabi_dbg_ctx);
+	park();
 	/* paranoid */ assert( true == __atomic_load_n(&run, __ATOMIC_RELAXED) );
 
@@ -151 +151 @@
 
 	for(i; nthreads) {
-		unpark( threads[i] __cfaabi_dbg_ctx2);
+		unpark( threads[i] );
 	}
 	wait(duration, start, end, is_tty);
benchmark/readyQ/yield.cfa
@@ -32 +32 @@
 
 void main( Yielder & this ) {
-	park( __cfaabi_dbg_ctx);
+	park();
 	/* paranoid */ assert( true == __atomic_load_n(&run, __ATOMIC_RELAXED) );
 
@@ -70 +70 @@
 
 	for(i; nthreads) {
-		unpark( threads[i] __cfaabi_dbg_ctx2);
+		unpark( threads[i] );
 	}
 	wait(duration, start, end, is_tty);
doc/theses/thierry_delisle_PhD/code/readQ_example/proto-gui/main.cpp
@@ -1 +1 @@
-#include "thrdlib/thread.h"
+#include "thrdlib/thread.hpp"
 
 #include <cassert>
@@ -5 +5 @@
 #include <algorithm>
 #include <atomic>
+#include <iostream>
 #include <memory>
 #include <vector>
 
 #include <getopt.h>
+using thrdlib::thread_t;
+
+
+extern __attribute__((aligned(128))) thread_local struct {
+	void * volatile this_thread;
+	void * volatile this_processor;
+	void * volatile this_stats;
+
+	struct {
+		volatile unsigned short disable_count;
+		volatile bool enabled;
+		volatile bool in_progress;
+	} preemption_state;
+
+	#if defined(__SIZEOF_INT128__)
+		__uint128_t rand_seed;
+	#else
+		uint64_t rand_seed;
+	#endif
+	struct {
+		uint64_t fwd_seed;
+		uint64_t bck_seed;
+	} ready_rng;
+} kernelTLS __attribute__ ((tls_model ( "initial-exec" )));
 
 //--------------------
@@ -36 +61 @@
 		assert( expected == reset );
 		if( std::atomic_compare_exchange_strong( &state, &expected, self) ) {
-			thrdlib_park( self );
+			thrdlib::park( self );
 			ret = true;
 			goto END;
@@ -54 +79 @@
 	if( got == reset ) return false;
 
-	thrdlib_unpark( got );
+	thrdlib::unpark( got );
 	return true;
 }
@@ -109 +134 @@
 	the_stats_thread = self;
 	fence();
-	thrdlib_park( self );
+	thrdlib::park( self );
 
 	std::vector<bool> seen;
@@ -115 +140 @@
 
 	while(last_produced < nproduce) {
-		thrdlib_yield();
+		thrdlib::yield();
 		thrd_stats.stats.ran++;
 		if( last_produced > 0 ) seen.at(last_produced - 1) = true;
@@ -147 +172 @@
 
 void Renderer( thread_t self ) {
-	thrdlib_unpark( the_stats_thread );
+	thrdlib::unpark( the_stats_thread );
 	for(unsigned i = 0; i < nproduce; i++) {
 		auto & frame = frames[i % nframes];
@@ -178 +203 @@
 	fsize = 1000;
 	nproduce = 60;
+
+	const char * framework;
 
 	for(;;) {
@@ -196 +223 @@
 		case -1:
 			/* paranoid */ assert(optind <= argc);
+			if( optind == argc ) {
+				std::cerr << "Must specify a framework" << std::endl;
+				goto usage;
+
+			}
+			framework = argv[optind];
 			goto run;
 		case 'b':
@@ -228 +261 @@
 			std::cerr << opt << std::endl;
 		usage:
-			std::cerr << "Usage: " << argv[0] << " [options] " << std::endl;
+			std::cerr << "Usage: " << argv[0] << " [options] framework" << std::endl;
 			std::cerr << std::endl;
 			std::cerr << " -b, --buff=COUNT Number of frames to buffer" << std::endl;
@@ -237 +270 @@
 	}
 	run:
+	assert( framework );
 
 	frames.reset(new Frame[nframes]);
@@ -246 +280 @@
 	std::cout << "(Buffering " << nframes << ")" << std::endl;
 
-	thrdlib_setproccnt(2 );
-
-	thread_t stats = thrdlib_create( Stats);
+	thrdlib::init( framework, 2 );
+
+	thread_t stats = thrdlib::create( Stats );
 	std::cout << "Created Stats Thread" << std::endl;
-	while( the_stats_thread == nullptr ) thrdlib_yield();
+	while( the_stats_thread == nullptr ) thrdlib::yield();
+
 	std::cout << "Creating Main Threads" << std::endl;
-	thread_t renderer = thrdlib_create( Renderer );
-	// while(true);
-	thread_t simulator = thrdlib_create( Simulator );
+	thread_t renderer = thrdlib::create( Renderer );
+	thread_t simulator = thrdlib::create( Simulator );
 
 	std::cout << "Running" << std::endl;
 
-	thrdlib_join( simulator );
-	thrdlib_join( renderer );
-	thrdlib_join( stats );
+	thrdlib::join( simulator );
+	thrdlib::join( renderer );
+	thrdlib::join( stats );
+
+	thrdlib::clean();
 
 	std::cout << "----------" << std::endl;
libcfa/src/bits/locks.hfa
@@ -164 +164 @@
 
 struct $thread;
-extern void park( __cfaabi_dbg_ctx_param);
-extern void unpark( struct $thread * this __cfaabi_dbg_ctx_param2);
+extern void park( void );
+extern void unpark( struct $thread * this );
 static inline struct $thread * active_thread ();
 
@@ -191 +191 @@
 	/* paranoid */ verify( expected == 0p );
 	if(__atomic_compare_exchange_n(&this.ptr, &expected, active_thread(), false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-		park( __cfaabi_dbg_ctx);
+		park();
 		return true;
 	}
@@ -210 +210 @@
 	else {
 		if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-			unpark( expected __cfaabi_dbg_ctx2);
+			unpark( expected );
 			return true;
 		}
@@ -244 +244 @@
 	/* paranoid */ verify( expected == 0p );
 	if(__atomic_compare_exchange_n(&this.ptr, &expected, active_thread(), false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-		park( __cfaabi_dbg_ctx);
+		park();
 		/* paranoid */ verify( this.ptr == 1p );
 		return true;
@@ -256 +256 @@
 	struct $thread * got = __atomic_exchange_n( &this.ptr, 1p, __ATOMIC_SEQ_CST);
 	if( got == 0p ) return false;
-	unpark( got __cfaabi_dbg_ctx2);
+	unpark( got );
 	return true;
 }
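Note on the hunks above: bits/locks.hfa builds a one-shot handoff out of a single atomic pointer. A waiter CASes its own thread descriptor into the pointer and parks; a poster exchanges the pointer for the 1p sentinel and unparks whatever thread it found, so a post that arrives before the wait is never lost. The following stand-alone C sketch illustrates the same idea; it is not libcfa code, all names are illustrative, and a pthread condition variable stands in for park()/unpark().

/* oneshot.c — sketch of the one-shot park/unpark handoff touched above.
 * Build: cc oneshot.c -pthread */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct waiter {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	bool            woken;
} waiter;

#define FULFILLED ((waiter *)1)      /* plays the role of libcfa's 1p sentinel */

typedef struct oneshot {
	_Atomic(waiter *) ptr;           /* NULL: empty, FULFILLED: posted, else: the blocked waiter */
} oneshot;

static void park(waiter * self) {    /* block until unpark(self) */
	pthread_mutex_lock(&self->lock);
	while (!self->woken) pthread_cond_wait(&self->cond, &self->lock);
	pthread_mutex_unlock(&self->lock);
}

static void unpark(waiter * w) {     /* wake a parked (or about-to-park) waiter */
	pthread_mutex_lock(&w->lock);
	w->woken = true;
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
}

/* wait: install ourselves and block, unless the post already happened */
static void oneshot_wait(oneshot * this, waiter * self) {
	waiter * expected = NULL;
	if (atomic_compare_exchange_strong(&this->ptr, &expected, self)) {
		park(self);
		assert(atomic_load(&this->ptr) == FULFILLED);
	}
	/* else: expected == FULFILLED, the post came first, so do not block */
}

/* post: mark fulfilled and wake the waiter if one was already installed */
static bool oneshot_post(oneshot * this) {
	waiter * got = atomic_exchange(&this->ptr, FULFILLED);
	if (got == NULL) return false;   /* nobody was waiting yet */
	unpark(got);
	return true;
}

static oneshot event = { NULL };

static void * worker(void * arg) {
	oneshot_wait(&event, (waiter *)arg);  /* returns once main() has posted */
	puts("worker released");
	return NULL;
}

int main(void) {
	waiter self = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false };
	pthread_t tid;
	pthread_create(&tid, NULL, worker, &self);
	oneshot_post(&event);            /* safe whether or not the worker has parked yet */
	pthread_join(tid, NULL);
	return 0;
}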
libcfa/src/concurrency/alarm.cfa
@@ -130 +130 @@
 
 	register_self( &node );
-	park( __cfaabi_dbg_ctx);
+	park();
 
 	/* paranoid */ verify( !node.set );
libcfa/src/concurrency/clib/cfathread.cfa
@@ -34 +34 @@
 extern "C" {
 	//--------------------
-	// Basic thread managenemt
+	// Basic thread management
 	CRunner * cfathread_create( void (*main)( CRunner * ) ) {
 		return new( main );
@@ -44 +44 @@
 
 	void cfathread_park( void ) {
-		park( __cfaabi_dbg_ctx);
+		park();
 	}
 
 	void cfathread_unpark( CRunner * thrd ) {
-		unpark( *thrd __cfaabi_dbg_ctx2);
+		unpark( *thrd );
 	}
libcfa/src/concurrency/clib/cfathread.h
@@ -17 +17 @@
 #include "invoke.h"
 
-#if defined(__cforall) || defined(__cpluplus)
+#if defined(__cforall) || defined(__cplusplus)
 extern "C" {
 #endif
@@ -39 +39 @@
 
 
-#if defined(__cforall) || defined(__cpluplus)
+#if defined(__cforall) || defined(__cplusplus)
 }
 #endif
libcfa/src/concurrency/invoke.h
@@ -93 +93 @@
 
 };
+// Wrapper for gdb
+struct cfathread_coroutine_t { struct $coroutine debug; };
 
 static inline struct __stack_t * __get_stack( struct $coroutine * cor ) {
@@ -129 +131 @@
 	struct __condition_node_t * dtor_node;
 };
+// Wrapper for gdb
+struct cfathread_monitor_t { struct $monitor debug; };
 
 struct __monitor_group_t {
@@ -186 +190 @@
 	} node;
 
-	#ifdef __CFA_DEBUG__
-		// previous function to park/unpark the thread
-		const char * park_caller;
-		int park_result;
-		enum __Coroutine_State park_state;
-		bool park_stale;
-		const char * unpark_caller;
-		int unpark_result;
-		enum __Coroutine_State unpark_state;
-		bool unpark_stale;
+	#if defined( __CFA_WITH_VERIFY__ )
+		unsigned long long canary;
 	#endif
 };
+// Wrapper for gdb
+struct cfathread_thread_t { struct $thread debug; };
 
 #ifdef __CFA_DEBUG__
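Note on the "Wrapper for gdb" additions above: the internal descriptors are named with a leading '$', which is awkward to refer to from a debugger (gdb reserves '$' for its own convenience variables), so each one is presumably wrapped in a single-field struct with an ordinary name that gdb can cast to. A minimal illustration of the idiom follows; everything outside the diff is hypothetical, and "dollar_thread" stands in for libcfa's $thread because plain C does not allow '$' in identifiers.

/* The real descriptor keeps its internal name. */
struct dollar_thread {
	unsigned long long canary;
	int state;
	/* ... remaining fields ... */
};

/* Wrapper for gdb: identical layout, but a name that is easy to type, e.g.
 *   (gdb) print ((struct cfathread_thread_t *) thrd)->debug.state        */
struct cfathread_thread_t { struct dollar_thread debug; };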
libcfa/src/concurrency/io.cfa
@@ -69 +69 @@
 	if( block ) {
 		enable_interrupts( __cfaabi_dbg_ctx );
-		park( __cfaabi_dbg_ctx);
+		park();
 		disable_interrupts();
 	}
@@ -97 +97 @@
 
 	if(nextt) {
-		unpark( nextt __cfaabi_dbg_ctx2);
+		unpark( nextt );
 		enable_interrupts( __cfaabi_dbg_ctx );
 		return true;
libcfa/src/concurrency/io/setup.cfa
@@ -247 +247 @@
 	thrd.link.next = 0p;
 	thrd.link.prev = 0p;
-	__cfaabi_dbg_debug_do( thrd.unpark_stale = true );
 
 	// Fixup the thread state
libcfa/src/concurrency/kernel.cfa
@@ -246 +246 @@
 		thrd_dst->state = Active;
 
-		__cfaabi_dbg_debug_do(
-			thrd_dst->park_stale = true;
-			thrd_dst->unpark_stale = true;
-		)
 		// Update global state
 		kernelTLS.this_thread = thrd_dst;
@@ -255 +251 @@
 		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 		/* paranoid */ verify( kernelTLS.this_thread == thrd_dst );
+		/* paranoid */ verify( thrd_dst->context.SP );
 		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
 		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
+		/* paranoid */ verify( 0x0D15EA5E0D15EA5E == thrd_dst->canary );
+
 
 
 		// set context switch to the thread that the processor is executing
-		verify( thrd_dst->context.SP );
 		__cfactx_switch( &proc_cor->context, &thrd_dst->context );
 		// when __cfactx_switch returns we are back in the processor coroutine
 
+		/* paranoid */ verify( 0x0D15EA5E0D15EA5E == thrd_dst->canary );
 		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
 		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
+		/* paranoid */ verify( thrd_dst->context.SP );
 		/* paranoid */ verify( kernelTLS.this_thread == thrd_dst );
 		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
@@ -288 +288 @@
 				// The thread has halted, it should never be scheduled/run again
 				// We may need to wake someone up here since
-				unpark( this->destroyer __cfaabi_dbg_ctx2);
+				unpark( this->destroyer );
 				this->destroyer = 0p;
 				break RUNNING;
@@ -298 +298 @@
 		// set state of processor coroutine to active and the thread to inactive
 		int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
-		__cfaabi_dbg_debug_do( thrd_dst->park_result = old_ticket; )
 		switch(old_ticket) {
 			case 1:
@@ -335 +334 @@
 		__x87_store;
 	#endif
-	verify( proc_cor->context.SP );
+	/* paranoid */ verify( proc_cor->context.SP );
+	/* paranoid */ verify( 0x0D15EA5E0D15EA5E == thrd_src->canary );
 	__cfactx_switch( &thrd_src->context, &proc_cor->context );
+	/* paranoid */ verify( 0x0D15EA5E0D15EA5E == thrd_src->canary );
 	#if defined( __i386 ) || defined( __x86_64 )
 		__x87_load;
@@ -368 +369 @@
 	/* paranoid */ #endif
 	/* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
+	/* paranoid */ verify( 0x0D15EA5E0D15EA5E == thrd->canary );
+
 
 	if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;
@@ -404 +407 @@
 
 // KERNEL ONLY unpark with out disabling interrupts
-void __unpark( struct __processor_id_t * id, $thread * thrd __cfaabi_dbg_ctx_param2 ) {
-	// record activity
-	__cfaabi_dbg_record_thrd( *thrd, false, caller );
-
+void __unpark( struct __processor_id_t * id, $thread * thrd ) {
 	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
-	__cfaabi_dbg_debug_do( thrd->unpark_result = old_ticket; thrd->unpark_state = thrd->state; )
 	switch(old_ticket) {
 		case 1:
@@ -427 +426 @@
 }
 
-void unpark( $thread * thrd __cfaabi_dbg_ctx_param2) {
+void unpark( $thread * thrd ) {
 	if( !thrd ) return;
 
 	disable_interrupts();
-	__unpark( (__processor_id_t*)kernelTLS.this_processor, thrd __cfaabi_dbg_ctx_fwd2);
+	__unpark( (__processor_id_t*)kernelTLS.this_processor, thrd );
 	enable_interrupts( __cfaabi_dbg_ctx );
 }
 
-void park( __cfaabi_dbg_ctx_param) {
+void park( void ) {
 	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
 	disable_interrupts();
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	/* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION );
-
-	// record activity
-	__cfaabi_dbg_record_thrd( *kernelTLS.this_thread, true, caller );
 
 	returnToKernel();
@@ -650 +646 @@
 	// atomically release spin lock and block
 	unlock( lock );
-	park( __cfaabi_dbg_ctx);
+	park();
 	return true;
 }
@@ -671 +667 @@
 
 	// make new owner
-	unpark( thrd __cfaabi_dbg_ctx2);
+	unpark( thrd );
 
 	return thrd != 0p;
@@ -682 +678 @@
 	count += diff;
 	for(release) {
-		unpark( pop_head( waiting ) __cfaabi_dbg_ctx2);
+		unpark( pop_head( waiting ) );
 	}
 
@@ -698 +694 @@
 		this.prev_thrd = kernelTLS.this_thread;
 	}
-
-	void __cfaabi_dbg_record_thrd($thread & this, bool park, const char prev_name[]) {
-		if(park) {
-			this.park_caller = prev_name;
-			this.park_stale = false;
-		}
-		else {
-			this.unpark_caller = prev_name;
-			this.unpark_stale = false;
-		}
-	}
 }
 )
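Note on the canary changes above (kernel.cfa, together with invoke.h, kernel/startup.cfa and thread.cfa below): the old per-thread park/unpark bookkeeping is replaced by a single 64-bit canary. The constructor writes 0x0D15EA5E0D15EA5E, the destructor overwrites it with 0xDEADDEADDEADDEAD, and the scheduler verifies the live value on both sides of every context switch, so a corrupted or already-destroyed thread descriptor trips a verify() immediately instead of silently corrupting the scheduler later. A stand-alone C sketch of the technique follows; all names are illustrative, not libcfa's.

/* canary.c — sketch of a descriptor canary check.  Build: cc canary.c */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define LIVE_CANARY 0x0D15EA5E0D15EA5EULL   /* written at construction */
#define DEAD_CANARY 0xDEADDEADDEADDEADULL   /* written at destruction  */

struct thread_desc {
	uint64_t canary;
	/* ... stack pointer, state, ready-queue links ... */
};

static struct thread_desc * thread_create(void) {
	struct thread_desc * t = malloc(sizeof *t);
	t->canary = LIVE_CANARY;
	return t;
}

static void thread_destroy(struct thread_desc * t) {
	t->canary = DEAD_CANARY;   /* poison so a stale use fails loudly, not silently */
	free(t);
}

/* Called on either side of a context switch: a descriptor that was freed,
 * overrun, or never initialised fails this assertion immediately. */
static void verify_thread(const struct thread_desc * t) {
	assert(t->canary == LIVE_CANARY);
}

int main(void) {
	struct thread_desc * t = thread_create();
	verify_thread(t);          /* e.g. before switching onto this thread */
	/* ... run the thread ... */
	verify_thread(t);          /* and again after switching back */
	thread_destroy(t);
	return 0;
}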
libcfa/src/concurrency/kernel/fwd.hfa
@@ -118 +118 @@
 
 extern "Cforall" {
-	extern void park( __cfaabi_dbg_ctx_param);
-	extern void unpark( struct $thread * this __cfaabi_dbg_ctx_param2);
+	extern void park( void );
+	extern void unpark( struct $thread * this );
 	static inline struct $thread * active_thread () { return TL_GET( this_thread ); }
libcfa/src/concurrency/kernel/startup.cfa
@@ -451 +451 @@
 	link.next = 0p;
 	link.prev = 0p;
+	#if defined( __CFA_WITH_VERIFY__ )
+		canary = 0x0D15EA5E0D15EA5E;
+	#endif
 
 	node.next = 0p;
libcfa/src/concurrency/kernel_private.hfa
@@ -64 +64 @@
 
 // KERNEL ONLY unpark with out disabling interrupts
-void __unpark( struct __processor_id_t *, $thread * thrd __cfaabi_dbg_ctx_param2);
+void __unpark( struct __processor_id_t *, $thread * thrd );
 
 static inline bool __post(single_sem & this, struct __processor_id_t * id) {
@@ -77 +77 @@
 	else {
 		if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-			__unpark( id, expected __cfaabi_dbg_ctx2);
+			__unpark( id, expected );
 			return true;
 		}
libcfa/src/concurrency/monitor.cfa
@@ -122 +122 @@
 
 	unlock( this->lock );
-	park( __cfaabi_dbg_ctx);
+	park();
 
 	__cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this);
@@ -201 +201 @@
 	// Release the next thread
 	/* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
-	unpark( urgent->owner->waiting_thread __cfaabi_dbg_ctx2);
+	unpark( urgent->owner->waiting_thread );
 
 	// Park current thread waiting
-	park( __cfaabi_dbg_ctx);
+	park();
 
 	// Some one was waiting for us, enter
@@ -222 +222 @@
 
 	// Park current thread waiting
-	park( __cfaabi_dbg_ctx);
+	park();
 
 	/* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
@@ -264 +264 @@
 	//We need to wake-up the thread
 	/* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
-	unpark( new_owner __cfaabi_dbg_ctx2);
+	unpark( new_owner );
 }
 
@@ -493 +493 @@
 	// Wake the threads
 	for(int i = 0; i < thread_count; i++) {
-		unpark( threads[i] __cfaabi_dbg_ctx2);
+		unpark( threads[i] );
 	}
 
 	// Everything is ready to go to sleep
-	park( __cfaabi_dbg_ctx);
+	park();
 
 	// We are back, restore the owners and recursions
@@ -575 +575 @@
 
 	// unpark the thread we signalled
-	unpark( signallee __cfaabi_dbg_ctx2);
+	unpark( signallee );
 
 	//Everything is ready to go to sleep
-	park( __cfaabi_dbg_ctx);
+	park();
 
 
@@ -679 +679 @@
 
 	// unpark the thread we signalled
-	unpark( next __cfaabi_dbg_ctx2);
+	unpark( next );
 
 	//Everything is ready to go to sleep
-	park( __cfaabi_dbg_ctx);
+	park();
 
 	// We are back, restore the owners and recursions
@@ -724 +724 @@
 
 	//Everything is ready to go to sleep
-	park( __cfaabi_dbg_ctx);
+	park();
libcfa/src/concurrency/mutex.cfa
@@ -42 +42 @@
 		append( blocked_threads, kernelTLS.this_thread );
 		unlock( lock );
-		park( __cfaabi_dbg_ctx);
+		park();
 	}
 	else {
@@ -65 +65 @@
 	this.is_locked = (this.blocked_threads != 0);
 	unpark(
-		pop_head( this.blocked_threads ) __cfaabi_dbg_ctx2
+		pop_head( this.blocked_threads )
 	);
 	unlock( this.lock );
@@ -97 +97 @@
 		append( blocked_threads, kernelTLS.this_thread );
 		unlock( lock );
-		park( __cfaabi_dbg_ctx);
+		park();
 	}
 }
@@ -124 +124 @@
 		owner = thrd;
 		recursion_count = (thrd ? 1 : 0);
-		unpark( thrd __cfaabi_dbg_ctx2);
+		unpark( thrd );
 	}
 	unlock( lock );
@@ -142 +142 @@
 	lock( lock __cfaabi_dbg_ctx2 );
 	unpark(
-		pop_head( this.blocked_threads ) __cfaabi_dbg_ctx2
+		pop_head( this.blocked_threads )
 	);
 	unlock( lock );
@@ -151 +151 @@
 	while(this.blocked_threads) {
 		unpark(
-			pop_head( this.blocked_threads ) __cfaabi_dbg_ctx2
+			pop_head( this.blocked_threads )
 		);
 	}
@@ -161 +161 @@
 	append( this.blocked_threads, kernelTLS.this_thread );
 	unlock( this.lock );
-	park( __cfaabi_dbg_ctx);
+	park();
 }
 
@@ -170 +170 @@
 	unlock(l);
 	unlock(this.lock);
-	park( __cfaabi_dbg_ctx);
+	park();
 	lock(l);
 }
libcfa/src/concurrency/preemption.cfa
@@ -274 +274 @@
 		kernelTLS.this_stats = this->curr_cluster->stats;
 	#endif
-	__unpark( id, this __cfaabi_dbg_ctx2);
+	__unpark( id, this );
libcfa/src/concurrency/thread.cfa
@@ -39 +39 @@
 	link.prev = 0p;
 	link.preferred = -1;
+	#if defined( __CFA_WITH_VERIFY__ )
+		canary = 0x0D15EA5E0D15EA5E;
+	#endif
 
 	node.next = 0p;
@@ -48 +51 @@
 
 void ^?{}($thread& this) with( this ) {
+	#if defined( __CFA_WITH_VERIFY__ )
+		canary = 0xDEADDEADDEADDEAD;
+	#endif
 	unregister(curr_cluster, this);
 	^self_cor{};
libcfa/src/concurrency/thread.hfa
@@ -88 +88 @@
 //----------
 // Park thread: block until corresponding call to unpark, won't block if unpark is already called
-void park( __cfaabi_dbg_ctx_param);
+void park( void );
 
 //----------
 // Unpark a thread, if the thread is already blocked, schedule it
 // if the thread is not yet block, signal that it should rerun immediately
-void unpark( $thread * this __cfaabi_dbg_ctx_param2);
+void unpark( $thread * this );
 
 forall( dtype T | is_thread(T) )
-static inline void unpark( T & this __cfaabi_dbg_ctx_param2 ) { if(!&this) return; unpark( get_thread( this ) __cfaabi_dbg_ctx_fwd2);}
+static inline void unpark( T & this ) { if(!&this) return; unpark( get_thread( this ) );}
 
 //----------
tests/concurrent/park/contention.cfa
@@ -21 +21 @@
 	if(blocked[idx]) {
 		Thread * thrd = __atomic_exchange_n(&blocked[idx], 0p, __ATOMIC_SEQ_CST);
-		unpark( *thrd __cfaabi_dbg_ctx2);
+		unpark( *thrd );
 	} else {
 		Thread * thrd = __atomic_exchange_n(&blocked[idx], &this, __ATOMIC_SEQ_CST);
-		unpark( *thrd __cfaabi_dbg_ctx2);
-		park( __cfaabi_dbg_ctx);
+		unpark( *thrd );
+		park();
 	}
 }
@@ -41 +41 @@
 	int idx = myrand() % blocked_size;
 	Thread * thrd = __atomic_exchange_n(&blocked[idx], 0p, __ATOMIC_SEQ_CST);
-	unpark( *thrd __cfaabi_dbg_ctx2);
+	unpark( *thrd );
 	yield( myrand() % 20 );
 }
tests/concurrent/park/force_preempt.cfa
@@ -30 +30 @@
 
 	// Unpark this thread, don't force a yield
-	unpark( this __cfaabi_dbg_ctx2);
+	unpark( this );
 	assert(mask == 0xCAFEBABA);
 
@@ -43 +43 @@
 	// Park this thread,
 	assert(mask == (id_hash ^ 0xCAFEBABA));
-	park( __cfaabi_dbg_ctx);
+	park();
 	assert(mask == (id_hash ^ 0xCAFEBABA));
tests/concurrent/park/start_parked.cfa
@@ -3 +3 @@
 thread Parker {};
 void main( Parker & ) {
-	park( __cfaabi_dbg_ctx);
+	park();
 }
 
@@ -9 +9 @@
 	for(1000) {
 		Parker parker;
-		unpark( parker __cfaabi_dbg_ctx2);
+		unpark( parker );
 	}
 	printf( "done\n" ); // non-empty .expect file