Changeset 633a642
- Timestamp:
- Jan 30, 2018, 4:52:54 PM (7 years ago)
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children:
- 1449d83, 5ff188f
- Parents:
- 320eb73a (diff), 7416d46a (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location:
- src
- Files:
- 8 edited
Legend:
- Unmodified (context lines, prefixed with a space)
- Added (prefixed with +)
- Removed (prefixed with -)
src/libcfa/bits/containers.h
r320eb73a r633a642

 #ifdef __cforall
 
 forall(dtype T | is_node(T))
-static inline void ?{}( __queue(T) & this ) {
-    (this.head){ NULL };
-    (this.tail){ &this.head };
+static inline void ?{}( __queue(T) & this ) with( this ) {
+    head{ NULL };
+    tail{ &head };
 }
 
 forall(dtype T | is_node(T) | sized(T))
-static inline void append( __queue(T) & this, T * val ) {
-    verify(this.tail != NULL);
-    *this.tail = val;
-    this.tail = &get_next( *val );
+static inline void append( __queue(T) & this, T * val ) with( this ) {
+    verify(tail != NULL);
+    *tail = val;
+    tail = &get_next( *val );
 }
 
…
 
 forall(dtype T | is_node(T) | sized(T))
-static inline T * remove( __queue(T) & this, T ** it ) {
+static inline T * remove( __queue(T) & this, T ** it ) with( this ) {
     T * val = *it;
     verify( val );
…
     (*it) = get_next( *val );
 
-    if( this.tail == &get_next( *val ) ) {
-        this.tail = it;
+    if( tail == &get_next( *val ) ) {
+        tail = it;
     }
 
     get_next( *val ) = NULL;
 
-    verify( (this.head == NULL) == (&this.head == this.tail) );
-    verify( *this.tail == NULL );
+    verify( (head == NULL) == (&head == tail) );
+    verify( *tail == NULL );
     return val;
 }
 #endif
+
+//-----------------------------------------------------------------------------
+// Tools
+//-----------------------------------------------------------------------------
+#ifdef __cforall
+
+#endif
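The change repeated throughout this changeset is the adoption of Cforall's with clause: qualifying a routine with with( this ) opens the parameter so its members can be named without the this. prefix, and member constructors are then written directly as member{ args }. A minimal sketch of the pattern, using a hypothetical pair type rather than the library's __queue:

    struct pair { int first, second; };     // hypothetical type, not part of this changeset

    // with( this ) opens the parameter: members are named without the this. prefix,
    // and member constructors are called directly as member{ args }.
    void ?{}( pair & this ) with( this ) {
        first{ 0 };       // previously spelled (this.first){ 0 };
        second{ 0 };      // previously spelled (this.second){ 0 };
    }

    int main() {
        pair p;           // the declaration runs the constructor above
    }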
src/libcfa/concurrency/coroutine.c
r320eb73a r633a642

 } //ctxSwitchDirect
 
-void create_stack( coStack_t* this, unsigned int storageSize ) {
+void create_stack( coStack_t* this, unsigned int storageSize ) with( *this ) {
     //TEMP HACK do this on proper kernel startup
     if(pageSize == 0ul) pageSize = sysconf( _SC_PAGESIZE );
…
     size_t cxtSize = libCeiling( sizeof(machine_context_t), 8 ); // minimum alignment
 
-    if ( (intptr_t)this->storage == 0 ) {
-        this->userStack = false;
-        this->size = libCeiling( storageSize, 16 );
+    if ( (intptr_t)storage == 0 ) {
+        userStack = false;
+        size = libCeiling( storageSize, 16 );
         // use malloc/memalign because "new" raises an exception for out-of-memory
 
         // assume malloc has 8 byte alignment so add 8 to allow rounding up to 16 byte alignment
-        __cfaabi_dbg_debug_do( this->storage = memalign( pageSize, cxtSize + this->size + pageSize ) );
-        __cfaabi_dbg_no_debug_do( this->storage = malloc( cxtSize + this->size + 8 ) );
+        __cfaabi_dbg_debug_do( storage = memalign( pageSize, cxtSize + size + pageSize ) );
+        __cfaabi_dbg_no_debug_do( storage = malloc( cxtSize + size + 8 ) );
 
         __cfaabi_dbg_debug_do(
-            if ( mprotect( this->storage, pageSize, PROT_NONE ) == -1 ) {
+            if ( mprotect( storage, pageSize, PROT_NONE ) == -1 ) {
                 abortf( "(uMachContext &)%p.createContext() : internal error, mprotect failure, error(%d) %s.", this, (int)errno, strerror( (int)errno ) );
             } // if
         );
 
-        if ( (intptr_t)this->storage == 0 ) {
-            abortf( "Attempt to allocate %d bytes of storage for coroutine or task execution-state but insufficient memory available.", this->size );
+        if ( (intptr_t)storage == 0 ) {
+            abortf( "Attempt to allocate %d bytes of storage for coroutine or task execution-state but insufficient memory available.", size );
         } // if
 
-        __cfaabi_dbg_debug_do( this->limit = (char *)this->storage + pageSize );
-        __cfaabi_dbg_no_debug_do( this->limit = (char *)libCeiling( (unsigned long)this->storage, 16 ) ); // minimum alignment
+        __cfaabi_dbg_debug_do( limit = (char *)storage + pageSize );
+        __cfaabi_dbg_no_debug_do( limit = (char *)libCeiling( (unsigned long)storage, 16 ) ); // minimum alignment
 
     } else {
-        assertf( ((size_t)this->storage & (libAlign() - 1)) != 0ul, "Stack storage %p for task/coroutine must be aligned on %d byte boundary.", this->storage, (int)libAlign() );
-        this->userStack = true;
-        this->size = storageSize - cxtSize;
+        assertf( ((size_t)storage & (libAlign() - 1)) != 0ul, "Stack storage %p for task/coroutine must be aligned on %d byte boundary.", storage, (int)libAlign() );
+        userStack = true;
+        size = storageSize - cxtSize;
 
-        if ( this->size % 16 != 0u ) this->size -= 8;
+        if ( size % 16 != 0u ) size -= 8;
 
-        this->limit = (char *)libCeiling( (unsigned long)this->storage, 16 ); // minimum alignment
+        limit = (char *)libCeiling( (unsigned long)storage, 16 ); // minimum alignment
     } // if
-    assertf( this->size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", this->size, MinStackSize );
+    assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize );
 
-    this->base = (char *)this->limit + this->size;
-    this->context = this->base;
-    this->top = (char *)this->context + cxtSize;
+    base = (char *)limit + size;
+    context = base;
+    top = (char *)context + cxtSize;
 }
 
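create_stack receives its object through a pointer, so the clause dereferences it: with( *this ) opens the pointed-to coStack_t, turning this->storage, this->size, and the other fields into plain storage, size, and so on. A small sketch of the pointer-receiver variant, using a hypothetical buffer type:

    #include <stddef.h>

    struct buffer { char * data; size_t size; };     // hypothetical, for illustration only

    // pointer receiver: with( *this ) opens the pointed-to object,
    // so the former this->data / this->size become plain data / size
    void reset( buffer * this ) with( *this ) {
        data = NULL;
        size = 0;
    }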
src/libcfa/concurrency/kernel.c
r320eb73a r633a642

 }
 
-void ?{}( coStack_t & this, current_stack_info_t * info) {
-    this.size = info->size;
-    this.storage = info->storage;
-    this.limit = info->limit;
-    this.base = info->base;
-    this.context = info->context;
-    this.top = info->top;
-    this.userStack = true;
-}
-
-void ?{}( coroutine_desc & this, current_stack_info_t * info) {
-    (this.stack){ info };
-    this.name = "Main Thread";
-    this.errno_ = 0;
-    this.state = Start;
-    this.starter = NULL;
-}
-
-void ?{}( thread_desc & this, current_stack_info_t * info) {
-    (this.self_cor){ info };
+void ?{}( coStack_t & this, current_stack_info_t * info) with( this ) {
+    size = info->size;
+    storage = info->storage;
+    limit = info->limit;
+    base = info->base;
+    context = info->context;
+    top = info->top;
+    userStack = true;
+}
+
+void ?{}( coroutine_desc & this, current_stack_info_t * info) with( this ) {
+    stack{ info };
+    name = "Main Thread";
+    errno_ = 0;
+    state = Start;
+    starter = NULL;
+}
+
+void ?{}( thread_desc & this, current_stack_info_t * info) with( this ) {
+    self_cor{ info };
 }
 
…
 void ?{}(processor & this, cluster * cltr) {
     this.cltr = cltr;
-    (this.terminated){ 0 };
+    this.terminated{ 0 };
     this.do_terminate = false;
     this.preemption_alarm = NULL;
…
 void ?{}(processor & this, cluster * cltr, processorCtx_t & runner) {
     this.cltr = cltr;
-    (this.terminated){ 0 };
+    this.terminated{ 0 };
     this.do_terminate = false;
     this.preemption_alarm = NULL;
…
 }
 
-void ^?{}(processor & this) {
-    if( ! this.do_terminate ) {
+void ^?{}(processor & this) with( this ){
+    if( ! do_terminate ) {
         __cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);
-        this.do_terminate = true;
-        P( this.terminated );
-        pthread_join( this.kernel_thread, NULL );
-    }
-}
-
-void ?{}(cluster & this) {
-    (this.ready_queue){};
-    ( this.ready_queue_lock ){};
-
-    this.preemption = default_preemption();
+        do_terminate = true;
+        P( terminated );
+        pthread_join( kernel_thread, NULL );
+    }
+}
+
+void ?{}(cluster & this) with( this ) {
+    ready_queue{};
+    ready_queue_lock{};
+
+    preemption = default_preemption();
 
…
 // Once a thread has finished running, some of
 // its final actions must be executed from the kernel
-void finishRunning(processor * this) {
-    if( this->finish.action_code == Release ) {
+void finishRunning(processor * this) with( this->finish ) {
+    if( action_code == Release ) {
         verify( disable_preempt_count > 1 );
-        unlock( *this->finish.lock );
-    }
-    else if( this->finish.action_code == Schedule ) {
-        ScheduleThread( this->finish.thrd );
-    }
-    else if( this->finish.action_code == Release_Schedule ) {
+        unlock( *lock );
+    }
+    else if( action_code == Schedule ) {
+        ScheduleThread( thrd );
+    }
+    else if( action_code == Release_Schedule ) {
         verify( disable_preempt_count > 1 );
-        unlock( *this->finish.lock );
-        ScheduleThread( this->finish.thrd );
-    }
-    else if( this->finish.action_code == Release_Multi ) {
-        verify( disable_preempt_count > this->finish.lock_count );
-        for(int i = 0; i < this->finish.lock_count; i++) {
-            unlock( *this->finish.locks[i] );
+        unlock( *lock );
+        ScheduleThread( thrd );
+    }
+    else if( action_code == Release_Multi ) {
+        verify( disable_preempt_count > lock_count );
+        for(int i = 0; i < lock_count; i++) {
+            unlock( *locks[i] );
         }
     }
-    else if( this->finish.action_code == Release_Multi_Schedule ) {
-        verify( disable_preempt_count > this->finish.lock_count );
-        for(int i = 0; i < this->finish.lock_count; i++) {
-            unlock( *this->finish.locks[i] );
+    else if( action_code == Release_Multi_Schedule ) {
+        for(int i = 0; i < lock_count; i++) {
+            unlock( *locks[i] );
         }
-        for(int i = 0; i < this->finish.thrd_count; i++) {
-            ScheduleThread( this->finish.thrds[i] );
+        for(int i = 0; i < thrd_count; i++) {
+            ScheduleThread( thrds[i] );
         }
     }
     else {
-        assert( this->finish.action_code == No_Action);
+        assert(action_code == No_Action);
     }
 }
…
     verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );
 
-    lock( this_processor->cltr->ready_queue_lock __cfaabi_dbg_ctx2 );
-    append( this_processor->cltr->ready_queue, thrd );
-    unlock( this_processor->cltr->ready_queue_lock );
-
-    verify( disable_preempt_count > 0 );
-}
-
-thread_desc * nextThread(cluster * this) {
-    verify( disable_preempt_count > 0 );
-    lock( this->ready_queue_lock __cfaabi_dbg_ctx2 );
-    thread_desc * head = pop_head( this->ready_queue );
-    unlock( this->ready_queue_lock );
+    with( *this_processor->cltr ) {
+        lock ( ready_queue_lock __cfaabi_dbg_ctx2 );
+        append( ready_queue, thrd );
+        unlock( ready_queue_lock );
+    }
+
+    verify( disable_preempt_count > 0 );
+}
+
+thread_desc * nextThread(cluster * this) with( *this ) {
+    verify( disable_preempt_count > 0 );
+    lock( ready_queue_lock __cfaabi_dbg_ctx2 );
+    thread_desc * head = pop_head( ready_queue );
+    unlock( ready_queue_lock );
     verify( disable_preempt_count > 0 );
     return head;
…
     disable_interrupts();
     this_processor->finish.action_code = Release;
-    this_processor->finish.lock = lock;
+    this_processor->finish.lock = lock;
 
     verify( disable_preempt_count > 1 );
…
 
 void BlockInternal( thread_desc * thrd ) {
-    assert(thrd);
     disable_interrupts();
-    assert( thrd->self_cor.state != Halted );
     this_processor->finish.action_code = Schedule;
-    this_processor->finish.thrd = thrd;
+    this_processor->finish.thrd = thrd;
 
     verify( disable_preempt_count > 0 );
…
     disable_interrupts();
     this_processor->finish.action_code = Release_Schedule;
-    this_processor->finish.lock = lock;
-    this_processor->finish.thrd = thrd;
+    this_processor->finish.lock = lock;
+    this_processor->finish.thrd = thrd;
 
     verify( disable_preempt_count > 1 );
…
     disable_interrupts();
     this_processor->finish.action_code = Release_Multi;
-    this_processor->finish.locks = locks;
-    this_processor->finish.lock_count = count;
+    this_processor->finish.locks = locks;
+    this_processor->finish.lock_count = count;
 
     verify( disable_preempt_count > 0 );
…
     disable_interrupts();
     this_processor->finish.action_code = Release_Multi_Schedule;
-    this_processor->finish.locks = locks;
-    this_processor->finish.lock_count = lock_count;
-    this_processor->finish.thrds = thrds;
-    this_processor->finish.thrd_count = thrd_count;
+    this_processor->finish.locks = locks;
+    this_processor->finish.lock_count = lock_count;
+    this_processor->finish.thrds = thrds;
+    this_processor->finish.thrd_count = thrd_count;
 
     verify( disable_preempt_count > 0 );
…
     verify( disable_preempt_count > 0 );
     this_processor->finish.action_code = thrd ? Release_Schedule : Release;
-    this_processor->finish.lock = lock;
-    this_processor->finish.thrd = thrd;
+    this_processor->finish.lock = lock;
+    this_processor->finish.thrd = thrd;
 
     suspend();
…
 void ^?{}(semaphore & this) {}
 
-void P(semaphore & this) {
-    lock( this.lock __cfaabi_dbg_ctx2 );
-    this.count -= 1;
-    if ( this.count < 0 ) {
+void P(semaphore & this) with( this ){
+    lock( lock __cfaabi_dbg_ctx2 );
+    count -= 1;
+    if ( count < 0 ) {
         // queue current task
-        append( this.waiting, (thread_desc *)this_thread );
+        append( waiting, (thread_desc *)this_thread );
 
         // atomically release spin lock and block
-        BlockInternal( &this.lock );
+        BlockInternal( &lock );
     }
     else {
-        unlock( this.lock );
-    }
-}
-
-void V(semaphore & this) {
+        unlock( lock );
+    }
+}
+
+void V(semaphore & this) with( this ) {
     thread_desc * thrd = NULL;
-    lock( this.lock __cfaabi_dbg_ctx2 );
-    this.count += 1;
-    if ( this.count <= 0 ) {
+    lock( lock __cfaabi_dbg_ctx2 );
+    count += 1;
+    if ( count <= 0 ) {
         // remove task at head of waiting list
-        thrd = pop_head( this.waiting );
-    }
-
-    unlock( this.lock );
+        thrd = pop_head( waiting );
+    }
+
+    unlock( lock );
 
     // make new owner
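kernel.c also uses the two other shapes of the construct: a routine qualified with a nested member, with( this->finish ) in finishRunning, and the block-statement form, with( *this_processor->cltr ) { ... } in ScheduleThread, which opens an expression only for the enclosed braces. A short sketch of the statement form, with hypothetical names:

    struct stats { int hits, misses; };     // hypothetical aggregate, not from the changeset

    void record( stats & s, int hit ) {
        // statement form: the members of s are visible only inside this block
        with( s ) {
            if ( hit ) hits += 1;
            else       misses += 1;
        }
    }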
src/libcfa/concurrency/preemption.c
r320eb73a r633a642

 static pthread_t alarm_thread;  // pthread handle to alarm thread
 
-void ?{}(event_kernel_t & this) {
-    (this.alarms){};
-    (this.lock){};
+void ?{}(event_kernel_t & this) with( this ) {
+    alarms{};
+    lock{};
 }
 
…
 // If counter reaches 0, execute any pending CtxSwitch
 void enable_interrupts( __cfaabi_dbg_ctx_param ) {
-    processor * proc = this_processor; // Cache the processor now since interrupts can start happening after the atomic add
+    processor * proc = this_processor; // Cache the processor now since interrupts can start happening after the atomic add
     thread_desc * thrd = this_thread;  // Cache the thread now since interrupts can start happening after the atomic add
 
src/libcfa/concurrency/thread.c
r320eb73a r633a642

 // Thread ctors and dtors
 
-void ?{}(thread_desc& this) {
-    (this.self_cor){};
-    this.self_cor.name = "Anonymous Thread";
-    this.self_mon.owner = &this;
-    this.self_mon.recursion = 1;
-    this.self_mon_p = &this.self_mon;
-    this.next = NULL;
+void ?{}(thread_desc& this) with( this ) {
+    self_cor{};
+    self_cor.name = "Anonymous Coroutine";
+    self_mon.owner = &this;
+    self_mon.recursion = 1;
+    self_mon_p = &self_mon;
+    next = NULL;
     __cfaabi_dbg_debug_do(
-        this.dbg_next = NULL;
-        this.dbg_prev = NULL;
+        dbg_next = NULL;
+        dbg_prev = NULL;
         __cfaabi_dbg_thread_register(&this);
     )
 
-    (this.monitors){ &this.self_mon_p, 1, (fptr_t)0 };
+    monitors{ &self_mon_p, 1, (fptr_t)0 };
 }
 
-void ^?{}(thread_desc& this) {
-    __cfaabi_dbg_debug_do(
-        __cfaabi_dbg_thread_unregister(&this);
-    )
-    ^(this.self_cor){};
+void ^?{}(thread_desc& this) with( this ) {
+    ^self_cor{};
 }
 
 forall( dtype T | sized(T) | is_thread(T) | { void ?{}(T&); } )
-void ?{}( scoped(T)& this ) {
-    (this.handle){};
-    __thrd_start( this.handle);
+void ?{}( scoped(T)& this ) with( this ) {
+    handle{};
+    __thrd_start(handle);
 }
 
 forall( dtype T, ttype P | sized(T) | is_thread(T) | { void ?{}(T&, P); } )
-void ?{}( scoped(T)& this, P params ) {
-    (this.handle){ params };
-    __thrd_start( this.handle);
+void ?{}( scoped(T)& this, P params ) with( this ) {
+    handle{ params };
+    __thrd_start(handle);
 }
 
 forall( dtype T | sized(T) | is_thread(T) )
-void ^?{}( scoped(T)& this ) {
-    ^(this.handle){};
+void ^?{}( scoped(T)& this ) with( this ) {
+    ^handle{};
 }
 
…
 void __thrd_start( T& this ) {
     coroutine_desc* thrd_c = get_coroutine(this);
-    thread_desc *thrd_h = get_thread (this);
+    thread_desc * thrd_h = get_thread (this);
     thrd_c->last = this_coroutine;
 
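The thread.c hunks also shorten explicit member construction and destruction: (this.self_cor){} becomes self_cor{} and ^(this.self_cor){} becomes ^self_cor{} (?{} and ^?{} being Cforall's constructor and destructor operators). A minimal sketch of that pairing on a hypothetical wrapper type:

    struct resource { int id; };            // hypothetical member type
    struct wrapper  { resource res; };      // hypothetical owner type

    void ?{}( wrapper & this ) with( this ) {
        res{};            // construct the member; previously written (this.res){};
    }

    void ^?{}( wrapper & this ) with( this ) {
        ^res{};           // destroy the member; previously written ^(this.res){};
    }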
src/libcfa/interpose.h
r320eb73a r633a642

 #pragma once
 
-void * interpose_symbol( const char* symbol, ,const char *version );
+void * interpose_symbol( const char* symbol, const char *version );
 
 extern __typeof__( abort ) libc_abort __attribute__(( noreturn ));
src/tests/Makefile.am
r320eb73a r633a642

 
 concurrency :
-	@+python test.py --debug=${debug} ${concurrent} ${concurrent_test}
+	@+python test.py --debug=${debug} -Iconcurrent
 
 .dummy : .dummy.c @CFA_BINDIR@/@CFA_NAME@
src/tests/Makefile.in
r320eb73a r633a642

 
 concurrency :
-	@+python test.py --debug=${debug} ${concurrent} ${concurrent_test}
+	@+python test.py --debug=${debug} -Iconcurrent
 
 .dummy : .dummy.c @CFA_BINDIR@/@CFA_NAME@