- Timestamp:
- Mar 1, 2026, 5:47:54 PM (45 hours ago)
- Branches:
- master
- Children:
- 02e7483
- Parents:
- 8086004
- Location:
- libcfa/src/concurrency
- Files:
-
- 3 edited
-
coroutine.cfa (modified) (3 diffs)
-
coroutine.hfa (modified) (5 diffs)
-
preemption.cfa (modified) (3 diffs)
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/coroutine.cfa
// ---- libcfa/src/concurrency/coroutine.cfa : stack-introspection region ----

// Release the non-local exception object held by an ehm_cleanup record.
void ^?{}( ehm_cleanup & this ) { free( this.ex ); }

// Return the stack pointer of coroutine "cor".  When "cor" is the currently
// executing coroutine the hardware SP register is read directly; otherwise
// the SP saved in the coroutine's context at its last switch-out is used.
void * stack_pointer( coroutine$ * cor ) libcfa_public {
	if ( active_coroutine() == cor ) {				// accessing myself ?
		void * sp;									// use my current stack value
		#if defined( __i386__ )
		asm( "movl %%esp,%0" : "=m" (sp) : );
		#elif defined( __x86_64__ )
		asm( "movq %%rsp,%0" : "=m" (sp) : );
		#elif defined( __aarch64__ ) || defined( __arm64__ ) || defined( __arm_64__ )
		// __aarch64__ is the macro GCC/Clang predefine on ARM64 (Apple also
		// predefines __arm64__); the original __arm_64__ spelling is kept in
		// case some toolchain defines it, but alone it never matched and ARM64
		// builds fell through to the #error below.
		asm( "mov x9, sp; str x9,%0" : "=m" (sp) : : "x9" );
		#else
		#error Cforall : internal error, unsupported architecture
		#endif
		return sp;
	} else {										// accessing another coroutine
		return cor->context.SP;
	} // if
} // stack_pointer

// Convenience overload: stack pointer of the currently executing coroutine.
void * stack_pointer() libcfa_public { return stack_pointer( active_coroutine() ); }

#define xstr(s) str(s)
#define str(s) #s
#define STACK_ERROR 4									// KiB reserved so abort/backtrace can still run
#define STACK_WARNING 16								// KiB remaining that triggers a runtime warning

// Check coroutine "cor"'s stack pointer against its stack limits:
// abort on overflow (below the safe limit) or underflow (above the base),
// and print a warning when within STACK_WARNING KiB of the limit.
void stack_verify( coroutine$ * cor ) libcfa_public {
	void * sp = stack_pointer( cor );				// optimizations
	struct __stack_t * cor_stack = __get_stack( cor );
	// space needed for printing abort message and backtrace
	void * safelimit = (void *)((char *)cor_stack->limit + STACK_ERROR * 1024);

	if ( sp < safelimit ) {							// must leave stack space to call abort
		abort( "Stack overflow detected: stack pointer %p below safe limit %p.\n"
			   "Possible cause is allocation of large stack frame(s) and/or deep call stack.",
			   sp, safelimit );
	} else if ( sp < (void *)((char *)cor_stack->limit + STACK_WARNING * 1024) ) { // close to limit: warn but continue
		#define STACK_WARNING_MSG "Cforall Runtime warning : within " xstr(STACK_WARNING) "K of stack limit.\n"
		__cfaabi_bits_write( STDERR_FILENO, STACK_WARNING_MSG, sizeof( STACK_WARNING_MSG ) - 1 );
	} else if ( sp > cor_stack->base ) {
		abort( "Stack underflow detected: stack pointer %p above base %p.\n"
			   "Possible cause is corrupted stack frame via overwriting memory.",
			   sp, cor_stack->base );
	} // if
} // stack_verify

// Convenience overload: verify the currently executing coroutine's stack.
// NOTE: "return <void expression>" (as in the original) is an ISO C
// constraint violation in a void function; the return keyword is dropped.
void stack_verify() libcfa_public { stack_verify( active_coroutine() ); }
libcfa/src/concurrency/coroutine.hfa
r8086004 r0957f62 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Apr 25 06:52:04 202513 // Update Count : 1512 // Last Modified On : Sun Mar 1 17:44:11 2026 13 // Update Count : 43 14 14 // 15 15 … … 98 98 } 99 99 100 void stack_verify( coroutine$ * cor ); 101 void stack_verify(); 102 100 103 // Private wrappers for context switch and stack creation 101 // Wrapper for co102 104 static inline void $ctx_switch( coroutine$ * src, coroutine$ * dst ) __attribute__((nonnull (1, 2))) { 103 105 // set state of current coroutine to inactive … … 110 112 /* paranoid */ verify( !athrd->corctx_flag ); 111 113 athrd->corctx_flag = true; 114 115 #if defined( __CFA_DEBUG__ ) 116 stack_verify( src ); // test on front side of context switch, backside is too late. 117 #endif // __CFA_DEBUG__ 112 118 113 119 // set new coroutine that task is executing … … 225 231 226 232 // non local ehm and coroutine utility routines 233 void * stack_pointer( coroutine$ * cor ); 234 void * stack_pointer(); 227 235 void enable_ehm(); 228 236 void disable_ehm(); … … 234 242 235 243 forall(T & | is_coroutine(T)) { 244 void * stack_pointer( T & cor ); 245 void stack_verify( T & cor ); 236 246 void enable_ehm( T & cor ); // enable checking non-local exceptions for cor via checked_poll 237 247 void disable_ehm( T & cor ); // disable checking non-local exceptions for cor via checked_poll -
libcfa/src/concurrency/preemption.cfa
r8086004 r0957f62 10 10 // Created On : Mon Jun 5 14:20:42 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Apr 25 07:24:39 202513 // Update Count : 6 312 // Last Modified On : Sun Mar 1 10:00:18 2026 13 // Update Count : 68 14 14 // 15 15 … … 571 571 __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", __cfaabi_tls.this_processor, __cfaabi_tls.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) ); 572 572 573 #if defined( __CFA_DEBUG__ ) 574 stack_verify(); // good place to check for stack overflow 575 #endif // __CFA_DEBUG__ 576 573 577 // Sync flag : prevent recursive calls to the signal handler 574 578 __cfaabi_tls.preemption_state.in_progress = true; … … 591 595 #endif 592 596 593 force_yield( __ALARM_PREEMPTION ); // Do the actual __cfactx_switch597 force_yield( __ALARM_PREEMPTION ); // Do the actual __cfactx_switch 594 598 } 595 599
Note:
See TracChangeset
for help on using the changeset viewer.