- Timestamp:
- Nov 4, 2020, 2:56:30 PM (3 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- eeb5023
- Parents:
- 4b30e8cc (diff), a3f5208a (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location:
- libcfa
- Files:
- 1 added
- 22 edited
Legend for the diffs below:
- removed line (relative to parent r4b30e8cc)
+ added line (in rc28ea4e)
… omitted unchanged lines
libcfa/prelude/builtins.c
r4b30e8cc → rc28ea4e

  // Author           : Peter A. Buhr
  // Created On       : Fri Jul 21 16:21:03 2017
- // Last Modified By : Peter A. Buhr
- // Last Modified On : Fri Oct  9 18:26:19 2020
- // Update Count     : 110
+ // Last Modified By : Andrew Beach
+ // Last Modified On : Tue Oct 27 14:42:00 2020
+ // Update Count     : 111
  //

+ #define __cforall_builtins__
+
  // type that wraps a pointer and a destructor-like function - used in generating implicit destructor calls for struct members in user-defined functions
libcfa/src/concurrency/clib/cfathread.cfa
r4b30e8cc → rc28ea4e

  void cfathread_setproccnt( int ncnt ) {
      assert( ncnt >= 1 );
-     adelete( proc_cnt, procs );
+     adelete( procs );

      proc_cnt = ncnt - 1;
libcfa/src/concurrency/coroutine.cfa
r4b30e8cc → rc28ea4e

  // Created On       : Mon Nov 28 12:27:26 2016
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Tue May 26 22:06:09 2020
- // Update Count     : 21
+ // Last Modified On : Fri Oct 23 23:05:24 2020
+ // Update Count     : 22
  //
…
  #include <unistd.h>
  #include <sys/mman.h>                 // mprotect
- extern "C" {
- // use this define to make unwind.h play nice, definitely a hack
- #define HIDE_EXPORTS
  #include <unwind.h>
- #undef HIDE_EXPORTS
- }

  #include "kernel_private.hfa"
+ #include "exception.hfa"

  #define __CFA_INVOKE_PRIVATE__
…
  FORALL_DATA_INSTANCE(CoroutineCancelled, (dtype coroutine_t), (coroutine_t))

- struct __cfaehm_node {
-     struct _Unwind_Exception unwind_exception;
-     struct __cfaehm_node * next;
-     int handler_index;
- };
-
  forall(dtype T)
  void mark_exception(CoroutineCancelled(T) *) {}
…
  forall(dtype T)
  void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) {
+     dst->virtual_table = src->virtual_table;
      dst->the_coroutine = src->the_coroutine;
      dst->the_exception = src->the_exception;
…
      verify( desc->cancellation );
      desc->state = Cancelled;
-     exception_t * except = (exception_t *)(1 + (__cfaehm_node *)desc->cancellation);
+     exception_t * except = __cfaehm_cancellation_exception( desc->cancellation );

      // TODO: Remove explitate vtable set once trac#186 is fixed.
…

  // minimum feasible stack size in bytes
- #define MinStackSize 1000
+ static const size_t MinStackSize = 1000;
  extern size_t __page_size;            // architecture pagesize HACK, should go in proper runtime singleton
…
          size = libFloor(create_size - stack_data_size - diff, libAlign());
      } // if
-     assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize );
+     assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %zd bytes for a stack.", size, MinStackSize );

      this->storage = (__stack_t *)((intptr_t)storage + size);
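The assertf change above follows directly from the type change: once MinStackSize is a size_t constant rather than an untyped macro, printing it with %d is a format/argument mismatch on 64-bit targets. A minimal C sketch of the distinction (variable values are illustrative only):

    #include <stdio.h>

    int main(void) {
        static const size_t MinStackSize = 1000;  /* typed constant, as in the new code */
        size_t size = 4096;
        /* %zu/%zd match size_t/ssize_t; %d with a size_t argument is undefined
           behaviour on LP64, which is why the assertf format string changed too. */
        printf( "stack %zu >= minimum %zu\n", size, MinStackSize );
        return 0;
    }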
libcfa/src/concurrency/exception.cfa
r4b30e8cc → rc28ea4e

  // Created On       : Mon Aug 17 10:41:00 2020
  // Last Modified By : Andrew Beach
- // Last Modified On : Tue Aug 25 14:41:00 2020
- // Update Count     : 0
+ // Last Modified On : Wed Oct 28 14:34:00 2020
+ // Update Count     : 1
  //

- extern "C" {
- // use this define to make unwind.h play nice, definitely a hack
- #define HIDE_EXPORTS
- #include <unwind.h>
- #undef HIDE_EXPORTS
- }
+ #define __cforall_thread__

- #include "invoke.h"
  #include "exception.hfa"
+
  #include "coroutine.hfa"

  extern struct $thread * mainThread;
+ extern "C" {
+     extern void __cfactx_thrd_leave();
+ }

  // Common pattern for all the stop functions, wait until the end then act.
…

  STOP_AT_END_FUNCTION(thread_cancelstop,
-     // TODO: Instead pass information to the joiner.
-     abort();
+     __cfactx_thrd_leave();
+     __cabi_abort( "Resumed cancelled thread" );
  )
…
          stop_param = (void *)0x22;
      } else {
+         this_thread->self_cor.cancellation = unwind_exception;
+
          stop_func = thread_cancelstop;
          stop_param = this_thread;
libcfa/src/concurrency/exception.hfa
r4b30e8cc → rc28ea4e

  #pragma once

+ // This is an internal bridge between the two modes and must be C compatable.
+
+ #include <unwind.h>
  #include "bits/defs.hfa"
  #include "invoke.h"
+ #include "exception.h"

  #ifdef __cforall
  extern "C" {
-
- #define HIDE_EXPORTS
  #endif
- #include "unwind.h"

  struct exception_context_t * this_exception_context(void) OPTIONAL_THREAD;
…

  #ifdef __cforall
- #undef HIDE_EXPORTS
  }
  #endif
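The header now acts as a plain C bridge: instead of the HIDE_EXPORTS hack around <unwind.h>, the Cforall side simply wraps the shared declarations in extern "C". A hedged sketch of the guard pattern (the file and function names here are hypothetical):

    /* bridge.h - illustration of the C/Cforall bridge pattern */
    #pragma once

    #ifdef __cforall
    extern "C" {
    #endif

    /* Declarations here are compiled by both plain C and Cforall and always
       get C linkage, so the two sides of the runtime can call each other. */
    void shared_runtime_call(void);

    #ifdef __cforall
    }
    #endif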
libcfa/src/concurrency/invoke.h
r4b30e8cc → rc28ea4e

      // current execution status for coroutine
+     // Possible values are:
+     // -1 : TICKET_BLOCKED : thread is blocked
+     //  0 : TICKET_RUNNING : thread is running
+     //  1 : TICKET_UNBLOCK : thread should ignore next block
      volatile int ticket;
      enum __Coroutine_State state:8;
libcfa/src/concurrency/io/call.cfa.in
r4b30e8cc → rc28ea4e

  #include "kernel/fwd.hfa"

- #if defined(CFA_HAVE_IOSQE_FIXED_FILE) && defined(CFA_HAVE_IOSQE_IO_DRAIN) && defined(CFA_HAVE_IOSQE_ASYNC)
-     #define REGULAR_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_DRAIN | IOSQE_ASYNC)
- #elif defined(CFA_HAVE_IOSQE_FIXED_FILE) && defined(CFA_HAVE_IOSQE_ASYNC)
-     #define REGULAR_FLAGS (IOSQE_FIXED_FILE | IOSQE_ASYNC)
- #elif defined(CFA_HAVE_IOSQE_FIXED_FILE) && defined(CFA_HAVE_IOSQE_IO_DRAIN)
-     #define REGULAR_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_DRAIN)
- #elif defined(CFA_HAVE_IOSQE_IO_DRAIN) && defined(CFA_HAVE_IOSQE_ASYNC)
-     #define REGULAR_FLAGS (IOSQE_IO_DRAIN | IOSQE_ASYNC)
- #elif defined(CFA_HAVE_IOSQE_FIXED_FILE)
-     #define REGULAR_FLAGS (IOSQE_FIXED_FILE)
- #elif defined(CFA_HAVE_IOSQE_IO_DRAIN)
-     #define REGULAR_FLAGS (IOSQE_IO_DRAIN)
- #elif defined(CFA_HAVE_IOSQE_ASYNC)
-     #define REGULAR_FLAGS (IOSQE_ASYNC)
- #else
-     #define REGULAR_FLAGS (0)
- #endif
-
- #if defined(CFA_HAVE_IOSQE_IO_LINK) && defined(CFA_HAVE_IOSQE_IO_HARDLINK)
-     #define LINK_FLAGS (IOSQE_IO_LINK | IOSQE_IO_HARDLINK)
- #elif defined(CFA_HAVE_IOSQE_IO_LINK)
-     #define LINK_FLAGS (IOSQE_IO_LINK)
- #elif defined(CFA_HAVE_IOSQE_IO_HARDLINK)
-     #define LINK_FLAGS (IOSQE_IO_HARDLINK)
- #else
-     #define LINK_FLAGS (0)
- #endif
-
- #if defined(CFA_HAVE_SPLICE_F_FD_IN_FIXED)
-     #define SPLICE_FLAGS (SPLICE_F_FD_IN_FIXED)
- #else
-     #define SPLICE_FLAGS (0)
- #endif
+ static const __u8 REGULAR_FLAGS = 0
+     #if defined(CFA_HAVE_IOSQE_FIXED_FILE)
+         | IOSQE_FIXED_FILE
+     #endif
+     #if defined(CFA_HAVE_IOSQE_IO_DRAIN)
+         | IOSQE_IO_DRAIN
+     #endif
+     #if defined(CFA_HAVE_IOSQE_ASYNC)
+         | IOSQE_ASYNC
+     #endif
+ ;
+
+ static const __u32 LINK_FLAGS = 0
+     #if defined(CFA_HAVE_IOSQE_IO_LINK)
+         | IOSQE_IO_LINK
+     #endif
+     #if defined(CFA_HAVE_IOSQE_IO_HARDLINK)
+         | IOSQE_IO_HARDLINK
+     #endif
+ ;
+
+ static const __u32 SPLICE_FLAGS = 0
+     #if defined(CFA_HAVE_SPLICE_F_FD_IN_FIXED)
+         | SPLICE_F_FD_IN_FIXED
+     #endif
+ ;

  extern [* struct io_uring_sqe, __u32] __submit_alloc( struct __io_data & ring, __u64 data );
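The rewrite replaces a 2^n-way #elif ladder with one additive initializer: each feature test contributes its own `| FLAG` term, so adding a fourth optional flag costs three lines instead of doubling the branch count. A compilable C sketch of the pattern, using made-up HAVE_*/FLAG_* names:

    #include <stdio.h>

    enum { FLAG_A = 1, FLAG_B = 2, FLAG_C = 4 };
    #define HAVE_FLAG_A
    #define HAVE_FLAG_C

    /* Each configured flag ORs itself into a single typed constant. */
    static const unsigned REGULAR_FLAGS = 0
    #ifdef HAVE_FLAG_A
        | FLAG_A
    #endif
    #ifdef HAVE_FLAG_B
        | FLAG_B
    #endif
    #ifdef HAVE_FLAG_C
        | FLAG_C
    #endif
    ;

    int main(void) {
        printf( "REGULAR_FLAGS = %u\n", REGULAR_FLAGS );  /* prints 5 here */
        return 0;
    }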
libcfa/src/concurrency/io/setup.cfa
r4b30e8cc → rc28ea4e

      id.full_proc = false;
      id.id = doregister(&id);
+     kernelTLS.this_proc_id = &id;
      __cfaabi_dbg_print_safe( "Kernel : IO poller thread starting\n" );
…
          kernelTLS.this_stats = io_ctx->self.curr_cluster->stats;
      #endif
-     __post( io_ctx->sem, &id );
+     post( io_ctx->sem );
      }
  }
…
      if( thrd.state == Ready || thrd.preempted != __NO_PREEMPTION ) {

-         ready_schedule_lock( (struct __processor_id_t *)active_processor() );
+         ready_schedule_lock();

          // This is the tricky case
…
          // Fixup the thread state
          thrd.state = Blocked;
-         thrd.ticket = 0;
+         thrd.ticket = TICKET_BLOCKED;
          thrd.preempted = __NO_PREEMPTION;

-         ready_schedule_unlock( (struct __processor_id_t *)active_processor() );
+         ready_schedule_unlock();

          // Pretend like the thread was blocked all along
…
          }
      } else {
-         unpark( &thrd );
+         post( this.thrd.sem );
      }
libcfa/src/concurrency/kernel.cfa
r4b30e8cc → rc28ea4e

  static $thread * __next_thread_slow(cluster * this);
  static void __run_thread(processor * this, $thread * dst);
- static void __wake_one( struct __processor_id_t * id, cluster * cltr);
+ static void __wake_one(cluster * cltr);

  static void push  (__cluster_idles & idles, processor & proc);
…
      /* paranoid */ verify( kernelTLS.this_thread == thrd_dst );
      /* paranoid */ verify( thrd_dst->context.SP );
+     /* paranoid */ verify( thrd_dst->state != Halted );
      /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
      /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
…
      if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
          // The thread was preempted, reschedule it and reset the flag
-         __schedule_thread( (__processor_id_t*)this, thrd_dst );
+         __schedule_thread( thrd_dst );
          break RUNNING;
      }
…
      if(unlikely(thrd_dst->state == Halted)) {
          // The thread has halted, it should never be scheduled/run again
-         // We may need to wake someone up here since
-         unpark( this->destroyer );
-         this->destroyer = 0p;
+         // finish the thread
+         __thread_finish( thrd_dst );
          break RUNNING;
      }
…
      int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
      switch(old_ticket) {
-         case 1:
+         case TICKET_RUNNING:
              // This is case 1, the regular case, nothing more is needed
              break RUNNING;
-         case 2:
+         case TICKET_UNBLOCK:
              // This is case 2, the racy case, someone tried to run this thread before it finished blocking
              // In this case, just run it again.
…
  // Scheduler routines
  // KERNEL ONLY
- void __schedule_thread( struct __processor_id_t * id, $thread * thrd ) {
+ void __schedule_thread( $thread * thrd ) {
      /* paranoid */ verify( thrd );
      /* paranoid */ verify( thrd->state != Halted );
      /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+     /* paranoid */ verify( kernelTLS.this_proc_id );
      /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
      /* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
…
      if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

-     ready_schedule_lock  ( id );
+     ready_schedule_lock();
      push( thrd->curr_cluster, thrd );
-     __wake_one( id, thrd->curr_cluster);
-     ready_schedule_unlock( id );
+     __wake_one(thrd->curr_cluster);
+     ready_schedule_unlock();

      /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
…
  // KERNEL ONLY
  static inline $thread * __next_thread(cluster * this) with( *this ) {
-     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-
-     ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
+     /* paranoid */ verify( kernelTLS.this_proc_id );
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+     ready_schedule_lock();
      $thread * thrd = pop( this );
-     ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
-
-     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+     ready_schedule_unlock();
+
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+     /* paranoid */ verify( kernelTLS.this_proc_id );
      return thrd;
  }
…
  // KERNEL ONLY
  static inline $thread * __next_thread_slow(cluster * this) with( *this ) {
-     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-
-     ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
+     /* paranoid */ verify( kernelTLS.this_proc_id );
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+     ready_schedule_lock();
      $thread * thrd = pop_slow( this );
-     ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
-
-     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+     ready_schedule_unlock();
+
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+     /* paranoid */ verify( kernelTLS.this_proc_id );
      return thrd;
  }

- // KERNEL ONLY unpark with out disabling interrupts
- void __unpark( struct __processor_id_t * id, $thread * thrd ) {
+ void unpark( $thread * thrd ) {
+     if( !thrd ) return;
+
+     /* paranoid */ verify( kernelTLS.this_proc_id );
+     bool full = kernelTLS.this_proc_id->full_proc;
+     if(full) disable_interrupts();
+
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
      int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
      switch(old_ticket) {
-         case 1:
+         case TICKET_RUNNING:
              // Wake won the race, the thread will reschedule/rerun itself
              break;
-         case 0:
+         case TICKET_BLOCKED:
              /* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
              /* paranoid */ verify( thrd->state == Blocked );

              // Wake lost the race,
-             __schedule_thread( id, thrd );
+             __schedule_thread( thrd );
              break;
          default:
              // This makes no sense, something is wrong abort
-             abort();
-     }
- }
-
- void unpark( $thread * thrd ) {
-     if( !thrd ) return;
-
-     disable_interrupts();
-     __unpark( (__processor_id_t*)kernelTLS.this_processor, thrd );
-     enable_interrupts( __cfaabi_dbg_ctx );
+             abort("Thread %p (%s) has mismatch park/unpark\n", thrd, thrd->self_cor.name);
+     }
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+     if(full) enable_interrupts( __cfaabi_dbg_ctx );
+     /* paranoid */ verify( kernelTLS.this_proc_id );
  }
…
  }

- // KERNEL ONLY
- void __leave_thread() {
-     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-     returnToKernel();
-     abort();
+ extern "C" {
+     // Leave the thread monitor
+     // last routine called by a thread.
+     // Should never return
+     void __cfactx_thrd_leave() {
+         $thread * thrd = TL_GET( this_thread );
+         $monitor * this = &thrd->self_mon;
+
+         // Lock the monitor now
+         lock( this->lock __cfaabi_dbg_ctx2 );
+
+         disable_interrupts();
+
+         thrd->state = Halted;
+         if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
+         if( thrd != this->owner || this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }
+
+         // Leave the thread
+         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+         returnToKernel();
+         abort();
+
+         // Control flow should never reach here!
+     }
  }
…
  //=============================================================================================
  // Wake a thread from the front if there are any
- static void __wake_one( struct __processor_id_t * id, cluster * this) {
-     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-     /* paranoid */ verify( ready_schedule_islocked( id ) );
+ static void __wake_one(cluster * this) {
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+     /* paranoid */ verify( ready_schedule_islocked() );

      // Check if there is a sleeping processor
…
      #endif

-     /* paranoid */ verify( ready_schedule_islocked( id ) );
+     /* paranoid */ verify( ready_schedule_islocked() );
      /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
…
          this.print_halts = true;
      }
+
+     void print_stats_now( cluster & this, int flags ) {
+         __print_stats( this.stats, this.print_stats, true, this.name, (void*)&this );
+     }
  #endif
  // Local Variables: //
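The TICKET_* renaming above makes the park/unpark handshake explicit: the blocking side atomically decrements the ticket and the waking side increments it, and whichever side observes the other's value resolves the race. A standalone C11 sketch of that protocol (the function bodies are stubs, not CFA's actual scheduler code):

    #include <stdatomic.h>

    enum { TICKET_BLOCKED = -1, TICKET_RUNNING = 0, TICKET_UNBLOCK = 1 };

    /* Blocking side: mirrors the fetch-sub in __run_thread above. */
    void park_side( atomic_int * ticket ) {
        int old = atomic_fetch_sub( ticket, 1 );
        if ( old == TICKET_UNBLOCK ) {
            /* unpark arrived first: skip blocking and keep running */
        } else {
            /* old == TICKET_RUNNING: really block until rescheduled */
        }
    }

    /* Waking side: mirrors the fetch-add in unpark above. */
    void unpark_side( atomic_int * ticket ) {
        int old = atomic_fetch_add( ticket, 1 );
        if ( old == TICKET_BLOCKED ) {
            /* wake lost the race to the block: reschedule the thread */
        }
        /* old == TICKET_RUNNING: wake won; the parking thread sees UNBLOCK */
    }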
libcfa/src/concurrency/kernel.hfa
r4b30e8cc → rc28ea4e

      // Handle to pthreads
      pthread_t kernel_thread;
-
-     // RunThread data
-     // Action to do after a thread is ran
-     $thread * destroyer;

      // Preemption data
libcfa/src/concurrency/kernel/fwd.hfa
r4b30e8cc → rc28ea4e

  extern "Cforall" {
      extern __attribute__((aligned(128))) thread_local struct KernelThreadData {
-         struct $thread          * volatile this_thread;
-         struct processor        * volatile this_processor;
-         struct __stats_t        * volatile this_stats;
+         struct $thread          * volatile this_thread;
+         struct processor        * volatile this_processor;
+         struct __processor_id_t * volatile this_proc_id;
+         struct __stats_t        * volatile this_stats;

          struct {
libcfa/src/concurrency/kernel/startup.cfa
r4b30e8cc → rc28ea4e

      NULL,
      NULL,
+     NULL,
      { 1, false, false },
  };
…
      //initialize the global state variables
      kernelTLS.this_processor = mainProcessor;
+     kernelTLS.this_proc_id = (__processor_id_t*)mainProcessor;
      kernelTLS.this_thread = mainThread;
…
      // Add the main thread to the ready queue
      // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
-     __schedule_thread((__processor_id_t *)mainProcessor, mainThread);
+     __schedule_thread(mainThread);

      // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
…
      processor * proc = (processor *) arg;
      kernelTLS.this_processor = proc;
+     kernelTLS.this_proc_id = (__processor_id_t*)proc;
      kernelTLS.this_thread = 0p;
      kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
…
  static void ?{}( $thread & this, current_stack_info_t * info) with( this ) {
-     ticket = 1;
+     ticket = TICKET_RUNNING;
      state = Start;
      self_cor{ info };
…
      this.cltr = &_cltr;
      full_proc = true;
-     destroyer = 0p;
      do_terminate = false;
      preemption_alarm = 0p;
libcfa/src/concurrency/kernel_private.hfa
r4b30e8cc → rc28ea4e

  }

- void __schedule_thread( struct __processor_id_t *, $thread * )
+ void __schedule_thread( $thread * )
  #if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
-     __attribute__((nonnull (2)))
+     __attribute__((nonnull (1)))
  #endif
  ;

- // Block current thread and release/wake-up the following resources
- void __leave_thread() __attribute__((noreturn));
+ //release/wake-up the following resources
+ void __thread_finish( $thread * thrd );

  //-----------------------------------------------------------------------------
…
  )

- // KERNEL ONLY unpark with out disabling interrupts
- void __unpark( struct __processor_id_t *, $thread * thrd );
-
- static inline bool __post(single_sem & this, struct __processor_id_t * id) {
-     for() {
-         struct $thread * expected = this.ptr;
-         if(expected == 1p) return false;
-         if(expected == 0p) {
-             if(__atomic_compare_exchange_n(&this.ptr, &expected, 1p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-                 return false;
-             }
-         }
-         else {
-             if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-                 __unpark( id, expected );
-                 return true;
-             }
-         }
-     }
- }
+ #define TICKET_BLOCKED (-1) // thread is blocked
+ #define TICKET_RUNNING ( 0) // thread is running
+ #define TICKET_UNBLOCK ( 1) // thread should ignore next block

  //-----------------------------------------------------------------------------
…
  // Reader side : acquire when using the ready queue to schedule but not
  // creating/destroying queues
- static inline void ready_schedule_lock( struct __processor_id_t * proc) with(*__scheduler_lock) {
-     unsigned iproc = proc->id;
-     /*paranoid*/ verify(data[iproc].handle == proc);
+ static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
+     /*paranoid*/ verify( kernelTLS.this_proc_id );
+
+     unsigned iproc = kernelTLS.this_proc_id->id;
+     /*paranoid*/ verify(data[iproc].handle == kernelTLS.this_proc_id);
      /*paranoid*/ verify(iproc < ready);
…
  }

- static inline void ready_schedule_unlock( struct __processor_id_t * proc) with(*__scheduler_lock) {
-     unsigned iproc = proc->id;
-     /*paranoid*/ verify(data[iproc].handle == proc);
+ static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
+     /*paranoid*/ verify( kernelTLS.this_proc_id );
+
+     unsigned iproc = kernelTLS.this_proc_id->id;
+     /*paranoid*/ verify(data[iproc].handle == kernelTLS.this_proc_id);
      /*paranoid*/ verify(iproc < ready);
      /*paranoid*/ verify(data[iproc].lock);
…

  #ifdef __CFA_WITH_VERIFY__
- static inline bool ready_schedule_islocked( struct __processor_id_t * proc) {
+ static inline bool ready_schedule_islocked(void) {
+     /*paranoid*/ verify( kernelTLS.this_proc_id );
+     __processor_id_t * proc = kernelTLS.this_proc_id;
      return __scheduler_lock->data[proc->id].owned;
  }
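The lock API change is the same refactor seen throughout this merge: the processor id that every caller used to thread through as an argument now lives in kernelTLS.this_proc_id, so the reader-lock entry points take no parameters. A hypothetical C11 sketch of the shape of that refactor (names simplified):

    #include <assert.h>

    struct proc_id { unsigned id; };
    /* set once at processor start-up, read implicitly everywhere after */
    static _Thread_local struct proc_id * this_proc_id;

    void ready_schedule_lock( void ) {
        assert( this_proc_id );           /* analogous to the verify() calls above */
        unsigned iproc = this_proc_id->id; /* was: passed in by every caller */
        /* ... acquire reader slot iproc ... */
        (void)iproc;
    }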
libcfa/src/concurrency/monitor.cfa
r4b30e8cc → rc28ea4e

  }

- extern "C" {
-     // Leave the thread monitor
-     // last routine called by a thread.
-     // Should never return
-     void __cfactx_thrd_leave() {
-         $thread * thrd = TL_GET( this_thread );
-         $monitor * this = &thrd->self_mon;
-
-         // Lock the monitor now
-         lock( this->lock __cfaabi_dbg_ctx2 );
-
-         disable_interrupts();
-
-         thrd->state = Halted;
-
-         /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
-
-         // Leaving a recursion level, decrement the counter
-         this->recursion -= 1;
-
-         // If we haven't left the last level of recursion
-         // it must mean there is an error
-         if( this->recursion != 0) { abort( "Thread internal monitor has unbalanced recursion" ); }
-
-         // Fetch the next thread, can be null
-         $thread * new_owner = next_thread( this );
-
-         // Release the monitor lock
-         unlock( this->lock );
-
-         // Unpark the next owner if needed
-         /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
-         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-         /* paranoid */ verify( ! kernelTLS.this_processor->destroyer );
-         /* paranoid */ verify( thrd->state == Halted );
-
-         kernelTLS.this_processor->destroyer = new_owner;
-
-         // Leave the thread
-         __leave_thread();
-
-         // Control flow should never reach here!
-     }
- }
-
- // Join a thread
- forall( dtype T | is_thread(T) )
- T & join( T & this ) {
-     $monitor * m = get_monitor(this);
-     void (*dtor)(T& mutex this) = ^?{};
-     monitor_dtor_guard_t __guard = { &m, (fptr_t)dtor, true };
-     {
-         return this;
-     }
+ void __thread_finish( $thread * thrd ) {
+     $monitor * this = &thrd->self_mon;
+
+     // Lock the monitor now
+     /* paranoid */ verify( this->lock.lock );
+     /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+     /* paranoid */ verify( thrd->state == Halted );
+     /* paranoid */ verify( this->recursion == 1 );
+
+     // Leaving a recursion level, decrement the counter
+     this->recursion -= 1;
+     this->owner = 0p;
+
+     // Fetch the next thread, can be null
+     $thread * new_owner = next_thread( this );
+
+     // Release the monitor lock
+     unlock( this->lock );
+
+     // Unpark the next owner if needed
+     /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+     /* paranoid */ verify( thrd->state == Halted );
+     unpark( new_owner );
  }
libcfa/src/concurrency/preemption.cfa
r4b30e8cc → rc28ea4e

  // FwdDeclarations : timeout handlers
  static void preempt( processor   * this );
- static void timeout( struct __processor_id_t * id, $thread * this );
+ static void timeout( $thread * this );

  // FwdDeclarations : Signal handlers
…

  // Tick one frame of the Discrete Event Simulation for alarms
- static void tick_preemption( struct __processor_id_t * id ) {
+ static void tick_preemption(void) {
      alarm_node_t * node = 0p;                      // Used in the while loop but cannot be declared in the while condition
      alarm_list_t * alarms = &event_kernel->alarms; // Local copy for ease of reading
…
      }
      else if( node->type == User ) {
-         timeout( id, node->thrd );
+         timeout( node->thrd );
      }
      else {
          bool unpark_thd = node->callback(*node);
-         if (unpark_thd) timeout( id, node->thrd );
+         if (unpark_thd) timeout( node->thrd );
      }
…

  // reserved for future use
- static void timeout( struct __processor_id_t * id, $thread * this ) {
+ static void timeout( $thread * this ) {
      #if !defined( __CFA_NO_STATISTICS__ )
          kernelTLS.this_stats = this->curr_cluster->stats;
      #endif
-     __unpark( id, this );
+     unpark( this );
  }
…
      id.full_proc = false;
      id.id = doregister(&id);
+     kernelTLS.this_proc_id = &id;

      // Block sigalrms to control when they arrive
…
          // __cfaabi_dbg_print_safe( "Kernel : Preemption thread tick\n" );
          lock( event_kernel->lock __cfaabi_dbg_ctx2 );
-         tick_preemption( &id );
+         tick_preemption();
          unlock( event_kernel->lock );
          break;
libcfa/src/concurrency/snzi.hfa
r4b30e8cc → rc28ea4e

  static inline void depart( __snzi_node_t & );

- #define __snzi_half -1
+ static const int __snzi_half = -1;

  //--------------------------------------------------
libcfa/src/concurrency/thread.cfa
r4b30e8cc → rc28ea4e

  #include "kernel_private.hfa"
+ #include "exception.hfa"

  #define __CFA_INVOKE_PRIVATE__
…
      context{ 0p, 0p };
      self_cor{ name, storage, storageSize };
-     ticket = 1;
+     ticket = TICKET_RUNNING;
      state = Start;
      preempted = __NO_PREEMPTION;
…
  }

+ FORALL_DATA_INSTANCE(ThreadCancelled, (dtype thread_t), (thread_t))
+
+ forall(dtype T)
+ void copy(ThreadCancelled(T) * dst, ThreadCancelled(T) * src) {
+     dst->virtual_table = src->virtual_table;
+     dst->the_thread = src->the_thread;
+     dst->the_exception = src->the_exception;
+ }
+
+ forall(dtype T)
+ const char * msg(ThreadCancelled(T) *) {
+     return "ThreadCancelled";
+ }
+
+ forall(dtype T)
+ static void default_thread_cancel_handler(ThreadCancelled(T) & ) {
+     abort( "Unhandled thread cancellation.\n" );
+ }
+
+ forall(dtype T | is_thread(T) | IS_EXCEPTION(ThreadCancelled, (T)))
+ void ?{}( thread_dtor_guard_t & this,
+         T & thrd, void(*defaultResumptionHandler)(ThreadCancelled(T) &)) {
+     $monitor * m = get_monitor(thrd);
+     void (*dtor)(T& mutex this) = ^?{};
+     bool join = defaultResumptionHandler != (void(*)(ThreadCancelled(T)&))0;
+     (this.mg){&m, (void(*)())dtor, join};
+
+     // After the guard set-up and any wait, check for cancellation.
+     $thread * desc = get_thread(thrd);
+     struct _Unwind_Exception * cancellation = desc->self_cor.cancellation;
+     if ( likely( 0p == cancellation ) ) {
+         return;
+     } else if ( Cancelled == desc->state ) {
+         return;
+     }
+     desc->state = Cancelled;
+     if (!join) {
+         defaultResumptionHandler = default_thread_cancel_handler;
+     }
+
+     ThreadCancelled(T) except;
+     // TODO: Remove explitate vtable set once trac#186 is fixed.
+     except.virtual_table = &get_exception_vtable(&except);
+     except.the_thread = &thrd;
+     except.the_exception = __cfaehm_cancellation_exception( cancellation );
+     throwResume except;
+
+     except.the_exception->virtual_table->free( except.the_exception );
+     free( cancellation );
+     desc->self_cor.cancellation = 0p;
+ }
+
+ void ^?{}( thread_dtor_guard_t & this ) {
+     ^(this.mg){};
+ }
+
  //-----------------------------------------------------------------------------
  // Starting and stopping threads
…
      verify( this_thrd->context.SP );

-     __schedule_thread( (__processor_id_t *)kernelTLS.this_processor, this_thrd );
+     __schedule_thread( this_thrd );
      enable_interrupts( __cfaabi_dbg_ctx );
  }
…
  }

+ //-----------------------------------------------------------------------------
+ forall(dtype T | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled, (T)))
+ T & join( T & this ) {
+     thread_dtor_guard_t guard = { this, defaultResumptionHandler };
+     return this;
+ }
+
  // Local Variables: //
  // mode: c //
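The new thread_dtor_guard_t constructor concentrates the cancellation protocol in one place: after the guard waits for the thread, it checks whether the thread left a cancellation behind, resumes a ThreadCancelled exception at the joiner, then frees the unwind node. A simplified C-style outline of that control flow (names shortened and hypothetical; the real logic is the Cforall above):

    #include <stdlib.h>

    struct thread_desc { void * cancellation; int cancelled; };

    void join_check( struct thread_desc * desc, void (*handler)(void * except) ) {
        void * cancellation = desc->cancellation;       /* set by the cancelled thread */
        if ( !cancellation || desc->cancelled ) return; /* normal termination, or already handled */
        desc->cancelled = 1;
        handler( cancellation );    /* resumption handler runs on the joiner's stack */
        free( cancellation );       /* reclaim the unwind node afterwards */
        desc->cancellation = NULL;
    }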
libcfa/src/concurrency/thread.hfa
r4b30e8cc → rc28ea4e

  #include "kernel.hfa"
  #include "monitor.hfa"
+ #include "exception.hfa"

  //-----------------------------------------------------------------------------
  // thread trait
  trait is_thread(dtype T) {
      void ^?{}(T& mutex this);
      void main(T& this);
      $thread* get_thread(T& this);
  };

+ FORALL_DATA_EXCEPTION(ThreadCancelled, (dtype thread_t), (thread_t)) (
+     thread_t * the_thread;
+     exception_t * the_exception;
+ );
+
+ forall(dtype T)
+ void copy(ThreadCancelled(T) * dst, ThreadCancelled(T) * src);
+
+ forall(dtype T)
+ const char * msg(ThreadCancelled(T) *);

  // define that satisfies the trait without using the thread keyword
…
  static inline void ?{}($thread & this, const char * const name, struct cluster & cl ) { this{ name, cl, 0p, 65000 }; }
  static inline void ?{}($thread & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; }

+ struct thread_dtor_guard_t {
+     monitor_dtor_guard_t mg;
+ };
+
+ forall( dtype T | is_thread(T) | IS_EXCEPTION(ThreadCancelled, (T)) )
+ void ?{}( thread_dtor_guard_t & this, T & thrd, void(*)(ThreadCancelled(T) &) );
+ void ^?{}( thread_dtor_guard_t & this );

  //-----------------------------------------------------------------------------
…
  //----------
  // join
- forall( dtype T | is_thread(T) )
+ forall( dtype T | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled, (T)) )
  T & join( T & this );
libcfa/src/exception.c
r4b30e8cc → rc28ea4e

  // Author           : Andrew Beach
  // Created On       : Mon Jun 26 15:13:00 2017
- // Last Modified By : Peter A. Buhr
- // Last Modified On : Sat Aug 29 15:52:22 2020
- // Update Count     : 34
+ // Last Modified By : Andrew Beach
+ // Last Modified On : Tue Oct 27 16:27:00 2020
+ // Update Count     : 35
  //
…
  #include <stddef.h> // for size_t

+ #include <unwind.h> // for struct _Unwind_Exception {...};
+
  #include "exception.h"

  #include <stdlib.h>
  #include <stdio.h>
- #include <unwind.h>
  #include <bits/debug.hfa>
  #include "concurrency/invoke.h"
…

  // MEMORY MANAGEMENT =========================================================
-
- struct __cfaehm_node {
-     struct _Unwind_Exception unwind_exception;
-     struct __cfaehm_node * next;
-     int handler_index;
- };

  #define NODE_TO_EXCEPT(node) ((exception_t *)(1 + (node)))
libcfa/src/exception.h
r4b30e8cc → rc28ea4e

  // file "LICENCE" distributed with Cforall.
  //
- // exception.h -- Builtins for exception handling.
+ // exception.h -- Internal exception handling definitions.
  //
  // Author           : Andrew Beach
  // Created On       : Mon Jun 26 15:11:00 2017
  // Last Modified By : Andrew Beach
- // Last Modified On : Tue May 19 14:17:00 2020
- // Update Count     : 10
+ // Last Modified On : Tue Oct 27 14:45:00 2020
+ // Update Count     : 11
  //

  #pragma once

+ // This could be considered several headers. All are internal to the exception
+ // system but needed to depending on whether they are C/Cforall code and
+ // whether or not they are part of the builtins.

  #ifdef __cforall
  extern "C" {
  #endif
+
+ // Included in C code or the built-ins.
+ #if !defined(__cforall) || defined(__cforall_builtins__)

  struct __cfaehm_base_exception_t;
…
  // Function catches termination exceptions.
  void __cfaehm_try_terminate(
      void (*try_block)(),
      void (*catch_block)(int index, exception_t * except),
      int (*match_block)(exception_t * except));

  // Clean-up the exception in catch blocks.
…
  // Data structure creates a list of resume handlers.
  struct __cfaehm_try_resume_node {
      struct __cfaehm_try_resume_node * next;
      _Bool (*handler)(exception_t * except);
  };

  // These act as constructor and destructor for the resume node.
  void __cfaehm_try_resume_setup(
      struct __cfaehm_try_resume_node * node,
      _Bool (*handler)(exception_t * except));
  void __cfaehm_try_resume_cleanup(
      struct __cfaehm_try_resume_node * node);

  // Check for a standard way to call fake deconstructors.
  struct __cfaehm_cleanup_hook {};

+ #endif
+
+ // Included in C code and the library.
+ #if !defined(__cforall) || !defined(__cforall_builtins__)
+ struct __cfaehm_node {
+     struct _Unwind_Exception unwind_exception;
+     struct __cfaehm_node * next;
+     int handler_index;
+ };
+
+ static inline exception_t * __cfaehm_cancellation_exception(
+         struct _Unwind_Exception * unwind_exception ) {
+     return (exception_t *)(1 + (struct __cfaehm_node *)unwind_exception);
+ }
+ #endif
+
  #ifdef __cforall
  }
+
+ // Built-ins not visible in C.
+ #if defined(__cforall_builtins__)

  // Not all the built-ins can be expressed in C. These can't be
…

  #endif
+
+ #endif
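__cfaehm_cancellation_exception relies on the allocation layout: the user-visible exception is stored directly after the __cfaehm_node header, so adding 1 to the node pointer lands on the exception. A runnable C illustration of that pointer arithmetic (generic names, not the CFA types):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { long header[4]; };  /* stands in for __cfaehm_node */

    int main(void) {
        struct node * n = malloc( sizeof(struct node) + 64 /* exception payload */ );
        void * payload = (void *)(n + 1);  /* first byte past the header */
        printf( "header at %p, payload at %p (offset %zu bytes)\n",
                (void *)n, payload, (size_t)((char *)payload - (char *)n) );
        free( n );
        return 0;
    }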
libcfa/src/stdlib.cfa
r4b30e8cc → rc28ea4e

  forall( dtype T | sized(T) | { void ^?{}( T & ); } )
- void adelete( size_t dim, T arr[] ) {
+ void adelete( T arr[] ) {
      if ( arr ) {                                  // ignore null
+         size_t dim = malloc_size( arr ) / sizeof( T );
          for ( int i = dim - 1; i >= 0; i -= 1 ) { // reverse allocation order, must be unsigned
              ^(arr[i]){};                          // run destructor
…

  forall( dtype T | sized(T) | { void ^?{}( T & ); }, ttype Params | { void adelete( Params ); } )
- void adelete( size_t dim, T arr[], Params rest ) {
+ void adelete( T arr[], Params rest ) {
      if ( arr ) {                                  // ignore null
+         size_t dim = malloc_size( arr ) / sizeof( T );
          for ( int i = dim - 1; i >= 0; i -= 1 ) { // reverse allocation order, must be unsigned
              ^(arr[i]){};                          // run destructor
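The dropped dim parameter works because malloc_size reports the size of the array's allocation, and dividing by sizeof(T) recovers the element count; for the destructor loop to be safe this must be the exact requested size, not a rounded-up usable size. A hedged C sketch using glibc's malloc_usable_size, the nearest standard-library analogue (which does round up, hence the caveat in the comments):

    #include <malloc.h>   /* malloc_usable_size: glibc-specific */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        double * arr = malloc( 10 * sizeof(double) );
        size_t dim = malloc_usable_size( arr ) / sizeof(double);
        /* dim may exceed 10 because usable size rounds up to the bin size;
           an allocator that records the exact request (as adelete assumes
           of malloc_size) would recover exactly 10. */
        printf( "requested 10, recovered %zu\n", dim );
        free( arr );
        return 0;
    }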
libcfa/src/stdlib.hfa
r4b30e8cc → rc28ea4e

  // Cforall allocation/deallocation and constructor/destructor, array types
  forall( dtype T | sized(T), ttype Params | { void ?{}( T &, Params ); } ) T * anew( size_t dim, Params p );
- forall( dtype T | sized(T) | { void ^?{}( T & ); } ) void adelete( size_t dim, T arr[] );
- forall( dtype T | sized(T) | { void ^?{}( T & ); }, ttype Params | { void adelete( Params ); } ) void adelete( size_t dim, T arr[], Params rest );
+ forall( dtype T | sized(T) | { void ^?{}( T & ); } ) void adelete( T arr[] );
+ forall( dtype T | sized(T) | { void ^?{}( T & ); }, ttype Params | { void adelete( Params ); } ) void adelete( T arr[], Params rest );

  //---------------------------------------