Legend: lines prefixed with '-' were removed and lines prefixed with '+' were added in re660761; unprefixed lines are unmodified. Elided unchanged regions are marked with '…'.
  • libcfa/src/concurrency/kernel.cfa

--- r39fc03e
+++ re660761

//C Includes
-#include <stddef.h>
#include <errno.h>
-#include <string.h>
#include <stdio.h>
-#include <fenv.h>
#include <signal.h>
#include <unistd.h>
-#include <limits.h>							// PTHREAD_STACK_MIN
-#include <sys/mman.h>							// mprotect
-extern "C" {
-#include <sys/resource.h>
-}

//CFA Includes
-#include "time.hfa"
#include "kernel_private.hfa"
#include "preemption.hfa"
-#include "startup.hfa"

//Private includes
…
// Some assembly required
#if defined( __i386 )
-	#define CtxGet( ctx )        \
-		__asm__ volatile (     \
-			"movl %%esp,%0\n"\
-			"movl %%ebp,%1\n"\
-			: "=rm" (ctx.SP),\
-				"=rm" (ctx.FP) \
-		)
-
	// mxcr : SSE Status and Control bits (control bits are preserved across function calls)
	// fcw  : X87 FPU control word (preserved across function calls)
…

#elif defined( __x86_64 )
-	#define CtxGet( ctx )        \
-		__asm__ volatile (     \
-			"movq %%rsp,%0\n"\
-			"movq %%rbp,%1\n"\
-			: "=rm" (ctx.SP),\
-				"=rm" (ctx.FP) \
-		)
-
	#define __x87_store         \
		uint32_t __mxcr;      \
…

#elif defined( __ARM_ARCH )
-	#define CtxGet( ctx ) __asm__ ( \
-		"mov %0,%%sp\n"   \
-		"mov %1,%%r11\n"   \
-	: "=rm" (ctx.SP), "=rm" (ctx.FP) )
#else
	#error unknown hardware architecture
#endif

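The removed CtxGet macros capture the current stack and frame pointers through GNU inline-assembly output constraints. A stand-alone sketch of the same technique for x86-64 (struct and macro names here are illustrative, not the CFA `__stack_context_t`):

```c
#include <stdio.h>

// Hypothetical stand-in for CFA's __stack_context_t.
struct ctx_t { void * SP; void * FP; };

// Same idea as the removed CtxGet: let the compiler copy
// %rsp/%rbp into ordinary variables via "=rm" outputs.
#define CTX_GET( ctx )        \
	__asm__ volatile (        \
		"movq %%rsp,%0\n"     \
		"movq %%rbp,%1\n"     \
		: "=rm" ((ctx).SP),   \
		  "=rm" ((ctx).FP) )

int main(void) {
	struct ctx_t ctx;
	CTX_GET( ctx );
	printf( "SP=%p FP=%p\n", ctx.SP, ctx.FP );
	return 0;
}
```

Note that the FP value is only meaningful when the frame pointer is kept, e.g. when compiling with -fno-omit-frame-pointer.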
-//-----------------------------------------------------------------------------
-//Start and stop routines for the kernel, declared first to make sure they run first
-static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
-static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
+extern $thread * mainThread;
+extern processor * mainProcessor;

//-----------------------------------------------------------------------------
…
static bool __has_next_thread(cluster * this);
static void __run_thread(processor * this, $thread * dst);
-static bool __wake_proc(processor *);
static bool __wake_one(struct __processor_id_t * id, cluster * cltr);
static void __halt(processor * this);
-
-//-----------------------------------------------------------------------------
-// Kernel storage
-KERNEL_STORAGE(cluster,              mainCluster);
-KERNEL_STORAGE(processor,            mainProcessor);
-KERNEL_STORAGE($thread,              mainThread);
-KERNEL_STORAGE(__stack_t,            mainThreadCtx);
-KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
-#if !defined(__CFA_NO_STATISTICS__)
-KERNEL_STORAGE(__stats_t, mainProcStats);
-#endif
-
-cluster              * mainCluster;
-processor            * mainProcessor;
-$thread              * mainThread;
-__scheduler_RWLock_t * __scheduler_lock;
-
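The removed KERNEL_STORAGE declarations reserve raw static storage for the runtime singletons, which are then constructed in place during boot (e.g. `mainCluster = (cluster *)&storage_mainCluster` in __kernel_startup below). A minimal sketch of that idiom in plain C, assuming nothing about the real macro beyond what the diff shows:

```c
typedef struct { int id; const char * name; } cluster;

// Reserve correctly sized and aligned storage, but run no constructor.
#define KERNEL_STORAGE( T, X ) \
	static _Alignas(T) char storage_##X[ sizeof(T) ]

KERNEL_STORAGE( cluster, mainCluster );
cluster * mainCluster;

// "Construct" in place once boot decides the order of initialization.
static void boot(void) {
	mainCluster = (cluster *)&storage_mainCluster;
	mainCluster->id   = 0;
	mainCluster->name = "Main Cluster";
}

int main(void) {
	boot();
	return 0;
}
```

The point of the idiom is ordering: the storage exists before any constructor runs, so boot code controls exactly when each singleton comes to life.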
-extern "C" {
-	struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
-}
-
-size_t __page_size = 0;
-
-//-----------------------------------------------------------------------------
-// Global state
-thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) @= {
-	NULL,							// cannot use 0p
-	NULL,
-	NULL,
-	{ 1, false, false },
-};
-
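kernelTLS is declared with tls_model("initial-exec"), so every access compiles to a fixed offset from the thread pointer instead of a call through __tls_get_addr; that matters on the scheduling fast path. The attribute itself is ordinary GCC C:

```c
// Fast TLS model: a constant offset from the thread pointer.
// Safe here because the runtime is loaded at program startup;
// a library loaded later via dlopen could not use this model.
static __thread struct {
	void * this_thread;        // stand-ins for the real KernelThreadData fields
	int    disable_count;
} kernel_tls __attribute__(( tls_model( "initial-exec" ) ));
```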
-//-----------------------------------------------------------------------------
-// Struct to steal stack
-struct current_stack_info_t {
-	__stack_t * storage;					// pointer to stack object
-	void * base;						// base of stack
-	void * limit;						// stack grows towards stack limit
-	void * context;						// address of cfa_context_t
-};
-
-void ?{}( current_stack_info_t & this ) {
-	__stack_context_t ctx;
-	CtxGet( ctx );
-	this.base = ctx.FP;
-
-	rlimit r;
-	getrlimit( RLIMIT_STACK, &r);
-	size_t size = r.rlim_cur;
-
-	this.limit = (void *)(((intptr_t)this.base) - size);
-	this.context = &storage_mainThreadCtx;
-}
-
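The constructor above estimates the main thread's stack bounds from the current frame pointer and the RLIMIT_STACK soft limit. The same measurement as a stand-alone C program (ignoring the RLIM_INFINITY case for brevity):

```c
#include <stdio.h>
#include <stdint.h>
#include <sys/resource.h>

int main(void) {
	int anchor;                             // lives in the current stack frame
	struct rlimit r;
	getrlimit( RLIMIT_STACK, &r );

	// Stacks grow down on the supported targets: the lowest usable
	// address is (roughly) the current frame minus the soft limit.
	void * base  = &anchor;
	void * limit = (void *)((intptr_t)base - (intptr_t)r.rlim_cur);
	printf( "soft limit %zu bytes, frame %p, limit ~%p\n",
	        (size_t)r.rlim_cur, base, limit );
	return 0;
}
```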
-//-----------------------------------------------------------------------------
-// Main thread construction
-
-void ?{}( $coroutine & this, current_stack_info_t * info) with( this ) {
-	stack.storage = info->storage;
-	with(*stack.storage) {
-		limit     = info->limit;
-		base      = info->base;
-	}
-	__attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage;
-	*istorage |= 0x1;
-	name = "Main Thread";
-	state = Start;
-	starter = 0p;
-	last = 0p;
-	cancellation = 0p;
-}
-
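The `*istorage |= 0x1` above tags the low bit of the stack-storage pointer to record that the stack was supplied by the user and must not be freed; __kernel_shutdown verifies the same bit before destroying the main thread. The trick in isolation, relying only on malloc returning word-aligned memory:

```c
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

// Malloc'd storage is at least word aligned, so bit 0 is free to
// serve as a "user supplied, do not free" flag.
static void * tag   ( void * p ) { return (void *)((uintptr_t)p | 0x1); }
static int    tagged( void * p ) { return (uintptr_t)p & 0x1; }
static void * untag ( void * p ) { return (void *)((uintptr_t)p & ~(uintptr_t)0x1); }

int main(void) {
	void * storage = malloc( 64 );
	void * marked  = tag( storage );
	assert( tagged( marked ) && untag( marked ) == storage );
	if ( ! tagged( marked ) ) free( untag( marked ) );  // skipped: caller owns it
	free( storage );
	return 0;
}
```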
-void ?{}( $thread & this, current_stack_info_t * info) with( this ) {
-	ticket = 1;
-	state = Start;
-	self_cor{ info };
-	curr_cor = &self_cor;
-	curr_cluster = mainCluster;
-	self_mon.owner = &this;
-	self_mon.recursion = 1;
-	self_mon_p = &self_mon;
-	link.next = 0p;
-	link.prev = 0p;
-
-	node.next = 0p;
-	node.prev = 0p;
-	doregister(curr_cluster, this);
-
-	monitors{ &self_mon_p, 1, (fptr_t)0 };
-}
-
-//-----------------------------------------------------------------------------
-// Processor coroutine
-void ?{}(processorCtx_t & this) {
-
-}
-
-// Construct the processor context of non-main processors
-static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
-	(this.__cor){ info };
-	this.proc = proc;
-}
-
-static void * __invoke_processor(void * arg);
-
-static void init(processor & this, const char name[], cluster & _cltr) with( this ) {
-	this.name = name;
-	this.cltr = &_cltr;
-	id = -1u;
-	destroyer = 0p;
-	do_terminate = false;
-	preemption_alarm = 0p;
-	pending_preemption = false;
-
-	#if !defined(__CFA_NO_STATISTICS__)
-		print_stats = 0;
-		print_halts = false;
-	#endif
-
-	int target = __atomic_add_fetch( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
-
-	id = doregister((__processor_id_t*)&this);
-
-	// Lock the RWlock so no-one pushes/pops while we are changing the queue
-	uint_fast32_t last_size = ready_mutate_lock();
-
-		// Adjust the ready queue size
-		ready_queue_grow( cltr, target );
-
-	// Unlock the RWlock
-	ready_mutate_unlock( last_size );
-
-	__cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this);
-}
-
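init and deinit bracket every ready-queue resize with ready_mutate_lock/ready_mutate_unlock: processors pushing and popping threads hold the read side, and whoever reshapes the queue takes the write side. A sketch of that shape using a POSIX rwlock as a stand-in (queues_grow and push are hypothetical, not the CFA ready-queue API):

```c
#include <pthread.h>
#include <stdlib.h>

static pthread_rwlock_t queue_lock = PTHREAD_RWLOCK_INITIALIZER;
static int * queues;
static unsigned nqueues;

// Writer: nobody may push/pop while the array is being reshaped.
static void queues_grow( unsigned target ) {
	pthread_rwlock_wrlock( &queue_lock );
	queues  = realloc( queues, target * sizeof *queues );
	nqueues = target;
	pthread_rwlock_unlock( &queue_lock );
}

// Readers: many processors may push/pop concurrently.
static void push( int v ) {
	pthread_rwlock_rdlock( &queue_lock );
	/* ... enqueue v into one of the nqueues queues ... */
	(void)v;
	pthread_rwlock_unlock( &queue_lock );
}
```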
-// Not a dtor: it preps for destruction but must not destroy members
-void deinit(processor & this) {
-
-	int target = __atomic_sub_fetch( &this.cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
-
-	// Lock the RWlock so no-one pushes/pops while we are changing the queue
-	uint_fast32_t last_size = ready_mutate_lock();
-
-		// Adjust the ready queue size
-		ready_queue_shrink( this.cltr, target );
-
-		// Make sure we aren't on the idle queue
-		unsafe_remove( this.cltr->idles, &this );
-
-	// Unlock the RWlock
-	ready_mutate_unlock( last_size );
-
-	// Finally we don't need the read_lock any more
-	unregister((__processor_id_t*)&this);
-}
-
-void ?{}(processor & this, const char name[], cluster & _cltr) {
-	( this.idle ){};
-	( this.terminated ){ 0 };
-	( this.runner ){};
-	init( this, name, _cltr );
-
-	__cfadbg_print_safe(runtime_core, "Kernel : Starting core %p\n", &this);
-
-	this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );
-}
-
-void ^?{}(processor & this) with( this ){
-	if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
-		__cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this);
-
-		__atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
-		__wake_proc( &this );
-
-		P( terminated );
-		verify( kernelTLS.this_processor != &this);
-	}
-
-	int err = pthread_join( kernel_thread, 0p );
-	if( err != 0 ) abort("KERNEL ERROR: joining processor %p caused error %s\n", &this, strerror(err));
-
-	free( this.stack );
-
-	deinit( this );
-}
-
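The destructor above performs a three-step shutdown handshake: publish do_terminate, wake the possibly idle processor, then wait on the terminated semaphore before pthread_join reaps the kernel thread. A self-contained sketch with POSIX primitives (the explicit wake step is replaced by polling to keep it short):

```c
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <unistd.h>

static atomic_bool do_terminate;
static sem_t terminated;

static void * worker( void * arg ) {
	(void)arg;
	while ( ! atomic_load_explicit( &do_terminate, memory_order_acquire ) )
		usleep( 1000 );                 // stand-in for running threads / idling
	sem_post( &terminated );            // signal: main loop has exited
	return 0;
}

int main(void) {
	pthread_t kernel_thread;
	sem_init( &terminated, 0, 0 );
	pthread_create( &kernel_thread, 0, worker, 0 );

	atomic_store_explicit( &do_terminate, 1, memory_order_release );
	sem_wait( &terminated );            // like P( terminated ) above
	pthread_join( kernel_thread, 0 );   // only now can the pthread be reaped
	sem_destroy( &terminated );
	return 0;
}
```

The semaphore matters because pthread_join alone cannot tell "the processor noticed do_terminate and unwound cleanly" from "the thread died some other way".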
-void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned io_flags) with( this ) {
-	this.name = name;
-	this.preemption_rate = preemption_rate;
-	this.nprocessors = 0;
-	ready_queue{};
-
-	#if !defined(__CFA_NO_STATISTICS__)
-		print_stats = 0;
-		stats = alloc();
-		__init_stats( stats );
-	#endif
-
-	threads{ __get };
-
-	doregister(this);
-
-	// Lock the RWlock so no-one pushes/pops while we are changing the queue
-	uint_fast32_t last_size = ready_mutate_lock();
-
-		// Adjust the ready queue size
-		ready_queue_grow( &this, 0 );
-
-	// Unlock the RWlock
-	ready_mutate_unlock( last_size );
-
-	__kernel_io_startup( this, io_flags, &this == mainCluster );
-}
-
-void ^?{}(cluster & this) {
-	__kernel_io_shutdown( this, &this == mainCluster );
-
-	// Lock the RWlock so no-one pushes/pops while we are changing the queue
-	uint_fast32_t last_size = ready_mutate_lock();
-
-		// Adjust the ready queue size
-		ready_queue_shrink( &this, 0 );
-
-	// Unlock the RWlock
-	ready_mutate_unlock( last_size );
-
-	#if !defined(__CFA_NO_STATISTICS__)
-		if( 0 != this.print_stats ) {
-			__print_stats( this.stats, this.print_stats, true, this.name, (void*)&this );
-		}
-		free( this.stats );
-	#endif
-
-	unregister(this);
-}
+bool __wake_proc(processor *);

//=============================================================================================
…
}

-// KERNEL_ONLY
-// Context invoker for processors
-// This is the entry point for processors (kernel threads)
-// It effectively constructs a coroutine by stealing the pthread stack
-static void * __invoke_processor(void * arg) {
-	#if !defined( __CFA_NO_STATISTICS__ )
-		__stats_t local_stats;
-		__init_stats( &local_stats );
-		kernelTLS.this_stats = &local_stats;
-	#endif
-
-	processor * proc = (processor *) arg;
-	kernelTLS.this_processor = proc;
-	kernelTLS.this_thread    = 0p;
-	kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
-	// SKULLDUGGERY: We want to create a context for the processor coroutine,
-	// which is needed for the 2-step context switch. However, there is no reason
-	// to waste the perfectly valid stack created by pthread.
-	current_stack_info_t info;
-	__stack_t ctx;
-	info.storage = &ctx;
-	(proc->runner){ proc, &info };
-
-	__cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);
-
-	// Set global state
-	kernelTLS.this_thread = 0p;
-
-	// We now have a proper context from which to schedule threads
-	__cfadbg_print_safe(runtime_core, "Kernel : core %p created (%p, %p)\n", proc, &proc->runner, &ctx);
-
-	// SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
-	// resume it to start it like it normally would; it would just context switch
-	// back to here. Instead, directly call main since we are already on the
-	// appropriate stack.
-	get_coroutine(proc->runner)->state = Active;
-	main( proc->runner );
-	get_coroutine(proc->runner)->state = Halted;
-
-	// Main routine of the core returned, the core is now fully terminated
-	__cfadbg_print_safe(runtime_core, "Kernel : core %p main ended (%p)\n", proc, &proc->runner);
-
-	#if !defined(__CFA_NO_STATISTICS__)
-		__tally_stats(proc->cltr->stats, &local_stats);
-		if( 0 != proc->print_stats ) {
-			__print_stats( &local_stats, proc->print_stats, true, proc->name, (void*)proc );
-		}
-	#endif
-
-	return 0p;
-}
-
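__invoke_processor avoids allocating a second stack by wrapping the processor coroutine around the stack pthread already provided. On glibc the bounds of that inherited stack can be queried directly, which is another way to fill in a current_stack_info_t-style record (pthread_getattr_np is a GNU extension):

```c
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

int main(void) {
	pthread_attr_t attr;
	void * addr;
	size_t size;

	// Ask glibc for the stack the current thread is already running on.
	pthread_getattr_np( pthread_self(), &attr );
	pthread_attr_getstack( &attr, &addr, &size );
	pthread_attr_destroy( &attr );

	printf( "stack [%p, %p), %zu bytes\n",
	        addr, (char *)addr + size, size );
	return 0;
}
```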
-static void Abort( int ret, const char func[] ) {
-	if ( ret ) {						// pthread routines return errno values
-		abort( "%s : internal error, error(%d) %s.", func, ret, strerror( ret ) );
-	} // if
-} // Abort
-
-void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
-	pthread_attr_t attr;
-
-	Abort( pthread_attr_init( &attr ), "pthread_attr_init" );	// initialize attribute
-
-	size_t stacksize;
-	// default stack size, normally defined by shell limit
-	Abort( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" );
-	assert( stacksize >= PTHREAD_STACK_MIN );
-
-	void * stack;
-	__cfaabi_dbg_debug_do(
-		stack = memalign( __page_size, stacksize + __page_size );
-		// pthread has no mechanism to create the guard page in a user-supplied stack.
-		if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
-			abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
-		} // if
-	);
-	__cfaabi_dbg_no_debug_do(
-		stack = malloc( stacksize );
-	);
-
-	Abort( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" );
-
-	Abort( pthread_create( pthread, &attr, start, arg ), "pthread_create" );
-	return stack;
-}
-
-// KERNEL_ONLY
-static void __kernel_first_resume( processor * this ) {
-	$thread * src = mainThread;
-	$coroutine * dst = get_coroutine(this->runner);
-
-	verify( ! kernelTLS.preemption_state.enabled );
-
-	kernelTLS.this_thread->curr_cor = dst;
-	__stack_prepare( &dst->stack, 65000 );
-	__cfactx_start(main, dst, this->runner, __cfactx_invoke_coroutine);
-
-	verify( ! kernelTLS.preemption_state.enabled );
-
-	dst->last = &src->self_cor;
-	dst->starter = dst->starter ? dst->starter : &src->self_cor;
-
-	// make sure the current state is still correct
-	/* paranoid */ verify(src->state == Ready);
-
-	// context switch to specified coroutine
-	verify( dst->context.SP );
-	__cfactx_switch( &src->context, &dst->context );
-	// when __cfactx_switch returns we are back in the src coroutine
-
-	mainThread->curr_cor = &mainThread->self_cor;
-
-	// make sure the current state has been updated
-	/* paranoid */ verify(src->state == Active);
-
-	verify( ! kernelTLS.preemption_state.enabled );
-}
-
-// KERNEL_ONLY
-static void __kernel_last_resume( processor * this ) {
-	$coroutine * src = &mainThread->self_cor;
-	$coroutine * dst = get_coroutine(this->runner);
-
-	verify( ! kernelTLS.preemption_state.enabled );
-	verify( dst->starter == src );
-	verify( dst->context.SP );
-
-	// SKULLDUGGERY: in debug mode, the processors check that the stack is still
-	// within its limits after running a thread. That check does not make sense
-	// if we context switch to the processor using coroutine semantics. Since
-	// this is a special case, use the current context info to populate these fields.
-	__cfaabi_dbg_debug_do(
-		__stack_context_t ctx;
-		CtxGet( ctx );
-		mainThread->context.SP = ctx.SP;
-		mainThread->context.FP = ctx.FP;
-	)
-
-	// context switch to the processor
-	__cfactx_switch( &src->context, &dst->context );
-}
-
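__kernel_first_resume hands the current UNIX context to the processor coroutine, and __kernel_last_resume takes it back without ever expecting the coroutine to return: a one-shot swap to a prepared context, and a final swap home. As an analogy only (CFA uses its own __cfactx_* primitives, not ucontext), the same shape with POSIX ucontext:

```c
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

static ucontext_t main_ctx, proc_ctx;

static void processor_main(void) {
	puts( "processor: running" );
	// "last resume": switch home; control never comes back here.
	swapcontext( &proc_ctx, &main_ctx );
}

int main(void) {
	// Prepare a context on a fresh stack (CFA instead preps the coroutine stack).
	getcontext( &proc_ctx );
	proc_ctx.uc_stack.ss_sp   = malloc( 64 * 1024 );
	proc_ctx.uc_stack.ss_size = 64 * 1024;
	proc_ctx.uc_link = &main_ctx;
	makecontext( &proc_ctx, processor_main, 0 );

	puts( "main: first resume" );
	swapcontext( &main_ctx, &proc_ctx );   // like __kernel_first_resume
	puts( "main: control returned" );
	free( proc_ctx.uc_stack.ss_sp );
	return 0;
}
```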
//-----------------------------------------------------------------------------
// Scheduler routines
…

//=============================================================================================
-// Kernel Setup logic
-//=============================================================================================
-//-----------------------------------------------------------------------------
-// Kernel boot procedures
-static void __kernel_startup(void) {
-	verify( ! kernelTLS.preemption_state.enabled );
-	__cfadbg_print_safe(runtime_core, "Kernel : Starting\n");
-
-	__page_size = sysconf( _SC_PAGESIZE );
-
-	__cfa_dbg_global_clusters.list{ __get };
-	__cfa_dbg_global_clusters.lock{};
-
-	// Initialize the global scheduler lock
-	__scheduler_lock = (__scheduler_RWLock_t*)&storage___scheduler_lock;
-	(*__scheduler_lock){};
-
-	// Initialize the main cluster
-	mainCluster = (cluster *)&storage_mainCluster;
-	(*mainCluster){"Main Cluster"};
-
-	__cfadbg_print_safe(runtime_core, "Kernel : Main cluster ready\n");
-
-	// Start by initializing the main thread
-	// SKULLDUGGERY: the mainThread steals the process main thread
-	// which will then be scheduled by the mainProcessor normally
-	mainThread = ($thread *)&storage_mainThread;
-	current_stack_info_t info;
-	info.storage = (__stack_t*)&storage_mainThreadCtx;
-	(*mainThread){ &info };
-
-	__cfadbg_print_safe(runtime_core, "Kernel : Main thread ready\n");
-
-	// Construct the processor context of the main processor
-	void ?{}(processorCtx_t & this, processor * proc) {
-		(this.__cor){ "Processor" };
-		this.__cor.starter = 0p;
-		this.proc = proc;
-	}
-
-	void ?{}(processor & this) with( this ) {
-		( this.idle ){};
-		( this.terminated ){ 0 };
-		( this.runner ){};
-		init( this, "Main Processor", *mainCluster );
-		kernel_thread = pthread_self();
-
-		runner{ &this };
-		__cfadbg_print_safe(runtime_core, "Kernel : constructed main processor context %p\n", &runner);
-	}
-
-	// Initialize the main processor and the main processor ctx
-	// (the coroutine that contains the processing control flow)
-	mainProcessor = (processor *)&storage_mainProcessor;
-	(*mainProcessor){};
-
-	// Initialize the global state variables
-	kernelTLS.this_processor = mainProcessor;
-	kernelTLS.this_thread    = mainThread;
-
-	#if !defined( __CFA_NO_STATISTICS__ )
-		kernelTLS.this_stats = (__stats_t *)& storage_mainProcStats;
-		__init_stats( kernelTLS.this_stats );
-	#endif
-
-	// Enable preemption
-	kernel_start_preemption();
-
-	// Add the main thread to the ready queue
-	// once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
-	__schedule_thread((__processor_id_t *)mainProcessor, mainThread);
-
-	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current
-	// UNIX context. Hence, the main thread does not begin through __cfactx_invoke_thread, like all other threads.
-	// The trick here is that mainThread is on the ready queue when this call is made.
-	__kernel_first_resume( kernelTLS.this_processor );
-
-	// THE SYSTEM IS NOW COMPLETELY RUNNING
-
-	// Now that the system is up, finish creating systems that need threading
-	__kernel_io_finish_start( *mainCluster );
-
-	__cfadbg_print_safe(runtime_core, "Kernel : Started\n--------------------------------------------------\n\n");
-
-	verify( ! kernelTLS.preemption_state.enabled );
-	enable_interrupts( __cfaabi_dbg_ctx );
-	verify( TL_GET( preemption_state.enabled ) );
-}
-
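__kernel_startup and __kernel_shutdown were invoked automatically via the __attribute__((constructor(...)))/((destructor(...))) declarations removed near the top of the diff, with STARTUP_PRIORITY_KERNEL ordering them ahead of user code. The mechanism in miniature (the priority value here is illustrative, not the CFA constant):

```c
#include <stdio.h>

// Lower numbers run earlier among constructors and later among
// destructors; values 0-100 are reserved for the implementation.
static void boot (void) __attribute__(( constructor( 101 ) ));
static void final(void) __attribute__(( destructor ( 101 ) ));

static void boot (void) { puts( "runs before main" ); }
static void final(void) { puts( "runs after main"  ); }

int main(void) {
	puts( "main" );
	return 0;
}
```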
-static void __kernel_shutdown(void) {
-	// Before we start shutting things down, wait for systems that need threading to shutdown
-	__kernel_io_prepare_stop( *mainCluster );
-
-	/* paranoid */ verify( TL_GET( preemption_state.enabled ) );
-	disable_interrupts();
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-
-	__cfadbg_print_safe(runtime_core, "\n--------------------------------------------------\nKernel : Shutting down\n");
-
-	// SKULLDUGGERY: Notify the mainProcessor it needs to terminate.
-	// When its coroutine terminates, it returns control to the mainThread,
-	// which is currently here.
-	__atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
-	__kernel_last_resume( kernelTLS.this_processor );
-	mainThread->self_cor.state = Halted;
-
-	// THE SYSTEM IS NOW COMPLETELY STOPPED
-
-	// Disable preemption
-	kernel_stop_preemption();
-
-	// Destroy the main processor and its context in reverse order of construction
-	// These were manually constructed so we need to manually destroy them
-	void ^?{}(processor & this) with( this ){
-		deinit( this );
-
-		/* paranoid */ verify( this.do_terminate == true );
-		__cfaabi_dbg_print_safe("Kernel : destroyed main processor context %p\n", &runner);
-	}
-
-	^(*mainProcessor){};
-
-	// Final step, destroy the main thread since it is no longer needed
-	// Since we provided a stack to this task, it will not destroy anything
-	/* paranoid */ verify(mainThread->self_cor.stack.storage == (__stack_t*)(((uintptr_t)&storage_mainThreadCtx)| 0x1));
-	^(*mainThread){};
-
-	^(*mainCluster){};
-
-	^(*__scheduler_lock){};
-
-	^(__cfa_dbg_global_clusters.list){};
-	^(__cfa_dbg_global_clusters.lock){};
-
-	__cfadbg_print_safe(runtime_core, "Kernel : Shutdown complete\n");
-}
-
-//=============================================================================================
// Kernel Idle Sleep
//=============================================================================================
…

// Unconditionally wake a processor
-static bool __wake_proc(processor * this) {
+bool __wake_proc(processor * this) {
	__cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);

…

//-----------------------------------------------------------------------------
-// Global Queues
-void doregister( cluster     & cltr ) {
-	lock      ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
-	push_front( __cfa_dbg_global_clusters.list, cltr );
-	unlock    ( __cfa_dbg_global_clusters.lock );
-}
-
-void unregister( cluster     & cltr ) {
-	lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
-	remove( __cfa_dbg_global_clusters.list, cltr );
-	unlock( __cfa_dbg_global_clusters.lock );
-}
-
-void doregister( cluster * cltr, $thread & thrd ) {
-	lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
-	cltr->nthreads += 1;
-	push_front(cltr->threads, thrd);
-	unlock    (cltr->thread_list_lock);
-}
-
-void unregister( cluster * cltr, $thread & thrd ) {
-	lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
-	remove(cltr->threads, thrd );
-	cltr->nthreads -= 1;
-	unlock(cltr->thread_list_lock);
-}
-
-//-----------------------------------------------------------------------------
// Debug
__cfaabi_dbg_debug_do(