Ignore:
File:
1 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/monitor.cfa

    r2026bb6 rae66348  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // monitor_desc.c --
     7// $monitor.c --
    88//
    99// Author           : Thierry Delisle
    1010// Created On       : Thu Feb 23 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Mar 30 14:30:26 2018
    13 // Update Count     : 9
     12// Last Modified On : Wed Dec  4 07:55:14 2019
     13// Update Count     : 10
    1414//
    1515
     
    2727//-----------------------------------------------------------------------------
    2828// Forward declarations
    29 static inline void set_owner ( monitor_desc * this, thread_desc * owner );
    30 static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
    31 static inline void set_mask  ( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
    32 static inline void reset_mask( monitor_desc * this );
    33 
    34 static inline thread_desc * next_thread( monitor_desc * this );
    35 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );
     29static inline void __set_owner ( $monitor * this, $thread * owner );
     30static inline void __set_owner ( $monitor * storage [], __lock_size_t count, $thread * owner );
     31static inline void set_mask  ( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
     32static inline void reset_mask( $monitor * this );
     33
     34static inline $thread * next_thread( $monitor * this );
     35static inline bool is_accepted( $monitor * this, const __monitor_group_t & monitors );
    3636
    3737static inline void lock_all  ( __spinlock_t * locks [], __lock_size_t count );
    38 static inline void lock_all  ( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
     38static inline void lock_all  ( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
    3939static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count );
    40 static inline void unlock_all( monitor_desc * locks [], __lock_size_t count );
    41 
    42 static inline void save   ( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
    43 static inline void restore( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
    44 
    45 static inline void init     ( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    46 static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    47 
    48 static inline thread_desc *        check_condition   ( __condition_criterion_t * );
     40static inline void unlock_all( $monitor * locks [], __lock_size_t count );
     41
     42static inline void save   ( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
     43static inline void restore( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
     44
     45static inline void init     ( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     46static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     47
     48static inline $thread *        check_condition   ( __condition_criterion_t * );
    4949static inline void                 brand_condition   ( condition & );
    50 static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc * monitors [], __lock_size_t count );
     50static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t &, $monitor * monitors [], __lock_size_t count );
    5151
    5252forall(dtype T | sized( T ))
    5353static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
    5454static inline __lock_size_t count_max    ( const __waitfor_mask_t & mask );
    55 static inline __lock_size_t aggregate    ( monitor_desc * storage [], const __waitfor_mask_t & mask );
     55static inline __lock_size_t aggregate    ( $monitor * storage [], const __waitfor_mask_t & mask );
    5656
    5757//-----------------------------------------------------------------------------
     
    6868
    6969#define monitor_ctx( mons, cnt )                                /* Define that create the necessary struct for internal/external scheduling operations */ \
    70         monitor_desc ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
     70        $monitor ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
    7171        __lock_size_t count = cnt;                                /* Save the count to a local variable                                                  */ \
    7272        unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
     
    8080//-----------------------------------------------------------------------------
    8181// Enter/Leave routines
    82 
    83 
    84 extern "C" {
    85         // Enter single monitor
    86         static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
    87                 // Lock the monitor spinlock
    88                 lock( this->lock __cfaabi_dbg_ctx2 );
    89                 // Interrupts disable inside critical section
    90                 thread_desc * thrd = kernelTLS.this_thread;
    91 
    92                 __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
    93 
    94                 if( !this->owner ) {
    95                         // No one has the monitor, just take it
    96                         set_owner( this, thrd );
    97 
    98                         __cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
    99                 }
    100                 else if( this->owner == thrd) {
    101                         // We already have the monitor, just note how many times we took it
    102                         this->recursion += 1;
    103 
    104                         __cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
    105                 }
    106                 else if( is_accepted( this, group) ) {
    107                         // Some one was waiting for us, enter
    108                         set_owner( this, thrd );
    109 
    110                         // Reset mask
    111                         reset_mask( this );
    112 
    113                         __cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
    114                 }
    115                 else {
    116                         __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
    117 
    118                         // Some one else has the monitor, wait in line for it
    119                         append( this->entry_queue, thrd );
    120 
    121                         BlockInternal( &this->lock );
    122 
    123                         __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
    124 
    125                         // BlockInternal will unlock spinlock, no need to unlock ourselves
    126                         return;
    127                 }
     82// Enter single monitor
     83static void __enter( $monitor * this, const __monitor_group_t & group ) {
     84        // Lock the monitor spinlock
     85        lock( this->lock __cfaabi_dbg_ctx2 );
      86        // Interrupts disabled inside critical section
     87        $thread * thrd = kernelTLS.this_thread;
     88
     89        __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
     90
     91        if( !this->owner ) {
     92                // No one has the monitor, just take it
     93                __set_owner( this, thrd );
     94
     95                __cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
     96        }
     97        else if( this->owner == thrd) {
     98                // We already have the monitor, just note how many times we took it
     99                this->recursion += 1;
     100
     101                __cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
     102        }
     103        else if( is_accepted( this, group) ) {
     104                // Some one was waiting for us, enter
     105                __set_owner( this, thrd );
     106
     107                // Reset mask
     108                reset_mask( this );
     109
     110                __cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
     111        }
     112        else {
     113                __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
     114
     115                // Some one else has the monitor, wait in line for it
     116                /* paranoid */ verify( thrd->next == 0p );
     117                append( this->entry_queue, thrd );
     118                /* paranoid */ verify( thrd->next == 1p );
     119
     120                unlock( this->lock );
     121                park( __cfaabi_dbg_ctx );
    128122
    129123                __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
    130124
    131                 // Release the lock and leave
     125                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     126                return;
     127        }
     128
     129        __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
     130
     131        /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     132        /* paranoid */ verify( this->lock.lock );
     133
     134        // Release the lock and leave
     135        unlock( this->lock );
     136        return;
     137}
     138
     139static void __dtor_enter( $monitor * this, fptr_t func ) {
     140        // Lock the monitor spinlock
     141        lock( this->lock __cfaabi_dbg_ctx2 );
     142        // Interrupts disable inside critical section
     143        $thread * thrd = kernelTLS.this_thread;
     144
     145        __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
     146
     147
     148        if( !this->owner ) {
     149                __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
     150
     151                // No one has the monitor, just take it
     152                __set_owner( this, thrd );
     153
     154                verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     155
    132156                unlock( this->lock );
    133157                return;
    134158        }
    135 
    136         static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) {
    137                 // Lock the monitor spinlock
    138                 lock( this->lock __cfaabi_dbg_ctx2 );
    139                 // Interrupts disable inside critical section
    140                 thread_desc * thrd = kernelTLS.this_thread;
    141 
    142                 __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
    143 
    144 
    145                 if( !this->owner ) {
    146                         __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
    147 
    148                         // No one has the monitor, just take it
    149                         set_owner( this, thrd );
    150 
    151                         unlock( this->lock );
    152                         return;
     159        else if( this->owner == thrd) {
     160                // We already have the monitor... but we're about to destroy it so the nesting will fail
     161                // Abort!
     162                abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd );
     163        }
     164
     165        __lock_size_t count = 1;
     166        $monitor ** monitors = &this;
     167        __monitor_group_t group = { &this, 1, func };
     168        if( is_accepted( this, group) ) {
     169                __cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
     170
     171                // Wake the thread that is waiting for this
     172                __condition_criterion_t * urgent = pop( this->signal_stack );
     173                /* paranoid */ verify( urgent );
     174
     175                // Reset mask
     176                reset_mask( this );
     177
     178                // Create the node specific to this wait operation
     179                wait_ctx_primed( thrd, 0 )
     180
     181                // Some one else has the monitor, wait for him to finish and then run
     182                unlock( this->lock );
     183
     184                // Release the next thread
     185                /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     186                unpark( urgent->owner->waiting_thread __cfaabi_dbg_ctx2 );
     187
     188                // Park current thread waiting
     189                park( __cfaabi_dbg_ctx );
     190
     191                // Some one was waiting for us, enter
     192                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     193        }
     194        else {
     195                __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
     196
     197                wait_ctx( thrd, 0 )
     198                this->dtor_node = &waiter;
     199
     200                // Some one else has the monitor, wait in line for it
     201                /* paranoid */ verify( thrd->next == 0p );
     202                append( this->entry_queue, thrd );
     203                /* paranoid */ verify( thrd->next == 1p );
     204                unlock( this->lock );
     205
     206                // Park current thread waiting
     207                park( __cfaabi_dbg_ctx );
     208
     209                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     210                return;
     211        }
     212
     213        __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);
     214
     215}
     216
     217// Leave single monitor
     218void __leave( $monitor * this ) {
     219        // Lock the monitor spinlock
     220        lock( this->lock __cfaabi_dbg_ctx2 );
     221
     222        __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
     223
     224        /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     225
     226        // Leaving a recursion level, decrement the counter
     227        this->recursion -= 1;
     228
     229        // If we haven't left the last level of recursion
     230        // it means we don't need to do anything
     231        if( this->recursion != 0) {
     232                __cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
     233                unlock( this->lock );
     234                return;
     235        }
     236
     237        // Get the next thread, will be null on low contention monitor
     238        $thread * new_owner = next_thread( this );
     239
     240        // Check the new owner is consistent with who we wake-up
     241        // new_owner might be null even if someone owns the monitor when the owner is still waiting for another monitor
     242        /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     243
     244        // We can now let other threads in safely
     245        unlock( this->lock );
     246
     247        //We need to wake-up the thread
     248        /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     249        unpark( new_owner __cfaabi_dbg_ctx2 );
     250}
     251
     252// Leave single monitor for the last time
     253void __dtor_leave( $monitor * this ) {
     254        __cfaabi_dbg_debug_do(
     255                if( TL_GET( this_thread ) != this->owner ) {
     256                        abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner);
    153257                }
    154                 else if( this->owner == thrd) {
    155                         // We already have the monitor... but we're about to destroy it so the nesting will fail
    156                         // Abort!
    157                         abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd );
     258                if( this->recursion != 1 ) {
     259                        abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
    158260                }
    159 
    160                 __lock_size_t count = 1;
    161                 monitor_desc ** monitors = &this;
    162                 __monitor_group_t group = { &this, 1, func };
    163                 if( is_accepted( this, group) ) {
    164                         __cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
    165 
    166                         // Wake the thread that is waiting for this
    167                         __condition_criterion_t * urgent = pop( this->signal_stack );
    168                         verify( urgent );
    169 
    170                         // Reset mask
    171                         reset_mask( this );
    172 
    173                         // Create the node specific to this wait operation
    174                         wait_ctx_primed( thrd, 0 )
    175 
    176                         // Some one else has the monitor, wait for him to finish and then run
    177                         BlockInternal( &this->lock, urgent->owner->waiting_thread );
    178 
    179                         // Some one was waiting for us, enter
    180                         set_owner( this, thrd );
    181                 }
    182                 else {
    183                         __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
    184 
    185                         wait_ctx( thrd, 0 )
    186                         this->dtor_node = &waiter;
    187 
    188                         // Some one else has the monitor, wait in line for it
    189                         append( this->entry_queue, thrd );
    190                         BlockInternal( &this->lock );
    191 
    192                         // BlockInternal will unlock spinlock, no need to unlock ourselves
    193                         return;
    194                 }
    195 
    196                 __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);
    197 
    198         }
    199 
    200         // Leave single monitor
    201         void __leave_monitor_desc( monitor_desc * this ) {
    202                 // Lock the monitor spinlock
    203                 lock( this->lock __cfaabi_dbg_ctx2 );
    204 
    205                 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
    206 
    207                 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    208 
    209                 // Leaving a recursion level, decrement the counter
    210                 this->recursion -= 1;
    211 
    212                 // If we haven't left the last level of recursion
    213                 // it means we don't need to do anything
    214                 if( this->recursion != 0) {
    215                         __cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
    216                         unlock( this->lock );
    217                         return;
    218                 }
    219 
    220                 // Get the next thread, will be null on low contention monitor
    221                 thread_desc * new_owner = next_thread( this );
    222 
    223                 // We can now let other threads in safely
    224                 unlock( this->lock );
    225 
    226                 //We need to wake-up the thread
    227                 WakeThread( new_owner );
    228         }
    229 
    230         // Leave single monitor for the last time
    231         void __leave_dtor_monitor_desc( monitor_desc * this ) {
    232                 __cfaabi_dbg_debug_do(
    233                         if( TL_GET( this_thread ) != this->owner ) {
    234                                 abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner);
    235                         }
    236                         if( this->recursion != 1 ) {
    237                                 abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
    238                         }
    239                 )
    240         }
    241 
     261        )
     262}
     263
     264extern "C" {
    242265        // Leave the thread monitor
    243266        // last routine called by a thread.
    244267        // Should never return
    245         void __leave_thread_monitor( thread_desc * thrd ) {
    246                 monitor_desc * this = &thrd->self_mon;
     268        void __cfactx_thrd_leave() {
     269                $thread * thrd = TL_GET( this_thread );
     270                $monitor * this = &thrd->self_mon;
    247271
    248272                // Lock the monitor now
     
    251275                disable_interrupts();
    252276
    253                 thrd->self_cor.state = Halted;
    254 
    255                 verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
     277                thrd->state = Halted;
     278
     279                /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
    256280
    257281                // Leaving a recursion level, decrement the counter
     
    263287
    264288                // Fetch the next thread, can be null
    265                 thread_desc * new_owner = next_thread( this );
    266 
    267                 // Leave the thread, this will unlock the spinlock
    268                 // Use leave thread instead of BlockInternal which is
    269                 // specialized for this case and supports null new_owner
    270                 LeaveThread( &this->lock, new_owner );
     289                $thread * new_owner = next_thread( this );
     290
     291                // Release the monitor lock
     292                unlock( this->lock );
     293
     294                // Unpark the next owner if needed
     295                /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     296                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     297                /* paranoid */ verify( ! kernelTLS.this_processor->destroyer );
     298                /* paranoid */ verify( thrd->state == Halted );
     299
     300                kernelTLS.this_processor->destroyer = new_owner;
     301
     302                // Leave the thread
     303                __leave_thread();
    271304
    272305                // Control flow should never reach here!
     
    278311static inline void enter( __monitor_group_t monitors ) {
    279312        for( __lock_size_t i = 0; i < monitors.size; i++) {
    280                 __enter_monitor_desc( monitors[i], monitors );
     313                __enter( monitors[i], monitors );
    281314        }
    282315}
     
    284317// Leave multiple monitor
    285318// relies on the monitor array being sorted
    286 static inline void leave(monitor_desc * monitors [], __lock_size_t count) {
     319static inline void leave($monitor * monitors [], __lock_size_t count) {
    287320        for( __lock_size_t i = count - 1; i >= 0; i--) {
    288                 __leave_monitor_desc( monitors[i] );
     321                __leave( monitors[i] );
    289322        }
    290323}
     
    292325// Ctor for monitor guard
    293326// Sorts monitors before entering
    294 void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) {
    295         thread_desc * thrd = TL_GET( this_thread );
     327void ?{}( monitor_guard_t & this, $monitor * m [], __lock_size_t count, fptr_t func ) {
     328        $thread * thrd = TL_GET( this_thread );
    296329
    297330        // Store current array
     
    333366// Ctor for monitor guard
    334367// Sorts monitors before entering
    335 void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) {
     368void ?{}( monitor_dtor_guard_t & this, $monitor * m [], fptr_t func ) {
    336369        // optimization
    337         thread_desc * thrd = TL_GET( this_thread );
     370        $thread * thrd = TL_GET( this_thread );
    338371
    339372        // Store current array
     
    346379        (thrd->monitors){m, 1, func};
    347380
    348         __enter_monitor_dtor( this.m, func );
     381        __dtor_enter( this.m, func );
    349382}
    350383
     
    352385void ^?{}( monitor_dtor_guard_t & this ) {
    353386        // Leave the monitors in order
    354         __leave_dtor_monitor_desc( this.m );
     387        __dtor_leave( this.m );
    355388
    356389        // Restore thread context
     
    360393//-----------------------------------------------------------------------------
    361394// Internal scheduling types
    362 void ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
     395void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
    363396        this.waiting_thread = waiting_thread;
    364397        this.count = count;
    365         this.next = NULL;
     398        this.next = 0p;
    366399        this.user_info = user_info;
    367400}
     
    369402void ?{}(__condition_criterion_t & this ) with( this ) {
    370403        ready  = false;
    371         target = NULL;
    372         owner  = NULL;
    373         next   = NULL;
    374 }
    375 
    376 void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t & owner ) {
     404        target = 0p;
     405        owner  = 0p;
     406        next   = 0p;
     407}
     408
     409void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t & owner ) {
    377410        this.ready  = false;
    378411        this.target = target;
    379412        this.owner  = &owner;
    380         this.next   = NULL;
     413        this.next   = 0p;
    381414}
    382415
     
    387420
    388421        // Check that everything is as expected
    389         assertf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );
     422        assertf( this.monitors != 0p, "Waiting with no monitors (%p)", this.monitors );
    390423        verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count );
    391424        verifyf( this.monitor_count < 32u, "Excessive monitor count (%"PRIiFAST16")", this.monitor_count );
     
    399432        // Append the current wait operation to the ones already queued on the condition
    400433        // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
     434        /* paranoid */ verify( waiter.next == 0p );
    401435        append( this.blocked, &waiter );
     436        /* paranoid */ verify( waiter.next == 1p );
    402437
    403438        // Lock all monitors (aggregates the locks as well)
     
    406441        // Find the next thread(s) to run
    407442        __lock_size_t thread_count = 0;
    408         thread_desc * threads[ count ];
     443        $thread * threads[ count ];
    409444        __builtin_memset( threads, 0, sizeof( threads ) );
    410445
     
    414449        // Remove any duplicate threads
    415450        for( __lock_size_t i = 0; i < count; i++) {
    416                 thread_desc * new_owner = next_thread( monitors[i] );
     451                $thread * new_owner = next_thread( monitors[i] );
    417452                insert_unique( threads, thread_count, new_owner );
    418453        }
    419454
     455        // Unlock the locks, we don't need them anymore
     456        for(int i = 0; i < count; i++) {
     457                unlock( *locks[i] );
     458        }
     459
     460        // Wake the threads
     461        for(int i = 0; i < thread_count; i++) {
     462                unpark( threads[i] __cfaabi_dbg_ctx2 );
     463        }
     464
    420465        // Everything is ready to go to sleep
    421         BlockInternal( locks, count, threads, thread_count );
     466        park( __cfaabi_dbg_ctx );
    422467
    423468        // We are back, restore the owners and recursions
     
    434479        //Some more checking in debug
    435480        __cfaabi_dbg_debug_do(
    436                 thread_desc * this_thrd = TL_GET( this_thread );
     481                $thread * this_thrd = TL_GET( this_thread );
    437482                if ( this.monitor_count != this_thrd->monitors.size ) {
    438483                        abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size );
     
    449494
    450495        // Lock all monitors
    451         lock_all( this.monitors, NULL, count );
     496        lock_all( this.monitors, 0p, count );
    452497
    453498        //Pop the head of the waiting queue
     
    471516
    472517        //Check that everything is as expected
    473         verifyf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );
     518        verifyf( this.monitors != 0p, "Waiting with no monitors (%p)", this.monitors );
    474519        verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count );
    475520
     
    488533
    489534        //Find the thread to run
    490         thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
    491         set_owner( monitors, count, signallee );
     535        $thread * signallee = pop_head( this.blocked )->waiting_thread;
     536        __set_owner( monitors, count, signallee );
    492537
    493538        __cfaabi_dbg_print_buffer_decl( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee );
    494539
     540        // unlock all the monitors
     541        unlock_all( locks, count );
     542
     543        // unpark the thread we signalled
     544        unpark( signallee __cfaabi_dbg_ctx2 );
     545
    495546        //Everything is ready to go to sleep
    496         BlockInternal( locks, count, &signallee, 1 );
     547        park( __cfaabi_dbg_ctx );
    497548
    498549
     
    535586        // Create one!
    536587        __lock_size_t max = count_max( mask );
    537         monitor_desc * mon_storage[max];
     588        $monitor * mon_storage[max];
    538589        __builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
    539590        __lock_size_t actual_count = aggregate( mon_storage, mask );
     
    553604        {
    554605                // Check if the entry queue
    555                 thread_desc * next; int index;
     606                $thread * next; int index;
    556607                [next, index] = search_entry_queue( mask, monitors, count );
    557608
     
    563614                                verifyf( accepted.size == 1,  "ERROR: Accepted dtor has more than 1 mutex parameter." );
    564615
    565                                 monitor_desc * mon2dtor = accepted[0];
     616                                $monitor * mon2dtor = accepted[0];
    566617                                verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." );
    567618
     
    589640
    590641                                // Set the owners to be the next thread
    591                                 set_owner( monitors, count, next );
    592 
    593                                 // Everything is ready to go to sleep
    594                                 BlockInternal( locks, count, &next, 1 );
     642                                __set_owner( monitors, count, next );
     643
     644                                // unlock all the monitors
     645                                unlock_all( locks, count );
     646
     647                                // unpark the thread we signalled
     648                                unpark( next __cfaabi_dbg_ctx2 );
     649
     650                                //Everything is ready to go to sleep
     651                                park( __cfaabi_dbg_ctx );
    595652
    596653                                // We are back, restore the owners and recursions
     
    630687        }
    631688
     689        // unlock all the monitors
     690        unlock_all( locks, count );
     691
    632692        //Everything is ready to go to sleep
    633         BlockInternal( locks, count );
     693        park( __cfaabi_dbg_ctx );
    634694
    635695
     
    648708// Utilities
    649709
    650 static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
    651         // __cfaabi_dbg_print_safe( "Kernal :   Setting owner of %p to %p ( was %p)\n", this, owner, this->owner );
     710static inline void __set_owner( $monitor * this, $thread * owner ) {
     711        /* paranoid */ verify( this->lock.lock );
    652712
    653713        //Pass the monitor appropriately
     
    658718}
    659719
    660 static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
    661         monitors[0]->owner     = owner;
    662         monitors[0]->recursion = 1;
     720static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread * owner ) {
     721        /* paranoid */ verify ( monitors[0]->lock.lock );
     722        /* paranoid */ verifyf( monitors[0]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[0]->owner, monitors[0]->recursion, monitors[0] );
     723        monitors[0]->owner        = owner;
     724        monitors[0]->recursion    = 1;
    663725        for( __lock_size_t i = 1; i < count; i++ ) {
    664                 monitors[i]->owner     = owner;
    665                 monitors[i]->recursion = 0;
    666         }
    667 }
    668 
    669 static inline void set_mask( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
     726                /* paranoid */ verify ( monitors[i]->lock.lock );
     727                /* paranoid */ verifyf( monitors[i]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[i]->owner, monitors[i]->recursion, monitors[i] );
     728                monitors[i]->owner        = owner;
     729                monitors[i]->recursion    = 0;
     730        }
     731}
     732
     733static inline void set_mask( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
    670734        for( __lock_size_t i = 0; i < count; i++) {
    671735                storage[i]->mask = mask;
     
    673737}
    674738
    675 static inline void reset_mask( monitor_desc * this ) {
    676         this->mask.accepted = NULL;
    677         this->mask.data = NULL;
     739static inline void reset_mask( $monitor * this ) {
     740        this->mask.accepted = 0p;
     741        this->mask.data = 0p;
    678742        this->mask.size = 0;
    679743}
    680744
    681 static inline thread_desc * next_thread( monitor_desc * this ) {
     745static inline $thread * next_thread( $monitor * this ) {
    682746        //Check the signaller stack
    683747        __cfaabi_dbg_print_safe( "Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
     
    687751                //regardless of if we are ready to baton pass,
    688752                //we need to set the monitor as in use
    689                 set_owner( this,  urgent->owner->waiting_thread );
     753                /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     754                __set_owner( this,  urgent->owner->waiting_thread );
    690755
    691756                return check_condition( urgent );
     
    694759        // No signaller thread
    695760        // Get the next thread in the entry_queue
    696         thread_desc * new_owner = pop_head( this->entry_queue );
    697         set_owner( this, new_owner );
     761        $thread * new_owner = pop_head( this->entry_queue );
     762        /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     763        /* paranoid */ verify( !new_owner || new_owner->next == 0p );
     764        __set_owner( this, new_owner );
    698765
    699766        return new_owner;
    700767}
    701768
    702 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
     769static inline bool is_accepted( $monitor * this, const __monitor_group_t & group ) {
    703770        __acceptable_t * it = this->mask.data; // Optim
    704771        __lock_size_t count = this->mask.size;
     
    722789}
    723790
    724 static inline void init( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
     791static inline void init( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
    725792        for( __lock_size_t i = 0; i < count; i++) {
    726793                (criteria[i]){ monitors[i], waiter };
     
    730797}
    731798
    732 static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
     799static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
    733800        for( __lock_size_t i = 0; i < count; i++) {
    734801                (criteria[i]){ monitors[i], waiter };
     
    746813}
    747814
    748 static inline void lock_all( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
     815static inline void lock_all( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
    749816        for( __lock_size_t i = 0; i < count; i++ ) {
    750817                __spinlock_t * l = &source[i]->lock;
     
    760827}
    761828
    762 static inline void unlock_all( monitor_desc * locks [], __lock_size_t count ) {
     829static inline void unlock_all( $monitor * locks [], __lock_size_t count ) {
    763830        for( __lock_size_t i = 0; i < count; i++ ) {
    764831                unlock( locks[i]->lock );
     
    767834
    768835static inline void save(
    769         monitor_desc * ctx [],
     836        $monitor * ctx [],
    770837        __lock_size_t count,
    771838        __attribute((unused)) __spinlock_t * locks [],
     
    780847
    781848static inline void restore(
    782         monitor_desc * ctx [],
     849        $monitor * ctx [],
    783850        __lock_size_t count,
    784851        __spinlock_t * locks [],
     
    798865// 2 - Checks if all the monitors are ready to run
    799866//     if so return the thread to run
    800 static inline thread_desc * check_condition( __condition_criterion_t * target ) {
     867static inline $thread * check_condition( __condition_criterion_t * target ) {
    801868        __condition_node_t * node = target->owner;
    802869        unsigned short count = node->count;
     
    816883        }
    817884
    818         __cfaabi_dbg_print_safe( "Kernel :  Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : NULL );
    819         return ready2run ? node->waiting_thread : NULL;
     885        __cfaabi_dbg_print_safe( "Kernel :  Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : 0p );
     886        return ready2run ? node->waiting_thread : 0p;
    820887}
    821888
    822889static inline void brand_condition( condition & this ) {
    823         thread_desc * thrd = TL_GET( this_thread );
     890        $thread * thrd = TL_GET( this_thread );
    824891        if( !this.monitors ) {
    825892                // __cfaabi_dbg_print_safe( "Branding\n" );
    826                 assertf( thrd->monitors.data != NULL, "No current monitor to brand condition %p", thrd->monitors.data );
     893                assertf( thrd->monitors.data != 0p, "No current monitor to brand condition %p", thrd->monitors.data );
    827894                this.monitor_count = thrd->monitors.size;
    828895
    829                 this.monitors = (monitor_desc **)malloc( this.monitor_count * sizeof( *this.monitors ) );
     896                this.monitors = ($monitor **)malloc( this.monitor_count * sizeof( *this.monitors ) );
    830897                for( int i = 0; i < this.monitor_count; i++ ) {
    831898                        this.monitors[i] = thrd->monitors[i];
     
    834901}
    835902
    836 static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc * monitors [], __lock_size_t count ) {
    837 
    838         __queue_t(thread_desc) & entry_queue = monitors[0]->entry_queue;
     903static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t & mask, $monitor * monitors [], __lock_size_t count ) {
     904
     905        __queue_t($thread) & entry_queue = monitors[0]->entry_queue;
    839906
    840907        // For each thread in the entry-queue
    841         for(    thread_desc ** thrd_it = &entry_queue.head;
    842                 *thrd_it;
     908        for(    $thread ** thrd_it = &entry_queue.head;
     909                *thrd_it != 1p;
    843910                thrd_it = &(*thrd_it)->next
    844911        ) {
     
    883950}
    884951
    885 static inline __lock_size_t aggregate( monitor_desc * storage [], const __waitfor_mask_t & mask ) {
     952static inline __lock_size_t aggregate( $monitor * storage [], const __waitfor_mask_t & mask ) {
    886953        __lock_size_t size = 0;
    887954        for( __lock_size_t i = 0; i < mask.size; i++ ) {
Note: See TracChangeset for help on using the changeset viewer.