Ignore:
Timestamp:
Jan 7, 2021, 2:55:57 PM (5 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
58fe85a
Parents:
bdfc032 (diff), 44e37ef (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' into dkobets-vector

File:
1 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/monitor.cfa

    rbdfc032 reef8dfb  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // monitor_desc.c --
     7// $monitor.c --
    88//
    99// Author           : Thierry Delisle
     
    2727//-----------------------------------------------------------------------------
    2828// Forward declarations
    29 static inline void set_owner ( monitor_desc * this, thread_desc * owner );
    30 static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
    31 static inline void set_mask  ( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
    32 static inline void reset_mask( monitor_desc * this );
    33 
    34 static inline thread_desc * next_thread( monitor_desc * this );
    35 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );
     29static inline void __set_owner ( $monitor * this, $thread * owner );
     30static inline void __set_owner ( $monitor * storage [], __lock_size_t count, $thread * owner );
     31static inline void set_mask  ( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
     32static inline void reset_mask( $monitor * this );
     33
     34static inline $thread * next_thread( $monitor * this );
     35static inline bool is_accepted( $monitor * this, const __monitor_group_t & monitors );
    3636
    3737static inline void lock_all  ( __spinlock_t * locks [], __lock_size_t count );
    38 static inline void lock_all  ( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
     38static inline void lock_all  ( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
    3939static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count );
    40 static inline void unlock_all( monitor_desc * locks [], __lock_size_t count );
    41 
    42 static inline void save   ( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
    43 static inline void restore( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
    44 
    45 static inline void init     ( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    46 static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    47 
    48 static inline thread_desc *        check_condition   ( __condition_criterion_t * );
     40static inline void unlock_all( $monitor * locks [], __lock_size_t count );
     41
     42static inline void save   ( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
     43static inline void restore( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
     44
     45static inline void init     ( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     46static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     47
     48static inline $thread *        check_condition   ( __condition_criterion_t * );
    4949static inline void                 brand_condition   ( condition & );
    50 static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc * monitors [], __lock_size_t count );
     50static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t &, $monitor * monitors [], __lock_size_t count );
    5151
    5252forall(dtype T | sized( T ))
    5353static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
    5454static inline __lock_size_t count_max    ( const __waitfor_mask_t & mask );
    55 static inline __lock_size_t aggregate    ( monitor_desc * storage [], const __waitfor_mask_t & mask );
     55static inline __lock_size_t aggregate    ( $monitor * storage [], const __waitfor_mask_t & mask );
    5656
    5757//-----------------------------------------------------------------------------
     
    6868
    6969#define monitor_ctx( mons, cnt )                                /* Define that create the necessary struct for internal/external scheduling operations */ \
    70         monitor_desc ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
     70        $monitor ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
    7171        __lock_size_t count = cnt;                                /* Save the count to a local variable                                                  */ \
    7272        unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
     
    8080//-----------------------------------------------------------------------------
    8181// Enter/Leave routines
    82 
    83 
    84 extern "C" {
    85         // Enter single monitor
    86         static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
    87                 // Lock the monitor spinlock
    88                 lock( this->lock __cfaabi_dbg_ctx2 );
    89                 // Interrupts disable inside critical section
    90                 thread_desc * thrd = kernelTLS.this_thread;
    91 
    92                 __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
    93 
    94                 if( !this->owner ) {
    95                         // No one has the monitor, just take it
    96                         set_owner( this, thrd );
    97 
    98                         __cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
    99                 }
    100                 else if( this->owner == thrd) {
    101                         // We already have the monitor, just note how many times we took it
    102                         this->recursion += 1;
    103 
    104                         __cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
    105                 }
    106                 else if( is_accepted( this, group) ) {
    107                         // Some one was waiting for us, enter
    108                         set_owner( this, thrd );
    109 
    110                         // Reset mask
    111                         reset_mask( this );
    112 
    113                         __cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
    114                 }
    115                 else {
    116                         __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
    117 
    118                         // Some one else has the monitor, wait in line for it
    119                         append( this->entry_queue, thrd );
    120 
    121                         BlockInternal( &this->lock );
    122 
    123                         __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
    124 
    125                         // BlockInternal will unlock spinlock, no need to unlock ourselves
    126                         return;
    127                 }
     82// Enter single monitor
     83static void __enter( $monitor * this, const __monitor_group_t & group ) {
     84        $thread * thrd = active_thread();
     85
     86        // Lock the monitor spinlock
     87        lock( this->lock __cfaabi_dbg_ctx2 );
     88
     89        __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
     90
     91        if( unlikely(0 != (0x1 & (uintptr_t)this->owner)) ) {
     92                abort( "Attempt by thread \"%.256s\" (%p) to access joined monitor %p.", thrd->self_cor.name, thrd, this );
     93        }
     94        else if( !this->owner ) {
     95                // No one has the monitor, just take it
     96                __set_owner( this, thrd );
     97
     98                __cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
     99        }
     100        else if( this->owner == thrd) {
     101                // We already have the monitor, just note how many times we took it
     102                this->recursion += 1;
     103
     104                __cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
     105        }
     106        else if( is_accepted( this, group) ) {
     107                // Some one was waiting for us, enter
     108                __set_owner( this, thrd );
     109
     110                // Reset mask
     111                reset_mask( this );
     112
     113                __cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
     114        }
     115        else {
     116                __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
     117
     118                // Some one else has the monitor, wait in line for it
     119                /* paranoid */ verify( thrd->link.next == 0p );
     120                append( this->entry_queue, thrd );
     121                /* paranoid */ verify( thrd->link.next == 1p );
     122
     123                unlock( this->lock );
     124                park();
    128125
    129126                __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
    130127
    131                 // Release the lock and leave
     128                /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
     129                return;
     130        }
     131
     132        __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
     133
     134        /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
     135        /* paranoid */ verify( this->lock.lock );
     136
     137        // Release the lock and leave
     138        unlock( this->lock );
     139        return;
     140}
     141
     142static void __dtor_enter( $monitor * this, fptr_t func, bool join ) {
     143        $thread * thrd = active_thread();
     144        #if defined( __CFA_WITH_VERIFY__ )
     145                bool is_thrd = this == &thrd->self_mon;
     146        #endif
     147
     148        // Lock the monitor spinlock
     149        lock( this->lock __cfaabi_dbg_ctx2 );
     150
     151        __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
     152
     153
     154        if( !this->owner ) {
     155                __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
     156
     157                // No one has the monitor, just take it
     158                __set_owner( this, thrd );
     159
     160                /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
     161                /* paranoid */ verify( !is_thrd || thrd->state == Halted || thrd->state == Cancelled );
     162
    132163                unlock( this->lock );
    133164                return;
    134165        }
    135 
    136         static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) {
    137                 // Lock the monitor spinlock
    138                 lock( this->lock __cfaabi_dbg_ctx2 );
    139                 // Interrupts disable inside critical section
    140                 thread_desc * thrd = kernelTLS.this_thread;
    141 
    142                 __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
    143 
    144 
    145                 if( !this->owner ) {
    146                         __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
    147 
    148                         // No one has the monitor, just take it
    149                         set_owner( this, thrd );
    150 
    151                         unlock( this->lock );
    152                         return;
     166        else if( this->owner == thrd && !join) {
     167                // We already have the monitor... but we're about to destroy it so the nesting will fail
     168                // Abort!
     169                abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd );
     170        }
     171        // SKULLDUGGERY: join will act as a dtor so it would normally trigger the above check
     172        // because join will not release the monitor after it executed.
     173        // to avoid that it sets the owner to the special value thrd | 1p before exiting
     174        else if( this->owner == ($thread*)(1 | (uintptr_t)thrd) ) {
     175                // restore the owner and just return
     176                __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
     177
     178                // No one has the monitor, just take it
     179                __set_owner( this, thrd );
     180
     181                /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
     182                /* paranoid */ verify( !is_thrd || thrd->state == Halted || thrd->state == Cancelled );
     183
     184                unlock( this->lock );
     185                return;
     186        }
     187
     188        // The monitor is busy, if this is a thread and the thread owns itself, it better be active
     189        /* paranoid */ verify( !is_thrd || this->owner != thrd || (thrd->state != Halted && thrd->state != Cancelled) );
     190
     191        __lock_size_t count = 1;
     192        $monitor ** monitors = &this;
     193        __monitor_group_t group = { &this, 1, func };
     194        if( is_accepted( this, group) ) {
     195                __cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
     196
     197                // Wake the thread that is waiting for this
     198                __condition_criterion_t * urgent = pop( this->signal_stack );
     199                /* paranoid */ verify( urgent );
     200
     201                // Reset mask
     202                reset_mask( this );
     203
     204                // Create the node specific to this wait operation
     205                wait_ctx_primed( thrd, 0 )
     206
     207                // Some one else has the monitor, wait for him to finish and then run
     208                unlock( this->lock );
     209
     210                // Release the next thread
     211                /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
     212                unpark( urgent->owner->waiting_thread );
     213
     214                // Park current thread waiting
     215                park();
     216
     217                // Some one was waiting for us, enter
     218                /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
     219
     220                __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);
     221                return;
     222        }
     223        else {
     224                __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
     225
     226                wait_ctx( thrd, 0 )
     227                this->dtor_node = &waiter;
     228
     229                // Some one else has the monitor, wait in line for it
     230                /* paranoid */ verify( thrd->link.next == 0p );
     231                append( this->entry_queue, thrd );
     232                /* paranoid */ verify( thrd->link.next == 1p );
     233                unlock( this->lock );
     234
     235                // Park current thread waiting
     236                park();
     237
     238                /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
     239                return;
     240        }
     241}
     242
     243// Leave single monitor
     244void __leave( $monitor * this ) {
     245        // Lock the monitor spinlock
     246        lock( this->lock __cfaabi_dbg_ctx2 );
     247
     248        __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", active_thread(), this, this->owner);
     249
     250        /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
     251
     252        // Leaving a recursion level, decrement the counter
     253        this->recursion -= 1;
     254
     255        // If we haven't left the last level of recursion
     256        // it means we don't need to do anything
     257        if( this->recursion != 0) {
     258                __cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
     259                unlock( this->lock );
     260                return;
     261        }
     262
     263        // Get the next thread, will be null on low contention monitor
     264        $thread * new_owner = next_thread( this );
     265
     266        // Check the new owner is consistent with who we wake-up
     267        // new_owner might be null even if someone owns the monitor when the owner is still waiting for another monitor
     268        /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     269
     270        // We can now let other threads in safely
     271        unlock( this->lock );
     272
     273        //We need to wake-up the thread
     274        /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     275        unpark( new_owner );
     276}
     277
     278// Leave single monitor for the last time
     279void __dtor_leave( $monitor * this, bool join ) {
     280        __cfaabi_dbg_debug_do(
     281                if( active_thread() != this->owner ) {
     282                        abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, active_thread(), this->owner);
    153283                }
    154                 else if( this->owner == thrd) {
    155                         // We already have the monitor... but we're about to destroy it so the nesting will fail
    156                         // Abort!
    157                         abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd );
     284                if( this->recursion != 1  && !join ) {
     285                        abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
    158286                }
    159 
    160                 __lock_size_t count = 1;
    161                 monitor_desc ** monitors = &this;
    162                 __monitor_group_t group = { &this, 1, func };
    163                 if( is_accepted( this, group) ) {
    164                         __cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
    165 
    166                         // Wake the thread that is waiting for this
    167                         __condition_criterion_t * urgent = pop( this->signal_stack );
    168                         verify( urgent );
    169 
    170                         // Reset mask
    171                         reset_mask( this );
    172 
    173                         // Create the node specific to this wait operation
    174                         wait_ctx_primed( thrd, 0 )
    175 
    176                         // Some one else has the monitor, wait for him to finish and then run
    177                         BlockInternal( &this->lock, urgent->owner->waiting_thread );
    178 
    179                         // Some one was waiting for us, enter
    180                         set_owner( this, thrd );
    181                 }
    182                 else {
    183                         __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
    184 
    185                         wait_ctx( thrd, 0 )
    186                         this->dtor_node = &waiter;
    187 
    188                         // Some one else has the monitor, wait in line for it
    189                         append( this->entry_queue, thrd );
    190                         BlockInternal( &this->lock );
    191 
    192                         // BlockInternal will unlock spinlock, no need to unlock ourselves
    193                         return;
    194                 }
    195 
    196                 __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);
    197 
    198         }
    199 
    200         // Leave single monitor
    201         void __leave_monitor_desc( monitor_desc * this ) {
    202                 // Lock the monitor spinlock
    203                 lock( this->lock __cfaabi_dbg_ctx2 );
    204 
    205                 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
    206 
    207                 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    208 
    209                 // Leaving a recursion level, decrement the counter
    210                 this->recursion -= 1;
    211 
    212                 // If we haven't left the last level of recursion
    213                 // it means we don't need to do anything
    214                 if( this->recursion != 0) {
    215                         __cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
    216                         unlock( this->lock );
    217                         return;
    218                 }
    219 
    220                 // Get the next thread, will be null on low contention monitor
    221                 thread_desc * new_owner = next_thread( this );
    222 
    223                 // We can now let other threads in safely
    224                 unlock( this->lock );
    225 
    226                 //We need to wake-up the thread
    227                 WakeThread( new_owner );
    228         }
    229 
    230         // Leave single monitor for the last time
    231         void __leave_dtor_monitor_desc( monitor_desc * this ) {
    232                 __cfaabi_dbg_debug_do(
    233                         if( TL_GET( this_thread ) != this->owner ) {
    234                                 abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner);
    235                         }
    236                         if( this->recursion != 1 ) {
    237                                 abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
    238                         }
    239                 )
    240         }
    241 
    242         // Leave the thread monitor
    243         // last routine called by a thread.
    244         // Should never return
    245         void __leave_thread_monitor() {
    246                 thread_desc * thrd = TL_GET( this_thread );
    247                 monitor_desc * this = &thrd->self_mon;
    248 
    249                 // Lock the monitor now
    250                 lock( this->lock __cfaabi_dbg_ctx2 );
    251 
    252                 disable_interrupts();
    253 
    254                 thrd->self_cor.state = Halted;
    255 
    256                 verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
    257 
    258                 // Leaving a recursion level, decrement the counter
    259                 this->recursion -= 1;
    260 
    261                 // If we haven't left the last level of recursion
    262                 // it must mean there is an error
    263                 if( this->recursion != 0) { abort( "Thread internal monitor has unbalanced recursion" ); }
    264 
    265                 // Fetch the next thread, can be null
    266                 thread_desc * new_owner = next_thread( this );
    267 
    268                 // Leave the thread, this will unlock the spinlock
    269                 // Use leave thread instead of BlockInternal which is
    270                 // specialized for this case and supports null new_owner
    271                 LeaveThread( &this->lock, new_owner );
    272 
    273                 // Control flow should never reach here!
    274         }
     287        )
     288
     289        this->owner = ($thread*)(1 | (uintptr_t)this->owner);
     290}
     291
     292void __thread_finish( $thread * thrd ) {
     293        $monitor * this = &thrd->self_mon;
     294
     295        // Lock the monitor now
     296        /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );
     297        /* paranoid */ verify( this->lock.lock );
     298        /* paranoid */ verify( thrd->context.SP );
     299        /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : $thread %p has been corrupted.\n StackPointer too large.\n", thrd );
     300        /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : $thread %p has been corrupted.\n StackPointer too small.\n", thrd );
     301        /* paranoid */ verify( ! __preemption_enabled() );
     302
     303        /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
     304        /* paranoid */ verify( thrd->state == Halting );
     305        /* paranoid */ verify( this->recursion == 1 );
     306
     307        // Leaving a recursion level, decrement the counter
     308        this->recursion -= 1;
     309        this->owner = 0p;
     310
     311        // Fetch the next thread, can be null
     312        $thread * new_owner = next_thread( this );
     313
     314        // Mark the state as fully halted
     315        thrd->state = Halted;
     316
     317        // Release the monitor lock
     318        unlock( this->lock );
     319
     320        // Unpark the next owner if needed
     321        /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     322        /* paranoid */ verify( ! __preemption_enabled() );
     323        /* paranoid */ verify( thrd->state == Halted );
     324        unpark( new_owner );
    275325}
    276326
     
    279329static inline void enter( __monitor_group_t monitors ) {
    280330        for( __lock_size_t i = 0; i < monitors.size; i++) {
    281                 __enter_monitor_desc( monitors[i], monitors );
     331                __enter( monitors[i], monitors );
    282332        }
    283333}
     
    285335// Leave multiple monitor
    286336// relies on the monitor array being sorted
    287 static inline void leave(monitor_desc * monitors [], __lock_size_t count) {
     337static inline void leave($monitor * monitors [], __lock_size_t count) {
    288338        for( __lock_size_t i = count - 1; i >= 0; i--) {
    289                 __leave_monitor_desc( monitors[i] );
     339                __leave( monitors[i] );
    290340        }
    291341}
     
    293343// Ctor for monitor guard
    294344// Sorts monitors before entering
    295 void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) {
    296         thread_desc * thrd = TL_GET( this_thread );
     345void ?{}( monitor_guard_t & this, $monitor * m [], __lock_size_t count, fptr_t func ) {
     346        $thread * thrd = active_thread();
    297347
    298348        // Store current array
     
    329379
    330380        // Restore thread context
    331         TL_GET( this_thread )->monitors = this.prev;
     381        active_thread()->monitors = this.prev;
    332382}
    333383
    334384// Ctor for monitor guard
    335385// Sorts monitors before entering
    336 void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) {
     386void ?{}( monitor_dtor_guard_t & this, $monitor * m [], fptr_t func, bool join ) {
    337387        // optimization
    338         thread_desc * thrd = TL_GET( this_thread );
     388        $thread * thrd = active_thread();
    339389
    340390        // Store current array
     
    344394        this.prev = thrd->monitors;
    345395
     396        // Save whether we are in a join or not
     397        this.join = join;
     398
    346399        // Update thread context (needed for conditions)
    347400        (thrd->monitors){m, 1, func};
    348401
    349         __enter_monitor_dtor( this.m, func );
     402        __dtor_enter( this.m, func, join );
    350403}
    351404
     
    353406void ^?{}( monitor_dtor_guard_t & this ) {
    354407        // Leave the monitors in order
    355         __leave_dtor_monitor_desc( this.m );
     408        __dtor_leave( this.m, this.join );
    356409
    357410        // Restore thread context
    358         TL_GET( this_thread )->monitors = this.prev;
     411        active_thread()->monitors = this.prev;
    359412}
    360413
    361414//-----------------------------------------------------------------------------
    362415// Internal scheduling types
    363 void ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
     416void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
    364417        this.waiting_thread = waiting_thread;
    365418        this.count = count;
     
    375428}
    376429
    377 void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t & owner ) {
     430void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t & owner ) {
    378431        this.ready  = false;
    379432        this.target = target;
     
    396449
    397450        // Create the node specific to this wait operation
    398         wait_ctx( TL_GET( this_thread ), user_info );
     451        wait_ctx( active_thread(), user_info );
    399452
    400453        // Append the current wait operation to the ones already queued on the condition
    401454        // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
     455        /* paranoid */ verify( waiter.next == 0p );
    402456        append( this.blocked, &waiter );
     457        /* paranoid */ verify( waiter.next == 1p );
    403458
    404459        // Lock all monitors (aggregates the locks as well)
     
    407462        // Find the next thread(s) to run
    408463        __lock_size_t thread_count = 0;
    409         thread_desc * threads[ count ];
     464        $thread * threads[ count ];
    410465        __builtin_memset( threads, 0, sizeof( threads ) );
    411466
     
    415470        // Remove any duplicate threads
    416471        for( __lock_size_t i = 0; i < count; i++) {
    417                 thread_desc * new_owner = next_thread( monitors[i] );
     472                $thread * new_owner = next_thread( monitors[i] );
    418473                insert_unique( threads, thread_count, new_owner );
    419474        }
    420475
     476        // Unlock the locks, we don't need them anymore
     477        for(int i = 0; i < count; i++) {
     478                unlock( *locks[i] );
     479        }
     480
     481        // Wake the threads
     482        for(int i = 0; i < thread_count; i++) {
     483                unpark( threads[i] );
     484        }
     485
    421486        // Everything is ready to go to sleep
    422         BlockInternal( locks, count, threads, thread_count );
     487        park();
    423488
    424489        // We are back, restore the owners and recursions
     
    435500        //Some more checking in debug
    436501        __cfaabi_dbg_debug_do(
    437                 thread_desc * this_thrd = TL_GET( this_thread );
     502                $thread * this_thrd = active_thread();
    438503                if ( this.monitor_count != this_thrd->monitors.size ) {
    439504                        abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size );
     
    483548
    484549        // Create the node specific to this wait operation
    485         wait_ctx_primed( kernelTLS.this_thread, 0 )
     550        wait_ctx_primed( active_thread(), 0 )
    486551
    487552        //save contexts
     
    489554
    490555        //Find the thread to run
    491         thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
    492         set_owner( monitors, count, signallee );
     556        $thread * signallee = pop_head( this.blocked )->waiting_thread;
     557        __set_owner( monitors, count, signallee );
    493558
    494559        __cfaabi_dbg_print_buffer_decl( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee );
    495560
     561        // unlock all the monitors
     562        unlock_all( locks, count );
     563
     564        // unpark the thread we signalled
     565        unpark( signallee );
     566
    496567        //Everything is ready to go to sleep
    497         BlockInternal( locks, count, &signallee, 1 );
     568        park();
    498569
    499570
     
    536607        // Create one!
    537608        __lock_size_t max = count_max( mask );
    538         monitor_desc * mon_storage[max];
     609        $monitor * mon_storage[max];
    539610        __builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
    540611        __lock_size_t actual_count = aggregate( mon_storage, mask );
     
    554625        {
    555626                // Check if the entry queue
    556                 thread_desc * next; int index;
     627                $thread * next; int index;
    557628                [next, index] = search_entry_queue( mask, monitors, count );
    558629
     
    564635                                verifyf( accepted.size == 1,  "ERROR: Accepted dtor has more than 1 mutex parameter." );
    565636
    566                                 monitor_desc * mon2dtor = accepted[0];
     637                                $monitor * mon2dtor = accepted[0];
    567638                                verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." );
    568639
     
    576647
    577648                                // Create the node specific to this wait operation
    578                                 wait_ctx_primed( kernelTLS.this_thread, 0 );
     649                                wait_ctx_primed( active_thread(), 0 );
    579650
    580651                                // Save monitor states
     
    590661
    591662                                // Set the owners to be the next thread
    592                                 set_owner( monitors, count, next );
    593 
    594                                 // Everything is ready to go to sleep
    595                                 BlockInternal( locks, count, &next, 1 );
     663                                __set_owner( monitors, count, next );
     664
     665                                // unlock all the monitors
     666                                unlock_all( locks, count );
     667
     668                                // unpark the thread we signalled
     669                                unpark( next );
     670
     671                                //Everything is ready to go to sleep
     672                                park();
    596673
    597674                                // We are back, restore the owners and recursions
     
    622699
    623700        // Create the node specific to this wait operation
    624         wait_ctx_primed( kernelTLS.this_thread, 0 );
     701        wait_ctx_primed( active_thread(), 0 );
    625702
    626703        monitor_save;
     
    628705
    629706        for( __lock_size_t i = 0; i < count; i++) {
    630                 verify( monitors[i]->owner == kernelTLS.this_thread );
    631         }
     707                verify( monitors[i]->owner == active_thread() );
     708        }
     709
     710        // unlock all the monitors
     711        unlock_all( locks, count );
    632712
    633713        //Everything is ready to go to sleep
    634         BlockInternal( locks, count );
     714        park();
    635715
    636716
     
    649729// Utilities
    650730
    651 static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
    652         // __cfaabi_dbg_print_safe( "Kernal :   Setting owner of %p to %p ( was %p)\n", this, owner, this->owner );
     731static inline void __set_owner( $monitor * this, $thread * owner ) {
     732        /* paranoid */ verify( this->lock.lock );
    653733
    654734        //Pass the monitor appropriately
     
    659739}
    660740
    661 static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
    662         monitors[0]->owner     = owner;
    663         monitors[0]->recursion = 1;
     741static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread * owner ) {
     742        /* paranoid */ verify ( monitors[0]->lock.lock );
     743        /* paranoid */ verifyf( monitors[0]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[0]->owner, monitors[0]->recursion, monitors[0] );
     744        monitors[0]->owner        = owner;
     745        monitors[0]->recursion    = 1;
    664746        for( __lock_size_t i = 1; i < count; i++ ) {
    665                 monitors[i]->owner     = owner;
    666                 monitors[i]->recursion = 0;
    667         }
    668 }
    669 
    670 static inline void set_mask( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
     747                /* paranoid */ verify ( monitors[i]->lock.lock );
     748                /* paranoid */ verifyf( monitors[i]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[i]->owner, monitors[i]->recursion, monitors[i] );
     749                monitors[i]->owner        = owner;
     750                monitors[i]->recursion    = 0;
     751        }
     752}
     753
     754static inline void set_mask( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
    671755        for( __lock_size_t i = 0; i < count; i++) {
    672756                storage[i]->mask = mask;
     
    674758}
    675759
    676 static inline void reset_mask( monitor_desc * this ) {
     760static inline void reset_mask( $monitor * this ) {
    677761        this->mask.accepted = 0p;
    678762        this->mask.data = 0p;
     
    680764}
    681765
    682 static inline thread_desc * next_thread( monitor_desc * this ) {
     766static inline $thread * next_thread( $monitor * this ) {
    683767        //Check the signaller stack
    684768        __cfaabi_dbg_print_safe( "Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
     
    688772                //regardless of if we are ready to baton pass,
    689773                //we need to set the monitor as in use
    690                 set_owner( this,  urgent->owner->waiting_thread );
     774                /* paranoid */ verifyf( !this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
     775                __set_owner( this,  urgent->owner->waiting_thread );
    691776
    692777                return check_condition( urgent );
     
    695780        // No signaller thread
    696781        // Get the next thread in the entry_queue
    697         thread_desc * new_owner = pop_head( this->entry_queue );
    698         set_owner( this, new_owner );
     782        $thread * new_owner = pop_head( this->entry_queue );
     783        /* paranoid */ verifyf( !this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
     784        /* paranoid */ verify( !new_owner || new_owner->link.next == 0p );
     785        __set_owner( this, new_owner );
    699786
    700787        return new_owner;
    701788}
    702789
    703 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
     790static inline bool is_accepted( $monitor * this, const __monitor_group_t & group ) {
    704791        __acceptable_t * it = this->mask.data; // Optim
    705792        __lock_size_t count = this->mask.size;
     
    723810}
    724811
    725 static inline void init( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
     812static inline void init( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
    726813        for( __lock_size_t i = 0; i < count; i++) {
    727814                (criteria[i]){ monitors[i], waiter };
     
    731818}
    732819
    733 static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
     820static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
    734821        for( __lock_size_t i = 0; i < count; i++) {
    735822                (criteria[i]){ monitors[i], waiter };
     
    747834}
    748835
    749 static inline void lock_all( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
     836static inline void lock_all( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
    750837        for( __lock_size_t i = 0; i < count; i++ ) {
    751838                __spinlock_t * l = &source[i]->lock;
     
    761848}
    762849
    763 static inline void unlock_all( monitor_desc * locks [], __lock_size_t count ) {
     850static inline void unlock_all( $monitor * locks [], __lock_size_t count ) {
    764851        for( __lock_size_t i = 0; i < count; i++ ) {
    765852                unlock( locks[i]->lock );
     
    768855
    769856static inline void save(
    770         monitor_desc * ctx [],
     857        $monitor * ctx [],
    771858        __lock_size_t count,
    772859        __attribute((unused)) __spinlock_t * locks [],
     
    781868
    782869static inline void restore(
    783         monitor_desc * ctx [],
     870        $monitor * ctx [],
    784871        __lock_size_t count,
    785872        __spinlock_t * locks [],
     
    799886// 2 - Checks if all the monitors are ready to run
    800887//     if so return the thread to run
    801 static inline thread_desc * check_condition( __condition_criterion_t * target ) {
     888static inline $thread * check_condition( __condition_criterion_t * target ) {
    802889        __condition_node_t * node = target->owner;
    803890        unsigned short count = node->count;
     
    817904        }
    818905
    819         __cfaabi_dbg_print_safe( "Kernel :  Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : 0p );
     906        __cfaabi_dbg_print_safe( "Kernel :  Runing %i (%p)\n", ready2run, ready2run ? (thread*)node->waiting_thread : (thread*)0p );
    820907        return ready2run ? node->waiting_thread : 0p;
    821908}
    822909
    823910static inline void brand_condition( condition & this ) {
    824         thread_desc * thrd = TL_GET( this_thread );
     911        $thread * thrd = active_thread();
    825912        if( !this.monitors ) {
    826913                // __cfaabi_dbg_print_safe( "Branding\n" );
     
    828915                this.monitor_count = thrd->monitors.size;
    829916
    830                 this.monitors = (monitor_desc **)malloc( this.monitor_count * sizeof( *this.monitors ) );
     917                this.monitors = ($monitor **)malloc( this.monitor_count * sizeof( *this.monitors ) );
    831918                for( int i = 0; i < this.monitor_count; i++ ) {
    832919                        this.monitors[i] = thrd->monitors[i];
     
    835922}
    836923
    837 static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc * monitors [], __lock_size_t count ) {
    838 
    839         __queue_t(thread_desc) & entry_queue = monitors[0]->entry_queue;
     924static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t & mask, $monitor * monitors [], __lock_size_t count ) {
     925
     926        __queue_t($thread) & entry_queue = monitors[0]->entry_queue;
    840927
    841928        // For each thread in the entry-queue
    842         for(    thread_desc ** thrd_it = &entry_queue.head;
    843                 *thrd_it;
    844                 thrd_it = &(*thrd_it)->next
     929        for(    $thread ** thrd_it = &entry_queue.head;
     930                (*thrd_it) != 1p;
     931                thrd_it = &(*thrd_it)->link.next
    845932        ) {
    846933                // For each acceptable check if it matches
     
    884971}
    885972
    886 static inline __lock_size_t aggregate( monitor_desc * storage [], const __waitfor_mask_t & mask ) {
     973static inline __lock_size_t aggregate( $monitor * storage [], const __waitfor_mask_t & mask ) {
    887974        __lock_size_t size = 0;
    888975        for( __lock_size_t i = 0; i < mask.size; i++ ) {
Note: See TracChangeset for help on using the changeset viewer.