Timestamp: Feb 27, 2020, 4:04:25 PM
Author: Thierry Delisle <tdelisle@…>
Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children: a037f85
Parents: 41efd33, 930b504
Note: this is a merge changeset; the changes displayed below correspond to the merge itself, not to either parent individually.
Message: Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

File: 1 edited

  • libcfa/src/concurrency/monitor.cfa

    Diff: r41efd33 → r04e6f93
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // monitor_desc.c --
     7// $monitor.c --
    88//
    99// Author           : Thierry Delisle
     
    2727//-----------------------------------------------------------------------------
    2828// Forward declarations
    29 static inline void set_owner ( monitor_desc * this, thread_desc * owner );
    30 static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
    31 static inline void set_mask  ( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
    32 static inline void reset_mask( monitor_desc * this );
    33 
    34 static inline thread_desc * next_thread( monitor_desc * this );
    35 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );
     29static inline void __set_owner ( $monitor * this, $thread * owner );
     30static inline void __set_owner ( $monitor * storage [], __lock_size_t count, $thread * owner );
     31static inline void set_mask  ( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
     32static inline void reset_mask( $monitor * this );
     33
     34static inline $thread * next_thread( $monitor * this );
     35static inline bool is_accepted( $monitor * this, const __monitor_group_t & monitors );
    3636
    3737static inline void lock_all  ( __spinlock_t * locks [], __lock_size_t count );
    38 static inline void lock_all  ( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
     38static inline void lock_all  ( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
    3939static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count );
    40 static inline void unlock_all( monitor_desc * locks [], __lock_size_t count );
    41 
    42 static inline void save   ( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
    43 static inline void restore( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
    44 
    45 static inline void init     ( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    46 static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    47 
    48 static inline thread_desc *        check_condition   ( __condition_criterion_t * );
     40static inline void unlock_all( $monitor * locks [], __lock_size_t count );
     41
     42static inline void save   ( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
     43static inline void restore( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
     44
     45static inline void init     ( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     46static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     47
     48static inline $thread *        check_condition   ( __condition_criterion_t * );
    4949static inline void                 brand_condition   ( condition & );
    50 static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc * monitors [], __lock_size_t count );
     50static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t &, $monitor * monitors [], __lock_size_t count );
    5151
    5252forall(dtype T | sized( T ))
    5353static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
    5454static inline __lock_size_t count_max    ( const __waitfor_mask_t & mask );
    55 static inline __lock_size_t aggregate    ( monitor_desc * storage [], const __waitfor_mask_t & mask );
     55static inline __lock_size_t aggregate    ( $monitor * storage [], const __waitfor_mask_t & mask );
    5656
    5757//-----------------------------------------------------------------------------
     
    6868
    6969#define monitor_ctx( mons, cnt )                                /* Defines the structures necessary for internal/external scheduling operations */ \
    70         monitor_desc ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
     70        $monitor ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
    7171        __lock_size_t count = cnt;                                /* Save the count to a local variable                                                  */ \
    7272        unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
     
    8080//-----------------------------------------------------------------------------
    8181// Enter/Leave routines
    82 
    83 
    84 extern "C" {
    85         // Enter single monitor
    86         static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
    87                 // Lock the monitor spinlock
    88                 lock( this->lock __cfaabi_dbg_ctx2 );
    89                 // Interrupts are disabled inside the critical section
    90                 thread_desc * thrd = kernelTLS.this_thread;
    91 
    92                 __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
    93 
    94                 if( !this->owner ) {
    95                         // No one has the monitor, just take it
    96                         set_owner( this, thrd );
    97 
    98                         __cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
    99                 }
    100                 else if( this->owner == thrd) {
    101                         // We already have the monitor, just note how many times we took it
    102                         this->recursion += 1;
    103 
    104                         __cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
    105                 }
    106                 else if( is_accepted( this, group) ) {
    107                         // Some one was waiting for us, enter
    108                         set_owner( this, thrd );
    109 
    110                         // Reset mask
    111                         reset_mask( this );
    112 
    113                         __cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
    114                 }
    115                 else {
    116                         __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
    117 
    118                         // Some one else has the monitor, wait in line for it
    119                         append( this->entry_queue, thrd );
    120 
    121                         BlockInternal( &this->lock );
    122 
    123                         __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
    124 
    125                         // BlockInternal will unlock spinlock, no need to unlock ourselves
    126                         return;
    127                 }
     82// Enter single monitor
     83static void __enter( $monitor * this, const __monitor_group_t & group ) {
     84        // Lock the monitor spinlock
     85        lock( this->lock __cfaabi_dbg_ctx2 );
     86        // Interrupts are disabled inside the critical section
     87        $thread * thrd = kernelTLS.this_thread;
     88
     89        __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
     90
     91        if( !this->owner ) {
     92                // No one has the monitor, just take it
     93                __set_owner( this, thrd );
     94
     95                __cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
     96        }
     97        else if( this->owner == thrd) {
     98                // We already have the monitor, just note how many times we took it
     99                this->recursion += 1;
     100
     101                __cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
     102        }
     103        else if( is_accepted( this, group) ) {
     104                // Some one was waiting for us, enter
     105                __set_owner( this, thrd );
     106
     107                // Reset mask
     108                reset_mask( this );
     109
     110                __cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
     111        }
     112        else {
     113                __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
     114
     115                // Some one else has the monitor, wait in line for it
     116                /* paranoid */ verify( thrd->next == 0p );
     117                append( this->entry_queue, thrd );
     118                /* paranoid */ verify( thrd->next == 1p );
     119
     120                unlock( this->lock );
     121                park();
    128122
    129123                __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
    130124
    131                 // Release the lock and leave
     125                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     126                return;
     127        }
     128
     129        __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
     130
     131        /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     132        /* paranoid */ verify( this->lock.lock );
     133
     134        // Release the lock and leave
     135        unlock( this->lock );
     136        return;
     137}
     138
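The main behavioural change in this hunk is that the old __enter_monitor_desc blocked through BlockInternal( &this->lock ), while the new __enter releases the spinlock explicitly and then calls park(); the added verifyf shows that a thread returning from park() is expected to already own the monitor, i.e. ownership is handed over by whichever thread wakes it. Below is a minimal C sketch of that unlock-then-park pattern, using one POSIX semaphore per thread in place of the CFA runtime's park/unpark; all toy_* names are hypothetical and the entry queue itself is elided.

    /* Illustrative sketch only -- not CFA's runtime.  Models the unlock-then-park
     * entry protocol with one POSIX semaphore per thread.  All toy_* names are
     * hypothetical; error handling and the entry queue are elided. */
    #include <pthread.h>
    #include <semaphore.h>
    #include <stddef.h>

    typedef struct toy_thread {
            sem_t parked;                       /* 0 = parked, 1 = unpark pending */
    } toy_thread;

    static void toy_thread_init( toy_thread * t ) { sem_init( &t->parked, 0, 0 ); }
    static void toy_park  ( toy_thread * self ) { sem_wait( &self->parked ); }
    static void toy_unpark( toy_thread * thrd ) { if( thrd ) sem_post( &thrd->parked ); }

    typedef struct toy_monitor {
            pthread_mutex_t lock;               /* stands in for __spinlock_t     */
            toy_thread *    owner;
            unsigned        recursion;
            /* entry queue elided */
    } toy_monitor;

    /* Mirrors __enter: take the lock; if the monitor is free or already ours,
     * claim it and return; otherwise enqueue (elided), drop the lock and park.
     * The previous owner sets `owner = thrd` *before* unparking, so returning
     * from toy_park() implies ownership -- the property the new verifyf asserts. */
    static void toy_enter( toy_monitor * this, toy_thread * thrd ) {
            pthread_mutex_lock( &this->lock );
            if( this->owner == NULL ) {
                    this->owner = thrd; this->recursion = 1;
                    pthread_mutex_unlock( &this->lock );
                    return;
            }
            if( this->owner == thrd ) {
                    this->recursion += 1;
                    pthread_mutex_unlock( &this->lock );
                    return;
            }
            /* ... append thrd to this->entry_queue here ... */
            pthread_mutex_unlock( &this->lock );
            toy_park( thrd );                   /* woken only once we own the monitor */
    }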
     139static void __dtor_enter( $monitor * this, fptr_t func ) {
     140        // Lock the monitor spinlock
     141        lock( this->lock __cfaabi_dbg_ctx2 );
     142        // Interrupts are disabled inside the critical section
     143        $thread * thrd = kernelTLS.this_thread;
     144
     145        __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
     146
     147
     148        if( !this->owner ) {
     149                __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
     150
     151                // No one has the monitor, just take it
     152                __set_owner( this, thrd );
     153
     154                verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     155
    132156                unlock( this->lock );
    133157                return;
    134158        }
    135 
    136         static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) {
    137                 // Lock the monitor spinlock
    138                 lock( this->lock __cfaabi_dbg_ctx2 );
    139                 // Interrupts are disabled inside the critical section
    140                 thread_desc * thrd = kernelTLS.this_thread;
    141 
    142                 __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
    143 
    144 
    145                 if( !this->owner ) {
    146                         __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
    147 
    148                         // No one has the monitor, just take it
    149                         set_owner( this, thrd );
    150 
    151                         unlock( this->lock );
    152                         return;
     159        else if( this->owner == thrd) {
     160                // We already have the monitor... but we're about to destroy it, so the nesting will fail
     161                // Abort!
     162                abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd );
     163        }
     164
     165        __lock_size_t count = 1;
     166        $monitor ** monitors = &this;
     167        __monitor_group_t group = { &this, 1, func };
     168        if( is_accepted( this, group) ) {
     169                __cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
     170
     171                // Wake the thread that is waiting for this
     172                __condition_criterion_t * urgent = pop( this->signal_stack );
     173                /* paranoid */ verify( urgent );
     174
     175                // Reset mask
     176                reset_mask( this );
     177
     178                // Create the node specific to this wait operation
     179                wait_ctx_primed( thrd, 0 )
     180
     181                // Some one else has the monitor, wait for him to finish and then run
     182                unlock( this->lock );
     183
     184                // Release the next thread
     185                /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     186                unpark( urgent->owner->waiting_thread );
     187
     188                // Park current thread waiting
     189                park();
     190
     191                // Some one was waiting for us, enter
     192                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     193        }
     194        else {
     195                __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
     196
     197                wait_ctx( thrd, 0 )
     198                this->dtor_node = &waiter;
     199
     200                // Some one else has the monitor, wait in line for it
     201                /* paranoid */ verify( thrd->next == 0p );
     202                append( this->entry_queue, thrd );
     203                /* paranoid */ verify( thrd->next == 1p );
     204                unlock( this->lock );
     205
     206                // Park current thread waiting
     207                park();
     208
     209                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     210                return;
     211        }
     212
     213        __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);
     214
     215}
     216
     217// Leave single monitor
     218void __leave( $monitor * this ) {
     219        // Lock the monitor spinlock
     220        lock( this->lock __cfaabi_dbg_ctx2 );
     221
     222        __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
     223
     224        /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     225
     226        // Leaving a recursion level, decrement the counter
     227        this->recursion -= 1;
     228
     229        // If we haven't left the last level of recursion
     230        // it means we don't need to do anything
     231        if( this->recursion != 0) {
     232                __cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
     233                unlock( this->lock );
     234                return;
     235        }
     236
     237        // Get the next thread, will be null on low contention monitor
     238        $thread * new_owner = next_thread( this );
     239
     240        // Check that the new owner is consistent with whom we wake up
     241        // new_owner might be null even when someone owns the monitor, if that owner is still waiting for another monitor
     242        /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     243
     244        // We can now let other threads in safely
     245        unlock( this->lock );
     246
     247        //We need to wake-up the thread
     248        /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     249        unpark( new_owner );
     250}
     251
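On the leave side, the old code unlocked and then called WakeThread( new_owner ); the new __leave picks the successor with next_thread (which marks it as owner while the monitor lock is still held), releases the lock, and only then calls unpark( new_owner ), which therefore has to tolerate a null successor on an uncontended monitor. A small C sketch of this baton pass, under the same toy semaphore model as above (toy_* names are hypothetical, the queue pop is elided):

    /* Illustrative sketch only.  The successor is made owner while the monitor
     * lock is still held, the lock is released, and only then is the successor
     * unparked; unpark must tolerate a null successor.  toy_* names are
     * hypothetical. */
    #include <pthread.h>
    #include <semaphore.h>
    #include <stddef.h>

    typedef struct toy_thread { sem_t parked; } toy_thread;
    typedef struct toy_monitor {
            pthread_mutex_t lock;
            toy_thread *    owner;
            unsigned        recursion;
    } toy_monitor;

    static void toy_unpark( toy_thread * thrd ) { if( thrd ) sem_post( &thrd->parked ); }

    /* Stand-in for next_thread(): pop the entry queue (elided) and hand the
     * monitor to that thread before it is ever woken. */
    static toy_thread * toy_next_thread( toy_monitor * this ) {
            toy_thread * new_owner = NULL;      /* = pop_head( entry_queue )      */
            this->owner     = new_owner;
            this->recursion = new_owner ? 1 : 0;
            return new_owner;
    }

    static void toy_leave( toy_monitor * this ) {
            pthread_mutex_lock( &this->lock );
            if( --this->recursion != 0 ) {      /* still nested: keep ownership   */
                    pthread_mutex_unlock( &this->lock );
                    return;
            }
            toy_thread * new_owner = toy_next_thread( this );
            pthread_mutex_unlock( &this->lock );/* safe: ownership already passed */
            toy_unpark( new_owner );            /* may be null on low contention  */
    }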
     252// Leave single monitor for the last time
     253void __dtor_leave( $monitor * this ) {
     254        __cfaabi_dbg_debug_do(
     255                if( TL_GET( this_thread ) != this->owner ) {
     256                        abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner);
    153257                }
    154                 else if( this->owner == thrd) {
    155                 // We already have the monitor... but we're about to destroy it, so the nesting will fail
    156                         // Abort!
    157                         abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd );
     258                if( this->recursion != 1 ) {
     259                        abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
    158260                }
    159 
    160                 __lock_size_t count = 1;
    161                 monitor_desc ** monitors = &this;
    162                 __monitor_group_t group = { &this, 1, func };
    163                 if( is_accepted( this, group) ) {
    164                         __cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
    165 
    166                         // Wake the thread that is waiting for this
    167                         __condition_criterion_t * urgent = pop( this->signal_stack );
    168                         verify( urgent );
    169 
    170                         // Reset mask
    171                         reset_mask( this );
    172 
    173                         // Create the node specific to this wait operation
    174                         wait_ctx_primed( thrd, 0 )
    175 
    176                         // Some one else has the monitor, wait for him to finish and then run
    177                         BlockInternal( &this->lock, urgent->owner->waiting_thread );
    178 
    179                         // Some one was waiting for us, enter
    180                         set_owner( this, thrd );
    181                 }
    182                 else {
    183                         __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
    184 
    185                         wait_ctx( thrd, 0 )
    186                         this->dtor_node = &waiter;
    187 
    188                         // Some one else has the monitor, wait in line for it
    189                         append( this->entry_queue, thrd );
    190                         BlockInternal( &this->lock );
    191 
    192                         // BlockInternal will unlock spinlock, no need to unlock ourselves
    193                         return;
    194                 }
    195 
    196                 __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);
    197 
    198         }
    199 
    200         // Leave single monitor
    201         void __leave_monitor_desc( monitor_desc * this ) {
    202                 // Lock the monitor spinlock
    203                 lock( this->lock __cfaabi_dbg_ctx2 );
    204 
    205                 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
    206 
    207                 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    208 
    209                 // Leaving a recursion level, decrement the counter
    210                 this->recursion -= 1;
    211 
    212                 // If we haven't left the last level of recursion
    213                 // it means we don't need to do anything
    214                 if( this->recursion != 0) {
    215                         __cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
    216                         unlock( this->lock );
    217                         return;
    218                 }
    219 
    220                 // Get the next thread, will be null on low contention monitor
    221                 thread_desc * new_owner = next_thread( this );
    222 
    223                 // We can now let other threads in safely
    224                 unlock( this->lock );
    225 
    226                 //We need to wake-up the thread
    227                 WakeThread( new_owner );
    228         }
    229 
    230         // Leave single monitor for the last time
    231         void __leave_dtor_monitor_desc( monitor_desc * this ) {
    232                 __cfaabi_dbg_debug_do(
    233                         if( TL_GET( this_thread ) != this->owner ) {
    234                                 abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner);
    235                         }
    236                         if( this->recursion != 1 ) {
    237                                 abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
    238                         }
    239                 )
    240         }
    241 
     261        )
     262}
     263
     264extern "C" {
    242265        // Leave the thread monitor
    243266        // last routine called by a thread.
    244267        // Should never return
    245         void __leave_thread_monitor() {
    246                 thread_desc * thrd = TL_GET( this_thread );
    247                 monitor_desc * this = &thrd->self_mon;
     268        void __cfactx_thrd_leave() {
     269                $thread * thrd = TL_GET( this_thread );
     270                $monitor * this = &thrd->self_mon;
    248271
    249272                // Lock the monitor now
     
    252275                disable_interrupts();
    253276
    254                 thrd->self_cor.state = Halted;
    255 
    256                 verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
     277                thrd->state = Halted;
     278
     279                /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
    257280
    258281                // Leaving a recursion level, decrement the counter
     
    264287
    265288                // Fetch the next thread, can be null
    266                 thread_desc * new_owner = next_thread( this );
    267 
    268                 // Leave the thread, this will unlock the spinlock
    269                 // Use leave thread instead of BlockInternal which is
    270                 // specialized for this case and supports null new_owner
    271                 LeaveThread( &this->lock, new_owner );
     289                $thread * new_owner = next_thread( this );
     290
     291                // Release the monitor lock
     292                unlock( this->lock );
     293
     294                // Unpark the next owner if needed
     295                /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     296                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     297                /* paranoid */ verify( ! kernelTLS.this_processor->destroyer );
     298                /* paranoid */ verify( thrd->state == Halted );
     299
     300                kernelTLS.this_processor->destroyer = new_owner;
     301
     302                // Leave the thread
     303                __leave_thread();
    272304
    273305                // Control flow should never reach here!
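In __cfactx_thrd_leave the old LeaveThread( &this->lock, new_owner ) is likewise split apart: the monitor lock is released directly, but the successor is not unparked here; it is recorded in kernelTLS.this_processor->destroyer before __leave_thread() runs. Presumably this defers the wake-up to the processor so that it happens only after the terminating thread has switched off its own stack. A toy C model of that kind of deferral (all toy_* names are hypothetical, and this is not how the CFA kernel is actually structured):

    /* Illustrative sketch only: defer waking the successor until the dying
     * thread's frame is no longer in use.  toy_* names are hypothetical. */
    #include <pthread.h>
    #include <semaphore.h>
    #include <stddef.h>

    typedef struct toy_thread { sem_t parked; } toy_thread;

    typedef struct toy_processor {
            toy_thread * destroyer;   /* successor to wake once the thread is gone */
    } toy_processor;

    static void toy_unpark( toy_thread * thrd ) { if( thrd ) sem_post( &thrd->parked ); }

    /* The dying thread only *records* who must be woken ... */
    static void toy_thrd_leave( toy_processor * proc, toy_thread * new_owner ) {
            proc->destroyer = new_owner;
            /* ... then switches back to the processor and never runs again. */
    }

    /* ... and the processor performs the unpark after the switch, when the dead
     * thread's stack can safely be reclaimed by whoever was waiting on it. */
    static void toy_processor_after_switch( toy_processor * proc ) {
            toy_thread * thrd = proc->destroyer;
            proc->destroyer = NULL;
            toy_unpark( thrd );
    }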
     
    279311static inline void enter( __monitor_group_t monitors ) {
    280312        for( __lock_size_t i = 0; i < monitors.size; i++) {
    281                 __enter_monitor_desc( monitors[i], monitors );
     313                __enter( monitors[i], monitors );
    282314        }
    283315}
     
    285317// Leave multiple monitor
    286318// relies on the monitor array being sorted
    287 static inline void leave(monitor_desc * monitors [], __lock_size_t count) {
     319static inline void leave($monitor * monitors [], __lock_size_t count) {
    288320        for( __lock_size_t i = count - 1; i >= 0; i--) {
    289                 __leave_monitor_desc( monitors[i] );
     321                __leave( monitors[i] );
    290322        }
    291323}
     
    293325// Ctor for monitor guard
    294326// Sorts monitors before entering
    295 void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) {
    296         thread_desc * thrd = TL_GET( this_thread );
     327void ?{}( monitor_guard_t & this, $monitor * m [], __lock_size_t count, fptr_t func ) {
     328        $thread * thrd = TL_GET( this_thread );
    297329
    298330        // Store current array
     
    334366// Ctor for monitor guard
    335367// Sorts monitors before entering
    336 void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) {
     368void ?{}( monitor_dtor_guard_t & this, $monitor * m [], fptr_t func ) {
    337369        // optimization
    338         thread_desc * thrd = TL_GET( this_thread );
     370        $thread * thrd = TL_GET( this_thread );
    339371
    340372        // Store current array
     
    347379        (thrd->monitors){m, 1, func};
    348380
    349         __enter_monitor_dtor( this.m, func );
     381        __dtor_enter( this.m, func );
    350382}
    351383
     
    353385void ^?{}( monitor_dtor_guard_t & this ) {
    354386        // Leave the monitors in order
    355         __leave_dtor_monitor_desc( this.m );
     387        __dtor_leave( this.m );
    356388
    357389        // Restore thread context
     
    361393//-----------------------------------------------------------------------------
    362394// Internal scheduling types
    363 void ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
     395void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
    364396        this.waiting_thread = waiting_thread;
    365397        this.count = count;
     
    375407}
    376408
    377 void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t & owner ) {
     409void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t & owner ) {
    378410        this.ready  = false;
    379411        this.target = target;
     
    400432        // Append the current wait operation to the ones already queued on the condition
    401433        // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
     434        /* paranoid */ verify( waiter.next == 0p );
    402435        append( this.blocked, &waiter );
     436        /* paranoid */ verify( waiter.next == 1p );
    403437
    404438        // Lock all monitors (aggregates the locks as well)
     
    407441        // Find the next thread(s) to run
    408442        __lock_size_t thread_count = 0;
    409         thread_desc * threads[ count ];
     443        $thread * threads[ count ];
    410444        __builtin_memset( threads, 0, sizeof( threads ) );
    411445
     
    415449        // Remove any duplicate threads
    416450        for( __lock_size_t i = 0; i < count; i++) {
    417                 thread_desc * new_owner = next_thread( monitors[i] );
     451                $thread * new_owner = next_thread( monitors[i] );
    418452                insert_unique( threads, thread_count, new_owner );
    419453        }
    420454
     455        // Unlock the locks, we don't need them anymore
     456        for(int i = 0; i < count; i++) {
     457                unlock( *locks[i] );
     458        }
     459
     460        // Wake the threads
     461        for(int i = 0; i < thread_count; i++) {
     462                unpark( threads[i] );
     463        }
     464
    421465        // Everything is ready to go to sleep
    422         BlockInternal( locks, count, threads, thread_count );
     466        park();
    423467
    424468        // We are back, restore the owners and recursions
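The same decomposition appears inside wait: instead of one BlockInternal( locks, count, threads, thread_count ) call, the new code unlocks every monitor lock, unparks each distinct new owner, and only then parks the waiting thread. A compact C sketch of that unwind sequence over the two arrays, reusing the toy semaphore-based park/unpark idea from above (toy_* names are hypothetical):

    /* Illustrative sketch only: release all monitor locks, wake the chosen
     * owners, then park the current thread.  toy_* names are hypothetical. */
    #include <pthread.h>
    #include <semaphore.h>
    #include <stddef.h>

    typedef struct toy_thread { sem_t parked; } toy_thread;

    static void toy_park  ( toy_thread * self ) { sem_wait( &self->parked ); }
    static void toy_unpark( toy_thread * thrd ) { if( thrd ) sem_post( &thrd->parked ); }

    static void toy_block_and_wake( pthread_mutex_t * locks[], size_t lock_count,
                                    toy_thread * threads[], size_t thread_count,
                                    toy_thread * self ) {
            for( size_t i = 0; i < lock_count; i++ )      /* unlock_all( locks, count ) */
                    pthread_mutex_unlock( locks[i] );
            for( size_t i = 0; i < thread_count; i++ )    /* wake the signalled owners  */
                    toy_unpark( threads[i] );
            toy_park( self );                             /* finally block ourselves    */
    }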
     
    435479        //Some more checking in debug
    436480        __cfaabi_dbg_debug_do(
    437                 thread_desc * this_thrd = TL_GET( this_thread );
     481                $thread * this_thrd = TL_GET( this_thread );
    438482                if ( this.monitor_count != this_thrd->monitors.size ) {
    439483                        abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size );
     
    489533
    490534        //Find the thread to run
    491         thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
    492         set_owner( monitors, count, signallee );
     535        $thread * signallee = pop_head( this.blocked )->waiting_thread;
     536        /* paranoid */ verify( signallee->next == 0p );
     537        __set_owner( monitors, count, signallee );
    493538
    494539        __cfaabi_dbg_print_buffer_decl( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee );
    495540
     541        // unlock all the monitors
     542        unlock_all( locks, count );
     543
     544        // unpark the thread we signalled
     545        unpark( signallee );
     546
    496547        //Everything is ready to go to sleep
    497         BlockInternal( locks, count, &signallee, 1 );
     548        park();
    498549
    499550
     
    536587        // Create one!
    537588        __lock_size_t max = count_max( mask );
    538         monitor_desc * mon_storage[max];
     589        $monitor * mon_storage[max];
    539590        __builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
    540591        __lock_size_t actual_count = aggregate( mon_storage, mask );
     
    554605        {
    555606                // Check if the entry queue
    556                 thread_desc * next; int index;
     607                $thread * next; int index;
    557608                [next, index] = search_entry_queue( mask, monitors, count );
    558609
     
    564615                                verifyf( accepted.size == 1,  "ERROR: Accepted dtor has more than 1 mutex parameter." );
    565616
    566                                 monitor_desc * mon2dtor = accepted[0];
     617                                $monitor * mon2dtor = accepted[0];
    567618                                verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." );
    568619
     
    590641
    591642                                // Set the owners to be the next thread
    592                                 set_owner( monitors, count, next );
    593 
    594                                 // Everything is ready to go to sleep
    595                                 BlockInternal( locks, count, &next, 1 );
     643                                __set_owner( monitors, count, next );
     644
     645                                // unlock all the monitors
     646                                unlock_all( locks, count );
     647
     648                                // unpark the thread we signalled
     649                                unpark( next );
     650
     651                                //Everything is ready to go to sleep
     652                                park();
    596653
    597654                                // We are back, restore the owners and recursions
     
    631688        }
    632689
     690        // unlock all the monitors
     691        unlock_all( locks, count );
     692
    633693        //Everything is ready to go to sleep
    634         BlockInternal( locks, count );
     694        park();
    635695
    636696
     
    649709// Utilities
    650710
    651 static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
    652         // __cfaabi_dbg_print_safe( "Kernal :   Setting owner of %p to %p ( was %p)\n", this, owner, this->owner );
     711static inline void __set_owner( $monitor * this, $thread * owner ) {
     712        /* paranoid */ verify( this->lock.lock );
    653713
    654714        //Pass the monitor appropriately
     
    659719}
    660720
    661 static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
    662         monitors[0]->owner     = owner;
    663         monitors[0]->recursion = 1;
     721static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread * owner ) {
     722        /* paranoid */ verify ( monitors[0]->lock.lock );
     723        /* paranoid */ verifyf( monitors[0]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[0]->owner, monitors[0]->recursion, monitors[0] );
     724        monitors[0]->owner        = owner;
     725        monitors[0]->recursion    = 1;
    664726        for( __lock_size_t i = 1; i < count; i++ ) {
    665                 monitors[i]->owner     = owner;
    666                 monitors[i]->recursion = 0;
    667         }
    668 }
    669 
    670 static inline void set_mask( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
     727                /* paranoid */ verify ( monitors[i]->lock.lock );
     728                /* paranoid */ verifyf( monitors[i]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[i]->owner, monitors[i]->recursion, monitors[i] );
     729                monitors[i]->owner        = owner;
     730                monitors[i]->recursion    = 0;
     731        }
     732}
     733
     734static inline void set_mask( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
    671735        for( __lock_size_t i = 0; i < count; i++) {
    672736                storage[i]->mask = mask;
     
    674738}
    675739
    676 static inline void reset_mask( monitor_desc * this ) {
     740static inline void reset_mask( $monitor * this ) {
    677741        this->mask.accepted = 0p;
    678742        this->mask.data = 0p;
     
    680744}
    681745
    682 static inline thread_desc * next_thread( monitor_desc * this ) {
     746static inline $thread * next_thread( $monitor * this ) {
    683747        //Check the signaller stack
    684748        __cfaabi_dbg_print_safe( "Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
     
    688752                //regardless of if we are ready to baton pass,
    689753                //we need to set the monitor as in use
    690                 set_owner( this,  urgent->owner->waiting_thread );
     754                /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     755                __set_owner( this,  urgent->owner->waiting_thread );
    691756
    692757                return check_condition( urgent );
     
    695760        // No signaller thread
    696761        // Get the next thread in the entry_queue
    697         thread_desc * new_owner = pop_head( this->entry_queue );
    698         set_owner( this, new_owner );
     762        $thread * new_owner = pop_head( this->entry_queue );
     763        /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     764        /* paranoid */ verify( !new_owner || new_owner->next == 0p );
     765        __set_owner( this, new_owner );
    699766
    700767        return new_owner;
    701768}
    702769
    703 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
     770static inline bool is_accepted( $monitor * this, const __monitor_group_t & group ) {
    704771        __acceptable_t * it = this->mask.data; // Optim
    705772        __lock_size_t count = this->mask.size;
     
    723790}
    724791
    725 static inline void init( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
     792static inline void init( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
    726793        for( __lock_size_t i = 0; i < count; i++) {
    727794                (criteria[i]){ monitors[i], waiter };
     
    731798}
    732799
    733 static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
     800static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
    734801        for( __lock_size_t i = 0; i < count; i++) {
    735802                (criteria[i]){ monitors[i], waiter };
     
    747814}
    748815
    749 static inline void lock_all( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
     816static inline void lock_all( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
    750817        for( __lock_size_t i = 0; i < count; i++ ) {
    751818                __spinlock_t * l = &source[i]->lock;
     
    761828}
    762829
    763 static inline void unlock_all( monitor_desc * locks [], __lock_size_t count ) {
     830static inline void unlock_all( $monitor * locks [], __lock_size_t count ) {
    764831        for( __lock_size_t i = 0; i < count; i++ ) {
    765832                unlock( locks[i]->lock );
     
    768835
    769836static inline void save(
    770         monitor_desc * ctx [],
     837        $monitor * ctx [],
    771838        __lock_size_t count,
    772839        __attribute((unused)) __spinlock_t * locks [],
     
    781848
    782849static inline void restore(
    783         monitor_desc * ctx [],
     850        $monitor * ctx [],
    784851        __lock_size_t count,
    785852        __spinlock_t * locks [],
     
    799866// 2 - Checks if all the monitors are ready to run
    800867//     if so return the thread to run
    801 static inline thread_desc * check_condition( __condition_criterion_t * target ) {
     868static inline $thread * check_condition( __condition_criterion_t * target ) {
    802869        __condition_node_t * node = target->owner;
    803870        unsigned short count = node->count;
     
    822889
    823890static inline void brand_condition( condition & this ) {
    824         thread_desc * thrd = TL_GET( this_thread );
     891        $thread * thrd = TL_GET( this_thread );
    825892        if( !this.monitors ) {
    826893                // __cfaabi_dbg_print_safe( "Branding\n" );
     
    828895                this.monitor_count = thrd->monitors.size;
    829896
    830                 this.monitors = (monitor_desc **)malloc( this.monitor_count * sizeof( *this.monitors ) );
     897                this.monitors = ($monitor **)malloc( this.monitor_count * sizeof( *this.monitors ) );
    831898                for( int i = 0; i < this.monitor_count; i++ ) {
    832899                        this.monitors[i] = thrd->monitors[i];
     
    835902}
    836903
    837 static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc * monitors [], __lock_size_t count ) {
    838 
    839         __queue_t(thread_desc) & entry_queue = monitors[0]->entry_queue;
     904static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t & mask, $monitor * monitors [], __lock_size_t count ) {
     905
     906        __queue_t($thread) & entry_queue = monitors[0]->entry_queue;
    840907
    841908        // For each thread in the entry-queue
    842         for(    thread_desc ** thrd_it = &entry_queue.head;
    843                 *thrd_it;
     909        for(    $thread ** thrd_it = &entry_queue.head;
     910                *thrd_it != 1p;
    844911                thrd_it = &(*thrd_it)->next
    845912        ) {
     
    884951}
    885952
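The entry-queue walk in search_entry_queue now stops at 1p rather than at a null pointer, which matches the paranoid checks added around append (thrd->next == 0p before insertion, == 1p once the thread is the tail). The intrusive queue thus appears to use 1 as its end-of-queue sentinel and 0 to mean "not linked into any queue". A minimal C sketch of traversing such a 1-terminated intrusive list, based only on that inferred convention (toy_* names are hypothetical):

    /* Illustrative sketch only: an intrusive singly-linked queue that uses the
     * address value 1 as its end sentinel, so next == NULL can mean "not in any
     * queue" and next == TOY_END means "last element".  Inferred convention;
     * toy_* names are hypothetical. */
    #include <stddef.h>
    #include <stdio.h>

    #define TOY_END ((struct toy_node *)1)

    struct toy_node {
            int               id;
            struct toy_node * next;   /* NULL = unlinked, TOY_END = tail of a queue */
    };

    struct toy_queue { struct toy_node * head; };

    static void toy_init  ( struct toy_queue * q ) { q->head = TOY_END; }
    static void toy_append( struct toy_queue * q, struct toy_node * n ) {
            struct toy_node ** it = &q->head;
            while( *it != TOY_END ) it = &(*it)->next;   /* walk to the sentinel   */
            n->next = TOY_END;                           /* n becomes the new tail */
            *it = n;
    }

    int main( void ) {
            struct toy_queue q; toy_init( &q );
            struct toy_node a = { 1, NULL }, b = { 2, NULL };
            toy_append( &q, &a ); toy_append( &q, &b );
            for( struct toy_node * it = q.head; it != TOY_END; it = it->next )
                    printf( "%d\n", it->id );            /* prints 1 then 2        */
            return 0;
    }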
    886 static inline __lock_size_t aggregate( monitor_desc * storage [], const __waitfor_mask_t & mask ) {
     953static inline __lock_size_t aggregate( $monitor * storage [], const __waitfor_mask_t & mask ) {
    887954        __lock_size_t size = 0;
    888955        for( __lock_size_t i = 0; i < mask.size; i++ ) {