Timestamp:
Oct 19, 2017, 12:01:04 PM
Author:
Rob Schluntz <rschlunt@…>
Branches:
ADT, arm-eh, ast-experimental, cleanup-dtors, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
837ce06
Parents:
b96ec83 (diff), a15b72c (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' into cleanup-dtors

File:
1 edited

  • src/libcfa/concurrency/monitor.c

    rb96ec83 r6840e7c  
    2323//-----------------------------------------------------------------------------
    2424// Forward declarations
    25 static inline void set_owner( monitor_desc * this, thread_desc * owner );
    26 static inline void set_owner( monitor_desc ** storage, short count, thread_desc * owner );
    27 static inline void set_mask ( monitor_desc ** storage, short count, const __waitfor_mask_t & mask );
     25static inline void set_owner ( monitor_desc * this, thread_desc * owner );
     26static inline void set_owner ( monitor_desc ** storage, short count, thread_desc * owner );
     27static inline void set_mask  ( monitor_desc ** storage, short count, const __waitfor_mask_t & mask );
     28static inline void reset_mask( monitor_desc * this );
    2829
    2930static inline thread_desc * next_thread( monitor_desc * this );
     
    7273#define monitor_restore restore( monitors, count, locks, recursions, masks )
    7374
    74 #define blockAndWake( thrd, cnt )                               /* Create the necessary information to use the signaller stack                         */ \
    75         monitor_save;                                             /* Save monitor states                                                                 */ \
    76         BlockInternal( locks, count, thrd, cnt );                 /* Everything is ready to go to sleep                                                  */ \
    77         monitor_restore;                                          /* We are back, restore the owners and recursions                                      */ \
    78 
    7975
    8076//-----------------------------------------------------------------------------
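
The removed blockAndWake macro bundled the save/block/restore steps into one unit. Later hunks in this changeset write that sequence out inline so ownership can be handed to the woken thread between the save and the block. A hypothetical C++ sketch of the difference, using stand-in types and stub routines rather than the libcfa ones:

    struct thread_desc;                                            // stand-ins only, not the CFA types
    struct monitor_desc { thread_desc * owner; unsigned recursion; };

    static void monitor_save_stub   ( monitor_desc **, int ) {}   // placeholder for monitor_save
    static void block_internal_stub ( thread_desc **, int )  {}   // placeholder for BlockInternal
    static void monitor_restore_stub( monitor_desc **, int ) {}   // placeholder for monitor_restore

    // The removed macro could only express this fixed save/block/restore sequence:
    static void bundled( monitor_desc ** mons, int count, thread_desc ** thrd, int cnt ) {
        monitor_save_stub   ( mons, count );
        block_internal_stub ( thrd, cnt );
        monitor_restore_stub( mons, count );
    }

    // Writing the sequence out inline allows an ownership hand-off between the save and
    // the block, which is what the waitfor baton-passing hunk further down needs:
    static void inlined_with_handoff( monitor_desc ** mons, int count, thread_desc * next ) {
        monitor_save_stub( mons, count );
        for( int i = 0; i < count; i++ ) mons[i]->owner = next;    // simplified stand-in for set_owner
        block_internal_stub ( &next, 1 );
        monitor_restore_stub( mons, count );
    }
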
     
    9894                }
    9995                else if( this->owner == thrd) {
    100                         // We already have the monitor, just not how many times we took it
     96                        // We already have the monitor, just note how many times we took it
    10197                        verify( this->recursion > 0 );
    10298                        this->recursion += 1;
     
    108104                        set_owner( this, thrd );
    109105
     106                        // Reset mask
     107                        reset_mask( this );
     108
    110109                        LIB_DEBUG_PRINT_SAFE("Kernel :  mon accepts \n");
    111110                }
     
    128127                unlock( &this->lock );
    129128                return;
     129        }
     130
     131        static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) {
     132                // Lock the monitor spinlock, lock_yield to reduce contention
     133                lock_yield( &this->lock DEBUG_CTX2 );
     134                thread_desc * thrd = this_thread;
     135
     136                LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
     137
     138
     139                if( !this->owner ) {
     140                        LIB_DEBUG_PRINT_SAFE("Kernel : Destroying free mon %p\n", this);
     141
     142                        // No one has the monitor, just take it
     143                        set_owner( this, thrd );
     144
     145                        unlock( &this->lock );
     146                        return;
     147                }
     148                else if( this->owner == thrd) {
      149                        // We already have the monitor... but we're about to destroy it, so the nesting will fail
     150                        // Abort!
     151                        abortf("Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.");
     152                }
     153
     154                int count = 1;
     155                monitor_desc ** monitors = &this;
     156                __monitor_group_t group = { &this, 1, func };
     157                if( is_accepted( this, group) ) {
     158                        LIB_DEBUG_PRINT_SAFE("Kernel :  mon accepts dtor, block and signal it \n");
     159
     160                        // Wake the thread that is waiting for this
     161                        __condition_criterion_t * urgent = pop( &this->signal_stack );
     162                        verify( urgent );
     163
     164                        // Reset mask
     165                        reset_mask( this );
     166
     167                        // Create the node specific to this wait operation
     168                        wait_ctx_primed( this_thread, 0 )
     169
      170                        // Someone else has the monitor, wait for it to finish and then run
     171                        BlockInternal( &this->lock, urgent->owner->waiting_thread );
     172
      173                        // Someone was waiting for us, enter
     174                        set_owner( this, thrd );
     175                }
     176                else {
     177                        LIB_DEBUG_PRINT_SAFE("Kernel :  blocking \n");
     178
     179                        wait_ctx( this_thread, 0 )
     180                        this->dtor_node = &waiter;
     181
      182                        // Someone else has the monitor, wait in line for it
     183                        append( &this->entry_queue, thrd );
     184                        BlockInternal( &this->lock );
     185
     186                        // BlockInternal will unlock spinlock, no need to unlock ourselves
     187                        return;
     188                }
     189
     190                LIB_DEBUG_PRINT_SAFE("Kernel : Destroying %p\n", this);
     191
    130192        }
    131193
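
The new __enter_monitor_dtor differs from a normal monitor entry in one rule: if the calling thread already owns the monitor, bumping the recursion count would let the destructor run inside a live nested mutex call, so the routine aborts instead. A hypothetical C++ sketch of just that rule, with stand-in types rather than the libcfa ones:

    #include <cstdio>
    #include <cstdlib>
    #include <mutex>

    struct Thread {};
    struct Monitor {
        std::mutex lock;                 // stand-in for the monitor spinlock
        Thread *   owner     = nullptr;
        unsigned   recursion = 0;
    };

    // Simplified entry-for-destruction: a free monitor is taken outright, a
    // self-owned monitor is a fatal error, any other owner means queueing
    // behind it (elided here).
    void enter_for_dtor( Monitor & m, Thread * self ) {
        std::lock_guard<std::mutex> g( m.lock );
        if( !m.owner ) {                 // no one has the monitor, just take it
            m.owner     = self;
            m.recursion = 1;
            return;
        }
        if( m.owner == self ) {          // nested mutex call on a monitor being destroyed
            std::fprintf( stderr, "attempt to destroy monitor in nested mutex\n" );
            std::abort();
        }
        // otherwise: append to the entry queue and block (omitted)
    }
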
     
    159221        }
    160222
     223        // Leave single monitor for the last time
     224        void __leave_dtor_monitor_desc( monitor_desc * this ) {
     225                LIB_DEBUG_DO(
     226                        if( this_thread != this->owner ) {
     227                                abortf("Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, this_thread, this->owner);
     228                        }
     229                        if( this->recursion != 1 ) {
     230                                abortf("Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
     231                        }
     232                )
     233        }
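
__leave_dtor_monitor_desc only checks invariants: the destroying thread must own the monitor, and the recursion count must be exactly 1 (the destructor's own mutex entry). A tiny C++ analogue using assertions and stand-in types:

    #include <cassert>

    struct Thread {};
    struct Monitor { Thread * owner; unsigned recursion; };

    // Debug-only sanity check run just before a monitor's storage is destroyed.
    void leave_for_dtor( const Monitor & m, const Thread * self ) {
        assert( m.owner == self  && "destroyed monitor has an inconsistent owner" );
        assert( m.recursion == 1 && "destroyed monitor has outstanding nested calls" );
    }
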
     234
    161235        // Leave the thread monitor
    162236        // last routine called by a thread.
     
    211285// Ctor for monitor guard
    212286// Sorts monitors before entering
    213 void ?{}( monitor_guard_t & this, monitor_desc ** m, int count, void (*func)() ) {
     287void ?{}( monitor_guard_t & this, monitor_desc ** m, int count, fptr_t func ) {
    214288        // Store current array
    215289        this.m = m;
     
    229303        this_thread->monitors.func = func;
    230304
    231         LIB_DEBUG_PRINT_SAFE("MGUARD : enter %d\n", count);
     305        // LIB_DEBUG_PRINT_SAFE("MGUARD : enter %d\n", count);
    232306
    233307        // Enter the monitors in order
     
    235309        enter( group );
    236310
    237         LIB_DEBUG_PRINT_SAFE("MGUARD : entered\n");
     311        // LIB_DEBUG_PRINT_SAFE("MGUARD : entered\n");
    238312}
    239313
     
    241315// Dtor for monitor guard
    242316void ^?{}( monitor_guard_t & this ) {
    243         LIB_DEBUG_PRINT_SAFE("MGUARD : leaving %d\n", this.count);
     317        // LIB_DEBUG_PRINT_SAFE("MGUARD : leaving %d\n", this.count);
    244318
    245319        // Leave the monitors in order
    246320        leave( this.m, this.count );
    247321
    248         LIB_DEBUG_PRINT_SAFE("MGUARD : left\n");
     322        // LIB_DEBUG_PRINT_SAFE("MGUARD : left\n");
     323
     324        // Restore thread context
     325        this_thread->monitors.list = this.prev_mntrs;
     326        this_thread->monitors.size = this.prev_count;
     327        this_thread->monitors.func = this.prev_func;
     328}
     329
     330
     331// Ctor for monitor guard
     332// Sorts monitors before entering
     333void ?{}( monitor_dtor_guard_t & this, monitor_desc ** m, fptr_t func ) {
     334        // Store current array
     335        this.m = *m;
     336
     337        // Save previous thread context
     338        this.prev_mntrs = this_thread->monitors.list;
     339        this.prev_count = this_thread->monitors.size;
     340        this.prev_func  = this_thread->monitors.func;
     341
     342        // Update thread context (needed for conditions)
     343        this_thread->monitors.list = m;
     344        this_thread->monitors.size = 1;
     345        this_thread->monitors.func = func;
     346
     347        __enter_monitor_dtor( this.m, func );
     348}
     349
     350
     351// Dtor for monitor guard
     352void ^?{}( monitor_dtor_guard_t & this ) {
     353        // Leave the monitors in order
     354        __leave_dtor_monitor_desc( this.m );
    249355
    250356        // Restore thread context
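
Both guard types above follow the same RAII shape: the constructor saves the thread's current monitor context (list, size, func), installs the new one, and enters; the destructor leaves and restores the saved context. A hypothetical C++ sketch of that save/restore shape, not the Cforall code:

    using fptr_t = void (*)();
    struct monitor_desc;

    struct monitor_ctx {                        // stand-in for this_thread->monitors
        monitor_desc ** list;
        int             size;
        fptr_t          func;
    };
    thread_local monitor_ctx current_ctx = { nullptr, 0, nullptr };

    struct guard {                              // shape shared by monitor_guard_t / monitor_dtor_guard_t
        monitor_ctx saved;                      // previous context, restored on exit
        guard( monitor_desc ** m, int count, fptr_t func ) {
            saved       = current_ctx;          // save previous thread context
            current_ctx = { m, count, func };   // update context (needed for conditions)
            // enter the monitor(s) in order (elided)
        }
        ~guard() {
            // leave the monitor(s) in order (elided)
            current_ctx = saved;                // restore thread context
        }
    };
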
     
    303409        short thread_count = 0;
    304410        thread_desc * threads[ count ];
    305         for(int i = 0; i < count; i++) {
    306                 threads[i] = 0;
    307         }
     411        __builtin_memset( threads, 0, sizeof( threads ) );
    308412
    309413        // Save monitor states
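
Replacing the zeroing loop with __builtin_memset assumes a null pointer is represented as all-bits-zero, which holds on the platforms libcfa targets. A minimal illustration using standard memset and a hypothetical helper:

    #include <cstring>

    struct thread_desc;                         // element type is immaterial here

    // Same effect as the removed loop:  for( int i = 0; i < count; i++ ) threads[i] = 0;
    void zero_slots( thread_desc * threads[], int count ) {
        std::memset( threads, 0, count * sizeof( threads[0] ) );
    }
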
     
    429533        short max = count_max( mask );
    430534        monitor_desc * mon_storage[max];
     535        __builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
    431536        short actual_count = aggregate( mon_storage, mask );
    432537
     538        LIB_DEBUG_PRINT_SAFE("Kernel : waitfor %d (s: %d, m: %d)\n", actual_count, mask.size, (short)max);
     539
    433540        if(actual_count == 0) return;
     541
     542        LIB_DEBUG_PRINT_SAFE("Kernel : waitfor internal proceeding\n");
    434543
    435544        // Create storage for monitor context
     
    445554
    446555                if( next ) {
     556                        *mask.accepted = index;
    447557                        if( mask.clauses[index].is_dtor ) {
    448                                 #warning case not implemented
     558                                LIB_DEBUG_PRINT_SAFE("Kernel : dtor already there\n");
     559                                verifyf( mask.clauses[index].size == 1        , "ERROR: Accepted dtor has more than 1 mutex parameter." );
     560
     561                                monitor_desc * mon2dtor = mask.clauses[index].list[0];
     562                                verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." );
     563
     564                                __condition_criterion_t * dtor_crit = mon2dtor->dtor_node->criteria;
     565                                push( &mon2dtor->signal_stack, dtor_crit );
     566
     567                                unlock_all( locks, count );
    449568                        }
    450569                        else {
    451                                 blockAndWake( &next, 1 );
     570                                LIB_DEBUG_PRINT_SAFE("Kernel : thread present, baton-passing\n");
     571
     572                                // Create the node specific to this wait operation
     573                                wait_ctx_primed( this_thread, 0 );
     574
     575                                // Save monitor states
     576                                monitor_save;
     577
     578                                // Set the owners to be the next thread
     579                                set_owner( monitors, count, next );
     580
     581                                // Everything is ready to go to sleep
     582                                BlockInternal( locks, count, &next, 1 );
     583
     584                                // We are back, restore the owners and recursions
     585                                monitor_restore;
     586
     587                                LIB_DEBUG_PRINT_SAFE("Kernel : thread present, returned\n");
    452588                        }
    453589
    454                         return index;
     590                        LIB_DEBUG_PRINT_SAFE("Kernel : accepted %d\n", *mask.accepted);
     591
     592                        return;
    455593                }
    456594        }
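
This branch replaces the old blockAndWake macro call: the accepting thread saves the monitor state, makes the matched waiter the owner, blocks while waking it, and restores on return. A minimal, self-contained C++ sketch of the baton-passing idea itself (handing ownership to one chosen thread), using std::mutex and std::condition_variable rather than the libcfa kernel primitives:

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    std::mutex              mtx;
    std::condition_variable cv;
    int                     owner = 0;          // which participant currently holds the baton

    // Each participant blocks until it is made the owner, runs, then hands the
    // baton to a chosen successor before waiting for its next turn.
    void participant( int id, int next, int rounds ) {
        std::unique_lock<std::mutex> lk( mtx );
        for( int r = 0; r < rounds; r++ ) {
            cv.wait( lk, [&]{ return owner == id; } );   // sleep until ownership is passed to us
            std::cout << "participant " << id << " has the baton\n";
            owner = next;                                // hand ownership to a specific thread
            cv.notify_all();                             // wake it; everyone else keeps waiting
        }
    }

    int main() {
        std::thread a( participant, 0, 1, 3 );
        std::thread b( participant, 1, 0, 3 );
        a.join();
        b.join();
    }

The predicate form of cv.wait covers spurious wakeups, which plays the same role as the saved/restored recursion counts do here: only the designated thread proceeds, everyone else goes back to sleep.
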
     
    458596
    459597        if( duration == 0 ) {
     598                LIB_DEBUG_PRINT_SAFE("Kernel : non-blocking, exiting\n");
     599
    460600                unlock_all( locks, count );
     601
     602                LIB_DEBUG_PRINT_SAFE("Kernel : accepted %d\n", *mask.accepted);
    461603                return;
    462604        }
     
    465607        verifyf( duration < 0, "Timeout on waitfor statements not supported yet.");
    466608
     609        LIB_DEBUG_PRINT_SAFE("Kernel : blocking waitfor\n");
     610
     611        // Create the node specific to this wait operation
     612        wait_ctx_primed( this_thread, 0 );
    467613
    468614        monitor_save;
    469615        set_mask( monitors, count, mask );
    470616
    471         BlockInternal( locks, count );       // Everything is ready to go to sleep
    472         //WE WOKE UP
    473         monitor_restore;                     //We are back, restore the masks and recursions
     617        for(int i = 0; i < count; i++) {
     618                verify( monitors[i]->owner == this_thread );
     619        }
     620
     621        //Everything is ready to go to sleep
     622        BlockInternal( locks, count );
     623
     624
     625        // WE WOKE UP
     626
     627
     628        //We are back, restore the masks and recursions
     629        monitor_restore;
     630
     631        LIB_DEBUG_PRINT_SAFE("Kernel : exiting\n");
     632
     633        LIB_DEBUG_PRINT_SAFE("Kernel : accepted %d\n", *mask.accepted);
    474634}
    475635
     
    478638
    479639static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
    480         LIB_DEBUG_PRINT_SAFE("Kernal :   Setting owner of %p to %p ( was %p)\n", this, owner, this->owner );
     640        // LIB_DEBUG_PRINT_SAFE("Kernal :   Setting owner of %p to %p ( was %p)\n", this, owner, this->owner );
    481641
    482642        //Pass the monitor appropriately
     
    497657                storage[i]->mask = mask;
    498658        }
     659}
     660
     661static inline void reset_mask( monitor_desc * this ) {
     662        this->mask.accepted = NULL;
     663        this->mask.clauses = NULL;
     664        this->mask.size = 0;
    499665}
    500666
     
    584750}
    585751
    586 static inline void save   ( monitor_desc ** ctx, short count, __attribute((unused)) spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
     752static inline void save( monitor_desc ** ctx, short count, __attribute((unused)) spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
    587753        for( int i = 0; i < count; i++ ) {
    588754                recursions[i] = ctx[i]->recursion;