Timestamp:
Nov 6, 2017, 11:11:56 AM
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
Children:
a2ea829
Parents:
e706bfd (diff), 121ac13 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

File:
1 edited

  • src/libcfa/concurrency/monitor.c

--- src/libcfa/concurrency/monitor.c (re706bfd)
+++ src/libcfa/concurrency/monitor.c (rbbeb908)
@@ -26,6 +26,6 @@
 // Forward declarations
 static inline void set_owner ( monitor_desc * this, thread_desc * owner );
-static inline void set_owner ( monitor_desc ** storage, short count, thread_desc * owner );
-static inline void set_mask  ( monitor_desc ** storage, short count, const __waitfor_mask_t & mask );
+static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
+static inline void set_mask  ( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
 static inline void reset_mask( monitor_desc * this );
 
     
@@ -33,23 +33,23 @@
 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );
 
-static inline void lock_all( spinlock ** locks, unsigned short count );
-static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count );
-static inline void unlock_all( spinlock ** locks, unsigned short count );
-static inline void unlock_all( monitor_desc ** locks, unsigned short count );
-
-static inline void save   ( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks );
-static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*in */ recursions, __waitfor_mask_t * /*in */ masks );
-
-static inline void init     ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
-static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
+static inline void lock_all  ( spinlock * locks [], __lock_size_t count );
+static inline void lock_all  ( monitor_desc * source [], spinlock * /*out*/ locks [], __lock_size_t count );
+static inline void unlock_all( spinlock * locks [], __lock_size_t count );
+static inline void unlock_all( monitor_desc * locks [], __lock_size_t count );
+
+static inline void save   ( monitor_desc * ctx [], __lock_size_t count, spinlock * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
+static inline void restore( monitor_desc * ctx [], __lock_size_t count, spinlock * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
+
+static inline void init     ( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
+static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
 
 static inline thread_desc *        check_condition   ( __condition_criterion_t * );
-static inline void                 brand_condition   ( condition * );
-static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc ** monitors, int count );
+static inline void                 brand_condition   ( condition & );
+static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc * monitors [], __lock_size_t count );
 
 forall(dtype T | sized( T ))
-static inline short insert_unique( T ** array, short & size, T * val );
-static inline short count_max    ( const __waitfor_mask_t & mask );
-static inline short aggregate    ( monitor_desc ** storage, const __waitfor_mask_t & mask );
+static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
+static inline __lock_size_t count_max    ( const __waitfor_mask_t & mask );
+static inline __lock_size_t aggregate    ( monitor_desc * storage [], const __waitfor_mask_t & mask );
 
 //-----------------------------------------------------------------------------
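
The recurring change in this merge is visible above: ad-hoc short/int/unsigned short counters become a single __lock_size_t type, and T ** parameters are rewritten in C's array-parameter form, which declares the same pointer type but documents that a counted array is expected. A minimal sketch of the idea (the typedef here is an assumption for illustration only; the real definition lives in the library headers):

    // Hypothetical stand-in for the real definition: one typedef now controls
    // the width of every monitor/lock count in this module.
    typedef unsigned int __lock_size_t;

    // "spinlock * locks []" and "spinlock ** locks" declare the same parameter
    // type; the array form signals that locks points at count elements.
    static inline void unlock_all( spinlock * locks [], __lock_size_t count ) {
            for( __lock_size_t i = 0; i < count; i++ ) {
                    unlock( locks[i] );
            }
    }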
     
@@ -58,16 +58,16 @@
         __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
         __condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up                            */ \
-        init( count, monitors, &waiter, criteria );               /* Link everything together                                                            */ \
+        init( count, monitors, waiter, criteria );                /* Link everything together                                                            */ \
 
 #define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack                         */ \
         __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
         __condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up                            */ \
-        init_push( count, monitors, &waiter, criteria );          /* Link everything together and push it to the AS-Stack                                */ \
+        init_push( count, monitors, waiter, criteria );           /* Link everything together and push it to the AS-Stack                                */ \
 
 #define monitor_ctx( mons, cnt )                                /* Define that create the necessary struct for internal/external scheduling operations */ \
         monitor_desc ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
-        unsigned short count = cnt;                               /* Save the count to a local variable                                                  */ \
+        __lock_size_t count = cnt;                                /* Save the count to a local variable                                                  */ \
         unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
-        __waitfor_mask_t masks[ count ];                          /* Save the current waitfor masks to restore them later                                */ \
+        __waitfor_mask_t masks [ count ];                         /* Save the current waitfor masks to restore them later                                */ \
         spinlock *   locks     [ count ];                         /* We need to pass-in an array of locks to BlockInternal                               */ \
 
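
For reference, a use such as monitor_ctx( this.monitors, this.monitor_count ); in wait below expands to roughly the following block of local declarations (a sketch assembled from the macro body above):

    monitor_desc ** monitors = this.monitors;    // the targeted monitors
    __lock_size_t count = this.monitor_count;    // local copy of the count
    unsigned int recursions[ count ];            // recursion levels, saved for a later restore
    __waitfor_mask_t masks [ count ];            // waitfor masks, saved for a later restore
    spinlock * locks [ count ];                  // lock array handed to BlockInternal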
     
@@ -114,5 +114,5 @@
 
                 // Some one else has the monitor, wait in line for it
-                append( &this->entry_queue, thrd );
+                append( this->entry_queue, thrd );
                 BlockInternal( &this->lock );
 
     
@@ -153,5 +153,5 @@
         }
 
-        int count = 1;
+        __lock_size_t count = 1;
         monitor_desc ** monitors = &this;
         __monitor_group_t group = { &this, 1, func };
     
@@ -160,5 +160,5 @@
 
                 // Wake the thread that is waiting for this
-                __condition_criterion_t * urgent = pop( &this->signal_stack );
+                __condition_criterion_t * urgent = pop( this->signal_stack );
                 verify( urgent );
 
     
@@ -182,5 +182,5 @@
 
                 // Some one else has the monitor, wait in line for it
-                append( &this->entry_queue, thrd );
+                append( this->entry_queue, thrd );
                 BlockInternal( &this->lock );
 
     
@@ -272,5 +272,5 @@
 // relies on the monitor array being sorted
 static inline void enter( __monitor_group_t monitors ) {
-        for(int i = 0; i < monitors.size; i++) {
+        for( __lock_size_t i = 0; i < monitors.size; i++) {
                 __enter_monitor_desc( monitors.list[i], monitors );
         }
     
@@ -279,6 +279,6 @@
 // Leave multiple monitor
 // relies on the monitor array being sorted
-static inline void leave(monitor_desc ** monitors, int count) {
-        for(int i = count - 1; i >= 0; i--) {
+static inline void leave(monitor_desc * monitors [], __lock_size_t count) {
+        for( __lock_size_t i = count - 1; i >= 0; i--) {
                 __leave_monitor_desc( monitors[i] );
         }
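
One caveat on the rewritten countdown loop in leave: if __lock_size_t is an unsigned type, the test i >= 0 is always true and i-- wraps around at zero. A sketch of an equivalent loop that stays correct for unsigned index types:

    // Sketch only: iterate count-1 .. 0 without relying on i >= 0.
    static inline void leave( monitor_desc * monitors [], __lock_size_t count ) {
            for( __lock_size_t i = count; i > 0; i-- ) {
                    __leave_monitor_desc( monitors[i - 1] );
            }
    }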
     
@@ -287,5 +287,5 @@
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_guard_t & this, monitor_desc ** m, int count, fptr_t func ) {
+void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) {
         // Store current array
         this.m = m;
     
@@ -296,12 +296,8 @@
 
         // Save previous thread context
-        this.prev_mntrs = this_thread->monitors.list;
-        this.prev_count = this_thread->monitors.size;
-        this.prev_func  = this_thread->monitors.func;
+        this.[prev_mntrs, prev_count, prev_func] = this_thread->monitors.[list, size, func];
 
         // Update thread context (needed for conditions)
-        this_thread->monitors.list = m;
-        this_thread->monitors.size = count;
-        this_thread->monitors.func = func;
+        this_thread->monitors.[list, size, func] = [m, count, func];
 
         // LIB_DEBUG_PRINT_SAFE("MGUARD : enter %d\n", count);
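
The three field-by-field copies collapse into one C∀ tuple assignment: x.[a, b, c] = y.[d, e, f] assigns component-wise, and a tuple literal such as [m, count, func] may appear on the right-hand side. A minimal sketch, assuming a struct shaped like the thread's monitors member:

    struct mon_ctx {                       // assumed shape, for illustration only
            monitor_desc ** list;
            __lock_size_t size;
            fptr_t func;
    };

    void example( mon_ctx & cur, mon_ctx & saved, monitor_desc * m [], __lock_size_t count, fptr_t func ) {
            saved.[list, size, func] = cur.[list, size, func];   // save, component-wise
            cur.[list, size, func]   = [m, count, func];         // update from a tuple literal
    }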
     
@@ -325,29 +321,21 @@
 
         // Restore thread context
-        this_thread->monitors.list = this.prev_mntrs;
-        this_thread->monitors.size = this.prev_count;
-        this_thread->monitors.func = this.prev_func;
-}
-
+        this_thread->monitors.[list, size, func] = this.[prev_mntrs, prev_count, prev_func];
+}
 
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_dtor_guard_t & this, monitor_desc ** m, fptr_t func ) {
+void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) {
         // Store current array
         this.m = *m;
 
         // Save previous thread context
-        this.prev_mntrs = this_thread->monitors.list;
-        this.prev_count = this_thread->monitors.size;
-        this.prev_func  = this_thread->monitors.func;
+        this.[prev_mntrs, prev_count, prev_func] = this_thread->monitors.[list, size, func];
 
         // Update thread context (needed for conditions)
-        this_thread->monitors.list = m;
-        this_thread->monitors.size = 1;
-        this_thread->monitors.func = func;
+        this_thread->monitors.[list, size, func] = [m, 1, func];
 
         __enter_monitor_dtor( this.m, func );
 }
-
 
 // Dtor for monitor guard
     
@@ -357,12 +345,10 @@
 
         // Restore thread context
-        this_thread->monitors.list = this.prev_mntrs;
-        this_thread->monitors.size = this.prev_count;
-        this_thread->monitors.func = this.prev_func;
+        this_thread->monitors.[list, size, func] = this.[prev_mntrs, prev_count, prev_func];
 }
 
 //-----------------------------------------------------------------------------
 // Internal scheduling types
-void ?{}(__condition_node_t & this, thread_desc * waiting_thread, unsigned short count, uintptr_t user_info ) {
+void ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
         this.waiting_thread = waiting_thread;
         this.count = count;
     
@@ -378,8 +364,8 @@
 }
 
-void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t * owner ) {
+void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t & owner ) {
         this.ready  = false;
         this.target = target;
-        this.owner  = owner;
+        this.owner  = &owner;
         this.next   = NULL;
 }
     
@@ -387,14 +373,14 @@
 //-----------------------------------------------------------------------------
 // Internal scheduling
-void wait( condition * this, uintptr_t user_info = 0 ) {
+void wait( condition & this, uintptr_t user_info = 0 ) {
         brand_condition( this );
 
         // Check that everything is as expected
-        assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
-        verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
-        verifyf( this->monitor_count < 32u, "Excessive monitor count (%i)", this->monitor_count );
+        assertf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );
+        verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%i)", this.monitor_count );
+        verifyf( this.monitor_count < 32u, "Excessive monitor count (%i)", this.monitor_count );
 
         // Create storage for monitor context
-        monitor_ctx( this->monitors, this->monitor_count );
+        monitor_ctx( this.monitors, this.monitor_count );
 
         // Create the node specific to this wait operation
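
Because wait (and, below, signal, signal_block, and front) now takes condition & instead of condition *, every call site drops the address-of operator and the body switches from -> to . member access. A before/after sketch:

    condition c;
    wait( &c );                 // old interface: condition passed by pointer
    wait( c );                  // new interface: condition passed by reference
    bool woken = signal( c );   // same change for signal, signal_block and front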
     
@@ -403,5 +389,5 @@
         // Append the current wait operation to the ones already queued on the condition
         // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
-        append( &this->blocked, &waiter );
+        append( this.blocked, &waiter );
 
         // Lock all monitors (aggregates the locks as well)
     
@@ -409,5 +395,5 @@
 
         // Find the next thread(s) to run
-        short thread_count = 0;
+        __lock_size_t thread_count = 0;
         thread_desc * threads[ count ];
         __builtin_memset( threads, 0, sizeof( threads ) );
     
@@ -417,5 +403,5 @@
 
         // Remove any duplicate threads
-        for( int i = 0; i < count; i++) {
+        for( __lock_size_t i = 0; i < count; i++) {
                 thread_desc * new_owner = next_thread( monitors[i] );
                 insert_unique( threads, thread_count, new_owner );
     
@@ -429,32 +415,32 @@
 }
 
-bool signal( condition * this ) {
+bool signal( condition & this ) {
         if( is_empty( this ) ) { return false; }
 
         //Check that everything is as expected
-        verify( this->monitors );
-        verify( this->monitor_count != 0 );
+        verify( this.monitors );
+        verify( this.monitor_count != 0 );
 
         //Some more checking in debug
         LIB_DEBUG_DO(
                 thread_desc * this_thrd = this_thread;
-                if ( this->monitor_count != this_thrd->monitors.size ) {
-                        abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->monitors.size );
-                }
-
-                for(int i = 0; i < this->monitor_count; i++) {
-                        if ( this->monitors[i] != this_thrd->monitors.list[i] ) {
-                                abortf( "Signal on condition %p made with different monitor, expected %p got %i", this, this->monitors[i], this_thrd->monitors.list[i] );
+                if ( this.monitor_count != this_thrd->monitors.size ) {
+                        abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", &this, this.monitor_count, this_thrd->monitors.size );
+                }
+
+                for(int i = 0; i < this.monitor_count; i++) {
+                        if ( this.monitors[i] != this_thrd->monitors.list[i] ) {
+                                abortf( "Signal on condition %p made with different monitor, expected %p got %i", &this, this.monitors[i], this_thrd->monitors.list[i] );
                         }
                 }
         );
 
-        unsigned short count = this->monitor_count;
+        __lock_size_t count = this.monitor_count;
 
         // Lock all monitors
-        lock_all( this->monitors, NULL, count );
+        lock_all( this.monitors, NULL, count );
 
         //Pop the head of the waiting queue
-        __condition_node_t * node = pop_head( &this->blocked );
+        __condition_node_t * node = pop_head( this.blocked );
 
         //Add the thread to the proper AS stack
     
@@ -462,22 +448,22 @@
                 __condition_criterion_t * crit = &node->criteria[i];
                 assert( !crit->ready );
-                push( &crit->target->signal_stack, crit );
+                push( crit->target->signal_stack, crit );
         }
 
         //Release
-        unlock_all( this->monitors, count );
+        unlock_all( this.monitors, count );
 
         return true;
 }
 
-bool signal_block( condition * this ) {
-        if( !this->blocked.head ) { return false; }
+bool signal_block( condition & this ) {
+        if( !this.blocked.head ) { return false; }
 
         //Check that everything is as expected
-        verifyf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
-        verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
+        verifyf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );
+        verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%i)", this.monitor_count );
 
         // Create storage for monitor context
-        monitor_ctx( this->monitors, this->monitor_count );
+        monitor_ctx( this.monitors, this.monitor_count );
 
         // Lock all monitors (aggregates the locks them as well)
     
@@ -491,8 +477,8 @@
 
         //Find the thread to run
-        thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
+        thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
         set_owner( monitors, count, signallee );
 
-        LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : signal_block condition %p (s: %p)\n", this, signallee );
+        LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee );
 
         //Everything is ready to go to sleep
     
@@ -512,10 +498,10 @@
 
 // Access the user_info of the thread waiting at the front of the queue
-uintptr_t front( condition * this ) {
+uintptr_t front( condition & this ) {
         verifyf( !is_empty(this),
                 "Attempt to access user data on an empty condition.\n"
                 "Possible cause is not checking if the condition is empty before reading stored data."
         );
-        return this->blocked.head->user_info;
+        return this.blocked.head->user_info;
 }
 
     
@@ -537,10 +523,10 @@
         // This statment doesn't have a contiguous list of monitors...
         // Create one!
-        short max = count_max( mask );
+        __lock_size_t max = count_max( mask );
         monitor_desc * mon_storage[max];
         __builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
-        short actual_count = aggregate( mon_storage, mask );
-
-        LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : waitfor %d (s: %d, m: %d)\n", actual_count, mask.size, (short)max);
+        __lock_size_t actual_count = aggregate( mon_storage, mask );
+
+        LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : waitfor %d (s: %d, m: %d)\n", actual_count, mask.size, (__lock_size_t)max);
 
         if(actual_count == 0) return;
     
@@ -569,5 +555,5 @@
 
                                 __condition_criterion_t * dtor_crit = mon2dtor->dtor_node->criteria;
-                                push( &mon2dtor->signal_stack, dtor_crit );
+                                push( mon2dtor->signal_stack, dtor_crit );
 
                                 unlock_all( locks, count );
     
@@ -629,5 +615,5 @@
         set_mask( monitors, count, mask );
 
-        for(int i = 0; i < count; i++) {
+        for( __lock_size_t i = 0; i < count; i++) {
                 verify( monitors[i]->owner == this_thread );
         }
     
@@ -661,8 +647,8 @@
 }
 
-static inline void set_owner( monitor_desc ** monitors, short count, thread_desc * owner ) {
+static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
         monitors[0]->owner     = owner;
         monitors[0]->recursion = 1;
-        for( int i = 1; i < count; i++ ) {
+        for( __lock_size_t i = 1; i < count; i++ ) {
                 monitors[i]->owner     = owner;
                 monitors[i]->recursion = 0;
     
@@ -670,6 +656,6 @@
 }
 
-static inline void set_mask( monitor_desc ** storage, short count, const __waitfor_mask_t & mask ) {
-        for(int i = 0; i < count; i++) {
+static inline void set_mask( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
+        for( __lock_size_t i = 0; i < count; i++) {
                 storage[i]->mask = mask;
         }
     
@@ -685,5 +671,5 @@
         //Check the signaller stack
         LIB_DEBUG_PRINT_SAFE("Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
-        __condition_criterion_t * urgent = pop( &this->signal_stack );
+        __condition_criterion_t * urgent = pop( this->signal_stack );
         if( urgent ) {
                 //The signaller stack is not empty,
     
@@ -697,5 +683,5 @@
         // No signaller thread
         // Get the next thread in the entry_queue
-        thread_desc * new_owner = pop_head( &this->entry_queue );
+        thread_desc * new_owner = pop_head( this->entry_queue );
         set_owner( this, new_owner );
 
     
@@ -705,5 +691,5 @@
 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
         __acceptable_t * it = this->mask.clauses; // Optim
-        int count = this->mask.size;
+        __lock_size_t count = this->mask.size;
 
         // Check if there are any acceptable functions
     
@@ -714,5 +700,5 @@
 
         // For all acceptable functions check if this is the current function.
-        for( short i = 0; i < count; i++, it++ ) {
+        for( __lock_size_t i = 0; i < count; i++, it++ ) {
                 if( *it == group ) {
                         *this->mask.accepted = i;
     
@@ -725,30 +711,30 @@
 }
 
-static inline void init( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
-        for(int i = 0; i < count; i++) {
+static inline void init( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
+        for( __lock_size_t i = 0; i < count; i++) {
                 (criteria[i]){ monitors[i], waiter };
         }
 
-        waiter->criteria = criteria;
-}
-
-static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
-        for(int i = 0; i < count; i++) {
+        waiter.criteria = criteria;
+}
+
+static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
+        for( __lock_size_t i = 0; i < count; i++) {
                 (criteria[i]){ monitors[i], waiter };
                 LIB_DEBUG_PRINT_SAFE( "Kernel :  target %p = %p\n", criteria[i].target, &criteria[i] );
-                push( &criteria[i].target->signal_stack, &criteria[i] );
-        }
-
-        waiter->criteria = criteria;
-}
-
-static inline void lock_all( spinlock ** locks, unsigned short count ) {
-        for( int i = 0; i < count; i++ ) {
+                push( criteria[i].target->signal_stack, &criteria[i] );
+        }
+
+        waiter.criteria = criteria;
+}
+
+static inline void lock_all( spinlock * locks [], __lock_size_t count ) {
+        for( __lock_size_t i = 0; i < count; i++ ) {
                 lock_yield( locks[i] DEBUG_CTX2 );
         }
 }
 
-static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count ) {
-        for( int i = 0; i < count; i++ ) {
+static inline void lock_all( monitor_desc * source [], spinlock * /*out*/ locks [], __lock_size_t count ) {
+        for( __lock_size_t i = 0; i < count; i++ ) {
                 spinlock * l = &source[i]->lock;
                 lock_yield( l DEBUG_CTX2 );
     
@@ -757,18 +743,24 @@
 }
 
-static inline void unlock_all( spinlock ** locks, unsigned short count ) {
-        for( int i = 0; i < count; i++ ) {
+static inline void unlock_all( spinlock * locks [], __lock_size_t count ) {
+        for( __lock_size_t i = 0; i < count; i++ ) {
                 unlock( locks[i] );
         }
 }
 
-static inline void unlock_all( monitor_desc ** locks, unsigned short count ) {
-        for( int i = 0; i < count; i++ ) {
+static inline void unlock_all( monitor_desc * locks [], __lock_size_t count ) {
+        for( __lock_size_t i = 0; i < count; i++ ) {
                 unlock( &locks[i]->lock );
         }
 }
 
-static inline void save( monitor_desc ** ctx, short count, __attribute((unused)) spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
-        for( int i = 0; i < count; i++ ) {
+static inline void save(
+        monitor_desc * ctx [],
+        __lock_size_t count,
+        __attribute((unused)) spinlock * locks [],
+        unsigned int /*out*/ recursions [],
+        __waitfor_mask_t /*out*/ masks []
+) {
+        for( __lock_size_t i = 0; i < count; i++ ) {
                 recursions[i] = ctx[i]->recursion;
                 masks[i]      = ctx[i]->mask;
     
@@ -776,7 +768,13 @@
 }
 
-static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
+static inline void restore(
+        monitor_desc * ctx [],
+        __lock_size_t count,
+        spinlock * locks [],
+        unsigned int /*out*/ recursions [],
+        __waitfor_mask_t /*out*/ masks []
+) {
         lock_all( locks, count );
-        for( int i = 0; i < count; i++ ) {
+        for( __lock_size_t i = 0; i < count; i++ ) {
                 ctx[i]->recursion = recursions[i];
                 ctx[i]->mask      = masks[i];
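
save and restore make the same trade in their out-parameters: unsigned int * recursions becomes unsigned int recursions [], so callers hand in stack arrays sized by count. A usage sketch matching the locals that the monitor_ctx macro declares:

    unsigned int recursions[ count ];       // filled in by save
    __waitfor_mask_t masks [ count ];       // filled in by save
    save   ( monitors, count, locks, recursions, masks );
    // ... block; other threads may run inside the monitors ...
    restore( monitors, count, locks, recursions, masks );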
     
@@ -811,24 +809,24 @@
 }
 
-static inline void brand_condition( condition * this ) {
+static inline void brand_condition( condition & this ) {
         thread_desc * thrd = this_thread;
-        if( !this->monitors ) {
+        if( !this.monitors ) {
                 // LIB_DEBUG_PRINT_SAFE("Branding\n");
                 assertf( thrd->monitors.list != NULL, "No current monitor to brand condition %p", thrd->monitors.list );
-                this->monitor_count = thrd->monitors.size;
-
-                this->monitors = malloc( this->monitor_count * sizeof( *this->monitors ) );
-                for( int i = 0; i < this->monitor_count; i++ ) {
-                        this->monitors[i] = thrd->monitors.list[i];
-                }
-        }
-}
-
-static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc ** monitors, int count ) {
-
-        __thread_queue_t * entry_queue = &monitors[0]->entry_queue;
+                this.monitor_count = thrd->monitors.size;
+
+                this.monitors = malloc( this.monitor_count * sizeof( *this.monitors ) );
+                for( int i = 0; i < this.monitor_count; i++ ) {
+                        this.monitors[i] = thrd->monitors.list[i];
+                }
+        }
+}
+
+static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc * monitors [], __lock_size_t count ) {
+
+        __thread_queue_t & entry_queue = monitors[0]->entry_queue;
 
         // For each thread in the entry-queue
-        for(    thread_desc ** thrd_it = &entry_queue->head;
+        for(    thread_desc ** thrd_it = &entry_queue.head;
                 *thrd_it;
                 thrd_it = &(*thrd_it)->next
     
@@ -852,8 +850,8 @@
 
 forall(dtype T | sized( T ))
-static inline short insert_unique( T ** array, short & size, T * val ) {
+static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val ) {
         if( !val ) return size;
 
-        for(int i = 0; i <= size; i++) {
+        for( __lock_size_t i = 0; i <= size; i++) {
                 if( array[i] == val ) return size;
         }
     
@@ -864,7 +862,7 @@
 }
 
-static inline short count_max( const __waitfor_mask_t & mask ) {
-        short max = 0;
-        for( int i = 0; i < mask.size; i++ ) {
+static inline __lock_size_t count_max( const __waitfor_mask_t & mask ) {
+        __lock_size_t max = 0;
+        for( __lock_size_t i = 0; i < mask.size; i++ ) {
                 max += mask.clauses[i].size;
         }
     
@@ -872,9 +870,9 @@
 }
 
-static inline short aggregate( monitor_desc ** storage, const __waitfor_mask_t & mask ) {
-        short size = 0;
-        for( int i = 0; i < mask.size; i++ ) {
+static inline __lock_size_t aggregate( monitor_desc * storage [], const __waitfor_mask_t & mask ) {
+        __lock_size_t size = 0;
+        for( __lock_size_t i = 0; i < mask.size; i++ ) {
                 __libcfa_small_sort( mask.clauses[i].list, mask.clauses[i].size );
-                for( int j = 0; j < mask.clauses[i].size; j++) {
+                for( __lock_size_t j = 0; j < mask.clauses[i].size; j++) {
                         insert_unique( storage, size, mask.clauses[i].list[j] );
                 }
     
@@ -890,16 +888,16 @@
 }
 
-void append( __condition_blocked_queue_t * this, __condition_node_t * c ) {
-        verify(this->tail != NULL);
-        *this->tail = c;
-        this->tail = &c->next;
-}
-
-__condition_node_t * pop_head( __condition_blocked_queue_t * this ) {
-        __condition_node_t * head = this->head;
+void append( __condition_blocked_queue_t & this, __condition_node_t * c ) {
+        verify(this.tail != NULL);
+        *this.tail = c;
+        this.tail = &c->next;
+}
+
+__condition_node_t * pop_head( __condition_blocked_queue_t & this ) {
+        __condition_node_t * head = this.head;
         if( head ) {
-                this->head = head->next;
+                this.head = head->next;
                 if( !head->next ) {
-                        this->tail = &this->head;
+                        this.tail = &this.head;
                 }
                 head->next = NULL;
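
The queue helpers complete the pointer-to-reference migration, which is what turned calls such as append( &this->entry_queue, thrd ) into append( this->entry_queue, thrd ) earlier in this diff. A small usage sketch, following the declarations made by the wait_ctx macro above:

    __condition_node_t waiter = { thrd, count, user_info };
    append( this.blocked, &waiter );                    // queue by reference, node by pointer
    __condition_node_t * head = pop_head( this.blocked );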