File: 1 edited

  • src/libcfa/concurrency/monitor.c (rde737c8 → r2f6a7e93)

Legend: lines beginning with "-" are removed, lines beginning with "+" are added; all other lines are unmodified context. Blank gaps between hunks elide unchanged code.

 #include <stdlib>
+#include <inttypes.h>

 #include "libhdr.h"
     
 // Forward declarations
 static inline void set_owner ( monitor_desc * this, thread_desc * owner );
-static inline void set_owner ( monitor_desc ** storage, short count, thread_desc * owner );
-static inline void set_mask  ( monitor_desc ** storage, short count, const __waitfor_mask_t & mask );
+static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
+static inline void set_mask  ( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
 static inline void reset_mask( monitor_desc * this );
     
 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );

-static inline void lock_all( spinlock ** locks, unsigned short count );
-static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count );
-static inline void unlock_all( spinlock ** locks, unsigned short count );
-static inline void unlock_all( monitor_desc ** locks, unsigned short count );
-
-static inline void save   ( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks );
-static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*in */ recursions, __waitfor_mask_t * /*in */ masks );
-
-static inline void init     ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
-static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
+static inline void lock_all  ( spinlock * locks [], __lock_size_t count );
+static inline void lock_all  ( monitor_desc * source [], spinlock * /*out*/ locks [], __lock_size_t count );
+static inline void unlock_all( spinlock * locks [], __lock_size_t count );
+static inline void unlock_all( monitor_desc * locks [], __lock_size_t count );
+
+static inline void save   ( monitor_desc * ctx [], __lock_size_t count, spinlock * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
+static inline void restore( monitor_desc * ctx [], __lock_size_t count, spinlock * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
+
+static inline void init     ( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
+static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );

 static inline thread_desc *        check_condition   ( __condition_criterion_t * );
-static inline void                 brand_condition   ( condition * );
-static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc ** monitors, int count );
+static inline void                 brand_condition   ( condition & );
+static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc * monitors [], __lock_size_t count );

 forall(dtype T | sized( T ))
-static inline short insert_unique( T ** array, short & size, T * val );
-static inline short count_max    ( const __waitfor_mask_t & mask );
-static inline short aggregate    ( monitor_desc ** storage, const __waitfor_mask_t & mask );
+static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
+static inline __lock_size_t count_max    ( const __waitfor_mask_t & mask );
+static inline __lock_size_t aggregate    ( monitor_desc * storage [], const __waitfor_mask_t & mask );

 //-----------------------------------------------------------------------------
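Two changes recur throughout this changeset: the ad-hoc index types (short, int, unsigned short) are unified under a single __lock_size_t, and pointer-to-pointer parameters are respelled as array parameters. A minimal sketch of why the two spellings are interchangeable; the typedef here is only a stand-in for illustration, the real definition of __lock_size_t lives in the CFA headers and may differ:

    // Hypothetical stand-in; the real __lock_size_t is defined elsewhere.
    typedef unsigned int __lock_size_t;

    // In a parameter list, "int * storage []" and "int ** storage" declare
    // the same type (the array decays to a pointer); the array form simply
    // documents that the callee receives a counted array.
    static void example( int * storage [], __lock_size_t count ) {
            for( __lock_size_t i = 0; i < count; i++ ) {
                    // use storage[i]
            }
    }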
     
         __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                  */ \
         __condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up         */ \
-        init( count, monitors, &waiter, criteria );               /* Link everything together                                         */ \
+        init( count, monitors, waiter, criteria );                /* Link everything together                                         */ \

 #define wait_ctx_primed(thrd, user_info)                          /* Create the necessary information to use the signaller stack      */ \
         __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                  */ \
         __condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up         */ \
-        init_push( count, monitors, &waiter, criteria );          /* Link everything together and push it to the AS-Stack             */ \
+        init_push( count, monitors, waiter, criteria );           /* Link everything together and push it to the AS-Stack             */ \

 #define monitor_ctx( mons, cnt )                                  /* Create the necessary structs for internal/external scheduling    */ \
         monitor_desc ** monitors = mons;                          /* Save the targeted monitors                                       */ \
-        unsigned short count = cnt;                               /* Save the count to a local variable                               */ \
+        __lock_size_t count = cnt;                                /* Save the count to a local variable                               */ \
         unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later          */ \
-        __waitfor_mask_t masks[ count ];                          /* Save the current waitfor masks to restore them later             */ \
+        __waitfor_mask_t masks [ count ];                         /* Save the current waitfor masks to restore them later             */ \
         spinlock *   locks     [ count ];                         /* We need to pass-in an array of locks to BlockInternal            */ \
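The macros now pass waiter directly because init and init_push take the node by CFA reference rather than by pointer, a pattern repeated for conditions and queues below. A minimal sketch of the difference, with a hypothetical Node type:

    struct Node { int x; };
    void by_ref( Node & n ) { n.x = 1; }    // new style: callee names the object directly
    void by_ptr( Node * n ) { n->x = 1; }   // old style: caller takes the address
    // call sites:  by_ref( waiter );  versus  by_ptr( &waiter );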
     
                // Someone else has the monitor, wait in line for it
-               append( &this->entry_queue, thrd );
+               append( this->entry_queue, thrd );
                BlockInternal( &this->lock );
     
         }

-        int count = 1;
+        __lock_size_t count = 1;
         monitor_desc ** monitors = &this;
         __monitor_group_t group = { &this, 1, func };
     
                // Wake the thread that is waiting for this
-               __condition_criterion_t * urgent = pop( &this->signal_stack );
+               __condition_criterion_t * urgent = pop( this->signal_stack );
                verify( urgent );
     
                // Someone else has the monitor, wait in line for it
-               append( &this->entry_queue, thrd );
+               append( this->entry_queue, thrd );
                BlockInternal( &this->lock );
     
 // relies on the monitor array being sorted
 static inline void enter( __monitor_group_t monitors ) {
-        for(int i = 0; i < monitors.size; i++) {
+        for( __lock_size_t i = 0; i < monitors.size; i++) {
                __enter_monitor_desc( monitors.list[i], monitors );
         }
     
 // Leave multiple monitors
 // relies on the monitor array being sorted
-static inline void leave(monitor_desc ** monitors, int count) {
-        for(int i = count - 1; i >= 0; i--) {
+static inline void leave(monitor_desc * monitors [], __lock_size_t count) {
+        for( __lock_size_t i = count - 1; i >= 0; i--) {
                __leave_monitor_desc( monitors[i] );
         }
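One caution on the rewritten loop: if __lock_size_t turns out to be an unsigned type, "i >= 0" is always true and "i--" wraps around at zero instead of terminating. An underflow-safe shape for a descending loop over an unsigned index would be:

    // Counts i down from count to 1, indexing with i - 1; never underflows.
    for( __lock_size_t i = count; i > 0; i-- ) {
            __leave_monitor_desc( monitors[i - 1] );
    }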
     
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_guard_t & this, monitor_desc ** m, int count, fptr_t func ) {
+void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) {
         // Store current array
         this.m = m;
     
         // Save previous thread context
-        this.prev_mntrs = this_thread->monitors.list;
-        this.prev_count = this_thread->monitors.size;
-        this.prev_func  = this_thread->monitors.func;
+        this.[prev_mntrs, prev_count, prev_func] = this_thread->monitors.[list, size, func];

         // Update thread context (needed for conditions)
-        this_thread->monitors.list = m;
-        this_thread->monitors.size = count;
-        this_thread->monitors.func = func;
+        this_thread->monitors.[list, size, func] = [m, count, func];

         // LIB_DEBUG_PRINT_SAFE("MGUARD : enter %d\n", count);
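The three field-by-field copies collapse into a single CFA tuple member-access assignment, which assigns the listed members pairwise, left to right. A self-contained sketch of the idiom with hypothetical types:

    struct Ctx   { int list; int size; int func; };
    struct Guard { int prev_mntrs; int prev_count; int prev_func; };

    void save_ctx( Guard & g, Ctx & c ) {
            // one statement, equivalent to three separate member assignments
            g.[prev_mntrs, prev_count, prev_func] = c.[list, size, func];
    }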
     
         // Restore thread context
-        this_thread->monitors.list = this.prev_mntrs;
-        this_thread->monitors.size = this.prev_count;
-        this_thread->monitors.func = this.prev_func;
-}
-
+        this_thread->monitors.[list, size, func] = this.[prev_mntrs, prev_count, prev_func];
+}

 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_dtor_guard_t & this, monitor_desc ** m, fptr_t func ) {
+void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) {
         // Store current array
         this.m = *m;

         // Save previous thread context
-        this.prev_mntrs = this_thread->monitors.list;
-        this.prev_count = this_thread->monitors.size;
-        this.prev_func  = this_thread->monitors.func;
+        this.[prev_mntrs, prev_count, prev_func] = this_thread->monitors.[list, size, func];

         // Update thread context (needed for conditions)
-        this_thread->monitors.list = m;
-        this_thread->monitors.size = 1;
-        this_thread->monitors.func = func;
+        this_thread->monitors.[list, size, func] = [m, 1, func];

         __enter_monitor_dtor( this.m, func );
 }
-

 // Dtor for monitor guard
     
         // Restore thread context
-        this_thread->monitors.list = this.prev_mntrs;
-        this_thread->monitors.size = this.prev_count;
-        this_thread->monitors.func = this.prev_func;
+        this_thread->monitors.[list, size, func] = this.[prev_mntrs, prev_count, prev_func];
 }

 //-----------------------------------------------------------------------------
 // Internal scheduling types
-void ?{}(__condition_node_t & this, thread_desc * waiting_thread, unsigned short count, uintptr_t user_info ) {
+void ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
         this.waiting_thread = waiting_thread;
         this.count = count;
     
 }

-void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t * owner ) {
+void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t & owner ) {
         this.ready  = false;
         this.target = target;
-        this.owner  = owner;
+        this.owner  = &owner;
         this.next   = NULL;
 }
     
 //-----------------------------------------------------------------------------
 // Internal scheduling
-void wait( condition * this, uintptr_t user_info = 0 ) {
+void wait( condition & this, uintptr_t user_info = 0 ) {
         brand_condition( this );

         // Check that everything is as expected
-        assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
-        verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
-        verifyf( this->monitor_count < 32u, "Excessive monitor count (%i)", this->monitor_count );
+        assertf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );
+        verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count );
+        verifyf( this.monitor_count < 32u, "Excessive monitor count (%"PRIiFAST16")", this.monitor_count );

         // Create storage for monitor context
-        monitor_ctx( this->monitors, this->monitor_count );
+        monitor_ctx( this.monitors, this.monitor_count );

         // Create the node specific to this wait operation
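This hunk is what the new "#include <inttypes.h>" at the top of the file is for: once the count is no longer int-compatible, the format strings cannot hard-code "%i" and use the PRIiFAST16 macro instead. Illustrative standalone C, assuming the count is an int_fast16_t:

    #include <inttypes.h>
    #include <stdio.h>

    int main() {
            int_fast16_t monitor_count = 2;
            // PRIiFAST16 expands to the correct conversion for int_fast16_t
            printf( "Waiting with %" PRIiFAST16 " monitors\n", monitor_count );
            return 0;
    }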
     
         // Append the current wait operation to the ones already queued on the condition
         // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
-        append( &this->blocked, &waiter );
+        append( this.blocked, &waiter );

         // Lock all monitors (aggregates the locks as well)
     
         // Find the next thread(s) to run
-        short thread_count = 0;
+        __lock_size_t thread_count = 0;
         thread_desc * threads[ count ];
         __builtin_memset( threads, 0, sizeof( threads ) );
     
         // Remove any duplicate threads
-        for( int i = 0; i < count; i++) {
+        for( __lock_size_t i = 0; i < count; i++) {
                thread_desc * new_owner = next_thread( monitors[i] );
                insert_unique( threads, thread_count, new_owner );
     
 }

-bool signal( condition * this ) {
+bool signal( condition & this ) {
         if( is_empty( this ) ) { return false; }

         //Check that everything is as expected
-        verify( this->monitors );
-        verify( this->monitor_count != 0 );
+        verify( this.monitors );
+        verify( this.monitor_count != 0 );

         //Some more checking in debug
         LIB_DEBUG_DO(
                thread_desc * this_thrd = this_thread;
-               if ( this->monitor_count != this_thrd->monitors.size ) {
-                       abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->monitors.size );
-               }
-
-               for(int i = 0; i < this->monitor_count; i++) {
-                       if ( this->monitors[i] != this_thrd->monitors.list[i] ) {
-                               abortf( "Signal on condition %p made with different monitor, expected %p got %i", this, this->monitors[i], this_thrd->monitors.list[i] );
+               if ( this.monitor_count != this_thrd->monitors.size ) {
+                       abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", &this, this.monitor_count, this_thrd->monitors.size );
+               }
+
+               for(int i = 0; i < this.monitor_count; i++) {
+                       if ( this.monitors[i] != this_thrd->monitors.list[i] ) {
+                               abortf( "Signal on condition %p made with different monitor, expected %p got %i", &this, this.monitors[i], this_thrd->monitors.list[i] );
                        }
                }
         );

-        unsigned short count = this->monitor_count;
+        __lock_size_t count = this.monitor_count;

         // Lock all monitors
-        lock_all( this->monitors, NULL, count );
+        lock_all( this.monitors, NULL, count );

         //Pop the head of the waiting queue
-        __condition_node_t * node = pop_head( &this->blocked );
+        __condition_node_t * node = pop_head( this.blocked );

         //Add the thread to the proper AS stack
     
                __condition_criterion_t * crit = &node->criteria[i];
                assert( !crit->ready );
-               push( &crit->target->signal_stack, crit );
+               push( crit->target->signal_stack, crit );
         }

         //Release
-        unlock_all( this->monitors, count );
+        unlock_all( this.monitors, count );

         return true;
 }
-bool signal_block( condition * this ) {
-        if( !this->blocked.head ) { return false; }
+bool signal_block( condition & this ) {
+        if( !this.blocked.head ) { return false; }

         //Check that everything is as expected
-        verifyf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
-        verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
+        verifyf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );
+        verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count );

         // Create storage for monitor context
-        monitor_ctx( this->monitors, this->monitor_count );
+        monitor_ctx( this.monitors, this.monitor_count );

         // Lock all monitors (aggregates the locks as well)
     
    491478
    492479        //Find the thread to run
    493         thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
     480        thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
    494481        set_owner( monitors, count, signallee );
    495482
    496         LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : signal_block condition %p (s: %p)\n", this, signallee );
     483        LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee );
    497484
    498485        //Everything is ready to go to sleep
     
 // Access the user_info of the thread waiting at the front of the queue
-uintptr_t front( condition * this ) {
+uintptr_t front( condition & this ) {
         verifyf( !is_empty(this),
                "Attempt to access user data on an empty condition.\n"
                "Possible cause is not checking if the condition is empty before reading stored data."
         );
-        return this->blocked.head->user_info;
+        return this.blocked.head->user_info;
 }
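With wait, signal, signal_block, and front now taking condition & rather than condition *, user call sites drop the address-of operator. A sketch of the before and after:

    condition c;
    // old API:  wait( &c );  signal( &c );  uintptr_t d = front( &c );
    // new API:  wait( c );   signal( c );   uintptr_t d = front( c );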
     
         // This statement doesn't have a contiguous list of monitors...
         // Create one!
-        short max = count_max( mask );
+        __lock_size_t max = count_max( mask );
         monitor_desc * mon_storage[max];
         __builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
-        short actual_count = aggregate( mon_storage, mask );
-
-        LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : waitfor %d (s: %d, m: %d)\n", actual_count, mask.size, (short)max);
+        __lock_size_t actual_count = aggregate( mon_storage, mask );
+
+        LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : waitfor %d (s: %d, m: %d)\n", actual_count, mask.size, (__lock_size_t)max);

         if(actual_count == 0) return;
     
                        __condition_criterion_t * dtor_crit = mon2dtor->dtor_node->criteria;
-                       push( &mon2dtor->signal_stack, dtor_crit );
+                       push( mon2dtor->signal_stack, dtor_crit );

                        unlock_all( locks, count );
     
         set_mask( monitors, count, mask );

-        for(int i = 0; i < count; i++) {
+        for( __lock_size_t i = 0; i < count; i++) {
                verify( monitors[i]->owner == this_thread );
         }
     
 }

-static inline void set_owner( monitor_desc ** monitors, short count, thread_desc * owner ) {
+static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
         monitors[0]->owner     = owner;
         monitors[0]->recursion = 1;
-        for( int i = 1; i < count; i++ ) {
+        for( __lock_size_t i = 1; i < count; i++ ) {
                monitors[i]->owner     = owner;
                monitors[i]->recursion = 0;
     
 }

-static inline void set_mask( monitor_desc ** storage, short count, const __waitfor_mask_t & mask ) {
-        for(int i = 0; i < count; i++) {
+static inline void set_mask( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
+        for( __lock_size_t i = 0; i < count; i++) {
                storage[i]->mask = mask;
         }
     
         //Check the signaller stack
         LIB_DEBUG_PRINT_SAFE("Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
-        __condition_criterion_t * urgent = pop( &this->signal_stack );
+        __condition_criterion_t * urgent = pop( this->signal_stack );
         if( urgent ) {
                //The signaller stack is not empty,
     
         // No signaller thread
         // Get the next thread in the entry_queue
-        thread_desc * new_owner = pop_head( &this->entry_queue );
+        thread_desc * new_owner = pop_head( this->entry_queue );
         set_owner( this, new_owner );
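The surrounding function encodes the hand-off priority of the monitor: a releasing thread first serves the signaller (AS) stack, and only when that is empty does it take the next thread from the entry queue. The shape of that logic, written as a hypothetical standalone helper using the declarations above:

    // Hypothetical helper; mirrors the logic of the surrounding code.
    thread_desc * pick_next_owner( monitor_desc * this ) {
            __condition_criterion_t * urgent = pop( this->signal_stack );
            if( urgent ) {
                    // a signalled waiter gets the monitor first
                    return check_condition( urgent );
            }
            // otherwise, the next thread waiting at the entry
            return pop_head( this->entry_queue );
    }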
     
 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
         __acceptable_t * it = this->mask.clauses; // Optim
-        int count = this->mask.size;
+        __lock_size_t count = this->mask.size;

         // Check if there are any acceptable functions
     
         // For all acceptable functions check if this is the current function.
-        for( short i = 0; i < count; i++, it++ ) {
+        for( __lock_size_t i = 0; i < count; i++, it++ ) {
                if( *it == group ) {
                        *this->mask.accepted = i;
     
 }

-static inline void init( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
-        for(int i = 0; i < count; i++) {
+static inline void init( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
+        for( __lock_size_t i = 0; i < count; i++) {
                (criteria[i]){ monitors[i], waiter };
         }

-        waiter->criteria = criteria;
-}
-
-static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
-        for(int i = 0; i < count; i++) {
+        waiter.criteria = criteria;
+}
+
+static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
+        for( __lock_size_t i = 0; i < count; i++) {
                (criteria[i]){ monitors[i], waiter };
                LIB_DEBUG_PRINT_SAFE( "Kernel :  target %p = %p\n", criteria[i].target, &criteria[i] );
-               push( &criteria[i].target->signal_stack, &criteria[i] );
-        }
-
-        waiter->criteria = criteria;
-}
-
-static inline void lock_all( spinlock ** locks, unsigned short count ) {
-        for( int i = 0; i < count; i++ ) {
+               push( criteria[i].target->signal_stack, &criteria[i] );
+        }
+
+        waiter.criteria = criteria;
+}
+
+static inline void lock_all( spinlock * locks [], __lock_size_t count ) {
+        for( __lock_size_t i = 0; i < count; i++ ) {
                lock_yield( locks[i] DEBUG_CTX2 );
         }
 }

-static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count ) {
-        for( int i = 0; i < count; i++ ) {
+static inline void lock_all( monitor_desc * source [], spinlock * /*out*/ locks [], __lock_size_t count ) {
+        for( __lock_size_t i = 0; i < count; i++ ) {
                spinlock * l = &source[i]->lock;
                lock_yield( l DEBUG_CTX2 );
     
 }

-static inline void unlock_all( spinlock ** locks, unsigned short count ) {
-        for( int i = 0; i < count; i++ ) {
+static inline void unlock_all( spinlock * locks [], __lock_size_t count ) {
+        for( __lock_size_t i = 0; i < count; i++ ) {
                unlock( locks[i] );
         }
 }

-static inline void unlock_all( monitor_desc ** locks, unsigned short count ) {
-        for( int i = 0; i < count; i++ ) {
+static inline void unlock_all( monitor_desc * locks [], __lock_size_t count ) {
+        for( __lock_size_t i = 0; i < count; i++ ) {
                unlock( &locks[i]->lock );
         }
 }

-static inline void save( monitor_desc ** ctx, short count, __attribute((unused)) spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
-        for( int i = 0; i < count; i++ ) {
+static inline void save(
+        monitor_desc * ctx [],
+        __lock_size_t count,
+        __attribute((unused)) spinlock * locks [],
+        unsigned int /*out*/ recursions [],
+        __waitfor_mask_t /*out*/ masks []
+) {
+        for( __lock_size_t i = 0; i < count; i++ ) {
                recursions[i] = ctx[i]->recursion;
                masks[i]      = ctx[i]->mask;
     
 }

-static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
+static inline void restore(
+        monitor_desc * ctx [],
+        __lock_size_t count,
+        spinlock * locks [],
+        unsigned int /*out*/ recursions [],
+        __waitfor_mask_t /*out*/ masks []
+) {
         lock_all( locks, count );
-        for( int i = 0; i < count; i++ ) {
+        for( __lock_size_t i = 0; i < count; i++ ) {
                ctx[i]->recursion = recursions[i];
                ctx[i]->mask      = masks[i];
     
 }

-static inline void brand_condition( condition * this ) {
+static inline void brand_condition( condition & this ) {
         thread_desc * thrd = this_thread;
-        if( !this->monitors ) {
+        if( !this.monitors ) {
                // LIB_DEBUG_PRINT_SAFE("Branding\n");
                assertf( thrd->monitors.list != NULL, "No current monitor to brand condition %p", thrd->monitors.list );
-               this->monitor_count = thrd->monitors.size;
-
-               this->monitors = malloc( this->monitor_count * sizeof( *this->monitors ) );
-               for( int i = 0; i < this->monitor_count; i++ ) {
-                       this->monitors[i] = thrd->monitors.list[i];
-               }
-        }
-}
-
-static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc ** monitors, int count ) {
-
-        __thread_queue_t * entry_queue = &monitors[0]->entry_queue;
+               this.monitor_count = thrd->monitors.size;
+
+               this.monitors = malloc( this.monitor_count * sizeof( *this.monitors ) );
+               for( int i = 0; i < this.monitor_count; i++ ) {
+                       this.monitors[i] = thrd->monitors.list[i];
+               }
+        }
+}
+
+static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc * monitors [], __lock_size_t count ) {
+
+        __thread_queue_t & entry_queue = monitors[0]->entry_queue;

         // For each thread in the entry-queue
-        for(    thread_desc ** thrd_it = &entry_queue->head;
+        for(    thread_desc ** thrd_it = &entry_queue.head;
                *thrd_it;
                thrd_it = &(*thrd_it)->next
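search_entry_queue returns a CFA tuple, declared with the bracketed return-type syntax seen in its signature. A minimal sketch of the feature with a hypothetical function:

    [ int, int ] divmod( int num, int den ) {
            return [ num / den, num % den ];    // build the tuple result
    }
    // usage:
    // int q, r;
    // [q, r] = divmod( 7, 2 );    // q == 3, r == 1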
     
 forall(dtype T | sized( T ))
-static inline short insert_unique( T ** array, short & size, T * val ) {
+static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val ) {
         if( !val ) return size;

-        for(int i = 0; i <= size; i++) {
+        for( __lock_size_t i = 0; i <= size; i++) {
                if( array[i] == val ) return size;
         }
     
 }

-static inline short count_max( const __waitfor_mask_t & mask ) {
-        short max = 0;
-        for( int i = 0; i < mask.size; i++ ) {
+static inline __lock_size_t count_max( const __waitfor_mask_t & mask ) {
+        __lock_size_t max = 0;
+        for( __lock_size_t i = 0; i < mask.size; i++ ) {
                max += mask.clauses[i].size;
         }
     
 }

-static inline short aggregate( monitor_desc ** storage, const __waitfor_mask_t & mask ) {
-        short size = 0;
-        for( int i = 0; i < mask.size; i++ ) {
+static inline __lock_size_t aggregate( monitor_desc * storage [], const __waitfor_mask_t & mask ) {
+        __lock_size_t size = 0;
+        for( __lock_size_t i = 0; i < mask.size; i++ ) {
                __libcfa_small_sort( mask.clauses[i].list, mask.clauses[i].size );
-               for( int j = 0; j < mask.clauses[i].size; j++) {
+               for( __lock_size_t j = 0; j < mask.clauses[i].size; j++) {
                        insert_unique( storage, size, mask.clauses[i].list[j] );
                }
     
 }

-void append( __condition_blocked_queue_t * this, __condition_node_t * c ) {
-        verify(this->tail != NULL);
-        *this->tail = c;
-        this->tail = &c->next;
-}
-
-__condition_node_t * pop_head( __condition_blocked_queue_t * this ) {
-        __condition_node_t * head = this->head;
+void append( __condition_blocked_queue_t & this, __condition_node_t * c ) {
+        verify(this.tail != NULL);
+        *this.tail = c;
+        this.tail = &c->next;
+}
+
+__condition_node_t * pop_head( __condition_blocked_queue_t & this ) {
+        __condition_node_t * head = this.head;
         if( head ) {
-               this->head = head->next;
+               this.head = head->next;
                if( !head->next ) {
-                       this->tail = &this->head;
+                       this.tail = &this.head;
                }
                head->next = NULL;
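append and pop_head implement the classic pointer-to-pointer queue: tail points at the last node's next field (or at head when the queue is empty), so append needs no empty-queue special case. A self-contained C sketch of the same technique with a hypothetical node type:

    struct node  { struct node * next; };
    struct queue { struct node * head; struct node ** tail; };

    static void q_init( struct queue * q ) {
            q->head = NULL;
            q->tail = &q->head;      // empty: tail points at head itself
    }

    static void q_append( struct queue * q, struct node * n ) {
            n->next  = NULL;
            *q->tail = n;            // link after the current last node (or set head)
            q->tail  = &n->next;     // the new node's next field is now last
    }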