Timestamp: Sep 26, 2017, 5:19:32 PM (8 years ago)
Author: Thierry Delisle <tdelisle@…>
Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
Children: 5dc26f5
Parents: af58ee0, a7d151f
Note: this is a merge changeset; the changes displayed below correspond to the merge itself, not to the full set of changes relative to either parent.
Message: Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

File: 1 edited

Legend: lines prefixed with '-' were removed, lines prefixed with '+' were added, and unprefixed lines are unchanged context; hunk headers of the form @@ -N +M @@ give the starting line of the hunk in the first parent (raf58ee0) and in the merge (r74bba15).
  • src/libcfa/concurrency/monitor.c

--- src/libcfa/concurrency/monitor.c (raf58ee0)
+++ src/libcfa/concurrency/monitor.c (r74bba15)
@@ -23 +23 @@
 //-----------------------------------------------------------------------------
 // Forward declarations
-static inline void set_owner( monitor_desc * this, thread_desc * owner );
+static inline void set_owner ( monitor_desc * this, thread_desc * owner );
+static inline void set_owner ( monitor_desc ** storage, short count, thread_desc * owner );
+static inline void set_mask  ( monitor_desc ** storage, short count, const __waitfor_mask_t & mask );
+static inline void reset_mask( monitor_desc * this );
+
 static inline thread_desc * next_thread( monitor_desc * this );
-static inline int is_accepted( thread_desc * owner, monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() );
+static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );

 static inline void lock_all( spinlock ** locks, unsigned short count );
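The new declarations above are written against two aggregate types defined elsewhere in the concurrency headers, __waitfor_mask_t and __monitor_group_t. As a reading aid only, here is a rough sketch of their shape inferred from how their fields are used later in this diff (accepted, clauses, size, list, func); the field types and the real definitions in libcfa may differ, and the ?==? comparison between an __acceptable_t and a __monitor_group_t is assumed to be declared elsewhere:

struct __acceptable_t {                 // one clause of a waitfor statement (sketch only)
        void (*func)();                 // routine the clause accepts
        short size;                     // number of monitors named by the clause
        monitor_desc ** list;           // the monitors themselves
        bool is_dtor;                   // true if the clause accepts the destructor
};

struct __waitfor_mask_t {               // what a blocked waitfor is willing to accept (sketch only)
        short * accepted;               // out-parameter: index of the clause that matched, or negative
        __acceptable_t * clauses;       // array of clauses
        short size;                     // number of clauses
};

struct __monitor_group_t {              // monitors held by the current mutex call (sketch only)
        monitor_desc ** list;           // sorted monitor array
        short size;                     // number of monitors
        void (*func)();                 // routine the group was entered through
};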
     
@@ -32 +36 @@
 static inline void unlock_all( monitor_desc ** locks, unsigned short count );

-static inline void save_recursion   ( monitor_desc ** ctx, unsigned int * /*out*/ recursions, unsigned short count );
-static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count );
+static inline void save   ( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks );
+static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*in */ recursions, __waitfor_mask_t * /*in */ masks );

 static inline void init     ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
 static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );

-static inline thread_desc * check_condition( __condition_criterion_t * );
-static inline void brand_condition( condition * );
-static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val );
-
-static inline thread_desc * search_entry_queue( __acceptable_t * acceptables, int acc_count, monitor_desc ** monitors, int count );
+static inline thread_desc *        check_condition   ( __condition_criterion_t * );
+static inline void                 brand_condition   ( condition * );
+static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc ** monitors, int count );
+
+forall(dtype T | sized( T ))
+static inline short insert_unique( T ** array, short & size, T * val );
+static inline short count_max    ( const __waitfor_mask_t & mask );
+static inline short aggregate    ( monitor_desc ** storage, const __waitfor_mask_t & mask );

 //-----------------------------------------------------------------------------
 // Useful defines
-#define wait_ctx(thrd, user_info)                               /* Create the necessary information to use the signaller stack       */ \
-        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                   */ \
-        __condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up          */ \
-        init( count, monitors, &waiter, criteria );               /* Link everything together                                          */ \
-
-#define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack       */ \
-        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                   */ \
-        __condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up          */ \
-        init_push( count, monitors, &waiter, criteria );          /* Link everything together and push it to the AS-Stack              */ \
-
-#define monitor_ctx( mons, cnt )              /* Define that create the necessary struct for internal/external scheduling operations */ \
-        monitor_desc ** monitors = mons;        /* Save the targeted monitors                                                          */ \
-        unsigned short count = cnt;             /* Save the count to a local variable                                                  */ \
-        unsigned int recursions[ count ];       /* Save the current recursion levels to restore them later                             */ \
-        spinlock *   locks     [ count ];       /* We need to pass-in an array of locks to BlockInternal                               */ \
+#define wait_ctx(thrd, user_info)                               /* Create the necessary information to use the signaller stack                         */ \
+        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
+        __condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up                            */ \
+        init( count, monitors, &waiter, criteria );               /* Link everything together                                                            */ \
+
+#define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack                         */ \
+        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
+        __condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up                            */ \
+        init_push( count, monitors, &waiter, criteria );          /* Link everything together and push it to the AS-Stack                                */ \
+
+#define monitor_ctx( mons, cnt )                                /* Define that create the necessary struct for internal/external scheduling operations */ \
+        monitor_desc ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
+        unsigned short count = cnt;                               /* Save the count to a local variable                                                  */ \
+        unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
+        __waitfor_mask_t masks[ count ];                          /* Save the current waitfor masks to restore them later                                */ \
+        spinlock *   locks     [ count ];                         /* We need to pass-in an array of locks to BlockInternal                               */ \
+
+#define monitor_save    save   ( monitors, count, locks, recursions, masks )
+#define monitor_restore restore( monitors, count, locks, recursions, masks )
+

 //-----------------------------------------------------------------------------
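Taken together, monitor_ctx, monitor_save, and monitor_restore spell out the pattern every blocking operation in this file follows: declare the per-call arrays, lock the monitors, save their state, block, then restore. A condensed sketch of that shape, assuming only the macros and routines shown above (example_blocking_op is a made-up name, not a routine from this changeset):

static void example_blocking_op( monitor_desc ** mons, int cnt ) {
        monitor_ctx( mons, cnt );              // declares monitors, count, recursions[], masks[] and locks[]
        lock_all( monitors, locks, count );    // acquire every monitor spinlock, remembering each lock
        monitor_save;                          // stash recursion depths and waitfor masks
        // ... choose the next owner(s) here ...
        BlockInternal( locks, count );         // hand the locks to the kernel to release and block this thread
        monitor_restore;                       // re-lock, restore recursions and masks, unlock
}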
     
@@ -68 +80 @@
 extern "C" {
         // Enter single monitor
-        static void __enter_monitor_desc( monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() ) {
+        static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
                 // Lock the monitor spinlock, lock_yield to reduce contention
                 lock_yield( &this->lock DEBUG_CTX2 );

@@ -75 +87 @@
                 LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);

-                this->accepted_index = -1;
                 if( !this->owner ) {
                         // No one has the monitor, just take it

@@ -89 +100 @@
                         LIB_DEBUG_PRINT_SAFE("Kernel :  mon already owned \n");
                 }
-                else if( (this->accepted_index = is_accepted( thrd, this, group, group_cnt, func)) >= 0 ) {
+                else if( is_accepted( this, group) ) {
                         // Some one was waiting for us, enter
                         set_owner( this, thrd );
+
+                        // Reset mask
+                        reset_mask( this );

                         LIB_DEBUG_PRINT_SAFE("Kernel :  mon accepts \n");

@@ -120 +134 @@
                 lock_yield( &this->lock DEBUG_CTX2 );

-                verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion );
+                LIB_DEBUG_PRINT_SAFE("Kernel : %10p Leaving mon %p (%p)\n", this_thread, this, this->owner);
+
+                verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", this_thread, this->owner, this->recursion, this );

                 // Leaving a recursion level, decrement the counter

@@ -146 +162 @@
         // Should never return
         void __leave_thread_monitor( thread_desc * thrd ) {
-                monitor_desc * this = &thrd->mon;
+                monitor_desc * this = &thrd->self_mon;

                 // Lock the monitor now

@@ -153 +169 @@
                 disable_interrupts();

-                thrd->cor.state = Halted;
-
-                verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );
+                thrd->self_cor.state = Halted;
+
+                verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );

                 // Leaving a recursion level, decrement the counter

@@ -178 +194 @@
 // Enter multiple monitor
 // relies on the monitor array being sorted
-static inline void enter(monitor_desc ** monitors, int count, void (*func)() ) {
-        for(int i = 0; i < count; i++) {
-                __enter_monitor_desc( monitors[i], monitors, count, func );
+static inline void enter( __monitor_group_t monitors ) {
+        for(int i = 0; i < monitors.size; i++) {
+                __enter_monitor_desc( monitors.list[i], monitors );
         }
 }
     
@@ -203 +219 @@

         // Save previous thread context
-        this.prev_mntrs = this_thread->current_monitors;
-        this.prev_count = this_thread->current_monitor_count;
-        this.prev_func  = this_thread->current_monitor_func;
+        this.prev_mntrs = this_thread->monitors.list;
+        this.prev_count = this_thread->monitors.size;
+        this.prev_func  = this_thread->monitors.func;

         // Update thread context (needed for conditions)
-        this_thread->current_monitors      = m;
-        this_thread->current_monitor_count = count;
-        this_thread->current_monitor_func  = func;
+        this_thread->monitors.list = m;
+        this_thread->monitors.size = count;
+        this_thread->monitors.func = func;
+
+        // LIB_DEBUG_PRINT_SAFE("MGUARD : enter %d\n", count);

         // Enter the monitors in order
-        enter( this.m, this.count, func );
+        __monitor_group_t group = {this.m, this.count, func};
+        enter( group );
+
+        // LIB_DEBUG_PRINT_SAFE("MGUARD : entered\n");
 }


@@ -219 +240 @@
 // Dtor for monitor guard
 void ^?{}( monitor_guard_t & this ) {
+        // LIB_DEBUG_PRINT_SAFE("MGUARD : leaving %d\n", this.count);
+
         // Leave the monitors in order
         leave( this.m, this.count );

+        // LIB_DEBUG_PRINT_SAFE("MGUARD : left\n");
+
         // Restore thread context
-        this_thread->current_monitors      = this.prev_mntrs;
-        this_thread->current_monitor_count = this.prev_count;
-        this_thread->current_monitor_func = this.prev_func;
+        this_thread->monitors.list = this.prev_mntrs;
+        this_thread->monitors.size = this.prev_count;
+        this_thread->monitors.func = this.prev_func;
 }

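These guard routines are what bracket a call to a mutex routine: the constructor saves the caller's previous monitor set and enters the new one, the destructor leaves it and restores the previous set. For orientation, a minimal user-level sketch of code that would exercise the guard; bank_account and deposit are illustrative names only, and the exact declaration syntax should be checked against the CFA documentation:

monitor bank_account {                  // a monitor type: data plus an implicit lock and queues
        int balance;
};

void deposit( bank_account & mutex acc, int amount ) {
        // entering a mutex routine constructs the guard above for acc,
        // returning from it runs the guard's destructor
        acc.balance += amount;
}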
     
@@ -271 +296 @@
         append( &this->blocked, &waiter );

-        // Lock all monitors (aggregates the lock them as well)
+        // Lock all monitors (aggregates the locks as well)
         lock_all( monitors, locks, count );

-        // DON'T unlock, ask the kernel to do it
-
-        // Save monitor state
-        save_recursion( monitors, recursions, count );
-
         // Find the next thread(s) to run
-        unsigned short thread_count = 0;
+        short thread_count = 0;
         thread_desc * threads[ count ];
         for(int i = 0; i < count; i++) {

@@ -286 +306 @@
         }

+        // Save monitor states
+        monitor_save;
+
         // Remove any duplicate threads
         for( int i = 0; i < count; i++) {
                 thread_desc * new_owner = next_thread( monitors[i] );
-                thread_count = insert_unique( threads, thread_count, new_owner );
+                insert_unique( threads, thread_count, new_owner );
         }


@@ -295 +318 @@
         BlockInternal( locks, count, threads, thread_count );

-
-        // WE WOKE UP
-
-
         // We are back, restore the owners and recursions
-        lock_all( locks, count );
-        restore_recursion( monitors, recursions, count );
-        unlock_all( locks, count );
+        monitor_restore;
 }


@@ -315 +332 @@
         LIB_DEBUG_DO(
                 thread_desc * this_thrd = this_thread;
-                if ( this->monitor_count != this_thrd->current_monitor_count ) {
-                        abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count );
+                if ( this->monitor_count != this_thrd->monitors.size ) {
+                        abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->monitors.size );
                 }

                 for(int i = 0; i < this->monitor_count; i++) {
-                        if ( this->monitors[i] != this_thrd->current_monitors[i] ) {
-                                abortf( "Signal on condition %p made with different monitor, expected %p got %i", this, this->monitors[i], this_thrd->current_monitors[i] );
+                        if ( this->monitors[i] != this_thrd->monitors.list[i] ) {
+                                abortf( "Signal on condition %p made with different monitor, expected %p got %i", this, this->monitors[i], this_thrd->monitors.list[i] );
                         }
                 }

@@ -364 +381 @@

         //save contexts
-        save_recursion( monitors, recursions, count );
+        monitor_save;

         //Find the thread to run
         thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
-        for(int i = 0; i < count; i++) {
-                set_owner( monitors[i], signallee );
-        }
+        set_owner( monitors, count, signallee );

         //Everything is ready to go to sleep

@@ -379 +394 @@


-        //We are back, restore the owners and recursions
-        lock_all( locks, count );
-        restore_recursion( monitors, recursions, count );
-        unlock_all( locks, count );
+        //We are back, restore the masks and recursions
+        monitor_restore;

         return true;
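The hunks above rework the internal-scheduling paths: the wait path now saves both recursion levels and waitfor masks through monitor_save, and the signalling path shown hands every monitor to the popped thread with the new group set_owner before blocking and restoring. At the user level this machinery sits behind the usual condition idiom, sketched here with made-up names (buffer, not_empty, put, take); the precise semantics of signal in CFA should be taken from the language documentation rather than from this sketch:

monitor buffer {                        // hypothetical example monitor
        condition not_empty;
        int count;
};

void put( buffer & mutex b ) {
        b.count += 1;
        signal( b.not_empty );          // wake a thread blocked on the condition, if any
}

void take( buffer & mutex b ) {
        if( b.count == 0 ) wait( b.not_empty );   // park this thread and release b until signalled
        b.count -= 1;
}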
     
@@ -397 +410 @@

 //-----------------------------------------------------------------------------
-// Internal scheduling
-int __accept_internal( unsigned short acc_count, __acceptable_t * acceptables ) {
-        thread_desc * thrd = this_thread;
+// External scheduling
+// cases to handle :
+//      - target already there :
+//              block and wake
+//      - dtor already there
+//              put thread on signaller stack
+//      - non-blocking
+//              return else
+//      - timeout
+//              return timeout
+//      - block
+//              setup mask
+//              block
+void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {
+        // This statment doesn't have a contiguous list of monitors...
+        // Create one!
+        short max = count_max( mask );
+        monitor_desc * mon_storage[max];
+        short actual_count = aggregate( mon_storage, mask );
+
+        if(actual_count == 0) return;
+
+        LIB_DEBUG_PRINT_SAFE("Kernel : waitfor internal proceeding\n");

         // Create storage for monitor context
-        monitor_ctx( acceptables->monitors, acceptables->count );
-
-        // Lock all monitors (aggregates the lock them as well)
+        monitor_ctx( mon_storage, actual_count );
+
+        // Lock all monitors (aggregates the locks as well)
         lock_all( monitors, locks, count );

+        {
+                // Check if the entry queue
+                thread_desc * next; int index;
+                [next, index] = search_entry_queue( mask, monitors, count );
+
+                if( next ) {
+                        *mask.accepted = index;
+                        if( mask.clauses[index].is_dtor ) {
+                                #warning case not implemented
+                        }
+                        else {
+                                LIB_DEBUG_PRINT_SAFE("Kernel : thread present, baton-passing\n");
+
+                                // Create the node specific to this wait operation
+                                wait_ctx_primed( this_thread, 0 );
+
+                                // Save monitor states
+                                monitor_save;
+
+                                // Set the owners to be the next thread
+                                set_owner( monitors, count, next );
+
+                                // Everything is ready to go to sleep
+                                BlockInternal( locks, count, &next, 1 );
+
+                                // We are back, restore the owners and recursions
+                                monitor_restore;
+
+                                LIB_DEBUG_PRINT_SAFE("Kernel : thread present, returned\n");
+                        }
+
+                        LIB_DEBUG_PRINT_SAFE("Kernel : accepted %d\n", *mask.accepted);
+
+                        return;
+                }
+        }
+
+
+        if( duration == 0 ) {
+                LIB_DEBUG_PRINT_SAFE("Kernel : non-blocking, exiting\n");
+
+                unlock_all( locks, count );
+
+                LIB_DEBUG_PRINT_SAFE("Kernel : accepted %d\n", *mask.accepted);
+                return;
+        }
+
+
+        verifyf( duration < 0, "Timeout on waitfor statments not supported yet.");
+
+        LIB_DEBUG_PRINT_SAFE("Kernel : blocking waitfor\n");
+
         // Create the node specific to this wait operation
-        wait_ctx_primed( thrd, 0 );
-
-        // Check if the entry queue
-        thread_desc * next = search_entry_queue( acceptables, acc_count, monitors, count );
-
-        LIB_DEBUG_PRINT_SAFE("Owner(s) :");
+        wait_ctx_primed( this_thread, 0 );
+
+        monitor_save;
+        set_mask( monitors, count, mask );
+
         for(int i = 0; i < count; i++) {
-                LIB_DEBUG_PRINT_SAFE(" %p", monitors[i]->owner );
-        }
-        LIB_DEBUG_PRINT_SAFE("\n");
-
-        LIB_DEBUG_PRINT_SAFE("Passing mon to %p\n", next);
-
-        if( !next ) {
-                // Update acceptables on the current monitors
-                for(int i = 0; i < count; i++) {
-                        monitors[i]->acceptables = acceptables;
-                        monitors[i]->acceptable_count = acc_count;
-                }
-        }
-        else {
-                for(int i = 0; i < count; i++) {
-                        set_owner( monitors[i], next );
-                }
-        }
-
-
-        save_recursion( monitors, recursions, count );
-
-
-        // Everything is ready to go to sleep
-        BlockInternal( locks, count, &next, next ? 1 : 0 );
-
-
-        //WE WOKE UP
-
-
-        //We are back, restore the owners and recursions
-        lock_all( locks, count );
-        restore_recursion( monitors, recursions, count );
-        int acc_idx = monitors[0]->accepted_index;
-        unlock_all( locks, count );
-
-        return acc_idx;
+                verify( monitors[i]->owner == this_thread );
+        }
+
+        //Everything is ready to go to sleep
+        BlockInternal( locks, count );
+
+
+        // WE WOKE UP
+
+
+        //We are back, restore the masks and recursions
+        monitor_restore;
+
+        LIB_DEBUG_PRINT_SAFE("Kernel : exiting\n");
+
+        LIB_DEBUG_PRINT_SAFE("Kernel : accepted %d\n", *mask.accepted);
 }

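__waitfor_internal above is the runtime side of the external-scheduling waitfor statement: the mask lists which routines the caller will accept on which monitors, duration carries the (still unsupported) timeout, and *mask.accepted reports which clause fired. A minimal user-level sketch of the statement it serves, restricted to the simple blocking form and using made-up names (buffer, insert, remove); other clause forms such as else or timeout are left out because the code above shows the timeout case is not finished:

monitor buffer {                        // hypothetical single-slot buffer
        int elem;
        bool full;
};

void insert( buffer & mutex b, int v ) {
        b.elem = v;
        b.full = true;
}

int remove( buffer & mutex b ) {
        if( !b.full ) waitfor( insert, b );   // block until another thread calls insert on b
        b.full = false;
        return b.elem;
}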
     
@@ -458 +521 @@

 static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
+        // LIB_DEBUG_PRINT_SAFE("Kernal :   Setting owner of %p to %p ( was %p)\n", this, owner, this->owner );
+
         //Pass the monitor appropriately
         this->owner = owner;

@@ -463 +528 @@
         //We are passing the monitor to someone else, which means recursion level is not 0
         this->recursion = owner ? 1 : 0;
+}
+
+static inline void set_owner( monitor_desc ** monitors, short count, thread_desc * owner ) {
+        for( int i = 0; i < count; i++ ) {
+                set_owner( monitors[i], owner );
+        }
+}
+
+static inline void set_mask( monitor_desc ** storage, short count, const __waitfor_mask_t & mask ) {
+        for(int i = 0; i < count; i++) {
+                storage[i]->mask = mask;
+        }
+}
+
+static inline void reset_mask( monitor_desc * this ) {
+        this->mask.accepted = NULL;
+        this->mask.clauses = NULL;
+        this->mask.size = 0;
 }


@@ -485 +568 @@
 }

-static inline int is_accepted( thread_desc * owner, monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() ) {
-        __acceptable_t* accs = this->acceptables; // Optim
-        int acc_cnt = this->acceptable_count;
+static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
+        __acceptable_t * it = this->mask.clauses; // Optim
+        int count = this->mask.size;

         // Check if there are any acceptable functions
-        if( !accs ) return -1;
+        if( !it ) return false;

         // If this isn't the first monitor to test this, there is no reason to repeat the test.
-        if( this != group[0] ) return group[0]->accepted_index;
+        if( this != group[0] ) return group[0]->mask.accepted >= 0;

         // For all acceptable functions check if this is the current function.
-        OUT_LOOP:
-        for( int i = 0; i < acc_cnt; i++ ) {
-                __acceptable_t * acc = &accs[i];
-
-                // if function matches, check the monitors
-                if( acc->func == func ) {
-
-                        // If the group count is different then it can't be a match
-                        if( acc->count != group_cnt ) return -1;
-
-                        // Check that all the monitors match
-                        for( int j = 0; j < group_cnt; j++ ) {
-                                // If not a match, check next function
-                                if( acc->monitors[j] != group[j] ) continue OUT_LOOP;
-                        }
-
-                        // It's a complete match, accept the call
-                        return i;
+        for( short i = 0; i < count; i++, it++ ) {
+                if( *it == group ) {
+                        *this->mask.accepted = i;
+                        return true;
                 }
         }

         // No function matched
-        return -1;
+        return false;
 }


@@ -564 +633 @@
 }

-
-static inline void save_recursion   ( monitor_desc ** ctx, unsigned int * /*out*/ recursions, unsigned short count ) {
+static inline void save( monitor_desc ** ctx, short count, __attribute((unused)) spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
         for( int i = 0; i < count; i++ ) {
                 recursions[i] = ctx[i]->recursion;
-        }
-}
-
-static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count ) {
+                masks[i]      = ctx[i]->mask;
+        }
+}
+
+static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
+        lock_all( locks, count );
         for( int i = 0; i < count; i++ ) {
                 ctx[i]->recursion = recursions[i];
-        }
+                ctx[i]->mask      = masks[i];
+        }
+        unlock_all( locks, count );
 }


@@ -607 +679 @@
         if( !this->monitors ) {
                 // LIB_DEBUG_PRINT_SAFE("Branding\n");
-                assertf( thrd->current_monitors != NULL, "No current monitor to brand condition %p", thrd->current_monitors );
-                this->monitor_count = thrd->current_monitor_count;
+                assertf( thrd->monitors.list != NULL, "No current monitor to brand condition %p", thrd->monitors.list );
+                this->monitor_count = thrd->monitors.size;

                 this->monitors = malloc( this->monitor_count * sizeof( *this->monitors ) );
                 for( int i = 0; i < this->monitor_count; i++ ) {
-                        this->monitors[i] = thrd->current_monitors[i];
-                }
-        }
-}
-
-static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val ) {
-        if( !val ) return end;
-
-        for(int i = 0; i <= end; i++) {
-                if( thrds[i] == val ) return end;
-        }
-
-        thrds[end] = val;
-        return end + 1;
-}
-
-
-static inline bool match( __acceptable_t * acc, thread_desc * thrd ) {
-        verify( thrd );
-        verify( acc );
-        if( acc->func != thrd->current_monitor_func ) return false;
-
-        return true;
-}
-
-static inline thread_desc * search_entry_queue( __acceptable_t * acceptables, int acc_count, monitor_desc ** monitors, int count ) {
+                        this->monitors[i] = thrd->monitors.list[i];
+                }
+        }
+}
+
+static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc ** monitors, int count ) {

         __thread_queue_t * entry_queue = &monitors[0]->entry_queue;

@@ -644 +696 @@
         for(    thread_desc ** thrd_it = &entry_queue->head;
                 *thrd_it;
-                thrd_it = &(*thrd_it)->next)
-        {
+                thrd_it = &(*thrd_it)->next
+        ) {
                 // For each acceptable check if it matches
-                __acceptable_t * acc_end = acceptables + acc_count;
-                for( __acceptable_t * acc_it = acceptables; acc_it != acc_end; acc_it++ ) {
+                int i = 0;
+                __acceptable_t * end = mask.clauses + mask.size;
+                for( __acceptable_t * it = mask.clauses; it != end; it++, i++ ) {
                         // Check if we have a match
-                        if( match( acc_it, *thrd_it ) ) {
+                        if( *it == (*thrd_it)->monitors ) {

                                 // If we have a match return it
                                 // after removeing it from the entry queue
-                                return remove( entry_queue, thrd_it );
+                                return [remove( entry_queue, thrd_it ), i];
                         }
                 }
         }

-        return NULL;
-}
+        return [0, -1];
+}
+
+forall(dtype T | sized( T ))
+static inline short insert_unique( T ** array, short & size, T * val ) {
+        if( !val ) return size;
+
+        for(int i = 0; i <= size; i++) {
+                if( array[i] == val ) return size;
+        }
+
+        array[size] = val;
+        size = size + 1;
+        return size;
+}
+
+static inline short count_max( const __waitfor_mask_t & mask ) {
+        short max = 0;
+        for( int i = 0; i < mask.size; i++ ) {
+                max += mask.clauses[i].size;
+        }
+        return max;
+}
+
+static inline short aggregate( monitor_desc ** storage, const __waitfor_mask_t & mask ) {
+        short size = 0;
+        for( int i = 0; i < mask.size; i++ ) {
+                for( int j = 0; j < mask.clauses[i].size; j++) {
+                        insert_unique( storage, size, mask.clauses[i].list[j] );
+                }
+        }
+        qsort( storage, size );
+        return size;
+}
+
 void ?{}( __condition_blocked_queue_t & this ) {
         this.head = NULL;