src/libcfa/concurrency/monitor.c  (r2f6a7e93 → rde737c8)
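
The changeset touches this one file and is essentially a single mechanical transformation: relative to r2f6a7e93, revision rde737c8 replaces the CFA-specific parameter forms with plain C forms. Array parameters such as monitor_desc * storage [] become double pointers (monitor_desc ** storage), reference parameters (condition &, __condition_node_t &, __condition_blocked_queue_t &) become pointers with matching & and -> adjustments at the call sites, tuple assignments of the form this_thread->monitors.[list, size, func] = [m, count, func] are expanded into one assignment per field, the __lock_size_t count type and its PRIiFAST16 format specifier give way to short/int/unsigned short counts printed with %i, and #include <inttypes.h> is dropped. A minimal before/after sketch of the pattern, using the set_owner declaration copied verbatim from the hunks below; the comments are added here for orientation and are not part of the changeset:

    /* r2f6a7e93: CFA array parameter and __lock_size_t count */
    static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );

    /* rde737c8: plain double pointer and short count */
    static inline void set_owner ( monitor_desc ** storage, short count, thread_desc * owner );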

 #include <stdlib>
-#include <inttypes.h>

 #include "libhdr.h"
…
 // Forward declarations
 static inline void set_owner ( monitor_desc * this, thread_desc * owner );
-static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
-static inline void set_mask  ( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
+static inline void set_owner ( monitor_desc ** storage, short count, thread_desc * owner );
+static inline void set_mask  ( monitor_desc ** storage, short count, const __waitfor_mask_t & mask );
 static inline void reset_mask( monitor_desc * this );

…
 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );

-static inline void lock_all  ( spinlock * locks [], __lock_size_t count );
-static inline void lock_all  ( monitor_desc * source [], spinlock * /*out*/ locks [], __lock_size_t count );
-static inline void unlock_all( spinlock * locks [], __lock_size_t count );
-static inline void unlock_all( monitor_desc * locks [], __lock_size_t count );
-
-static inline void save   ( monitor_desc * ctx [], __lock_size_t count, spinlock * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
-static inline void restore( monitor_desc * ctx [], __lock_size_t count, spinlock * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
-
-static inline void init     ( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
-static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
+static inline void lock_all( spinlock ** locks, unsigned short count );
+static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count );
+static inline void unlock_all( spinlock ** locks, unsigned short count );
+static inline void unlock_all( monitor_desc ** locks, unsigned short count );
+
+static inline void save   ( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks );
+static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*in */ recursions, __waitfor_mask_t * /*in */ masks );
+
+static inline void init     ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
+static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );

 static inline thread_desc *        check_condition   ( __condition_criterion_t * );
-static inline void                 brand_condition   ( condition & );
-static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc * monitors [], __lock_size_t count );
+static inline void                 brand_condition   ( condition * );
+static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc ** monitors, int count );

 forall(dtype T | sized( T ))
-static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
-static inline __lock_size_t count_max    ( const __waitfor_mask_t & mask );
-static inline __lock_size_t aggregate    ( monitor_desc * storage [], const __waitfor_mask_t & mask );
+static inline short insert_unique( T ** array, short & size, T * val );
+static inline short count_max    ( const __waitfor_mask_t & mask );
+static inline short aggregate    ( monitor_desc ** storage, const __waitfor_mask_t & mask );

 //-----------------------------------------------------------------------------
…
         __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
         __condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up                            */ \
-        init( count, monitors, waiter, criteria );                /* Link everything together                                                            */ \
+        init( count, monitors, &waiter, criteria );               /* Link everything together                                                            */ \

 #define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack                         */ \
         __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
         __condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up                            */ \
-        init_push( count, monitors, waiter, criteria );           /* Link everything together and push it to the AS-Stack                                */ \
+        init_push( count, monitors, &waiter, criteria );          /* Link everything together and push it to the AS-Stack                                */ \

 #define monitor_ctx( mons, cnt )                                /* Define that create the necessary struct for internal/external scheduling operations */ \
         monitor_desc ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
-        __lock_size_t count = cnt;                                /* Save the count to a local variable                                                  */ \
+        unsigned short count = cnt;                               /* Save the count to a local variable                                                  */ \
         unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
-        __waitfor_mask_t masks [ count ];                         /* Save the current waitfor masks to restore them later                                */ \
+        __waitfor_mask_t masks[ count ];                          /* Save the current waitfor masks to restore them later                                */ \
         spinlock *   locks     [ count ];                         /* We need to pass-in an array of locks to BlockInternal                               */ \

…

                         // Some one else has the monitor, wait in line for it
-                        append( this->entry_queue, thrd );
+                        append( &this->entry_queue, thrd );
                         BlockInternal( &this->lock );

…
                 }

-                __lock_size_t count = 1;
+                int count = 1;
                 monitor_desc ** monitors = &this;
                 __monitor_group_t group = { &this, 1, func };
…

                         // Wake the thread that is waiting for this
-                        __condition_criterion_t * urgent = pop( this->signal_stack );
+                        __condition_criterion_t * urgent = pop( &this->signal_stack );
                         verify( urgent );

…

                         // Some one else has the monitor, wait in line for it
-                        append( this->entry_queue, thrd );
+                        append( &this->entry_queue, thrd );
                         BlockInternal( &this->lock );

…
 // relies on the monitor array being sorted
 static inline void enter( __monitor_group_t monitors ) {
-        for( __lock_size_t i = 0; i < monitors.size; i++) {
+        for(int i = 0; i < monitors.size; i++) {
                 __enter_monitor_desc( monitors.list[i], monitors );
         }
…
 // Leave multiple monitor
 // relies on the monitor array being sorted
-static inline void leave(monitor_desc * monitors [], __lock_size_t count) {
-        for( __lock_size_t i = count - 1; i >= 0; i--) {
+static inline void leave(monitor_desc ** monitors, int count) {
+        for(int i = count - 1; i >= 0; i--) {
                 __leave_monitor_desc( monitors[i] );
         }
…
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) {
+void ?{}( monitor_guard_t & this, monitor_desc ** m, int count, fptr_t func ) {
         // Store current array
         this.m = m;
…

         // Save previous thread context
-        this.[prev_mntrs, prev_count, prev_func] = this_thread->monitors.[list, size, func];
+        this.prev_mntrs = this_thread->monitors.list;
+        this.prev_count = this_thread->monitors.size;
+        this.prev_func  = this_thread->monitors.func;

         // Update thread context (needed for conditions)
-        this_thread->monitors.[list, size, func] = [m, count, func];
+        this_thread->monitors.list = m;
+        this_thread->monitors.size = count;
+        this_thread->monitors.func = func;

         // LIB_DEBUG_PRINT_SAFE("MGUARD : enter %d\n", count);
…

         // Restore thread context
-        this_thread->monitors.[list, size, func] = this.[prev_mntrs, prev_count, prev_func];
-}
+        this_thread->monitors.list = this.prev_mntrs;
+        this_thread->monitors.size = this.prev_count;
+        this_thread->monitors.func = this.prev_func;
+}
+

 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) {
+void ?{}( monitor_dtor_guard_t & this, monitor_desc ** m, fptr_t func ) {
         // Store current array
         this.m = *m;

         // Save previous thread context
-        this.[prev_mntrs, prev_count, prev_func] = this_thread->monitors.[list, size, func];
+        this.prev_mntrs = this_thread->monitors.list;
+        this.prev_count = this_thread->monitors.size;
+        this.prev_func  = this_thread->monitors.func;

         // Update thread context (needed for conditions)
-        this_thread->monitors.[list, size, func] = [m, 1, func];
+        this_thread->monitors.list = m;
+        this_thread->monitors.size = 1;
+        this_thread->monitors.func = func;

         __enter_monitor_dtor( this.m, func );
 }
+

 // Dtor for monitor guard
…

         // Restore thread context
-        this_thread->monitors.[list, size, func] = this.[prev_mntrs, prev_count, prev_func];
+        this_thread->monitors.list = this.prev_mntrs;
+        this_thread->monitors.size = this.prev_count;
+        this_thread->monitors.func = this.prev_func;
 }

 //-----------------------------------------------------------------------------
 // Internal scheduling types
-void ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
+void ?{}(__condition_node_t & this, thread_desc * waiting_thread, unsigned short count, uintptr_t user_info ) {
         this.waiting_thread = waiting_thread;
         this.count = count;
…
 }

-void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t & owner ) {
+void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t * owner ) {
         this.ready  = false;
         this.target = target;
-        this.owner  = &owner;
+        this.owner  = owner;
         this.next   = NULL;
 }
…
 //-----------------------------------------------------------------------------
 // Internal scheduling
-void wait( condition & this, uintptr_t user_info = 0 ) {
+void wait( condition * this, uintptr_t user_info = 0 ) {
         brand_condition( this );

         // Check that everything is as expected
-        assertf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );
-        verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count );
-        verifyf( this.monitor_count < 32u, "Excessive monitor count (%"PRIiFAST16")", this.monitor_count );
+        assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
+        verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
+        verifyf( this->monitor_count < 32u, "Excessive monitor count (%i)", this->monitor_count );

         // Create storage for monitor context
-        monitor_ctx( this.monitors, this.monitor_count );
+        monitor_ctx( this->monitors, this->monitor_count );

         // Create the node specific to this wait operation
…
         // Append the current wait operation to the ones already queued on the condition
         // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
-        append( this.blocked, &waiter );
+        append( &this->blocked, &waiter );

         // Lock all monitors (aggregates the locks as well)
…

         // Find the next thread(s) to run
-        __lock_size_t thread_count = 0;
+        short thread_count = 0;
         thread_desc * threads[ count ];
         __builtin_memset( threads, 0, sizeof( threads ) );
…

         // Remove any duplicate threads
-        for( __lock_size_t i = 0; i < count; i++) {
+        for( int i = 0; i < count; i++) {
                 thread_desc * new_owner = next_thread( monitors[i] );
                 insert_unique( threads, thread_count, new_owner );
…
 }

-bool signal( condition & this ) {
+bool signal( condition * this ) {
         if( is_empty( this ) ) { return false; }

         //Check that everything is as expected
-        verify( this.monitors );
-        verify( this.monitor_count != 0 );
+        verify( this->monitors );
+        verify( this->monitor_count != 0 );

         //Some more checking in debug
         LIB_DEBUG_DO(
                 thread_desc * this_thrd = this_thread;
-                if ( this.monitor_count != this_thrd->monitors.size ) {
-                        abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", &this, this.monitor_count, this_thrd->monitors.size );
-                }
-
-                for(int i = 0; i < this.monitor_count; i++) {
-                        if ( this.monitors[i] != this_thrd->monitors.list[i] ) {
-                                abortf( "Signal on condition %p made with different monitor, expected %p got %i", &this, this.monitors[i], this_thrd->monitors.list[i] );
+                if ( this->monitor_count != this_thrd->monitors.size ) {
+                        abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->monitors.size );
+                }
+
+                for(int i = 0; i < this->monitor_count; i++) {
+                        if ( this->monitors[i] != this_thrd->monitors.list[i] ) {
+                                abortf( "Signal on condition %p made with different monitor, expected %p got %i", this, this->monitors[i], this_thrd->monitors.list[i] );
                         }
                 }
         );

-        __lock_size_t count = this.monitor_count;
+        unsigned short count = this->monitor_count;

         // Lock all monitors
-        lock_all( this.monitors, NULL, count );
+        lock_all( this->monitors, NULL, count );

         //Pop the head of the waiting queue
-        __condition_node_t * node = pop_head( this.blocked );
+        __condition_node_t * node = pop_head( &this->blocked );

         //Add the thread to the proper AS stack
…
                 __condition_criterion_t * crit = &node->criteria[i];
                 assert( !crit->ready );
-                push( crit->target->signal_stack, crit );
+                push( &crit->target->signal_stack, crit );
         }

         //Release
-        unlock_all( this.monitors, count );
+        unlock_all( this->monitors, count );

         return true;
 }

-bool signal_block( condition & this ) {
-        if( !this.blocked.head ) { return false; }
+bool signal_block( condition * this ) {
+        if( !this->blocked.head ) { return false; }

         //Check that everything is as expected
-        verifyf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );
-        verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count );
+        verifyf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
+        verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );

         // Create storage for monitor context
-        monitor_ctx( this.monitors, this.monitor_count );
+        monitor_ctx( this->monitors, this->monitor_count );

         // Lock all monitors (aggregates the locks them as well)
…

         //Find the thread to run
-        thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
+        thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
         set_owner( monitors, count, signallee );

-        LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee );
+        LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : signal_block condition %p (s: %p)\n", this, signallee );

         //Everything is ready to go to sleep
…

 // Access the user_info of the thread waiting at the front of the queue
-uintptr_t front( condition & this ) {
+uintptr_t front( condition * this ) {
         verifyf( !is_empty(this),
                 "Attempt to access user data on an empty condition.\n"
                 "Possible cause is not checking if the condition is empty before reading stored data."
         );
-        return this.blocked.head->user_info;
+        return this->blocked.head->user_info;
 }

…
         // This statment doesn't have a contiguous list of monitors...
         // Create one!
-        __lock_size_t max = count_max( mask );
+        short max = count_max( mask );
         monitor_desc * mon_storage[max];
         __builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
-        __lock_size_t actual_count = aggregate( mon_storage, mask );
-
-        LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : waitfor %d (s: %d, m: %d)\n", actual_count, mask.size, (__lock_size_t)max);
+        short actual_count = aggregate( mon_storage, mask );
+
+        LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : waitfor %d (s: %d, m: %d)\n", actual_count, mask.size, (short)max);

         if(actual_count == 0) return;
…

                                 __condition_criterion_t * dtor_crit = mon2dtor->dtor_node->criteria;
-                                push( mon2dtor->signal_stack, dtor_crit );
+                                push( &mon2dtor->signal_stack, dtor_crit );

                                 unlock_all( locks, count );
…
         set_mask( monitors, count, mask );

-        for( __lock_size_t i = 0; i < count; i++) {
+        for(int i = 0; i < count; i++) {
                 verify( monitors[i]->owner == this_thread );
         }
…
 }

-static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
+static inline void set_owner( monitor_desc ** monitors, short count, thread_desc * owner ) {
         monitors[0]->owner     = owner;
         monitors[0]->recursion = 1;
-        for( __lock_size_t i = 1; i < count; i++ ) {
+        for( int i = 1; i < count; i++ ) {
                 monitors[i]->owner     = owner;
                 monitors[i]->recursion = 0;
…
 }

-static inline void set_mask( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
-        for( __lock_size_t i = 0; i < count; i++) {
+static inline void set_mask( monitor_desc ** storage, short count, const __waitfor_mask_t & mask ) {
+        for(int i = 0; i < count; i++) {
                 storage[i]->mask = mask;
         }
…
         //Check the signaller stack
         LIB_DEBUG_PRINT_SAFE("Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
-        __condition_criterion_t * urgent = pop( this->signal_stack );
+        __condition_criterion_t * urgent = pop( &this->signal_stack );
         if( urgent ) {
                 //The signaller stack is not empty,
…
         // No signaller thread
         // Get the next thread in the entry_queue
-        thread_desc * new_owner = pop_head( this->entry_queue );
+        thread_desc * new_owner = pop_head( &this->entry_queue );
         set_owner( this, new_owner );

…
 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
         __acceptable_t * it = this->mask.clauses; // Optim
-        __lock_size_t count = this->mask.size;
+        int count = this->mask.size;

         // Check if there are any acceptable functions
…

         // For all acceptable functions check if this is the current function.
-        for( __lock_size_t i = 0; i < count; i++, it++ ) {
+        for( short i = 0; i < count; i++, it++ ) {
                 if( *it == group ) {
                         *this->mask.accepted = i;
…
 }

-static inline void init( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
-        for( __lock_size_t i = 0; i < count; i++) {
+static inline void init( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
+        for(int i = 0; i < count; i++) {
                 (criteria[i]){ monitors[i], waiter };
         }

-        waiter.criteria = criteria;
-}
-
-static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
-        for( __lock_size_t i = 0; i < count; i++) {
+        waiter->criteria = criteria;
+}
+
+static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
+        for(int i = 0; i < count; i++) {
                 (criteria[i]){ monitors[i], waiter };
                 LIB_DEBUG_PRINT_SAFE( "Kernel :  target %p = %p\n", criteria[i].target, &criteria[i] );
-                push( criteria[i].target->signal_stack, &criteria[i] );
-        }
-
-        waiter.criteria = criteria;
-}
-
-static inline void lock_all( spinlock * locks [], __lock_size_t count ) {
-        for( __lock_size_t i = 0; i < count; i++ ) {
+                push( &criteria[i].target->signal_stack, &criteria[i] );
+        }
+
+        waiter->criteria = criteria;
+}
+
+static inline void lock_all( spinlock ** locks, unsigned short count ) {
+        for( int i = 0; i < count; i++ ) {
                 lock_yield( locks[i] DEBUG_CTX2 );
         }
 }

-static inline void lock_all( monitor_desc * source [], spinlock * /*out*/ locks [], __lock_size_t count ) {
-        for( __lock_size_t i = 0; i < count; i++ ) {
+static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count ) {
+        for( int i = 0; i < count; i++ ) {
                 spinlock * l = &source[i]->lock;
                 lock_yield( l DEBUG_CTX2 );
…
 }

-static inline void unlock_all( spinlock * locks [], __lock_size_t count ) {
-        for( __lock_size_t i = 0; i < count; i++ ) {
+static inline void unlock_all( spinlock ** locks, unsigned short count ) {
+        for( int i = 0; i < count; i++ ) {
                 unlock( locks[i] );
         }
 }

-static inline void unlock_all( monitor_desc * locks [], __lock_size_t count ) {
-        for( __lock_size_t i = 0; i < count; i++ ) {
+static inline void unlock_all( monitor_desc ** locks, unsigned short count ) {
+        for( int i = 0; i < count; i++ ) {
                 unlock( &locks[i]->lock );
         }
 }

-static inline void save(
-        monitor_desc * ctx [],
-        __lock_size_t count,
-        __attribute((unused)) spinlock * locks [],
-        unsigned int /*out*/ recursions [],
-        __waitfor_mask_t /*out*/ masks []
-) {
-        for( __lock_size_t i = 0; i < count; i++ ) {
+static inline void save( monitor_desc ** ctx, short count, __attribute((unused)) spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
+        for( int i = 0; i < count; i++ ) {
                 recursions[i] = ctx[i]->recursion;
                 masks[i]      = ctx[i]->mask;
…
 }

-static inline void restore(
-        monitor_desc * ctx [],
-        __lock_size_t count,
-        spinlock * locks [],
-        unsigned int /*out*/ recursions [],
-        __waitfor_mask_t /*out*/ masks []
-) {
+static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
         lock_all( locks, count );
-        for( __lock_size_t i = 0; i < count; i++ ) {
+        for( int i = 0; i < count; i++ ) {
                 ctx[i]->recursion = recursions[i];
                 ctx[i]->mask      = masks[i];
…
 }

-static inline void brand_condition( condition & this ) {
+static inline void brand_condition( condition * this ) {
         thread_desc * thrd = this_thread;
-        if( !this.monitors ) {
+        if( !this->monitors ) {
                 // LIB_DEBUG_PRINT_SAFE("Branding\n");
                 assertf( thrd->monitors.list != NULL, "No current monitor to brand condition %p", thrd->monitors.list );
-                this.monitor_count = thrd->monitors.size;
-
-                this.monitors = malloc( this.monitor_count * sizeof( *this.monitors ) );
-                for( int i = 0; i < this.monitor_count; i++ ) {
-                        this.monitors[i] = thrd->monitors.list[i];
-                }
-        }
-}
-
-static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc * monitors [], __lock_size_t count ) {
-
-        __thread_queue_t & entry_queue = monitors[0]->entry_queue;
+                this->monitor_count = thrd->monitors.size;
+
+                this->monitors = malloc( this->monitor_count * sizeof( *this->monitors ) );
+                for( int i = 0; i < this->monitor_count; i++ ) {
+                        this->monitors[i] = thrd->monitors.list[i];
+                }
+        }
+}
+
+static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc ** monitors, int count ) {
+
+        __thread_queue_t * entry_queue = &monitors[0]->entry_queue;

         // For each thread in the entry-queue
-        for(    thread_desc ** thrd_it = &entry_queue.head;
+        for(    thread_desc ** thrd_it = &entry_queue->head;
                 *thrd_it;
                 thrd_it = &(*thrd_it)->next
…

 forall(dtype T | sized( T ))
-static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val ) {
+static inline short insert_unique( T ** array, short & size, T * val ) {
         if( !val ) return size;

-        for( __lock_size_t i = 0; i <= size; i++) {
+        for(int i = 0; i <= size; i++) {
                 if( array[i] == val ) return size;
         }
…
 }

-static inline __lock_size_t count_max( const __waitfor_mask_t & mask ) {
-        __lock_size_t max = 0;
-        for( __lock_size_t i = 0; i < mask.size; i++ ) {
+static inline short count_max( const __waitfor_mask_t & mask ) {
+        short max = 0;
+        for( int i = 0; i < mask.size; i++ ) {
                 max += mask.clauses[i].size;
         }
…
 }

-static inline __lock_size_t aggregate( monitor_desc * storage [], const __waitfor_mask_t & mask ) {
-        __lock_size_t size = 0;
-        for( __lock_size_t i = 0; i < mask.size; i++ ) {
+static inline short aggregate( monitor_desc ** storage, const __waitfor_mask_t & mask ) {
+        short size = 0;
+        for( int i = 0; i < mask.size; i++ ) {
                 __libcfa_small_sort( mask.clauses[i].list, mask.clauses[i].size );
-                for( __lock_size_t j = 0; j < mask.clauses[i].size; j++) {
+                for( int j = 0; j < mask.clauses[i].size; j++) {
                         insert_unique( storage, size, mask.clauses[i].list[j] );
                 }
…
 }

-void append( __condition_blocked_queue_t & this, __condition_node_t * c ) {
-        verify(this.tail != NULL);
-        *this.tail = c;
-        this.tail = &c->next;
-}
-
-__condition_node_t * pop_head( __condition_blocked_queue_t & this ) {
-        __condition_node_t * head = this.head;
+void append( __condition_blocked_queue_t * this, __condition_node_t * c ) {
+        verify(this->tail != NULL);
+        *this->tail = c;
+        this->tail = &c->next;
+}
+
+__condition_node_t * pop_head( __condition_blocked_queue_t * this ) {
+        __condition_node_t * head = this->head;
         if( head ) {
-                this.head = head->next;
+                this->head = head->next;
                 if( !head->next ) {
-                        this.tail = &this.head;
+                        this->tail = &this->head;
                 }
                 head->next = NULL;