Changeset 97e3296


Ignore:
Timestamp:
Aug 17, 2017, 3:42:16 PM (4 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
aaron-thesis, arm-eh, cleanup-dtors, deferred_resn, demangler, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, resolv-new, with_gc
Children:
6ac5223
Parents:
f710aca
Message:

First working implementation of external scheduling... Still lots of testing to do

Location:
src
Files:
1 added
5 edited

Legend:

Unmodified
Added
Removed
  • src/Concurrency/Keywords.cc

    rf710aca r97e3296  
    200200                std::list<DeclarationWithType*> findMutexArgs( FunctionDecl* );
    201201                void validate( DeclarationWithType * );
    202                 void addStatments( CompoundStmt *, const std::list<DeclarationWithType * > &);
     202                void addStatments( FunctionDecl* func, CompoundStmt *, const std::list<DeclarationWithType * > &);
    203203
    204204                static void implement( std::list< Declaration * > & translationUnit ) {
     
    210210                StructDecl* monitor_decl = nullptr;
    211211                StructDecl* guard_decl = nullptr;
     212
     213                static std::unique_ptr< Type > generic_func;
    212214        };
     215
     216        std::unique_ptr< Type > MutexKeyword::generic_func = std::unique_ptr< Type >(
     217                new FunctionType(
     218                        noQualifiers,
     219                        true
     220                )
     221        );
    213222
    214223        //-----------------------------------------------------------------------------
     
    394403        // Mutex keyword implementation
    395404        //=============================================================================================
     405
    396406        void MutexKeyword::visit(FunctionDecl* decl) {
    397407                Visitor::visit(decl);
     
    410420                if( !guard_decl ) throw SemanticError( "mutex keyword requires monitors to be in scope, add #include <monitor>", decl );
    411421
    412                 addStatments( body, mutexArgs );
     422                addStatments( decl, body, mutexArgs );
    413423        }
    414424
     
    456466        }
    457467
    458         void MutexKeyword::addStatments( CompoundStmt * body, const std::list<DeclarationWithType * > & args ) {
     468        void MutexKeyword::addStatments( FunctionDecl* func, CompoundStmt * body, const std::list<DeclarationWithType * > & args ) {
    459469                ObjectDecl * monitors = new ObjectDecl(
    460470                        "__monitors",
     
    487497                );
    488498
     499                assert(generic_func);
     500
    489501                //in reverse order :
    490                 // monitor_guard_t __guard = { __monitors, # };
     502                // monitor_guard_t __guard = { __monitors, #, func };
    491503                body->push_front(
    492504                        new DeclStmt( noLabels, new ObjectDecl(
     
    502514                                        {
    503515                                                new SingleInit( new VariableExpr( monitors ) ),
    504                                                 new SingleInit( new ConstantExpr( Constant::from_ulong( args.size() ) ) )
     516                                                new SingleInit( new ConstantExpr( Constant::from_ulong( args.size() ) ) ),
     517                                                new SingleInit( new CastExpr( new VariableExpr( func ), generic_func->clone() ) )
    505518                                        },
    506519                                        noDesignators,
  • src/libcfa/concurrency/invoke.h

    rf710aca r97e3296  
    8787            struct __condition_stack_t signal_stack;  // stack of conditions to run next once we exit the monitor
    8888            unsigned int recursion;                   // monitor routines can be called recursively, we need to keep track of that
    89       };
     89
     90            struct __acceptable_t * acceptables;      // list of acceptable functions, null if any
     91            unsigned short acceptable_count;          // number of acceptable functions
     92            short accepted_index;                     // the index of the accepted function, -1 if none
     93            void (*pre_accept)(void);                 // function to run before an accept
     94       };
    9095
    9196      struct thread_desc {
     
    95100            struct monitor_desc ** current_monitors;  // currently held monitors
    96101            unsigned short current_monitor_count;     // number of currently held monitors
    97       };
     102     };
    98103
    99104#endif //_INVOKE_H_
  • src/libcfa/concurrency/kernel.c

    rf710aca r97e3296  
    366366
    367367void BlockInternal( thread_desc * thrd ) {
     368        assert(thrd);
    368369        disable_interrupts();
    369370        assert( thrd->cor.state != Halted );
     
    379380
    380381void BlockInternal( spinlock * lock, thread_desc * thrd ) {
     382        assert(thrd);
    381383        disable_interrupts();
    382384        this_processor->finish.action_code = Release_Schedule;
  • src/libcfa/concurrency/monitor

    rf710aca r97e3296  
    2323
    2424static inline void ?{}(monitor_desc * this) {
     25        (&this->lock){};
    2526        this->owner = NULL;
     27        (&this->entry_queue){};
     28        (&this->signal_stack){};
    2629        this->recursion = 0;
     30        this->acceptables = NULL;
     31        this->acceptable_count = 0;
     32        this->accepted_index = -1;
     33        this->pre_accept = 0;
    2734}
    2835
     
    3845}
    3946
    40 void ?{}( monitor_guard_t * this, monitor_desc ** m, int count );
     47void ?{}( monitor_guard_t * this, monitor_desc ** m, int count, void (*func)() );
    4148void ^?{}( monitor_guard_t * this );
    4249
     
    8996uintptr_t front( condition * this );
    9097
     98//-----------------------------------------------------------------------------
     99// External scheduling
     100
     101typedef void (*void_fptr_t)(void);
     102
    91103struct __acceptable_t {
    92         void (*func)(void);
     104        void_fptr_t func;
    93105        unsigned short count;
    94         monitor_desc * monitors[1];
     106        monitor_desc ** monitors;
     107        bool run_preaccept;
    95108};
    96109
    97 void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) );
     110int __accept_internal( unsigned short count, __acceptable_t * acceptables );
    98111
    99112// Local Variables: //
  • src/libcfa/concurrency/monitor.c

    rf710aca r97e3296  
    2525static inline void set_owner( monitor_desc * this, thread_desc * owner );
    2626static inline thread_desc * next_thread( monitor_desc * this );
     27static inline int is_accepted( thread_desc * owner, monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() );
    2728
    2829static inline void lock_all( spinlock ** locks, unsigned short count );
     
    3435static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count );
    3536
     37static inline void init     ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
     38static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
     39
    3640static inline thread_desc * check_condition( __condition_criterion_t * );
    3741static inline void brand_condition( condition * );
    3842static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val );
    3943
     44static inline thread_desc * search_entry_queue( __acceptable_t * acceptables, int acc_count, monitor_desc ** monitors, int count );
     45
     46//-----------------------------------------------------------------------------
     47// Useful defines
     48#define wait_ctx(thrd, user_info)                               /* Create the necessary information to use the signaller stack       */ \
     49        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                   */ \
     50        __condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up          */ \
     51        init( count, monitors, &waiter, criteria );               /* Link everything together                                          */ \
     52
     53#define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack       */ \
     54        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                   */ \
     55        __condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up          */ \
     56        init_push( count, monitors, &waiter, criteria );          /* Link everything together and push it to the AS-Stack              */ \
     57
     58#define monitor_ctx( mons, cnt )              /* Define that creates the necessary structs for internal/external scheduling operations */ \
     59        monitor_desc ** monitors = mons;        /* Save the targeted monitors                                                          */ \
     60        unsigned short count = cnt;             /* Save the count to a local variable                                                  */ \
     61        unsigned int recursions[ count ];       /* Save the current recursion levels to restore them later                             */ \
     62        spinlock *   locks     [ count ];       /* We need to pass-in an array of locks to BlockInternal                               */ \
     63
    4064//-----------------------------------------------------------------------------
    4165// Enter/Leave routines
     
    4367
    4468extern "C" {
    45         void __enter_monitor_desc( monitor_desc * this ) {
     69        // Enter single monitor
     70        static void __enter_monitor_desc( monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() ) {
     71                // Lock the monitor spinlock, lock_yield to reduce contention
    4672                lock_yield( &this->lock DEBUG_CTX2 );
    4773                thread_desc * thrd = this_thread;
    4874
    49                 // LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
    50 
     75                this->accepted_index = -1;
    5176                if( !this->owner ) {
    52                         //No one has the monitor, just take it
     77                        // No one has the monitor, just take it
    5378                        set_owner( this, thrd );
    5479                }
    5580                else if( this->owner == thrd) {
    56                         //We already have the monitor, just note how many times we took it
     81                        // We already have the monitor, just note how many times we took it
    5782                        verify( this->recursion > 0 );
    5883                        this->recursion += 1;
    5984                }
     85                else if( (this->accepted_index = is_accepted( thrd, this, group, group_cnt, func)) >= 0 ) {
     86                        // Someone was waiting for us, enter
     87                        set_owner( this, thrd );
     88                }
    6089                else {
    61                         //Some one else has the monitor, wait in line for it
     90                        // Someone else has the monitor, wait in line for it
    6291                        append( &this->entry_queue, thrd );
    63                         // LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
    6492                        BlockInternal( &this->lock );
    6593
    66                         //BlockInternal will unlock spinlock, no need to unlock ourselves
     94                        // BlockInternal will unlock spinlock, no need to unlock ourselves
    6795                        return;
    6896                }
    6997
     98                // Release the lock and leave
    7099                unlock( &this->lock );
    71100                return;
    72101        }
    73102
    74         // leave pseudo code :
    75         //      TODO
     103        // Leave single monitor
    76104        void __leave_monitor_desc( monitor_desc * this ) {
     105                // Lock the monitor spinlock, lock_yield to reduce contention
    77106                lock_yield( &this->lock DEBUG_CTX2 );
    78107
    79                 // LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i). ", this_thread, this, this->owner, this->recursion);
    80108                verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion );
    81109
    82                 //Leaving a recursion level, decrement the counter
     110                // Leaving a recursion level, decrement the counter
    83111                this->recursion -= 1;
    84112
    85                 //If we haven't left the last level of recursion
    86                 //it means we don't need to do anything
     113                // If we haven't left the last level of recursion
     114                // it means we don't need to do anything
    87115                if( this->recursion != 0) {
    88116                        unlock( &this->lock );
     
    90118                }
    91119
     120                // Get the next thread, will be null on low contention monitor
    92121                thread_desc * new_owner = next_thread( this );
    93122
    94                 //We can now let other threads in safely
     123                // We can now let other threads in safely
    95124                unlock( &this->lock );
    96 
    97                 // LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);
    98125
    99126                //We need to wake-up the thread
     
    101128        }
    102129
     130        // Leave the thread monitor
     131        // last routine called by a thread.
     132        // Should never return
    103133        void __leave_thread_monitor( thread_desc * thrd ) {
    104134                monitor_desc * this = &thrd->mon;
     135
     136                // Lock the monitor now
    105137                lock_yield( &this->lock DEBUG_CTX2 );
    106138
     
    111143                verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );
    112144
    113                 //Leaving a recursion level, decrement the counter
     145                // Leaving a recursion level, decrement the counter
    114146                this->recursion -= 1;
    115147
    116                 //If we haven't left the last level of recursion
    117                 //it means we don't need to do anything
    118                 if( this->recursion != 0) {
    119                         unlock( &this->lock );
    120                         return;
    121                 }
    122 
     148                // If we haven't left the last level of recursion
     149                // it must mean there is an error
     150                if( this->recursion != 0) { abortf("Thread internal monitor has unbalanced recursion"); }
     151
     152                // Fetch the next thread, can be null
    123153                thread_desc * new_owner = next_thread( this );
    124154
     155                // Leave the thread, this will unlock the spinlock
     156                // Use leave thread instead of BlockInternal which is
     157                // specialized for this case and supports null new_owner
    125158                LeaveThread( &this->lock, new_owner );
    126         }
    127 }
    128 
    129 static inline void enter(monitor_desc ** monitors, int count) {
     159
     160                // Control flow should never reach here!
     161        }
     162}
     163
     164// Enter multiple monitor
     165// relies on the monitor array being sorted
     166static inline void enter(monitor_desc ** monitors, int count, void (*func)() ) {
    130167        for(int i = 0; i < count; i++) {
    131                 __enter_monitor_desc( monitors[i] );
    132         }
    133 }
    134 
     168                __enter_monitor_desc( monitors[i], monitors, count, func );
     169        }
     170
     171        int acc_idx = monitors[0]->accepted_index;
     172        if( acc_idx >= 0 && monitors[0]->acceptables[ acc_idx ].run_preaccept ) {
     173                assert( monitors[0]->pre_accept );
     174                monitors[0]->pre_accept();
     175        }
     176}
     177
     178// Leave multiple monitor
     179// relies on the monitor array being sorted
    135180static inline void leave(monitor_desc ** monitors, int count) {
    136181        for(int i = count - 1; i >= 0; i--) {
     
    139184}
    140185
    141 void ?{}( monitor_guard_t * this, monitor_desc ** m, int count ) {
     186// Ctor for monitor guard
     187// Sorts monitors before entering
     188void ?{}( monitor_guard_t * this, monitor_desc ** m, int count, void (*func)() ) {
     189        // Store current array
    142190        this->m = m;
    143191        this->count = count;
     192
     193        // Sort monitors based on address -> TODO use a sort specialized for small numbers
    144194        qsort(this->m, count);
    145         enter( this->m, this->count );
    146 
     195
     196        // Enter the monitors in order
     197        enter( this->m, this->count, func );
     198
     199        // Save previous thread context
    147200        this->prev_mntrs = this_thread->current_monitors;
    148201        this->prev_count = this_thread->current_monitor_count;
    149202
     203        // Update thread context (needed for conditions)
    150204        this_thread->current_monitors      = m;
    151205        this_thread->current_monitor_count = count;
    152206}
    153207
     208// Dtor for monitor guard
    154209void ^?{}( monitor_guard_t * this ) {
     210        // Leave the monitors in order
    155211        leave( this->m, this->count );
    156212
     213        // Restore thread context
    157214        this_thread->current_monitors      = this->prev_mntrs;
    158215        this_thread->current_monitor_count = this->prev_count;
    159216}
     217
     218//-----------------------------------------------------------------------------
     219// Internal scheduling types
    160220
    161221void ?{}(__condition_node_t * this, thread_desc * waiting_thread, unsigned short count, uintptr_t user_info ) {
     
    183243// Internal scheduling
    184244void wait( condition * this, uintptr_t user_info = 0 ) {
    185         // LIB_DEBUG_PRINT_SAFE("Waiting\n");
    186 
    187245        brand_condition( this );
    188246
    189         //Check that everything is as expected
     247        // Check that everything is as expected
    190248        assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
    191249        verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
    192250        verifyf( this->monitor_count < 32u, "Excessive monitor count (%i)", this->monitor_count );
    193251
    194         unsigned short count = this->monitor_count;
    195         unsigned int recursions[ count ];               //Save the current recursion levels to restore them later
    196         spinlock *   locks     [ count ];               //We need to pass-in an array of locks to BlockInternal
    197 
    198         // LIB_DEBUG_PRINT_SAFE("count %i\n", count);
    199 
    200         __condition_node_t waiter = { (thread_desc*)this_thread, count, user_info };
    201 
    202         __condition_criterion_t criteria[count];
    203         for(int i = 0; i < count; i++) {
    204                 (&criteria[i]){ this->monitors[i], &waiter };
    205                 // LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
    206         }
    207 
    208         waiter.criteria = criteria;
     252        // Create storage for monitor context
     253        monitor_ctx( this->monitors, this->monitor_count );
     254
     255        // Create the node specific to this wait operation
     256        wait_ctx( this_thread, user_info );
     257
     258        // Append the current wait operation to the ones already queued on the condition
     259        // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
    209260        append( &this->blocked, &waiter );
    210261
    211         lock_all( this->monitors, locks, count );
    212         save_recursion( this->monitors, recursions, count );
    213         //DON'T unlock, ask the kernel to do it
    214 
    215         //Find the next thread(s) to run
     262        // Lock all monitors (aggregates the locks as well)
     263        lock_all( monitors, locks, count );
     264
     265        // DON'T unlock, ask the kernel to do it
     266
     267        // Save monitor state
     268        save_recursion( monitors, recursions, count );
     269
     270        // Find the next thread(s) to run
    216271        unsigned short thread_count = 0;
    217272        thread_desc * threads[ count ];
     
    220275        }
    221276
     277        // Remove any duplicate threads
    222278        for( int i = 0; i < count; i++) {
    223                 thread_desc * new_owner = next_thread( this->monitors[i] );
     279                thread_desc * new_owner = next_thread( monitors[i] );
    224280                thread_count = insert_unique( threads, thread_count, new_owner );
    225281        }
    226 
    227         // LIB_DEBUG_PRINT_SAFE("Will unblock: ");
    228         for(int i = 0; i < thread_count; i++) {
    229                 // LIB_DEBUG_PRINT_SAFE("%p ", threads[i]);
    230         }
    231         // LIB_DEBUG_PRINT_SAFE("\n");
    232282
    233283        // Everything is ready to go to sleep
     
    235285
    236286
    237         //WE WOKE UP
    238 
    239 
    240         //We are back, restore the owners and recursions
     287        // WE WOKE UP
     288
     289
     290        // We are back, restore the owners and recursions
    241291        lock_all( locks, count );
    242         restore_recursion( this->monitors, recursions, count );
     292        restore_recursion( monitors, recursions, count );
    243293        unlock_all( locks, count );
    244294}
    245295
    246296bool signal( condition * this ) {
    247         if( is_empty( this ) ) {
    248                 // LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
    249                 return false;
    250         }
     297        if( is_empty( this ) ) { return false; }
    251298
    252299        //Check that everything is as expected
    253300        verify( this->monitors );
    254301        verify( this->monitor_count != 0 );
    255 
    256         unsigned short count = this->monitor_count;
    257302
    258303        //Some more checking in debug
     
    261306                if ( this->monitor_count != this_thrd->current_monitor_count ) {
    262307                        abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count );
    263                 } // if
     308                }
    264309
    265310                for(int i = 0; i < this->monitor_count; i++) {
    266311                        if ( this->monitors[i] != this_thrd->current_monitors[i] ) {
    267312                                abortf( "Signal on condition %p made with different monitor, expected %p got %i", this, this->monitors[i], this_thrd->current_monitors[i] );
    268                         } // if
     313                        }
    269314                }
    270315        );
    271316
    272         //Lock all the monitors
     317        unsigned short count = this->monitor_count;
     318
     319        // Lock all monitors
    273320        lock_all( this->monitors, NULL, count );
    274         // LIB_DEBUG_PRINT_SAFE("Signalling");
    275321
    276322        //Pop the head of the waiting queue
     
    280326        for(int i = 0; i < count; i++) {
    281327                __condition_criterion_t * crit = &node->criteria[i];
    282                 // LIB_DEBUG_PRINT_SAFE(" %p", crit->target);
    283328                assert( !crit->ready );
    284329                push( &crit->target->signal_stack, crit );
    285330        }
    286331
    287         // LIB_DEBUG_PRINT_SAFE("\n");
    288 
    289332        //Release
    290333        unlock_all( this->monitors, count );
     
    294337
    295338bool signal_block( condition * this ) {
    296         if( !this->blocked.head ) {
    297                 LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
    298                 return false;
    299         }
     339        if( !this->blocked.head ) { return false; }
    300340
    301341        //Check that everything is as expected
     
    303343        verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
    304344
    305         unsigned short count = this->monitor_count;
    306         unsigned int recursions[ count ];               //Save the current recursion levels to restore them later
    307         spinlock *   locks     [ count ];               //We need to pass-in an array of locks to BlockInternal
    308 
    309         lock_all( this->monitors, locks, count );
    310 
    311         //create creteria
    312         __condition_node_t waiter = { (thread_desc*)this_thread, count, 0 };
    313 
    314         __condition_criterion_t criteria[count];
    315         for(int i = 0; i < count; i++) {
    316                 (&criteria[i]){ this->monitors[i], &waiter };
    317                 // LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
    318                 push( &criteria[i].target->signal_stack, &criteria[i] );
    319         }
    320 
    321         waiter.criteria = criteria;
     345        // Create storage for monitor context
     346        monitor_ctx( this->monitors, this->monitor_count );
     347
     348        // Lock all monitors (aggregates the locks as well)
     349        lock_all( monitors, locks, count );
     350
     351        // Create the node specific to this wait operation
     352        wait_ctx_primed( this_thread, 0 )
    322353
    323354        //save contexts
    324         save_recursion( this->monitors, recursions, count );
     355        save_recursion( monitors, recursions, count );
    325356
    326357        //Find the thread to run
    327358        thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
    328359        for(int i = 0; i < count; i++) {
    329                 set_owner( this->monitors[i], signallee );
    330         }
    331 
    332         LIB_DEBUG_PRINT_SAFE( "Waiting on signal block\n" );
     360                set_owner( monitors[i], signallee );
     361        }
    333362
    334363        //Everything is ready to go to sleep
     
    336365
    337366
    338 
    339 
    340         LIB_DEBUG_PRINT_SAFE( "Back from signal block\n" );
     367        // WE WOKE UP
     368
    341369
    342370        //We are back, restore the owners and recursions
    343371        lock_all( locks, count );
    344         restore_recursion( this->monitors, recursions, count );
     372        restore_recursion( monitors, recursions, count );
    345373        unlock_all( locks, count );
    346374
     
    348376}
    349377
     378// Access the user_info of the thread waiting at the front of the queue
    350379uintptr_t front( condition * this ) {
    351380        verifyf( !is_empty(this),
     
    358387//-----------------------------------------------------------------------------
    359388// Internal scheduling
    360 void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) ) {
    361         // thread_desc * this = this_thread;
    362 
    363         // unsigned short count = this->current_monitor_count;
    364         // unsigned int recursions[ count ];            //Save the current recursion levels to restore them later
    365         // spinlock *   locks     [ count ];            //We need to pass-in an array of locks to BlockInternal
    366 
    367         // lock_all( this->current_monitors, locks, count );
    368 
    369 
    370 
    371 
    372 
    373         // // // Everything is ready to go to sleep
    374         // // BlockInternal( locks, count, threads, thread_count );
    375 
    376 
    377         // //WE WOKE UP
    378 
    379 
    380         // //We are back, restore the owners and recursions
    381         // lock_all( locks, count );
    382         // restore_recursion( this->monitors, recursions, count );
    383         // unlock_all( locks, count );
     389int __accept_internal( unsigned short acc_count, __acceptable_t * acceptables ) {
     390        thread_desc * thrd = this_thread;
     391
     392        // Create storage for monitor context
     393        monitor_ctx( acceptables->monitors, acceptables->count );
     394
     395        // Lock all monitors (aggregates the locks as well)
     396        lock_all( monitors, locks, count );
     397
     398        // Create the node specific to this wait operation
     399        wait_ctx_primed( thrd, 0 );
     400
     401        // Check the entry queue for a thread already waiting on an acceptable function
     402        thread_desc * next = search_entry_queue( acceptables, acc_count, monitors, count );
     403
     404        if( !next ) {
     405                // Update acceptables on the current monitors
     406                for(int i = 0; i < count; i++) {
     407                        monitors[i]->acceptables = acceptables;
     408                        monitors[i]->acceptable_count = acc_count;
     409                }
     410        }
     411
     412        save_recursion( monitors, recursions, count );
     413
     414        // Everything is ready to go to sleep
     415        BlockInternal( locks, count, &next, next ? 1 : 0 );
     416
     417
     418        //WE WOKE UP
     419
     420
     421        //We are back, restore the owners and recursions
     422        lock_all( locks, count );
     423        restore_recursion( monitors, recursions, count );
     424        int acc_idx = monitors[0]->accepted_index;
     425        unlock_all( locks, count );
     426
     427        return acc_idx;
    384428}
    385429
     
    415459}
    416460
     461static inline int is_accepted( thread_desc * owner, monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() ) {
     462        __acceptable_t* accs = this->acceptables; // Optim
     463        int acc_cnt = this->acceptable_count;
     464
     465        // Check if there are any acceptable functions
     466        if( !accs ) return -1;
     467
     468        // If this isn't the first monitor to test this, there is no reason to repeat the test.
     469        if( this != group[0] ) return group[0]->accepted_index;
     470
     471        // For all acceptable functions check if this is the current function.
     472        OUT_LOOP:
     473        for( int i = 0; i < acc_cnt; i++ ) {
     474                __acceptable_t * acc = &accs[i];
     475
     476                // if function matches, check the monitors
     477                if( acc->func == func ) {
     478
     479                        // If the group count is different then it can't be a match
     480                        if( acc->count != group_cnt ) return -1;
     481
     482                        // Check that all the monitors match
     483                        for( int j = 0; j < group_cnt; j++ ) {
     484                                // If not a match, check next function
     485                                if( acc->monitors[j] != group[j] ) continue OUT_LOOP;
     486                        }
     487
     488                        // It's a complete match, accept the call
     489                        return i;
     490                }
     491        }
     492
     493        // No function matched
     494        return -1;
     495}
     496
     497static inline void init( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
     498        for(int i = 0; i < count; i++) {
     499                (&criteria[i]){ monitors[i], waiter };
     500        }
     501
     502        waiter->criteria = criteria;
     503}
     504
     505static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
     506        for(int i = 0; i < count; i++) {
     507                (&criteria[i]){ monitors[i], waiter };
     508                push( &criteria[i].target->signal_stack, &criteria[i] );
     509        }
     510
     511        waiter->criteria = criteria;
     512}
     513
    417514static inline void lock_all( spinlock ** locks, unsigned short count ) {
    418515        for( int i = 0; i < count; i++ ) {
     
    505602}
    506603
     604static inline thread_desc * search_entry_queue( __acceptable_t * acceptables, int acc_count, monitor_desc ** monitors, int count ) {
     605        return NULL;
     606}
     607
    507608void ?{}( __condition_blocked_queue_t * this ) {
    508609        this->head = NULL;
Note: See TracChangeset for help on using the changeset viewer.