Changes in / [66b8773:3eab0ef6]


Ignore:
Location:
src
Files:
1 added
9 edited

Legend:

Unmodified
Added
Removed
  • src/Concurrency/Keywords.cc

    r66b8773 r3eab0ef6  
    200200                std::list<DeclarationWithType*> findMutexArgs( FunctionDecl* );
    201201                void validate( DeclarationWithType * );
    202                 void addStatments( CompoundStmt *, const std::list<DeclarationWithType * > &);
     202                void addStatments( FunctionDecl* func, CompoundStmt *, const std::list<DeclarationWithType * > &);
    203203
    204204                static void implement( std::list< Declaration * > & translationUnit ) {
     
    210210                StructDecl* monitor_decl = nullptr;
    211211                StructDecl* guard_decl = nullptr;
     212
     213                static std::unique_ptr< Type > generic_func;
    212214        };
     215
     216        std::unique_ptr< Type > MutexKeyword::generic_func = std::unique_ptr< Type >(
     217                new FunctionType(
     218                        noQualifiers,
     219                        true
     220                )
     221        );
    213222
    214223        //-----------------------------------------------------------------------------
     
    394403        // Mutex keyword implementation
    395404        //=============================================================================================
     405
    396406        void MutexKeyword::visit(FunctionDecl* decl) {
    397407                Visitor::visit(decl);
     
    410420                if( !guard_decl ) throw SemanticError( "mutex keyword requires monitors to be in scope, add #include <monitor>", decl );
    411421
    412                 addStatments( body, mutexArgs );
     422                addStatments( decl, body, mutexArgs );
    413423        }
    414424
     
    456466        }
    457467
    458         void MutexKeyword::addStatments( CompoundStmt * body, const std::list<DeclarationWithType * > & args ) {
     468        void MutexKeyword::addStatments( FunctionDecl* func, CompoundStmt * body, const std::list<DeclarationWithType * > & args ) {
    459469                ObjectDecl * monitors = new ObjectDecl(
    460470                        "__monitors",
     
    487497                );
    488498
     499                assert(generic_func);
     500
    489501                //in reverse order :
    490                 // monitor_guard_t __guard = { __monitors, # };
     502                // monitor_guard_t __guard = { __monitors, #, func };
    491503                body->push_front(
    492504                        new DeclStmt( noLabels, new ObjectDecl(
     
    502514                                        {
    503515                                                new SingleInit( new VariableExpr( monitors ) ),
    504                                                 new SingleInit( new ConstantExpr( Constant::from_ulong( args.size() ) ) )
     516                                                new SingleInit( new ConstantExpr( Constant::from_ulong( args.size() ) ) ),
     517                                                new SingleInit( new CastExpr( new VariableExpr( func ), generic_func->clone() ) )
    505518                                        },
    506519                                        noDesignators,
  • src/libcfa/concurrency/invoke.h

    r66b8773 r3eab0ef6  
    2828      #define thread_local _Thread_local
    2929
     30      typedef void (*fptr_t)();
     31
    3032      struct spinlock {
    3133            volatile int lock;
     
    5052            void append( struct __thread_queue_t *, struct thread_desc * );
    5153            struct thread_desc * pop_head( struct __thread_queue_t * );
     54            struct thread_desc * remove( struct __thread_queue_t *, struct thread_desc ** );
    5255
    5356            void ?{}( struct __condition_stack_t * );
     
    8790            struct __condition_stack_t signal_stack;  // stack of conditions to run next once we exit the monitor
    8891            unsigned int recursion;                   // monitor routines can be called recursively, we need to keep track of that
    89       };
     92
     93            struct __acceptable_t * acceptables;      // list of acceptable functions, null if any
     94            unsigned short acceptable_count;          // number of acceptable functions
     95            short accepted_index;                     // the index of the accepted function, -1 if none
     96       };
    9097
    9198      struct thread_desc {
     99            // Core threading fields
    92100            struct coroutine_desc cor;                // coroutine body used to store context
    93101            struct monitor_desc mon;                  // monitor body used for mutual exclusion
     102
     103            // Link lists fields
    94104            struct thread_desc * next;                // instrusive link field for threads
     105
     106            // Current status related to monitors
    95107            struct monitor_desc ** current_monitors;  // currently held monitors
    96108            unsigned short current_monitor_count;     // number of currently held monitors
    97       };
     109            fptr_t current_monitor_func;              // last function that acquired monitors
     110     };
    98111
    99112#endif //_INVOKE_H_
  • src/libcfa/concurrency/kernel.c

    r66b8773 r3eab0ef6  
    366366
    367367void BlockInternal( thread_desc * thrd ) {
     368        assert(thrd);
    368369        disable_interrupts();
    369370        assert( thrd->cor.state != Halted );
     
    379380
    380381void BlockInternal( spinlock * lock, thread_desc * thrd ) {
     382        assert(thrd);
    381383        disable_interrupts();
    382384        this_processor->finish.action_code = Release_Schedule;
     
    666668}
    667669
     670thread_desc * remove( __thread_queue_t * this, thread_desc ** it ) {
     671        thread_desc * thrd = *it;
     672        verify( thrd );
     673
     674        (*it) = thrd->next;
     675
     676        if( this->tail == &thrd->next ) {
     677                this->tail = it;
     678        }
     679
     680        thrd->next = NULL;
     681
     682        verify( (this->head == NULL) == (&this->head == this->tail) );
     683        verify( *this->tail == NULL );
     684        return thrd;
     685}
     686
     687
     688
    668689void ?{}( __condition_stack_t * this ) {
    669690        this->top = NULL;
  • src/libcfa/concurrency/monitor

    r66b8773 r3eab0ef6  
    2323
    2424static inline void ?{}(monitor_desc * this) {
     25        (&this->lock){};
    2526        this->owner = NULL;
     27        (&this->entry_queue){};
     28        (&this->signal_stack){};
    2629        this->recursion = 0;
     30        this->acceptables = NULL;
     31        this->acceptable_count = 0;
     32        this->accepted_index = -1;
    2733}
    2834
     
    3238        monitor_desc ** prev_mntrs;
    3339        unsigned short  prev_count;
     40        fptr_t          prev_func;
    3441};
    3542
     
    3845}
    3946
    40 void ?{}( monitor_guard_t * this, monitor_desc ** m, int count );
     47void ?{}( monitor_guard_t * this, monitor_desc ** m, int count, void (*func)() );
    4148void ^?{}( monitor_guard_t * this );
    4249
     
    8996uintptr_t front( condition * this );
    9097
     98//-----------------------------------------------------------------------------
     99// External scheduling
     100
    91101struct __acceptable_t {
    92         void (*func)(void);
     102        fptr_t func;
    93103        unsigned short count;
    94         monitor_desc * monitors[1];
     104        monitor_desc ** monitors;
    95105};
    96106
    97 void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) );
     107int __accept_internal( unsigned short count, __acceptable_t * acceptables );
    98108
    99109// Local Variables: //
  • src/libcfa/concurrency/monitor.c

    r66b8773 r3eab0ef6  
    2525static inline void set_owner( monitor_desc * this, thread_desc * owner );
    2626static inline thread_desc * next_thread( monitor_desc * this );
     27static inline int is_accepted( thread_desc * owner, monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() );
    2728
    2829static inline void lock_all( spinlock ** locks, unsigned short count );
     
    3435static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count );
    3536
     37static inline void init     ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
     38static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
     39
    3640static inline thread_desc * check_condition( __condition_criterion_t * );
    3741static inline void brand_condition( condition * );
    3842static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val );
    3943
     44static inline thread_desc * search_entry_queue( __acceptable_t * acceptables, int acc_count, monitor_desc ** monitors, int count );
     45
     46//-----------------------------------------------------------------------------
     47// Useful defines
     48#define wait_ctx(thrd, user_info)                               /* Create the necessary information to use the signaller stack       */ \
     49        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                   */ \
     50        __condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up          */ \
     51        init( count, monitors, &waiter, criteria );               /* Link everything together                                          */ \
     52
     53#define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack       */ \
     54        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                   */ \
     55        __condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up          */ \
     56        init_push( count, monitors, &waiter, criteria );          /* Link everything together and push it to the AS-Stack              */ \
     57
     58#define monitor_ctx( mons, cnt )              /* Define that create the necessary struct for internal/external scheduling operations */ \
     59        monitor_desc ** monitors = mons;        /* Save the targeted monitors                                                          */ \
     60        unsigned short count = cnt;             /* Save the count to a local variable                                                  */ \
     61        unsigned int recursions[ count ];       /* Save the current recursion levels to restore them later                             */ \
     62        spinlock *   locks     [ count ];       /* We need to pass-in an array of locks to BlockInternal                               */ \
     63
    4064//-----------------------------------------------------------------------------
    4165// Enter/Leave routines
     
    4367
    4468extern "C" {
    45         void __enter_monitor_desc( monitor_desc * this ) {
     69        // Enter single monitor
     70        static void __enter_monitor_desc( monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() ) {
     71                // Lock the monitor spinlock, lock_yield to reduce contention
    4672                lock_yield( &this->lock DEBUG_CTX2 );
    4773                thread_desc * thrd = this_thread;
    4874
    49                 // LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
    50 
     75                LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
     76
     77                this->accepted_index = -1;
    5178                if( !this->owner ) {
    52                         //No one has the monitor, just take it
     79                        // No one has the monitor, just take it
    5380                        set_owner( this, thrd );
     81
     82                        LIB_DEBUG_PRINT_SAFE("Kernel :  mon is free \n");
    5483                }
    5584                else if( this->owner == thrd) {
    56                         //We already have the monitor, just not how many times we took it
     85                        // We already have the monitor, just not how many times we took it
    5786                        verify( this->recursion > 0 );
    5887                        this->recursion += 1;
     88
     89                        LIB_DEBUG_PRINT_SAFE("Kernel :  mon already owned \n");
     90                }
     91                else if( (this->accepted_index = is_accepted( thrd, this, group, group_cnt, func)) >= 0 ) {
     92                        // Some one was waiting for us, enter
     93                        set_owner( this, thrd );
     94
     95                        LIB_DEBUG_PRINT_SAFE("Kernel :  mon accepts \n");
    5996                }
    6097                else {
    61                         //Some one else has the monitor, wait in line for it
     98                        LIB_DEBUG_PRINT_SAFE("Kernel :  blocking \n");
     99
     100                        // Some one else has the monitor, wait in line for it
    62101                        append( &this->entry_queue, thrd );
    63                         // LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
    64102                        BlockInternal( &this->lock );
    65103
    66                         //BlockInternal will unlock spinlock, no need to unlock ourselves
     104                        LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entered  mon %p\n", thrd, this);
     105
     106                        // BlockInternal will unlock spinlock, no need to unlock ourselves
    67107                        return;
    68108                }
    69109
     110                LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entered  mon %p\n", thrd, this);
     111
     112                // Release the lock and leave
    70113                unlock( &this->lock );
    71114                return;
    72115        }
    73116
    74         // leave pseudo code :
    75         //      TODO
     117        // Leave single monitor
    76118        void __leave_monitor_desc( monitor_desc * this ) {
     119                // Lock the monitor spinlock, lock_yield to reduce contention
    77120                lock_yield( &this->lock DEBUG_CTX2 );
    78121
    79                 // LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i). ", this_thread, this, this->owner, this->recursion);
    80122                verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion );
    81123
    82                 //Leaving a recursion level, decrement the counter
     124                // Leaving a recursion level, decrement the counter
    83125                this->recursion -= 1;
    84126
    85                 //If we haven't left the last level of recursion
    86                 //it means we don't need to do anything
     127                // If we haven't left the last level of recursion
     128                // it means we don't need to do anything
    87129                if( this->recursion != 0) {
    88130                        unlock( &this->lock );
     
    90132                }
    91133
     134                // Get the next thread, will be null on low contention monitor
    92135                thread_desc * new_owner = next_thread( this );
    93136
    94                 //We can now let other threads in safely
     137                // We can now let other threads in safely
    95138                unlock( &this->lock );
    96 
    97                 // LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);
    98139
    99140                //We need to wake-up the thread
     
    101142        }
    102143
     144        // Leave the thread monitor
     145        // last routine called by a thread.
     146        // Should never return
    103147        void __leave_thread_monitor( thread_desc * thrd ) {
    104148                monitor_desc * this = &thrd->mon;
     149
     150                // Lock the monitor now
    105151                lock_yield( &this->lock DEBUG_CTX2 );
    106152
     
    111157                verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );
    112158
    113                 //Leaving a recursion level, decrement the counter
     159                // Leaving a recursion level, decrement the counter
    114160                this->recursion -= 1;
    115161
    116                 //If we haven't left the last level of recursion
    117                 //it means we don't need to do anything
    118                 if( this->recursion != 0) {
    119                         unlock( &this->lock );
    120                         return;
    121                 }
    122 
     162                // If we haven't left the last level of recursion
     163                // it must mean there is an error
     164                if( this->recursion != 0) { abortf("Thread internal monitor has unbalanced recursion"); }
     165
     166                // Fetch the next thread, can be null
    123167                thread_desc * new_owner = next_thread( this );
    124168
     169                // Leave the thread, this will unlock the spinlock
     170                // Use leave thread instead of BlockInternal which is
     171                // specialized for this case and supports null new_owner
    125172                LeaveThread( &this->lock, new_owner );
    126         }
    127 }
    128 
    129 static inline void enter(monitor_desc ** monitors, int count) {
     173
     174                // Control flow should never reach here!
     175        }
     176}
     177
     178// Enter multiple monitor
     179// relies on the monitor array being sorted
     180static inline void enter(monitor_desc ** monitors, int count, void (*func)() ) {
    130181        for(int i = 0; i < count; i++) {
    131                 __enter_monitor_desc( monitors[i] );
    132         }
    133 }
    134 
     182                __enter_monitor_desc( monitors[i], monitors, count, func );
     183        }
     184}
     185
     186// Leave multiple monitor
     187// relies on the monitor array being sorted
    135188static inline void leave(monitor_desc ** monitors, int count) {
    136189        for(int i = count - 1; i >= 0; i--) {
     
    139192}
    140193
    141 void ?{}( monitor_guard_t * this, monitor_desc ** m, int count ) {
     194// Ctor for monitor guard
     195// Sorts monitors before entering
     196void ?{}( monitor_guard_t * this, monitor_desc ** m, int count, void (*func)() ) {
     197        // Store current array
    142198        this->m = m;
    143199        this->count = count;
     200
     201        // Sort monitors based on address -> TODO use a sort specialized for small numbers
    144202        qsort(this->m, count);
    145         enter( this->m, this->count );
    146 
     203
     204        // Save previous thread context
    147205        this->prev_mntrs = this_thread->current_monitors;
    148206        this->prev_count = this_thread->current_monitor_count;
    149 
     207        this->prev_func  = this_thread->current_monitor_func;
     208
     209        // Update thread context (needed for conditions)
    150210        this_thread->current_monitors      = m;
    151211        this_thread->current_monitor_count = count;
    152 }
    153 
     212        this_thread->current_monitor_func  = func;
     213
     214        // Enter the monitors in order
     215        enter( this->m, this->count, func );
     216}
     217
     218// Dtor for monitor guard
    154219void ^?{}( monitor_guard_t * this ) {
     220        // Leave the monitors in order
    155221        leave( this->m, this->count );
    156222
     223        // Restore thread context
    157224        this_thread->current_monitors      = this->prev_mntrs;
    158225        this_thread->current_monitor_count = this->prev_count;
    159 }
     226        this_thread->current_monitor_func  = this->prev_func;
     227}
     228
     229//-----------------------------------------------------------------------------
     230// Internal scheduling types
    160231
    161232void ?{}(__condition_node_t * this, thread_desc * waiting_thread, unsigned short count, uintptr_t user_info ) {
     
    183254// Internal scheduling
    184255void wait( condition * this, uintptr_t user_info = 0 ) {
    185         // LIB_DEBUG_PRINT_SAFE("Waiting\n");
    186 
    187256        brand_condition( this );
    188257
    189         //Check that everything is as expected
     258        // Check that everything is as expected
    190259        assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
    191260        verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
    192261        verifyf( this->monitor_count < 32u, "Excessive monitor count (%i)", this->monitor_count );
    193262
    194         unsigned short count = this->monitor_count;
    195         unsigned int recursions[ count ];               //Save the current recursion levels to restore them later
    196         spinlock *   locks     [ count ];               //We need to pass-in an array of locks to BlockInternal
    197 
    198         // LIB_DEBUG_PRINT_SAFE("count %i\n", count);
    199 
    200         __condition_node_t waiter = { (thread_desc*)this_thread, count, user_info };
    201 
    202         __condition_criterion_t criteria[count];
    203         for(int i = 0; i < count; i++) {
    204                 (&criteria[i]){ this->monitors[i], &waiter };
    205                 // LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
    206         }
    207 
    208         waiter.criteria = criteria;
     263        // Create storage for monitor context
     264        monitor_ctx( this->monitors, this->monitor_count );
     265
     266        // Create the node specific to this wait operation
     267        wait_ctx( this_thread, user_info );
     268
     269        // Append the current wait operation to the ones already queued on the condition
     270        // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
    209271        append( &this->blocked, &waiter );
    210272
    211         lock_all( this->monitors, locks, count );
    212         save_recursion( this->monitors, recursions, count );
    213         //DON'T unlock, ask the kernel to do it
    214 
    215         //Find the next thread(s) to run
     273        // Lock all monitors (aggregates the lock them as well)
     274        lock_all( monitors, locks, count );
     275
     276        // DON'T unlock, ask the kernel to do it
     277
     278        // Save monitor state
     279        save_recursion( monitors, recursions, count );
     280
     281        // Find the next thread(s) to run
    216282        unsigned short thread_count = 0;
    217283        thread_desc * threads[ count ];
     
    220286        }
    221287
     288        // Remove any duplicate threads
    222289        for( int i = 0; i < count; i++) {
    223                 thread_desc * new_owner = next_thread( this->monitors[i] );
     290                thread_desc * new_owner = next_thread( monitors[i] );
    224291                thread_count = insert_unique( threads, thread_count, new_owner );
    225292        }
    226 
    227         // LIB_DEBUG_PRINT_SAFE("Will unblock: ");
    228         for(int i = 0; i < thread_count; i++) {
    229                 // LIB_DEBUG_PRINT_SAFE("%p ", threads[i]);
    230         }
    231         // LIB_DEBUG_PRINT_SAFE("\n");
    232293
    233294        // Everything is ready to go to sleep
     
    235296
    236297
    237         //WE WOKE UP
    238 
    239 
    240         //We are back, restore the owners and recursions
     298        // WE WOKE UP
     299
     300
     301        // We are back, restore the owners and recursions
    241302        lock_all( locks, count );
    242         restore_recursion( this->monitors, recursions, count );
     303        restore_recursion( monitors, recursions, count );
    243304        unlock_all( locks, count );
    244305}
    245306
    246307bool signal( condition * this ) {
    247         if( is_empty( this ) ) {
    248                 // LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
    249                 return false;
    250         }
     308        if( is_empty( this ) ) { return false; }
    251309
    252310        //Check that everything is as expected
    253311        verify( this->monitors );
    254312        verify( this->monitor_count != 0 );
    255 
    256         unsigned short count = this->monitor_count;
    257313
    258314        //Some more checking in debug
     
    261317                if ( this->monitor_count != this_thrd->current_monitor_count ) {
    262318                        abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count );
    263                 } // if
     319                }
    264320
    265321                for(int i = 0; i < this->monitor_count; i++) {
    266322                        if ( this->monitors[i] != this_thrd->current_monitors[i] ) {
    267323                                abortf( "Signal on condition %p made with different monitor, expected %p got %i", this, this->monitors[i], this_thrd->current_monitors[i] );
    268                         } // if
     324                        }
    269325                }
    270326        );
    271327
    272         //Lock all the monitors
     328        unsigned short count = this->monitor_count;
     329
     330        // Lock all monitors
    273331        lock_all( this->monitors, NULL, count );
    274         // LIB_DEBUG_PRINT_SAFE("Signalling");
    275332
    276333        //Pop the head of the waiting queue
     
    280337        for(int i = 0; i < count; i++) {
    281338                __condition_criterion_t * crit = &node->criteria[i];
    282                 // LIB_DEBUG_PRINT_SAFE(" %p", crit->target);
    283339                assert( !crit->ready );
    284340                push( &crit->target->signal_stack, crit );
    285341        }
    286342
    287         // LIB_DEBUG_PRINT_SAFE("\n");
    288 
    289343        //Release
    290344        unlock_all( this->monitors, count );
     
    294348
    295349bool signal_block( condition * this ) {
    296         if( !this->blocked.head ) {
    297                 LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
    298                 return false;
    299         }
     350        if( !this->blocked.head ) { return false; }
    300351
    301352        //Check that everything is as expected
     
    303354        verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
    304355
    305         unsigned short count = this->monitor_count;
    306         unsigned int recursions[ count ];               //Save the current recursion levels to restore them later
    307         spinlock *   locks     [ count ];               //We need to pass-in an array of locks to BlockInternal
    308 
    309         lock_all( this->monitors, locks, count );
    310 
    311         //create creteria
    312         __condition_node_t waiter = { (thread_desc*)this_thread, count, 0 };
    313 
    314         __condition_criterion_t criteria[count];
    315         for(int i = 0; i < count; i++) {
    316                 (&criteria[i]){ this->monitors[i], &waiter };
    317                 // LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
    318                 push( &criteria[i].target->signal_stack, &criteria[i] );
    319         }
    320 
    321         waiter.criteria = criteria;
     356        // Create storage for monitor context
     357        monitor_ctx( this->monitors, this->monitor_count );
     358
     359        // Lock all monitors (aggregates the locks them as well)
     360        lock_all( monitors, locks, count );
     361
     362        // Create the node specific to this wait operation
     363        wait_ctx_primed( this_thread, 0 )
    322364
    323365        //save contexts
    324         save_recursion( this->monitors, recursions, count );
     366        save_recursion( monitors, recursions, count );
    325367
    326368        //Find the thread to run
    327369        thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
    328370        for(int i = 0; i < count; i++) {
    329                 set_owner( this->monitors[i], signallee );
    330         }
    331 
    332         LIB_DEBUG_PRINT_SAFE( "Waiting on signal block\n" );
     371                set_owner( monitors[i], signallee );
     372        }
    333373
    334374        //Everything is ready to go to sleep
     
    336376
    337377
    338 
    339 
    340         LIB_DEBUG_PRINT_SAFE( "Back from signal block\n" );
     378        // WE WOKE UP
     379
    341380
    342381        //We are back, restore the owners and recursions
    343382        lock_all( locks, count );
    344         restore_recursion( this->monitors, recursions, count );
     383        restore_recursion( monitors, recursions, count );
    345384        unlock_all( locks, count );
    346385
     
    348387}
    349388
     389// Access the user_info of the thread waiting at the front of the queue
    350390uintptr_t front( condition * this ) {
    351391        verifyf( !is_empty(this),
     
    358398//-----------------------------------------------------------------------------
    359399// Internal scheduling
    360 void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) ) {
    361         // thread_desc * this = this_thread;
    362 
    363         // unsigned short count = this->current_monitor_count;
    364         // unsigned int recursions[ count ];            //Save the current recursion levels to restore them later
    365         // spinlock *   locks     [ count ];            //We need to pass-in an array of locks to BlockInternal
    366 
    367         // lock_all( this->current_monitors, locks, count );
    368 
    369 
    370 
    371 
    372 
    373         // // // Everything is ready to go to sleep
    374         // // BlockInternal( locks, count, threads, thread_count );
    375 
    376 
    377         // //WE WOKE UP
    378 
    379 
    380         // //We are back, restore the owners and recursions
    381         // lock_all( locks, count );
    382         // restore_recursion( this->monitors, recursions, count );
    383         // unlock_all( locks, count );
// Implements the accept statement for a group of monitors.
// 'acceptables' lists the functions (with their monitor groups) that may be
// accepted; 'acc_count' is the number of entries in that list.
// Blocks the current thread until one of the acceptable functions is entered,
// then returns the index (into 'acceptables') of the function that was accepted.
// NOTE(review): assumes monitor_ctx introduces the 'monitors', 'count', 'locks'
// and 'recursions' locals used below — macro defined elsewhere, confirm.
int __accept_internal( unsigned short acc_count, __acceptable_t * acceptables ) {
	thread_desc * thrd = this_thread;

	// Create storage for monitor context
	monitor_ctx( acceptables->monitors, acceptables->count );

	// Lock all the monitors in the group
	lock_all( monitors, locks, count );

	// Create the node specific to this wait operation
	wait_ctx_primed( thrd, 0 );

	// Check if the entry queue already holds a thread whose pending call
	// matches one of the acceptable functions
	thread_desc * next = search_entry_queue( acceptables, acc_count, monitors, count );

	LIB_DEBUG_PRINT_SAFE("Owner(s) :");
	for(int i = 0; i < count; i++) {
		LIB_DEBUG_PRINT_SAFE(" %p", monitors[i]->owner );
	}
	LIB_DEBUG_PRINT_SAFE("\n");

	LIB_DEBUG_PRINT_SAFE("Passing mon to %p\n", next);

	if( !next ) {
		// No waiting thread matches: publish the acceptable set on the
		// monitors so a later caller can match against it
		for(int i = 0; i < count; i++) {
			monitors[i]->acceptables = acceptables;
			monitors[i]->acceptable_count = acc_count;
		}
	}
	else {
		// A waiting thread matches: hand ownership of the monitors to it
		for(int i = 0; i < count; i++) {
			set_owner( monitors[i], next );
		}
	}

	// Save the current recursion levels so they can be restored on wake-up
	save_recursion( monitors, recursions, count );

	// Everything is ready to go to sleep
	BlockInternal( locks, count, &next, next ? 1 : 0 );

	//WE WOKE UP

	//We are back, restore the owners and recursions
	lock_all( locks, count );
	restore_recursion( monitors, recursions, count );
	// Index of the accepted call; presumably recorded by the accepting path
	// before this thread was unblocked — confirm against caller.
	int acc_idx = monitors[0]->accepted_index;
	unlock_all( locks, count );

	return acc_idx;
}
    385455
     
    415485}
    416486
     487static inline int is_accepted( thread_desc * owner, monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() ) {
     488        __acceptable_t* accs = this->acceptables; // Optim
     489        int acc_cnt = this->acceptable_count;
     490
     491        // Check if there are any acceptable functions
     492        if( !accs ) return -1;
     493
     494        // If this isn't the first monitor to test this, there is no reason to repeat the test.
     495        if( this != group[0] ) return group[0]->accepted_index;
     496
     497        // For all acceptable functions check if this is the current function.
     498        OUT_LOOP:
     499        for( int i = 0; i < acc_cnt; i++ ) {
     500                __acceptable_t * acc = &accs[i];
     501
     502                // if function matches, check the monitors
     503                if( acc->func == func ) {
     504
     505                        // If the group count is different then it can't be a match
     506                        if( acc->count != group_cnt ) return -1;
     507
     508                        // Check that all the monitors match
     509                        for( int j = 0; j < group_cnt; j++ ) {
     510                                // If not a match, check next function
     511                                if( acc->monitors[j] != group[j] ) continue OUT_LOOP;
     512                        }
     513
     514                        // It's a complete match, accept the call
     515                        return i;
     516                }
     517        }
     518
     519        // No function matched
     520        return -1;
     521}
     522
     523static inline void init( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
     524        for(int i = 0; i < count; i++) {
     525                (&criteria[i]){ monitors[i], waiter };
     526        }
     527
     528        waiter->criteria = criteria;
     529}
     530
     531static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
     532        for(int i = 0; i < count; i++) {
     533                (&criteria[i]){ monitors[i], waiter };
     534                push( &criteria[i].target->signal_stack, &criteria[i] );
     535        }
     536
     537        waiter->criteria = criteria;
     538}
     539
    417540static inline void lock_all( spinlock ** locks, unsigned short count ) {
    418541        for( int i = 0; i < count; i++ ) {
     
    505628}
    506629
     630static inline bool match( __acceptable_t * acc, thread_desc * thrd ) {
     631        verify( thrd );
     632        verify( acc );
     633        if( acc->func != thrd->current_monitor_func ) return false;
     634
     635        return true;
     636}
     637
     638static inline thread_desc * search_entry_queue( __acceptable_t * acceptables, int acc_count, monitor_desc ** monitors, int count ) {
     639
     640        __thread_queue_t * entry_queue = &monitors[0]->entry_queue;
     641
     642        // For each thread in the entry-queue
     643        for(    thread_desc ** thrd_it = &entry_queue->head;
     644                *thrd_it;
     645                thrd_it = &(*thrd_it)->next)
     646        {
     647                // For each acceptable check if it matches
     648                __acceptable_t * acc_end = acceptables + acc_count;
     649                for( __acceptable_t * acc_it = acceptables; acc_it != acc_end; acc_it++ ) {
     650                        // Check if we have a match
     651                        if( match( acc_it, *thrd_it ) ) {
     652
     653                                // If we have a match return it
     654                                // after removeing it from the entry queue
     655                                return remove( entry_queue, thrd_it );
     656                        }
     657                }
     658        }
     659
     660        return NULL;
     661}
     662
    507663void ?{}( __condition_blocked_queue_t * this ) {
    508664        this->head = NULL;
  • src/libcfa/concurrency/preemption.c

    r66b8773 r3eab0ef6  
    332332                assertf(sig == SIGALRM, "Kernel Internal Error, sigwait: Unexpected signal %d (%d : %d)\n", sig, info.si_code, info.si_value.sival_int);
    333333
    334                 LIB_DEBUG_PRINT_SAFE("Kernel : Caught alarm from %d with %d\n", info.si_code, info.si_value.sival_int );
     334                // LIB_DEBUG_PRINT_SAFE("Kernel : Caught alarm from %d with %d\n", info.si_code, info.si_value.sival_int );
    335335                // Switch on the code (a.k.a. the sender) to
    336336                switch( info.si_code )
     
    340340                case SI_TIMER:
    341341                case SI_KERNEL:
    342                         LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread tick\n");
     342                        // LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread tick\n");
    343343                        lock( &event_kernel->lock DEBUG_CTX2 );
    344344                        tick_preemption();
  • src/tests/preempt_longrun/stack.c

    r66b8773 r3eab0ef6  
    1515
    1616void main(worker_t * this) {
    17         volatile long p = 5_021_609ul;
    18         volatile long a = 326_417ul;
    19         volatile long n = 1l;
    20         for (volatile long i = 0; i < p; i++) {
     17        volatile long long p = 5_021_609ul;
     18        volatile long long a = 326_417ul;
     19        volatile long long n = 1l;
     20        for (volatile long long i = 0; i < p; i++) {
    2121                n *= a;
    2222                n %= p;
  • src/tests/sched-int-disjoint.c

    r66b8773 r3eab0ef6  
    33#include <monitor>
    44#include <thread>
     5
     6#include <time.h>
    57
    68static const unsigned long N = 10_000ul;
     
    107109// Main loop
    108110int main(int argc, char* argv[]) {
     111        rand48seed( time( NULL ) );
    109112        all_done = false;
    110113        processor p;
  • src/tests/sched-int-wait.c

    r66b8773 r3eab0ef6  
    55#include <thread>
    66
    7 static const unsigned long N = 10_000ul;
     7#include <time.h>
     8
     9static const unsigned long N = 2_500ul;
    810
    911#ifndef PREEMPTION_RATE
     
    119121// Main
    120122int main(int argc, char* argv[]) {
     123        rand48seed( time( NULL ) );
    121124        waiter_left = 4;
    122125        processor p[2];
Note: See TracChangeset for help on using the changeset viewer.