Changeset f265042 for src/libcfa


Timestamp:
Sep 25, 2017, 12:07:43 PM (8 years ago)
Author:
Rob Schluntz <rschlunt@…>
Branches:
ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
Children:
3aeaecd
Parents:
1755226 (diff), 596bc0a (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:/u/cforall/software/cfa/cfa-cc

Location:
src/libcfa
Files:
10 edited

  • src/libcfa/Makefile.am

    r1755226 rf265042  
    3636         ${AM_V_GEN}@BACKEND_CC@ @CFA_FLAGS@ -D__CFA_DEBUG__ -O0 -c -o $@ $<
    3737
    38 EXTRA_FLAGS = -g -Wall -Werror -Wno-unused-function -imacros libcfa-prelude.c @CFA_FLAGS@
     38EXTRA_FLAGS = -g -Wall -Wno-unused-function -imacros libcfa-prelude.c @CFA_FLAGS@
    3939
    4040AM_CCASFLAGS = @CFA_FLAGS@
  • src/libcfa/Makefile.in

    r1755226 rf265042  
    416416ARFLAGS = cr
    417417lib_LIBRARIES = $(am__append_1) $(am__append_2)
    418 EXTRA_FLAGS = -g -Wall -Werror -Wno-unused-function -imacros libcfa-prelude.c @CFA_FLAGS@
     418EXTRA_FLAGS = -g -Wall -Wno-unused-function -imacros libcfa-prelude.c @CFA_FLAGS@
    419419AM_CCASFLAGS = @CFA_FLAGS@
    420420headers = fstream iostream iterator limits rational stdlib \
  • src/libcfa/concurrency/coroutine.c

    r1755226 rf265042  
    123123        if(pageSize == 0ul) pageSize = sysconf( _SC_PAGESIZE );
    124124
    125         LIB_DEBUG_PRINT_SAFE("FRED");
    126 
    127125        size_t cxtSize = libCeiling( sizeof(machine_context_t), 8 ); // minimum alignment
    128126
  • src/libcfa/concurrency/invoke.h

    r1755226 rf265042  
    8484      };
    8585
     86      struct __waitfor_mask_t {
     87            short * accepted;                         // the index of the accepted function, -1 if none
     88            struct __acceptable_t * clauses;          // list of acceptable functions, null if any
     89            short size;                               // number of acceptable functions
     90      };
     91
    8692      struct monitor_desc {
    8793            struct spinlock lock;                     // spinlock to protect internal data
     
    9096            struct __condition_stack_t signal_stack;  // stack of conditions to run next once we exit the monitor
    9197            unsigned int recursion;                   // monitor routines can be called recursively, we need to keep track of that
     98            struct __waitfor_mask_t mask;               // mask used to know if some thread is waiting for something while holding the monitor
     99      };
    92100
    93             struct __acceptable_t * acceptables;      // list of acceptable functions, null if any
    94             unsigned short acceptable_count;          // number of acceptable functions
    95             short accepted_index;                     // the index of the accepted function, -1 if none
    96        };
     101      struct __monitor_group_t {
     102            struct monitor_desc ** list;              // currently held monitors
     103            short                  size;              // number of currently held monitors
     104            fptr_t                 func;              // last function that acquired monitors
     105      };
    97106
    98107      struct thread_desc {
    99108            // Core threading fields
    100             struct coroutine_desc cor;                // coroutine body used to store context
    101             struct monitor_desc mon;                  // monitor body used for mutual exclusion
     109            struct coroutine_desc  self_cor;          // coroutine body used to store context
     110            struct monitor_desc    self_mon;          // monitor body used for mutual exclusion
     111            struct monitor_desc *  self_mon_p;        // pointer to monitor with sufficient lifetime for current monitors
     112            struct __monitor_group_t monitors;          // monitors currently held by this thread
    102113
    103114            // Link lists fields
    104115            struct thread_desc * next;                // instrusive link field for threads
    105116
    106             // Current status related to monitors
    107             struct monitor_desc ** current_monitors;  // currently held monitors
    108             unsigned short current_monitor_count;     // number of currently held monitors
    109             fptr_t current_monitor_func;              // last function that acquired monitors
     117
    110118     };
     119
     120     #ifdef __CFORALL__
     121     extern "Cforall" {
     122            static inline monitor_desc * ?[?]( const __monitor_group_t & this, ptrdiff_t index ) {
     123                  return this.list[index];
     124            }
     125
     126            static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {
     127                  if( (lhs.list != 0) != (rhs.list != 0) ) return false;
     128                  if( lhs.size != rhs.size ) return false;
     129                  if( lhs.func != rhs.func ) return false;
     130
     131                  // Check that all the monitors match
     132                  for( int i = 0; i < lhs.size; i++ ) {
     133                        // If not a match, check next function
     134                        if( lhs[i] != rhs[i] ) return false;
     135                  }
     136
     137                  return true;
     138            }
     139      }
     140      #endif
    111141
    112142#endif //_INVOKE_H_
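
The __waitfor_mask_t and __monitor_group_t structures added to invoke.h replace the loose acceptables/acceptable_count/accepted_index fields, and the new ?==? operator treats two groups as equal only when they name the same function over the same monitors. Below is a minimal plain-C sketch of that comparison, not the CFA code itself; monitor_desc is left opaque and the names (monitor_group, group_equal) are simplified stand-ins.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Plain-C sketch of the new __monitor_group_t comparison added to invoke.h.
   monitor_desc is an opaque placeholder and fptr_t a generic function pointer. */
typedef void (*fptr_t)(void);
struct monitor_desc;

struct monitor_group {
        struct monitor_desc ** list;   /* currently held monitors */
        short                  size;   /* number of currently held monitors */
        fptr_t                 func;   /* last function that acquired monitors */
};

/* Mirrors the ?==? operator: equal only if both groups describe the same
   function over the same monitors, element by element. */
static bool group_equal( const struct monitor_group * lhs, const struct monitor_group * rhs ) {
        if( (lhs->list != NULL) != (rhs->list != NULL) ) return false;
        if( lhs->size != rhs->size ) return false;
        if( lhs->func != rhs->func ) return false;
        for( int i = 0; i < lhs->size; i++ ) {
                if( lhs->list[i] != rhs->list[i] ) return false;   /* any mismatch means the groups differ */
        }
        return true;
}

int main(void) {
        struct monitor_desc * mons[2] = { (struct monitor_desc *)0x10, (struct monitor_desc *)0x20 };
        struct monitor_group a = { mons, 2, NULL };
        struct monitor_group b = { mons, 2, NULL };
        printf( "equal: %d\n", group_equal( &a, &b ) );   /* prints "equal: 1" */
        return 0;
}
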
  • src/libcfa/concurrency/kernel.c

    r1755226 rf265042  
    106106
    107107void ?{}( thread_desc & this, current_stack_info_t * info) {
    108         (this.cor){ info };
     108        (this.self_cor){ info };
    109109}
    110110
     
    328328        // if( !thrd ) return;
    329329        verify( thrd );
    330         verify( thrd->cor.state != Halted );
     330        verify( thrd->self_cor.state != Halted );
    331331
    332332        verify( disable_preempt_count > 0 );
     
    373373        assert(thrd);
    374374        disable_interrupts();
    375         assert( thrd->cor.state != Halted );
     375        assert( thrd->self_cor.state != Halted );
    376376        this_processor->finish.action_code = Schedule;
    377377        this_processor->finish.thrd = thrd;
     
    466466        this_processor = mainProcessor;
    467467        this_thread = mainThread;
    468         this_coroutine = &mainThread->cor;
     468        this_coroutine = &mainThread->self_cor;
    469469
    470470        // Enable preemption
     
    547547        thread_desc * thrd = kernel_data;
    548548
    549         int len = snprintf( abort_text, abort_text_size, "Error occurred while executing task %.256s (%p)", thrd->cor.name, thrd );
     549        int len = snprintf( abort_text, abort_text_size, "Error occurred while executing task %.256s (%p)", thrd->self_cor.name, thrd );
    550550        __lib_debug_write( STDERR_FILENO, abort_text, len );
    551551
  • src/libcfa/concurrency/monitor

    r1755226 rf265042  
    2222#include "stdlib"
    2323
     24trait is_monitor(dtype T) {
     25        monitor_desc * get_monitor( T & );
     26        void ^?{}( T & mutex );
     27};
     28
    2429static inline void ?{}(monitor_desc & this) {
    2530        (this.lock){};
     
    2833        (this.signal_stack){};
    2934        this.recursion = 0;
    30         this.acceptables = NULL;
    31         this.acceptable_count = 0;
    32         this.accepted_index = -1;
     35        this.mask.accepted = NULL;
     36        this.mask.clauses  = NULL;
     37        this.mask.size     = 0;
    3338}
    3439
     
    100105
    101106struct __acceptable_t {
    102         fptr_t func;
    103         unsigned short count;
    104         monitor_desc ** monitors;
     107        __monitor_group_t;
    105108        bool is_dtor;
    106109};
    107110
    108 int __accept_internal( unsigned short count, __acceptable_t * acceptables );
     111void __waitfor_internal( const __waitfor_mask_t & mask, int duration );
    109112
    110113// Local Variables: //
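
With this change the header exposes __waitfor_internal( const __waitfor_mask_t &, int duration ) in place of __accept_internal, and each __acceptable_t now embeds a __monitor_group_t plus an is_dtor flag. The following plain-C sketch shows how a caller might package clauses into such a mask; the struct layouts and the make_mask helper are assumptions for illustration, not the CFA API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Plain-C sketch (not the CFA API) of the data handed to __waitfor_internal:
   each acceptable clause is a monitor group plus a dtor flag, and the mask
   records where the index of the accepted clause should be written back. */
typedef void (*fptr_t)(void);
struct monitor_desc;

struct acceptable {
        struct monitor_desc ** list;     /* monitors named by this waitfor clause */
        short                  size;
        fptr_t                 func;     /* mutex routine the clause accepts */
        bool                   is_dtor;  /* clause accepts the destructor */
};

struct waitfor_mask {
        short             * accepted;    /* index of the accepted clause, -1 if none */
        struct acceptable * clauses;     /* list of acceptable clauses, NULL if any */
        short               size;        /* number of clauses */
};

/* Hypothetical helper: package an array of clauses into a mask before a call
   such as __waitfor_internal( mask, duration ). */
static struct waitfor_mask make_mask( short * accepted, struct acceptable * clauses, short size ) {
        *accepted = -1;                  /* nothing accepted yet */
        struct waitfor_mask mask = { accepted, clauses, size };
        return mask;
}

int main(void) {
        short accepted;
        struct acceptable clauses[1] = { { NULL, 0, NULL, false } };
        struct waitfor_mask mask = make_mask( &accepted, clauses, 1 );
        printf( "clauses: %d, accepted: %d\n", mask.size, *mask.accepted );   /* prints "clauses: 1, accepted: -1" */
        return 0;
}
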
  • src/libcfa/concurrency/monitor.c

    r1755226 rf265042  
    2424// Forward declarations
    2525static inline void set_owner( monitor_desc * this, thread_desc * owner );
     26static inline void set_owner( monitor_desc ** storage, short count, thread_desc * owner );
     27static inline void set_mask ( monitor_desc ** storage, short count, const __waitfor_mask_t & mask );
     28
    2629static inline thread_desc * next_thread( monitor_desc * this );
    27 static inline int is_accepted( thread_desc * owner, monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() );
     30static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );
    2831
    2932static inline void lock_all( spinlock ** locks, unsigned short count );
     
    3235static inline void unlock_all( monitor_desc ** locks, unsigned short count );
    3336
    34 static inline void save_recursion   ( monitor_desc ** ctx, unsigned int * /*out*/ recursions, unsigned short count );
    35 static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count );
     37static inline void save   ( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks );
     38static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*in */ recursions, __waitfor_mask_t * /*in */ masks );
    3639
    3740static inline void init     ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
    3841static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
    3942
    40 static inline thread_desc * check_condition( __condition_criterion_t * );
    41 static inline void brand_condition( condition * );
    42 static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val );
    43 
    44 static inline thread_desc * search_entry_queue( __acceptable_t * acceptables, int acc_count, monitor_desc ** monitors, int count );
     43static inline thread_desc *        check_condition   ( __condition_criterion_t * );
     44static inline void                 brand_condition   ( condition * );
     45static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc ** monitors, int count );
     46
     47forall(dtype T | sized( T ))
     48static inline short insert_unique( T ** array, short & size, T * val );
     49static inline short count_max    ( const __waitfor_mask_t & mask );
     50static inline short aggregate    ( monitor_desc ** storage, const __waitfor_mask_t & mask );
    4551
    4652//-----------------------------------------------------------------------------
    4753// Useful defines
    48 #define wait_ctx(thrd, user_info)                               /* Create the necessary information to use the signaller stack       */ \
    49         __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                   */ \
    50         __condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up          */ \
    51         init( count, monitors, &waiter, criteria );               /* Link everything together                                          */ \
    52 
    53 #define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack       */ \
    54         __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                   */ \
    55         __condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up          */ \
    56         init_push( count, monitors, &waiter, criteria );          /* Link everything together and push it to the AS-Stack              */ \
    57 
    58 #define monitor_ctx( mons, cnt )              /* Define that create the necessary struct for internal/external scheduling operations */ \
    59         monitor_desc ** monitors = mons;        /* Save the targeted monitors                                                          */ \
    60         unsigned short count = cnt;             /* Save the count to a local variable                                                  */ \
    61         unsigned int recursions[ count ];       /* Save the current recursion levels to restore them later                             */ \
    62         spinlock *   locks     [ count ];       /* We need to pass-in an array of locks to BlockInternal                               */ \
     54#define wait_ctx(thrd, user_info)                               /* Create the necessary information to use the signaller stack                         */ \
     55        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
      56        __condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up                            */ \
     57        init( count, monitors, &waiter, criteria );               /* Link everything together                                                            */ \
     58
     59#define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack                         */ \
     60        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
      61        __condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up                            */ \
     62        init_push( count, monitors, &waiter, criteria );          /* Link everything together and push it to the AS-Stack                                */ \
     63
      64#define monitor_ctx( mons, cnt )                                /* Define that creates the necessary structs for internal/external scheduling operations */ \
     65        monitor_desc ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
     66        unsigned short count = cnt;                               /* Save the count to a local variable                                                  */ \
     67        unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
     68        __waitfor_mask_t masks[ count ];                          /* Save the current waitfor masks to restore them later                                */ \
     69        spinlock *   locks     [ count ];                         /* We need to pass-in an array of locks to BlockInternal                               */ \
     70
     71#define monitor_save    save   ( monitors, count, locks, recursions, masks )
     72#define monitor_restore restore( monitors, count, locks, recursions, masks )
     73
      74#define blockAndWake( thrd, cnt )                               /* Save monitor states, block until woken, then restore owners and recursions          */ \
     75        monitor_save;                                             /* Save monitor states                                                                 */ \
     76        BlockInternal( locks, count, thrd, cnt );                 /* Everything is ready to go to sleep                                                  */ \
     77        monitor_restore;                                          /* We are back, restore the owners and recursions                                      */ \
     78
    6379
    6480//-----------------------------------------------------------------------------
     
    6884extern "C" {
    6985        // Enter single monitor
    70         static void __enter_monitor_desc( monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() ) {
     86        static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
    7187                // Lock the monitor spinlock, lock_yield to reduce contention
    7288                lock_yield( &this->lock DEBUG_CTX2 );
     
    7591                LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
    7692
    77                 this->accepted_index = -1;
    7893                if( !this->owner ) {
    7994                        // No one has the monitor, just take it
     
    89104                        LIB_DEBUG_PRINT_SAFE("Kernel :  mon already owned \n");
    90105                }
    91                 else if( (this->accepted_index = is_accepted( thrd, this, group, group_cnt, func)) >= 0 ) {
     106                else if( is_accepted( this, group) ) {
    92107                        // Some one was waiting for us, enter
    93108                        set_owner( this, thrd );
     
    120135                lock_yield( &this->lock DEBUG_CTX2 );
    121136
    122                 verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion );
     137                LIB_DEBUG_PRINT_SAFE("Kernel : %10p Leaving mon %p (%p)\n", this_thread, this, this->owner);
     138
     139                verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", this_thread, this->owner, this->recursion, this );
    123140
    124141                // Leaving a recursion level, decrement the counter
     
    146163        // Should never return
    147164        void __leave_thread_monitor( thread_desc * thrd ) {
    148                 monitor_desc * this = &thrd->mon;
     165                monitor_desc * this = &thrd->self_mon;
    149166
    150167                // Lock the monitor now
     
    153170                disable_interrupts();
    154171
    155                 thrd->cor.state = Halted;
    156 
    157                 verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );
     172                thrd->self_cor.state = Halted;
     173
     174                verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
    158175
    159176                // Leaving a recursion level, decrement the counter
     
    178195// Enter multiple monitor
    179196// relies on the monitor array being sorted
    180 static inline void enter(monitor_desc ** monitors, int count, void (*func)() ) {
    181         for(int i = 0; i < count; i++) {
    182                 __enter_monitor_desc( monitors[i], monitors, count, func );
     197static inline void enter( __monitor_group_t monitors ) {
     198        for(int i = 0; i < monitors.size; i++) {
     199                __enter_monitor_desc( monitors.list[i], monitors );
    183200        }
    184201}
     
    203220
    204221        // Save previous thread context
    205         this.prev_mntrs = this_thread->current_monitors;
    206         this.prev_count = this_thread->current_monitor_count;
    207         this.prev_func  = this_thread->current_monitor_func;
     222        this.prev_mntrs = this_thread->monitors.list;
     223        this.prev_count = this_thread->monitors.size;
     224        this.prev_func  = this_thread->monitors.func;
    208225
    209226        // Update thread context (needed for conditions)
    210         this_thread->current_monitors      = m;
    211         this_thread->current_monitor_count = count;
    212         this_thread->current_monitor_func  = func;
     227        this_thread->monitors.list = m;
     228        this_thread->monitors.size = count;
     229        this_thread->monitors.func = func;
     230
     231        LIB_DEBUG_PRINT_SAFE("MGUARD : enter %d\n", count);
    213232
    214233        // Enter the monitors in order
    215         enter( this.m, this.count, func );
     234        __monitor_group_t group = {this.m, this.count, func};
     235        enter( group );
     236
     237        LIB_DEBUG_PRINT_SAFE("MGUARD : entered\n");
    216238}
    217239
     
    219241// Dtor for monitor guard
    220242void ^?{}( monitor_guard_t & this ) {
     243        LIB_DEBUG_PRINT_SAFE("MGUARD : leaving %d\n", this.count);
     244
    221245        // Leave the monitors in order
    222246        leave( this.m, this.count );
    223247
     248        LIB_DEBUG_PRINT_SAFE("MGUARD : left\n");
     249
    224250        // Restore thread context
    225         this_thread->current_monitors      = this.prev_mntrs;
    226         this_thread->current_monitor_count = this.prev_count;
    227         this_thread->current_monitor_func = this.prev_func;
     251        this_thread->monitors.list = this.prev_mntrs;
     252        this_thread->monitors.size = this.prev_count;
     253        this_thread->monitors.func = this.prev_func;
    228254}
    229255
     
    271297        append( &this->blocked, &waiter );
    272298
    273         // Lock all monitors (aggregates the lock them as well)
     299        // Lock all monitors (aggregates the locks as well)
    274300        lock_all( monitors, locks, count );
    275301
    276         // DON'T unlock, ask the kernel to do it
    277 
    278         // Save monitor state
    279         save_recursion( monitors, recursions, count );
    280 
    281302        // Find the next thread(s) to run
    282         unsigned short thread_count = 0;
     303        short thread_count = 0;
    283304        thread_desc * threads[ count ];
    284305        for(int i = 0; i < count; i++) {
     
    286307        }
    287308
     309        // Save monitor states
     310        monitor_save;
     311
    288312        // Remove any duplicate threads
    289313        for( int i = 0; i < count; i++) {
    290314                thread_desc * new_owner = next_thread( monitors[i] );
    291                 thread_count = insert_unique( threads, thread_count, new_owner );
     315                insert_unique( threads, thread_count, new_owner );
    292316        }
    293317
     
    295319        BlockInternal( locks, count, threads, thread_count );
    296320
    297 
    298         // WE WOKE UP
    299 
    300 
    301321        // We are back, restore the owners and recursions
    302         lock_all( locks, count );
    303         restore_recursion( monitors, recursions, count );
    304         unlock_all( locks, count );
     322        monitor_restore;
    305323}
    306324
     
    315333        LIB_DEBUG_DO(
    316334                thread_desc * this_thrd = this_thread;
    317                 if ( this->monitor_count != this_thrd->current_monitor_count ) {
    318                         abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count );
     335                if ( this->monitor_count != this_thrd->monitors.size ) {
     336                        abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->monitors.size );
    319337                }
    320338
    321339                for(int i = 0; i < this->monitor_count; i++) {
    322                         if ( this->monitors[i] != this_thrd->current_monitors[i] ) {
    323                                 abortf( "Signal on condition %p made with different monitor, expected %p got %i", this, this->monitors[i], this_thrd->current_monitors[i] );
     340                        if ( this->monitors[i] != this_thrd->monitors.list[i] ) {
      341                                abortf( "Signal on condition %p made with different monitor, expected %p got %p", this, this->monitors[i], this_thrd->monitors.list[i] );
    324342                        }
    325343                }
     
    364382
    365383        //save contexts
    366         save_recursion( monitors, recursions, count );
     384        monitor_save;
    367385
    368386        //Find the thread to run
    369387        thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
    370         for(int i = 0; i < count; i++) {
    371                 set_owner( monitors[i], signallee );
    372         }
     388        set_owner( monitors, count, signallee );
    373389
    374390        //Everything is ready to go to sleep
     
    379395
    380396
    381         //We are back, restore the owners and recursions
    382         lock_all( locks, count );
    383         restore_recursion( monitors, recursions, count );
    384         unlock_all( locks, count );
     397        //We are back, restore the masks and recursions
     398        monitor_restore;
    385399
    386400        return true;
     
    397411
    398412//-----------------------------------------------------------------------------
    399 // Internal scheduling
    400 int __accept_internal( unsigned short acc_count, __acceptable_t * acceptables ) {
    401         thread_desc * thrd = this_thread;
     413// External scheduling
     414// cases to handle :
     415//      - target already there :
     416//              block and wake
     417//      - dtor already there
     418//              put thread on signaller stack
     419//      - non-blocking
      420//              return immediately
     421//      - timeout
     422//              return timeout
     423//      - block
     424//              setup mask
     425//              block
     426void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {
      427        // This statement doesn't have a contiguous list of monitors...
     428        // Create one!
     429        short max = count_max( mask );
     430        monitor_desc * mon_storage[max];
     431        short actual_count = aggregate( mon_storage, mask );
     432
     433        if(actual_count == 0) return;
    402434
    403435        // Create storage for monitor context
    404         monitor_ctx( acceptables->monitors, acceptables->count );
    405 
    406         // Lock all monitors (aggregates the lock them as well)
     436        monitor_ctx( mon_storage, actual_count );
     437
     438        // Lock all monitors (aggregates the locks as well)
    407439        lock_all( monitors, locks, count );
    408440
    409         // Create the node specific to this wait operation
    410         wait_ctx_primed( thrd, 0 );
    411 
    412         // Check if the entry queue
    413         thread_desc * next = search_entry_queue( acceptables, acc_count, monitors, count );
    414 
    415         LIB_DEBUG_PRINT_SAFE("Owner(s) :");
    416         for(int i = 0; i < count; i++) {
    417                 LIB_DEBUG_PRINT_SAFE(" %p", monitors[i]->owner );
    418         }
    419         LIB_DEBUG_PRINT_SAFE("\n");
    420 
    421         LIB_DEBUG_PRINT_SAFE("Passing mon to %p\n", next);
    422 
    423         if( !next ) {
    424                 // Update acceptables on the current monitors
    425                 for(int i = 0; i < count; i++) {
    426                         monitors[i]->acceptables = acceptables;
    427                         monitors[i]->acceptable_count = acc_count;
    428                 }
    429         }
    430         else {
    431                 for(int i = 0; i < count; i++) {
    432                         set_owner( monitors[i], next );
    433                 }
    434         }
    435 
    436 
    437         save_recursion( monitors, recursions, count );
    438 
    439 
    440         // Everything is ready to go to sleep
    441         BlockInternal( locks, count, &next, next ? 1 : 0 );
    442 
    443 
     441        {
      442                // Check if the entry queue already holds a matching thread
     443                thread_desc * next; int index;
     444                [next, index] = search_entry_queue( mask, monitors, count );
     445
     446                if( next ) {
     447                        if( mask.clauses[index].is_dtor ) {
     448                                #warning case not implemented
     449                        }
     450                        else {
     451                                blockAndWake( &next, 1 );
     452                        }
     453
     454                        return index;
     455                }
     456        }
     457
     458
     459        if( duration == 0 ) {
     460                unlock_all( locks, count );
     461                return;
     462        }
     463
     464
      465        verifyf( duration < 0, "Timeout on waitfor statements not supported yet.");
     466
     467
     468        monitor_save;
     469        set_mask( monitors, count, mask );
     470
     471        BlockInternal( locks, count );       // Everything is ready to go to sleep
    444472        //WE WOKE UP
    445 
    446 
    447         //We are back, restore the owners and recursions
    448         lock_all( locks, count );
    449         restore_recursion( monitors, recursions, count );
    450         int acc_idx = monitors[0]->accepted_index;
    451         unlock_all( locks, count );
    452 
    453         return acc_idx;
     473        monitor_restore;                     //We are back, restore the masks and recursions
    454474}
    455475
     
    458478
    459479static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
      480        LIB_DEBUG_PRINT_SAFE("Kernel :   Setting owner of %p to %p (was %p)\n", this, owner, this->owner );
     481
    460482        //Pass the monitor appropriately
    461483        this->owner = owner;
     
    463485        //We are passing the monitor to someone else, which means recursion level is not 0
    464486        this->recursion = owner ? 1 : 0;
     487}
     488
     489static inline void set_owner( monitor_desc ** monitors, short count, thread_desc * owner ) {
     490        for( int i = 0; i < count; i++ ) {
     491                set_owner( monitors[i], owner );
     492        }
     493}
     494
     495static inline void set_mask( monitor_desc ** storage, short count, const __waitfor_mask_t & mask ) {
     496        for(int i = 0; i < count; i++) {
     497                storage[i]->mask = mask;
     498        }
    465499}
    466500
     
    485519}
    486520
    487 static inline int is_accepted( thread_desc * owner, monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() ) {
    488         __acceptable_t* accs = this->acceptables; // Optim
    489         int acc_cnt = this->acceptable_count;
     521static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
     522        __acceptable_t * it = this->mask.clauses; // Optim
     523        int count = this->mask.size;
    490524
    491525        // Check if there are any acceptable functions
    492         if( !accs ) return -1;
     526        if( !it ) return false;
    493527
    494528        // If this isn't the first monitor to test this, there is no reason to repeat the test.
    495         if( this != group[0] ) return group[0]->accepted_index;
     529        if( this != group[0] ) return group[0]->mask.accepted >= 0;
    496530
    497531        // For all acceptable functions check if this is the current function.
    498         OUT_LOOP:
    499         for( int i = 0; i < acc_cnt; i++ ) {
    500                 __acceptable_t * acc = &accs[i];
    501 
    502                 // if function matches, check the monitors
    503                 if( acc->func == func ) {
    504 
    505                         // If the group count is different then it can't be a match
    506                         if( acc->count != group_cnt ) return -1;
    507 
    508                         // Check that all the monitors match
    509                         for( int j = 0; j < group_cnt; j++ ) {
    510                                 // If not a match, check next function
    511                                 if( acc->monitors[j] != group[j] ) continue OUT_LOOP;
    512                         }
    513 
    514                         // It's a complete match, accept the call
    515                         return i;
     532        for( short i = 0; i < count; i++, it++ ) {
     533                if( *it == group ) {
     534                        *this->mask.accepted = i;
     535                        return true;
    516536                }
    517537        }
    518538
    519539        // No function matched
    520         return -1;
     540        return false;
    521541}
    522542
     
    564584}
    565585
    566 
    567 static inline void save_recursion   ( monitor_desc ** ctx, unsigned int * /*out*/ recursions, unsigned short count ) {
     586static inline void save   ( monitor_desc ** ctx, short count, __attribute((unused)) spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
    568587        for( int i = 0; i < count; i++ ) {
    569588                recursions[i] = ctx[i]->recursion;
    570         }
    571 }
    572 
    573 static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count ) {
     589                masks[i]      = ctx[i]->mask;
     590        }
     591}
     592
      593static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*in */ recursions, __waitfor_mask_t * /*in */ masks ) {
     594        lock_all( locks, count );
    574595        for( int i = 0; i < count; i++ ) {
    575596                ctx[i]->recursion = recursions[i];
    576         }
     597                ctx[i]->mask      = masks[i];
     598        }
     599        unlock_all( locks, count );
    577600}
    578601
     
    607630        if( !this->monitors ) {
    608631                // LIB_DEBUG_PRINT_SAFE("Branding\n");
    609                 assertf( thrd->current_monitors != NULL, "No current monitor to brand condition %p", thrd->current_monitors );
    610                 this->monitor_count = thrd->current_monitor_count;
     632                assertf( thrd->monitors.list != NULL, "No current monitor to brand condition %p", thrd->monitors.list );
     633                this->monitor_count = thrd->monitors.size;
    611634
    612635                this->monitors = malloc( this->monitor_count * sizeof( *this->monitors ) );
    613636                for( int i = 0; i < this->monitor_count; i++ ) {
    614                         this->monitors[i] = thrd->current_monitors[i];
    615                 }
    616         }
    617 }
    618 
    619 static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val ) {
    620         if( !val ) return end;
    621 
    622         for(int i = 0; i <= end; i++) {
    623                 if( thrds[i] == val ) return end;
    624         }
    625 
    626         thrds[end] = val;
    627         return end + 1;
    628 }
    629 
    630 
    631 static inline bool match( __acceptable_t * acc, thread_desc * thrd ) {
    632         verify( thrd );
    633         verify( acc );
    634         if( acc->func != thrd->current_monitor_func ) return false;
    635 
    636         return true;
    637 }
    638 
    639 static inline thread_desc * search_entry_queue( __acceptable_t * acceptables, int acc_count, monitor_desc ** monitors, int count ) {
     637                        this->monitors[i] = thrd->monitors.list[i];
     638                }
     639        }
     640}
     641
     642static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc ** monitors, int count ) {
    640643
    641644        __thread_queue_t * entry_queue = &monitors[0]->entry_queue;
     
    644647        for(    thread_desc ** thrd_it = &entry_queue->head;
    645648                *thrd_it;
    646                 thrd_it = &(*thrd_it)->next)
    647         {
     649                thrd_it = &(*thrd_it)->next
     650        ) {
    648651                // For each acceptable check if it matches
    649                 __acceptable_t * acc_end = acceptables + acc_count;
    650                 for( __acceptable_t * acc_it = acceptables; acc_it != acc_end; acc_it++ ) {
     652                int i = 0;
     653                __acceptable_t * end = mask.clauses + mask.size;
     654                for( __acceptable_t * it = mask.clauses; it != end; it++, i++ ) {
    651655                        // Check if we have a match
    652                         if( match( acc_it, *thrd_it ) ) {
     656                        if( *it == (*thrd_it)->monitors ) {
    653657
    654658                                // If we have a match return it
    655659                                // after removeing it from the entry queue
    656                                 return remove( entry_queue, thrd_it );
     660                                return [remove( entry_queue, thrd_it ), i];
    657661                        }
    658662                }
    659663        }
    660664
    661         return NULL;
    662 }
     665        return [0, -1];
     666}
     667
     668forall(dtype T | sized( T ))
     669static inline short insert_unique( T ** array, short & size, T * val ) {
     670        if( !val ) return size;
     671
      672        for(int i = 0; i < size; i++) {
     673                if( array[i] == val ) return size;
     674        }
     675
     676        array[size] = val;
     677        size = size + 1;
     678        return size;
     679}
     680
     681static inline short count_max( const __waitfor_mask_t & mask ) {
     682        short max = 0;
     683        for( int i = 0; i < mask.size; i++ ) {
     684                max += mask.clauses[i].size;
     685        }
     686        return max;
     687}
     688
     689static inline short aggregate( monitor_desc ** storage, const __waitfor_mask_t & mask ) {
     690        short size = 0;
     691        for( int i = 0; i < mask.size; i++ ) {
     692                for( int j = 0; j < mask.clauses[i].size; j++) {
     693                        insert_unique( storage, size, mask.clauses[i].list[j] );
     694                }
     695        }
     696        qsort( storage, size );
     697        return size;
     698}
     699
    663700void ?{}( __condition_blocked_queue_t & this ) {
    664701        this.head = NULL;
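
The new helpers count_max, insert_unique, and aggregate build the flat, duplicate-free, sorted monitor list that __waitfor_internal locks before searching the entry queue. Below is a self-contained plain-C sketch of that aggregation under simplified types; the clause struct and cmp_monitor comparator are assumptions, and the sketch uses the standard C qsort, whereas the two-argument qsort in the diff appears to be the CFA stdlib routine.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Plain-C sketch of the aggregation at the top of __waitfor_internal: flatten
   every clause's monitor list into one array, drop duplicates, then sort so
   monitors are always acquired in a consistent order. monitor_desc is opaque
   and the clause layout is simplified; this is not the library code. */
struct monitor_desc;

struct clause {
        struct monitor_desc ** list;
        short                  size;
};

/* Mirrors insert_unique: append val only if it is not already present. */
static short insert_unique( struct monitor_desc ** array, short * size, struct monitor_desc * val ) {
        if( !val ) return *size;
        for( int i = 0; i < *size; i++ ) {
                if( array[i] == val ) return *size;
        }
        array[(*size)++] = val;
        return *size;
}

static int cmp_monitor( const void * a, const void * b ) {
        uintptr_t pa = (uintptr_t)*(struct monitor_desc * const *)a;
        uintptr_t pb = (uintptr_t)*(struct monitor_desc * const *)b;
        return (pa > pb) - (pa < pb);
}

/* Mirrors aggregate: storage must hold count_max entries, i.e. the sum of all clause sizes. */
static short aggregate( struct monitor_desc ** storage, const struct clause * clauses, short count ) {
        short size = 0;
        for( int i = 0; i < count; i++ ) {
                for( int j = 0; j < clauses[i].size; j++ ) {
                        insert_unique( storage, &size, clauses[i].list[j] );
                }
        }
        qsort( storage, size, sizeof(*storage), cmp_monitor );
        return size;
}

int main(void) {
        struct monitor_desc * m1 = (struct monitor_desc *)0x30;
        struct monitor_desc * m2 = (struct monitor_desc *)0x10;
        struct monitor_desc * a[] = { m1, m2 };
        struct monitor_desc * b[] = { m2 };
        struct clause clauses[] = { { a, 2 }, { b, 1 } };
        struct monitor_desc * storage[3];
        printf( "unique monitors: %d\n", aggregate( storage, clauses, 2 ) );   /* prints 2 */
        return 0;
}
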
  • src/libcfa/concurrency/preemption.c

    r1755226 rf265042  
    328328                siginfo_t info;
    329329                int sig = sigwaitinfo( &mask, &info );
     330
     331                if( sig < 0 ) {
     332                        //Error!
     333                        int err = errno;
     334                        switch( err ) {
     335                                case EAGAIN :
     336                                case EINTR :
     337                                        continue;
      338                                case EINVAL :
     339                                        abortf("Timeout was invalid.");
     340                                default:
     341                                        abortf("Unhandled error %d", err);
     342                        }
     343                }
    330344
    331345                // If another signal arrived something went wrong
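
The added block makes the sigwait loop retry on EAGAIN and EINTR and abort on EINVAL or any unexpected errno. Below is a standalone POSIX C sketch of the same retry policy; die() is a hypothetical stand-in for the runtime's abortf(), and the SIGUSR1 setup exists only so the example terminates.

#define _POSIX_C_SOURCE 200809L
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

/* die() is a stand-in for the runtime's abortf(); hypothetical, for this sketch only. */
static void die( const char * msg ) { fprintf( stderr, "%s\n", msg ); exit( EXIT_FAILURE ); }

int main(void) {
        sigset_t mask;
        sigemptyset( &mask );
        sigaddset( &mask, SIGUSR1 );
        sigprocmask( SIG_BLOCK, &mask, NULL );   /* sigwaitinfo requires the signal to be blocked */
        raise( SIGUSR1 );                        /* make a signal pending so the loop terminates */

        for(;;) {
                siginfo_t info;
                int sig = sigwaitinfo( &mask, &info );

                if( sig < 0 ) {
                        /* same policy as the patched loop: retry transient errors, abort on bad arguments */
                        int err = errno;
                        switch( err ) {
                                case EAGAIN :
                                case EINTR  : continue;
                                case EINVAL : die( "Timeout was invalid." );
                                default     : die( "Unhandled error" );
                        }
                }

                printf( "received signal %d\n", sig );
                break;
        }
        return 0;
}
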
  • src/libcfa/concurrency/thread

    r1755226 rf265042  
    3636forall( dtype T | is_thread(T) )
    3737static inline coroutine_desc* get_coroutine(T & this) {
    38         return &get_thread(this)->cor;
     38        return &get_thread(this)->self_cor;
    3939}
    4040
    4141forall( dtype T | is_thread(T) )
    4242static inline monitor_desc* get_monitor(T & this) {
    43         return &get_thread(this)->mon;
     43        return &get_thread(this)->self_mon;
    4444}
    4545
    4646static inline coroutine_desc* get_coroutine(thread_desc * this) {
    47         return &this->cor;
     47        return &this->self_cor;
    4848}
    4949
    5050static inline monitor_desc* get_monitor(thread_desc * this) {
    51         return &this->mon;
     51        return &this->self_mon;
    5252}
    5353
  • src/libcfa/concurrency/thread.c

    r1755226 rf265042  
    3333
    3434void ?{}(thread_desc& this) {
    35         (this.cor){};
    36         this.cor.name = "Anonymous Coroutine";
    37         this.mon.owner = &this;
    38         this.mon.recursion = 1;
     35        (this.self_cor){};
     36        this.self_cor.name = "Anonymous Coroutine";
     37        this.self_mon.owner = &this;
     38        this.self_mon.recursion = 1;
     39        this.self_mon_p = &this.self_mon;
    3940        this.next = NULL;
    4041
    41         this.current_monitors      = &this.mon;
    42         this.current_monitor_count = 1;
     42        (this.monitors){ &this.self_mon_p, 1, (fptr_t)0 };
    4343}
    4444
    4545void ^?{}(thread_desc& this) {
    46         ^(this.cor){};
     46        ^(this.self_cor){};
    4747}
    4848
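
The rewritten constructor keeps a stable pointer to the thread's own monitor (self_mon_p) and initializes the held-monitor group as { &this.self_mon_p, 1, (fptr_t)0 }, so a new thread starts out holding exactly its own monitor. The following plain-C sketch mirrors that initialization; the struct layouts and the thread_init name are simplified assumptions, not the CFA definitions.

#include <stdio.h>

/* Plain-C sketch of the new thread_desc initialization: the thread's own
   monitor is stored by value, self_mon_p keeps a stable pointer to it, and the
   initial monitor group points at that pointer. */
typedef void (*fptr_t)(void);

struct monitor_desc {
        void *       owner;
        unsigned int recursion;
};

struct monitor_group {
        struct monitor_desc ** list;
        short                  size;
        fptr_t                 func;
};

struct thread_desc {
        struct monitor_desc   self_mon;    /* monitor body used for mutual exclusion */
        struct monitor_desc * self_mon_p;  /* pointer with sufficient lifetime for the group below */
        struct monitor_group  monitors;    /* monitors currently held by this thread */
};

static void thread_init( struct thread_desc * this ) {
        this->self_mon.owner     = this;
        this->self_mon.recursion = 1;
        this->self_mon_p         = &this->self_mon;
        /* every thread starts out holding exactly its own monitor */
        struct monitor_group group = { &this->self_mon_p, 1, (fptr_t)0 };
        this->monitors = group;
}

int main(void) {
        struct thread_desc t;
        thread_init( &t );
        printf( "initial group size: %d, recursion: %u\n", t.monitors.size, t.self_mon.recursion );
        return 0;
}
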