Timestamp:
Nov 8, 2017, 5:43:33 PM
Author:
Aaron Moss <a3moss@…>
Branches:
ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, stuck-waitfor-destruct, with_gc
Children:
954908d
Parents:
78315272 (diff), e35f30a (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Location:
src/libcfa/concurrency
Files:
10 edited

  • src/libcfa/concurrency/coroutine.c

    r78315272 r3f7e12cb  
    123123        if(pageSize == 0ul) pageSize = sysconf( _SC_PAGESIZE );
    124124
    125         LIB_DEBUG_PRINT_SAFE("FRED");
    126 
    127125        size_t cxtSize = libCeiling( sizeof(machine_context_t), 8 ); // minimum alignment
    128126
     
    158156                this->limit = (char *)libCeiling( (unsigned long)this->storage, 16 ); // minimum alignment
    159157        } // if
    160         assertf( this->size >= MinStackSize, "Stack size %d provides less than minimum of %d bytes for a stack.", this->size, MinStackSize );
     158        assertf( this->size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", this->size, MinStackSize );
    161159
    162160        this->base = (char *)this->limit + this->size;
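
    The assertf fix above tracks the type change of coStack_t.size to size_t (see the invoke.h diff below): printing a size_t with %d is undefined behaviour on LP64 targets, where int is 4 bytes but size_t is 8. A minimal sketch of the issue, in plain C:

      #include <stdio.h>
      #include <stddef.h>

      int main(void) {
              size_t size = 65536;
              // printf( "Stack size %d\n", size );  // wrong: %d consumes an int, not a size_t
              printf( "Stack size %zd\n", size );    // %zd/%zu match the width of size_t
              return 0;
      }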
  • src/libcfa/concurrency/invoke.h

    r78315272 r3f7e12cb  
    2525#define _INVOKE_H_
    2626
    27       #define unlikely(x)    __builtin_expect(!!(x), 0)
    28       #define thread_local _Thread_local
    29 
    30       typedef void (*fptr_t)();
    31 
    32       struct spinlock {
    33             volatile int lock;
    34             #ifdef __CFA_DEBUG__
    35                   const char * prev_name;
    36                   void* prev_thrd;
    37             #endif
    38       };
    39 
    40       struct __thread_queue_t {
    41             struct thread_desc * head;
    42             struct thread_desc ** tail;
    43       };
    44 
    45       struct __condition_stack_t {
    46             struct __condition_criterion_t * top;
    47       };
    48 
    49       #ifdef __CFORALL__
    50       extern "Cforall" {
    51             void ?{}( struct __thread_queue_t & );
    52             void append( struct __thread_queue_t *, struct thread_desc * );
    53             struct thread_desc * pop_head( struct __thread_queue_t * );
    54             struct thread_desc * remove( struct __thread_queue_t *, struct thread_desc ** );
    55 
    56             void ?{}( struct __condition_stack_t & );
    57             void push( struct __condition_stack_t *, struct __condition_criterion_t * );
    58             struct __condition_criterion_t * pop( struct __condition_stack_t * );
    59 
    60             void ?{}(spinlock & this);
    61             void ^?{}(spinlock & this);
    62       }
    63       #endif
    64 
    65       struct coStack_t {
    66             unsigned int size;                        // size of stack
    67             void *storage;                            // pointer to stack
    68             void *limit;                              // stack grows towards stack limit
    69             void *base;                               // base of stack
    70             void *context;                            // address of cfa_context_t
    71             void *top;                                // address of top of storage
    72             bool userStack;                           // whether or not the user allocated the stack
    73       };
    74 
    75       enum coroutine_state { Halted, Start, Inactive, Active, Primed };
    76 
    77       struct coroutine_desc {
    78             struct coStack_t stack;                   // stack information of the coroutine
    79             const char *name;                         // textual name for coroutine/task, initialized by uC++ generated code
    80             int errno_;                               // copy of global UNIX variable errno
    81             enum coroutine_state state;               // current execution status for coroutine
    82             struct coroutine_desc * starter;          // first coroutine to resume this one
    83             struct coroutine_desc * last;             // last coroutine to resume this one
    84       };
    85 
    86       struct monitor_desc {
    87             struct spinlock lock;                     // spinlock to protect internal data
    88             struct thread_desc * owner;               // current owner of the monitor
    89             struct __thread_queue_t entry_queue;      // queue of threads that are blocked waiting for the monitor
    90             struct __condition_stack_t signal_stack;  // stack of conditions to run next once we exit the monitor
    91             unsigned int recursion;                   // monitor routines can be called recursively, we need to keep track of that
    92 
    93             struct __acceptable_t * acceptables;      // list of acceptable functions, null if any
    94             unsigned short acceptable_count;          // number of acceptable functions
    95             short accepted_index;                     // the index of the accepted function, -1 if none
    96        };
    97 
    98       struct thread_desc {
    99             // Core threading fields
    100             struct coroutine_desc cor;                // coroutine body used to store context
    101             struct monitor_desc mon;                  // monitor body used for mutual exclusion
    102 
     103             // Linked-list fields
     104             struct thread_desc * next;                // intrusive link field for threads
    105 
    106             // Current status related to monitors
    107             struct monitor_desc ** current_monitors;  // currently held monitors
    108             unsigned short current_monitor_count;     // number of currently held monitors
    109             fptr_t current_monitor_func;              // last function that acquired monitors
     27        #define unlikely(x)    __builtin_expect(!!(x), 0)
     28        #define thread_local _Thread_local
     29
     30        typedef void (*fptr_t)();
     31        typedef int_fast16_t __lock_size_t;
     32
     33        struct spinlock {
     34                volatile int lock;
     35                #ifdef __CFA_DEBUG__
     36                        const char * prev_name;
     37                        void* prev_thrd;
     38                #endif
     39        };
     40
     41        struct __thread_queue_t {
     42                struct thread_desc * head;
     43                struct thread_desc ** tail;
     44        };
     45
     46        struct __condition_stack_t {
     47                struct __condition_criterion_t * top;
     48        };
     49
     50        #ifdef __CFORALL__
     51        extern "Cforall" {
     52                void ?{}( struct __thread_queue_t & );
     53                void append( struct __thread_queue_t &, struct thread_desc * );
     54                struct thread_desc * pop_head( struct __thread_queue_t & );
     55                struct thread_desc * remove( struct __thread_queue_t &, struct thread_desc ** );
     56
     57                void ?{}( struct __condition_stack_t & );
     58                void push( struct __condition_stack_t &, struct __condition_criterion_t * );
     59                struct __condition_criterion_t * pop( struct __condition_stack_t & );
     60
     61                void  ?{}(spinlock & this);
     62                void ^?{}(spinlock & this);
     63        }
     64        #endif
     65
     66        struct coStack_t {
     67                // size of stack
     68                size_t size;
     69
     70                // pointer to stack
     71                void *storage;
     72
     73                // stack grows towards stack limit
     74                void *limit;
     75
     76                // base of stack
     77                void *base;
     78
     79                // address of cfa_context_t
     80                void *context;
     81
     82                // address of top of storage
     83                void *top;
     84
     85                // whether or not the user allocated the stack
     86                bool userStack;
     87        };
     88
     89        enum coroutine_state { Halted, Start, Inactive, Active, Primed };
     90
     91        struct coroutine_desc {
     92                // stack information of the coroutine
     93                struct coStack_t stack;
     94
     95                // textual name for coroutine/task, initialized by uC++ generated code
     96                const char *name;
     97
     98                // copy of global UNIX variable errno
     99                int errno_;
     100
     101                // current execution status for coroutine
     102                enum coroutine_state state;
     103
     104                // first coroutine to resume this one
     105                struct coroutine_desc * starter;
     106
     107                // last coroutine to resume this one
     108                struct coroutine_desc * last;
     109        };
     110
     111        struct __waitfor_mask_t {
     112                // the index of the accepted function, -1 if none
     113                short * accepted;
     114
     115                // list of acceptable functions, null if any
     116                struct __acceptable_t * clauses;
     117
     118                // number of acceptable functions
     119                __lock_size_t size;
     120        };
     121
     122        struct monitor_desc {
     123                // spinlock to protect internal data
     124                struct spinlock lock;
     125
     126                // current owner of the monitor
     127                struct thread_desc * owner;
     128
     129                // queue of threads that are blocked waiting for the monitor
     130                struct __thread_queue_t entry_queue;
     131
     132                // stack of conditions to run next once we exit the monitor
     133                struct __condition_stack_t signal_stack;
     134
     135                // monitor routines can be called recursively, we need to keep track of that
     136                unsigned int recursion;
     137
     138                // mask used to know if some thread is waiting for something while holding the monitor
     139                struct __waitfor_mask_t mask;
     140
     141                // node used to signal the dtor in a waitfor dtor
     142                struct __condition_node_t * dtor_node;
     143        };
     144
     145        struct __monitor_group_t {
     146                // currently held monitors
     147                struct monitor_desc ** list;
     148
     149                // number of currently held monitors
     150                __lock_size_t size;
     151
     152                // last function that acquired monitors
     153                fptr_t func;
     154        };
     155
     156        struct thread_desc {
     157                // Core threading fields
     158                // coroutine body used to store context
     159                struct coroutine_desc  self_cor;
     160
     161                // monitor body used for mutual exclusion
     162                struct monitor_desc    self_mon;
     163
     164                // pointer to monitor with sufficient lifetime for current monitors
     165                struct monitor_desc *  self_mon_p;
     166
     167                // monitors currently held by this thread
     168                struct __monitor_group_t monitors;
     169
      170                // Linked-list fields
      171                // intrusive link field for threads
     172                struct thread_desc * next;
    110173     };
     174
     175     #ifdef __CFORALL__
     176     extern "Cforall" {
     177                static inline monitor_desc * ?[?]( const __monitor_group_t & this, ptrdiff_t index ) {
     178                        return this.list[index];
     179                }
     180
     181                static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {
     182                        if( (lhs.list != 0) != (rhs.list != 0) ) return false;
     183                        if( lhs.size != rhs.size ) return false;
     184                        if( lhs.func != rhs.func ) return false;
     185
     186                        // Check that all the monitors match
     187                        for( int i = 0; i < lhs.size; i++ ) {
     188                                // If not a match, check next function
     189                                if( lhs[i] != rhs[i] ) return false;
     190                        }
     191
     192                        return true;
     193                }
     194        }
     195        #endif
    111196
    112197#endif //_INVOKE_H_
     
    115200#define _INVOKE_PRIVATE_H_
    116201
    117       struct machine_context_t {
    118             void *SP;
    119             void *FP;
    120             void *PC;
    121       };
    122 
     123       // assembler routines that perform the context switch
    124       extern void CtxInvokeStub( void );
    125       void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
    126 
    127       #if   defined( __x86_64__ )
    128       #define CtxGet( ctx ) __asm__ ( \
    129                   "movq %%rsp,%0\n"   \
    130                   "movq %%rbp,%1\n"   \
    131             : "=rm" (ctx.SP), "=rm" (ctx.FP) )
    132       #elif defined( __i386__ )
    133       #define CtxGet( ctx ) __asm__ ( \
    134                   "movl %%esp,%0\n"   \
    135                   "movl %%ebp,%1\n"   \
    136             : "=rm" (ctx.SP), "=rm" (ctx.FP) )
    137       #endif
     202        struct machine_context_t {
     203                void *SP;
     204                void *FP;
     205                void *PC;
     206        };
     207
      208        // assembler routines that perform the context switch
     209        extern void CtxInvokeStub( void );
     210        void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
     211
     212        #if   defined( __x86_64__ )
     213        #define CtxGet( ctx ) __asm__ ( \
     214                        "movq %%rsp,%0\n"   \
     215                        "movq %%rbp,%1\n"   \
     216                : "=rm" (ctx.SP), "=rm" (ctx.FP) )
     217        #elif defined( __i386__ )
     218        #define CtxGet( ctx ) __asm__ ( \
     219                        "movl %%esp,%0\n"   \
     220                        "movl %%ebp,%1\n"   \
     221                : "=rm" (ctx.SP), "=rm" (ctx.FP) )
     222        #endif
    138223
    139224#endif //_INVOKE_PRIVATE_H_
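
    The __monitor_group_t operators added above define when two monitor groups denote the same acquisition: both lists null or both non-null, equal sizes, the same acquiring function, and pairwise-equal monitors. A hedged usage sketch; the routine work and the monitors m1, m2 are hypothetical:

      void work();                    // hypothetical routine that acquired the monitors
      monitor_desc m1, m2;            // hypothetical monitors
      monitor_desc * mons[2] = { &m1, &m2 };
      __monitor_group_t g1 = { mons, 2, (fptr_t)work };
      __monitor_group_t g2 = { mons, 2, (fptr_t)work };

      monitor_desc * first = g1[0];   // ?[?] indexes the group's monitor list
      bool same = g1 == g2;           // ?==? checks list, size and func pairwise; true here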
  • src/libcfa/concurrency/kernel

    r78315272 r3f7e12cb  
    2626//-----------------------------------------------------------------------------
    2727// Locks
    28 void lock      ( spinlock * DEBUG_CTX_PARAM2 );       // Lock the spinlock, spin if already acquired
    29 void lock_yield( spinlock * DEBUG_CTX_PARAM2 );       // Lock the spinlock, yield repeatedly if already acquired
    30 bool try_lock  ( spinlock * DEBUG_CTX_PARAM2 );       // Lock the spinlock, return false if already acquired
    31 void unlock    ( spinlock * );                        // Unlock the spinlock
     28// Lock the spinlock, spin if already acquired
     29void lock      ( spinlock * DEBUG_CTX_PARAM2 );
     30
     31// Lock the spinlock, yield repeatedly if already acquired
     32void lock_yield( spinlock * DEBUG_CTX_PARAM2 );
     33
     34// Lock the spinlock, return false if already acquired
     35bool try_lock  ( spinlock * DEBUG_CTX_PARAM2 );
     36
     37// Unlock the spinlock
     38void unlock    ( spinlock * );
    3239
    3340struct semaphore {
     
    3946void  ?{}(semaphore & this, int count = 1);
    4047void ^?{}(semaphore & this);
    41 void P(semaphore * this);
    42 void V(semaphore * this);
     48void   P (semaphore & this);
     49void   V (semaphore & this);
    4350
    4451
     
    4653// Cluster
    4754struct cluster {
    48         spinlock ready_queue_lock;                      // Ready queue locks
    49         __thread_queue_t ready_queue;                   // Ready queue for threads
    50         unsigned long long int preemption;              // Preemption rate on this cluster
     55        // Ready queue locks
     56        spinlock ready_queue_lock;
     57
     58        // Ready queue for threads
     59        __thread_queue_t ready_queue;
     60
     61        // Preemption rate on this cluster
     62        unsigned long long int preemption;
    5163};
    5264
    53 void ?{}(cluster & this);
     65void ?{} (cluster & this);
    5466void ^?{}(cluster & this);
    5567
     
    7991struct processor {
    8092        // Main state
     81         struct processorCtx_t * runner;                 // Coroutine ctx that keeps the state of the processor
     82         cluster * cltr;                                 // Cluster from which to get threads
     83         pthread_t kernel_thread;                        // Handle to pthreads
      93        // Coroutine ctx that keeps the state of the processor
     94        struct processorCtx_t * runner;
     95
     96        // Cluster from which to get threads
     97        cluster * cltr;
     98
     99        // Handle to pthreads
     100        pthread_t kernel_thread;
    84101
    85102        // Termination
     86         volatile bool do_terminate;                     // Set to true to notify the processor that it should terminate
     87         semaphore terminated;                           // Termination synchronisation
      103        // Set to true to notify the processor that it should terminate
     104        volatile bool do_terminate;
     105
     106        // Termination synchronisation
     107        semaphore terminated;
    88108
    89109        // RunThread data
     90         struct FinishAction finish;                     // Action to do after a thread is run
      110        // Action to do after a thread is run
     111        struct FinishAction finish;
    91112
    92113        // Preemption data
     93         struct alarm_node_t * preemption_alarm;         // Node which is added in the discrete event simulation
     94         bool pending_preemption;                        // If true, a preemption was triggered in an unsafe region; the processor must preempt as soon as possible
      114        // Node which is added in the discrete event simulation
     115        struct alarm_node_t * preemption_alarm;
     116
      117        // If true, a preemption was triggered in an unsafe region; the processor must preempt as soon as possible
     118        bool pending_preemption;
    95119
    96120#ifdef __CFA_DEBUG__
    97         char * last_enable;                             // Last function to enable preemption on this processor
     121        // Last function to enable preemption on this processor
     122        char * last_enable;
    98123#endif
    99124};
    100125
    101 void ?{}(processor & this);
    102 void ?{}(processor & this, cluster * cltr);
     126void  ?{}(processor & this);
     127void  ?{}(processor & this, cluster * cltr);
    103128void ^?{}(processor & this);
    104129
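
    The declarations above migrate P and V (like the queue and stack routines in invoke.h) from pointer to reference receivers, matching the ?{}/^?{} constructor style. A hedged sketch of the resulting call sites:

      semaphore sem = { 1 };  // runs ?{}(semaphore & this, int count = 1)

      P( sem );               // count -= 1; blocks the caller when the count goes negative
      // ... critical section ...
      V( sem );               // count += 1; wakes one queued thread, if any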
  • src/libcfa/concurrency/kernel.c

    r78315272 r3f7e12cb  
    106106
    107107void ?{}( thread_desc & this, current_stack_info_t * info) {
    108         (this.cor){ info };
     108        (this.self_cor){ info };
    109109}
    110110
     
    158158                LIB_DEBUG_PRINT_SAFE("Kernel : core %p signaling termination\n", &this);
    159159                this.do_terminate = true;
    160                 P( &this.terminated );
     160                P( this.terminated );
    161161                pthread_join( this.kernel_thread, NULL );
    162162        }
     
    216216        }
    217217
    218         V( &this->terminated );
     218        V( this->terminated );
    219219
    220220        LIB_DEBUG_PRINT_SAFE("Kernel : core %p terminated\n", this);
     
    328328        // if( !thrd ) return;
    329329        verify( thrd );
    330         verify( thrd->cor.state != Halted );
     330        verify( thrd->self_cor.state != Halted );
    331331
    332332        verify( disable_preempt_count > 0 );
     
    335335
    336336        lock(   &this_processor->cltr->ready_queue_lock DEBUG_CTX2 );
    337         append( &this_processor->cltr->ready_queue, thrd );
     337        append( this_processor->cltr->ready_queue, thrd );
    338338        unlock( &this_processor->cltr->ready_queue_lock );
    339339
     
    344344        verify( disable_preempt_count > 0 );
    345345        lock( &this->ready_queue_lock DEBUG_CTX2 );
    346         thread_desc * head = pop_head( &this->ready_queue );
     346        thread_desc * head = pop_head( this->ready_queue );
    347347        unlock( &this->ready_queue_lock );
    348348        verify( disable_preempt_count > 0 );
     
    373373        assert(thrd);
    374374        disable_interrupts();
    375         assert( thrd->cor.state != Halted );
     375        assert( thrd->self_cor.state != Halted );
    376376        this_processor->finish.action_code = Schedule;
    377377        this_processor->finish.thrd = thrd;
     
    398398}
    399399
    400 void BlockInternal(spinlock ** locks, unsigned short count) {
     400void BlockInternal(spinlock * locks [], unsigned short count) {
    401401        disable_interrupts();
    402402        this_processor->finish.action_code = Release_Multi;
     
    411411}
    412412
    413 void BlockInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) {
     413void BlockInternal(spinlock * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
    414414        disable_interrupts();
    415415        this_processor->finish.action_code = Release_Multi_Schedule;
     
    466466        this_processor = mainProcessor;
    467467        this_thread = mainThread;
    468         this_coroutine = &mainThread->cor;
     468        this_coroutine = &mainThread->self_cor;
    469469
    470470        // Enable preemption
     
    547547        thread_desc * thrd = kernel_data;
    548548
    549         int len = snprintf( abort_text, abort_text_size, "Error occurred while executing task %.256s (%p)", thrd->cor.name, thrd );
    550         __lib_debug_write( STDERR_FILENO, abort_text, len );
     549        int len = snprintf( abort_text, abort_text_size, "Error occurred while executing task %.256s (%p)", thrd->self_cor.name, thrd );
     550        __lib_debug_write( abort_text, len );
    551551
    552552        if ( thrd != this_coroutine ) {
    553553                len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine->name, this_coroutine );
    554                 __lib_debug_write( STDERR_FILENO, abort_text, len );
     554                __lib_debug_write( abort_text, len );
    555555        }
    556556        else {
    557                 __lib_debug_write( STDERR_FILENO, ".\n", 2 );
     557                __lib_debug_write( ".\n", 2 );
    558558        }
    559559}
     
    618618void ^?{}(semaphore & this) {}
    619619
    620 void P(semaphore * this) {
    621         lock( &this->lock DEBUG_CTX2 );
    622         this->count -= 1;
    623         if ( this->count < 0 ) {
     620void P(semaphore & this) {
     621        lock( &this.lock DEBUG_CTX2 );
     622        this.count -= 1;
     623        if ( this.count < 0 ) {
    624624                // queue current task
    625                 append( &this->waiting, (thread_desc *)this_thread );
     625                append( this.waiting, (thread_desc *)this_thread );
    626626
    627627                // atomically release spin lock and block
    628                 BlockInternal( &this->lock );
     628                BlockInternal( &this.lock );
    629629        }
    630630        else {
    631             unlock( &this->lock );
    632         }
    633 }
    634 
    635 void V(semaphore * this) {
     631            unlock( &this.lock );
     632        }
     633}
     634
     635void V(semaphore & this) {
    636636        thread_desc * thrd = NULL;
    637         lock( &this->lock DEBUG_CTX2 );
    638         this->count += 1;
    639         if ( this->count <= 0 ) {
     637        lock( &this.lock DEBUG_CTX2 );
     638        this.count += 1;
     639        if ( this.count <= 0 ) {
    640640                // remove task at head of waiting list
    641                 thrd = pop_head( &this->waiting );
    642         }
    643 
    644         unlock( &this->lock );
     641                thrd = pop_head( this.waiting );
     642        }
     643
     644        unlock( &this.lock );
    645645
    646646        // make new owner
     
    655655}
    656656
    657 void append( __thread_queue_t * this, thread_desc * t ) {
    658         verify(this->tail != NULL);
    659         *this->tail = t;
    660         this->tail = &t->next;
    661 }
    662 
    663 thread_desc * pop_head( __thread_queue_t * this ) {
    664         thread_desc * head = this->head;
     657void append( __thread_queue_t & this, thread_desc * t ) {
     658        verify(this.tail != NULL);
     659        *this.tail = t;
     660        this.tail = &t->next;
     661}
     662
     663thread_desc * pop_head( __thread_queue_t & this ) {
     664        thread_desc * head = this.head;
    665665        if( head ) {
    666                 this->head = head->next;
     666                this.head = head->next;
    667667                if( !head->next ) {
    668                         this->tail = &this->head;
     668                        this.tail = &this.head;
    669669                }
    670670                head->next = NULL;
     
    673673}
    674674
    675 thread_desc * remove( __thread_queue_t * this, thread_desc ** it ) {
     675thread_desc * remove( __thread_queue_t & this, thread_desc ** it ) {
    676676        thread_desc * thrd = *it;
    677677        verify( thrd );
     
    679679        (*it) = thrd->next;
    680680
    681         if( this->tail == &thrd->next ) {
    682                 this->tail = it;
     681        if( this.tail == &thrd->next ) {
     682                this.tail = it;
    683683        }
    684684
    685685        thrd->next = NULL;
    686686
    687         verify( (this->head == NULL) == (&this->head == this->tail) );
    688         verify( *this->tail == NULL );
     687        verify( (this.head == NULL) == (&this.head == this.tail) );
     688        verify( *this.tail == NULL );
    689689        return thrd;
    690690}
     
    694694}
    695695
    696 void push( __condition_stack_t * this, __condition_criterion_t * t ) {
     696void push( __condition_stack_t & this, __condition_criterion_t * t ) {
    697697        verify( !t->next );
    698         t->next = this->top;
    699         this->top = t;
    700 }
    701 
    702 __condition_criterion_t * pop( __condition_stack_t * this ) {
    703         __condition_criterion_t * top = this->top;
     698        t->next = this.top;
     699        this.top = t;
     700}
     701
     702__condition_criterion_t * pop( __condition_stack_t & this ) {
     703        __condition_criterion_t * top = this.top;
    704704        if( top ) {
    705                 this->top = top->next;
     705                this.top = top->next;
    706706                top->next = NULL;
    707707        }
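
    The queue and stack hunks above finish the same pointer-to-reference migration for __thread_queue_t and __condition_stack_t. A hedged sketch of the intrusive-queue call pattern; thrd_a and thrd_b stand for hypothetical threads:

      extern thread_desc * thrd_a, * thrd_b;  // hypothetical threads
      __thread_queue_t q;                     // default ?{} presumably leaves head NULL, tail == &head

      append( q, thrd_a );                    // *tail = t, then tail = &t->next
      append( q, thrd_b );

      thread_desc * first = pop_head( q );    // returns thrd_a and advances head to thrd_b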
  • src/libcfa/concurrency/kernel_private.h

    r78315272 r3f7e12cb  
    4848void BlockInternal(thread_desc * thrd);
    4949void BlockInternal(spinlock * lock, thread_desc * thrd);
    50 void BlockInternal(spinlock ** locks, unsigned short count);
    51 void BlockInternal(spinlock ** locks, unsigned short count, thread_desc ** thrds, unsigned short thrd_count);
     50void BlockInternal(spinlock * locks [], unsigned short count);
     51void BlockInternal(spinlock * locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count);
    5252void LeaveThread(spinlock * lock, thread_desc * thrd);
    5353
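
    The spinlock * locks [] spelling above is the same parameter type as spinlock ** (the array declarator decays to a pointer); the rewrite only documents that the callee expects a contiguous array of count lock pointers. A minimal sketch, with l1 and l2 as hypothetical locks:

      extern spinlock l1, l2;             // hypothetical locks
      spinlock * locks[2] = { &l1, &l2 };
      BlockInternal( locks, 2 );          // decays to spinlock **; releases both locks, then blocks the thread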
  • src/libcfa/concurrency/monitor

    r78315272 r3f7e12cb  
    1010// Created On       : Thu Feb 23 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sat Jul 22 09:59:01 2017
    13 // Update Count     : 3
     12// Last Modified On : Sat Oct  7 18:06:45 2017
     13// Update Count     : 10
    1414//
    1515
     
    2222#include "stdlib"
    2323
     24trait is_monitor(dtype T) {
     25        monitor_desc * get_monitor( T & );
     26        void ^?{}( T & mutex );
     27};
     28
    2429static inline void ?{}(monitor_desc & this) {
    2530        (this.lock){};
    26         this.owner = NULL;
    2731        (this.entry_queue){};
    2832        (this.signal_stack){};
    29         this.recursion = 0;
    30         this.acceptables = NULL;
    31         this.acceptable_count = 0;
    32         this.accepted_index = -1;
     33        this.owner         = NULL;
     34        this.recursion     = 0;
     35        this.mask.accepted = NULL;
     36        this.mask.clauses  = NULL;
     37        this.mask.size     = 0;
     38        this.dtor_node     = NULL;
    3339}
    3440
    3541struct monitor_guard_t {
    3642        monitor_desc ** m;
    37         int count;
     43        __lock_size_t  count;
    3844        monitor_desc ** prev_mntrs;
    39         unsigned short  prev_count;
     45        __lock_size_t   prev_count;
    4046        fptr_t          prev_func;
    4147};
    4248
    43 static inline int ?<?(monitor_desc* lhs, monitor_desc* rhs) {
    44         return ((intptr_t)lhs) < ((intptr_t)rhs);
     49void ?{}( monitor_guard_t & this, monitor_desc ** m, __lock_size_t count, void (*func)() );
     50void ^?{}( monitor_guard_t & this );
     51
     52struct monitor_dtor_guard_t {
     53        monitor_desc * m;
     54        monitor_desc ** prev_mntrs;
     55        __lock_size_t   prev_count;
     56        fptr_t          prev_func;
     57};
     58
     59void ?{}( monitor_dtor_guard_t & this, monitor_desc ** m, void (*func)() );
     60void ^?{}( monitor_dtor_guard_t & this );
     61
     62static inline forall( dtype T | sized(T) | { void ^?{}( T & mutex ); } )
     63void delete( T * th ) {
     64        ^(*th){};
     65        free( th );
    4566}
    46 
    47 void ?{}( monitor_guard_t & this, monitor_desc ** m, int count, void (*func)() );
    48 void ^?{}( monitor_guard_t & this );
    4967
    5068//-----------------------------------------------------------------------------
     
    5270
    5371struct __condition_criterion_t {
    54         bool ready;                                             //Whether or not the criterion is met (True if met)
    55         monitor_desc * target;                          //The monitor this criterion concerns
    56         struct __condition_node_t * owner;              //The parent node to which this criterion belongs
    57         __condition_criterion_t * next;         //Intrusive linked list Next field
     72        // Whether or not the criterion is met (True if met)
     73        bool ready;
     74
     75        // The monitor this criterion concerns
     76        monitor_desc * target;
     77
     78        // The parent node to which this criterion belongs
     79        struct __condition_node_t * owner;
     80
     81        // Intrusive linked list Next field
     82        __condition_criterion_t * next;
    5883};
    5984
    6085struct __condition_node_t {
    61         thread_desc * waiting_thread;                   //Thread that needs to be woken when all criteria are met
    62         __condition_criterion_t * criteria;     //Array of criteria (Criterions are contiguous in memory)
    63         unsigned short count;                           //Number of criterions in the criteria
    64         __condition_node_t * next;                      //Intrusive linked list Next field
    65         uintptr_t user_info;                            //Custom user info accessible before signalling
     86        // Thread that needs to be woken when all criteria are met
     87        thread_desc * waiting_thread;
     88
     89        // Array of criteria (Criterions are contiguous in memory)
     90        __condition_criterion_t * criteria;
     91
     92        // Number of criterions in the criteria
     93        __lock_size_t count;
     94
     95        // Intrusive linked list Next field
     96        __condition_node_t * next;
     97
     98        // Custom user info accessible before signalling
     99        uintptr_t user_info;
    66100};
    67101
     
    71105};
    72106
     107void ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info );
     108void ?{}(__condition_criterion_t & this );
     109void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t * owner );
     110
    73111void ?{}( __condition_blocked_queue_t & );
    74 void append( __condition_blocked_queue_t *, __condition_node_t * );
    75 __condition_node_t * pop_head( __condition_blocked_queue_t * );
     112void append( __condition_blocked_queue_t &, __condition_node_t * );
     113__condition_node_t * pop_head( __condition_blocked_queue_t & );
    76114
    77115struct condition {
    78         __condition_blocked_queue_t blocked;    //Linked list which contains the blocked threads as well as the information needed to unblock them
    79         monitor_desc ** monitors;                       //Array of monitor pointers (Monitors are NOT contiguous in memory)
    80         unsigned short monitor_count;                   //Number of monitors in the array
     116        // Linked list which contains the blocked threads as well as the information needed to unblock them
     117        __condition_blocked_queue_t blocked;
     118
     119        // Array of monitor pointers (Monitors are NOT contiguous in memory)
     120        monitor_desc ** monitors;
     121
     122        // Number of monitors in the array
     123        __lock_size_t monitor_count;
    81124};
    82125
     
    90133}
    91134
    92 void wait( condition * this, uintptr_t user_info = 0 );
    93 bool signal( condition * this );
    94 bool signal_block( condition * this );
    95 static inline bool is_empty( condition * this ) { return !this->blocked.head; }
    96 uintptr_t front( condition * this );
     135              void wait        ( condition & this, uintptr_t user_info = 0 );
     136              bool signal      ( condition & this );
     137              bool signal_block( condition & this );
     138static inline bool is_empty    ( condition & this ) { return !this.blocked.head; }
     139         uintptr_t front       ( condition & this );
    97140
    98141//-----------------------------------------------------------------------------
     
    100143
    101144struct __acceptable_t {
    102         fptr_t func;
    103         unsigned short count;
    104         monitor_desc ** monitors;
     145        __monitor_group_t;
    105146        bool is_dtor;
    106147};
    107148
    108 int __accept_internal( unsigned short count, __acceptable_t * acceptables );
     149void __waitfor_internal( const __waitfor_mask_t & mask, int duration );
    109150
    110151// Local Variables: //
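
    The is_monitor trait and the generic delete above let any sized type with a mutex destructor be destroyed under mutual exclusion before its storage is released. A hedged sketch with a hypothetical monitor type M; the typed malloc is assumed from CFA's stdlib:

      monitor M { int data; };        // hypothetical monitor type
      void ^?{}( M & mutex this ) {}  // mutex destructor, satisfies delete's constraint

      M * m = malloc();               // typed allocation, sized from the return context
      (*m){};                         // construct in place
      delete( m );                    // runs ^(*m){} with the monitor acquired, then free( m )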
  • src/libcfa/concurrency/monitor.c

    r78315272 r3f7e12cb  
    1717
    1818#include <stdlib>
     19#include <inttypes.h>
    1920
    2021#include "libhdr.h"
    2122#include "kernel_private.h"
    2223
     24#include "bits/algorithms.h"
     25
    2326//-----------------------------------------------------------------------------
    2427// Forward declarations
    25 static inline void set_owner( monitor_desc * this, thread_desc * owner );
     28static inline void set_owner ( monitor_desc * this, thread_desc * owner );
     29static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
     30static inline void set_mask  ( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
     31static inline void reset_mask( monitor_desc * this );
     32
    2633static inline thread_desc * next_thread( monitor_desc * this );
    27 static inline int is_accepted( thread_desc * owner, monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() );
    28 
    29 static inline void lock_all( spinlock ** locks, unsigned short count );
    30 static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count );
    31 static inline void unlock_all( spinlock ** locks, unsigned short count );
    32 static inline void unlock_all( monitor_desc ** locks, unsigned short count );
    33 
    34 static inline void save_recursion   ( monitor_desc ** ctx, unsigned int * /*out*/ recursions, unsigned short count );
    35 static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count );
    36 
    37 static inline void init     ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
    38 static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
    39 
    40 static inline thread_desc * check_condition( __condition_criterion_t * );
    41 static inline void brand_condition( condition * );
    42 static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val );
    43 
    44 static inline thread_desc * search_entry_queue( __acceptable_t * acceptables, int acc_count, monitor_desc ** monitors, int count );
     34static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );
     35
     36static inline void lock_all  ( spinlock * locks [], __lock_size_t count );
     37static inline void lock_all  ( monitor_desc * source [], spinlock * /*out*/ locks [], __lock_size_t count );
     38static inline void unlock_all( spinlock * locks [], __lock_size_t count );
     39static inline void unlock_all( monitor_desc * locks [], __lock_size_t count );
     40
     41static inline void save   ( monitor_desc * ctx [], __lock_size_t count, spinlock * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
     42static inline void restore( monitor_desc * ctx [], __lock_size_t count, spinlock * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
     43
     44static inline void init     ( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     45static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     46
     47static inline thread_desc *        check_condition   ( __condition_criterion_t * );
     48static inline void                 brand_condition   ( condition & );
     49static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc * monitors [], __lock_size_t count );
     50
     51forall(dtype T | sized( T ))
     52static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
     53static inline __lock_size_t count_max    ( const __waitfor_mask_t & mask );
     54static inline __lock_size_t aggregate    ( monitor_desc * storage [], const __waitfor_mask_t & mask );
    4555
    4656//-----------------------------------------------------------------------------
    4757// Useful defines
    48 #define wait_ctx(thrd, user_info)                               /* Create the necessary information to use the signaller stack       */ \
    49         __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                   */ \
    50         __condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up          */ \
    51         init( count, monitors, &waiter, criteria );               /* Link everything together                                          */ \
    52 
    53 #define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack       */ \
    54         __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                   */ \
    55         __condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up          */ \
    56         init_push( count, monitors, &waiter, criteria );          /* Link everything together and push it to the AS-Stack              */ \
    57 
    58 #define monitor_ctx( mons, cnt )              /* Define that creates the necessary structs for internal/external scheduling operations */ \
    59         monitor_desc ** monitors = mons;        /* Save the targeted monitors                                                          */ \
    60         unsigned short count = cnt;             /* Save the count to a local variable                                                  */ \
    61         unsigned int recursions[ count ];       /* Save the current recursion levels to restore them later                             */ \
    62         spinlock *   locks     [ count ];       /* We need to pass-in an array of locks to BlockInternal                               */ \
     58#define wait_ctx(thrd, user_info)                               /* Create the necessary information to use the signaller stack                         */ \
     59        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
     60        __condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up                            */ \
     61        init( count, monitors, waiter, criteria );                /* Link everything together                                                            */ \
     62
     63#define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack                         */ \
     64        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
     65        __condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up                            */ \
     66        init_push( count, monitors, waiter, criteria );           /* Link everything together and push it to the AS-Stack                                */ \
     67
     68#define monitor_ctx( mons, cnt )                                /* Define that creates the necessary structs for internal/external scheduling operations */ \
     69        monitor_desc ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
     70        __lock_size_t count = cnt;                                /* Save the count to a local variable                                                  */ \
     71        unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
     72        __waitfor_mask_t masks [ count ];                         /* Save the current waitfor masks to restore them later                                */ \
     73        spinlock *   locks     [ count ];                         /* We need to pass-in an array of locks to BlockInternal                               */ \
     74
     75#define monitor_save    save   ( monitors, count, locks, recursions, masks )
     76#define monitor_restore restore( monitors, count, locks, recursions, masks )
     77
    6378
    6479//-----------------------------------------------------------------------------
     
    6883extern "C" {
    6984        // Enter single monitor
    70         static void __enter_monitor_desc( monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() ) {
     85        static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
    7186                // Lock the monitor spinlock, lock_yield to reduce contention
    7287                lock_yield( &this->lock DEBUG_CTX2 );
     
    7590                LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
    7691
    77                 this->accepted_index = -1;
    7892                if( !this->owner ) {
    7993                        // No one has the monitor, just take it
     
    8397                }
    8498                else if( this->owner == thrd) {
    85                         // We already have the monitor, just not how many times we took it
    86                         verify( this->recursion > 0 );
     99                        // We already have the monitor, just note how many times we took it
    87100                        this->recursion += 1;
    88101
    89102                        LIB_DEBUG_PRINT_SAFE("Kernel :  mon already owned \n");
    90103                }
    91                 else if( (this->accepted_index = is_accepted( thrd, this, group, group_cnt, func)) >= 0 ) {
     104                else if( is_accepted( this, group) ) {
    92105                        // Someone was waiting for us, enter
    93106                        set_owner( this, thrd );
    94107
     108                        // Reset mask
     109                        reset_mask( this );
     110
    95111                        LIB_DEBUG_PRINT_SAFE("Kernel :  mon accepts \n");
    96112                }
     
    99115
    100116                        // Someone else has the monitor, wait in line for it
    101                         append( &this->entry_queue, thrd );
     117                        append( this->entry_queue, thrd );
    102118                        BlockInternal( &this->lock );
    103119
     
    113129                unlock( &this->lock );
    114130                return;
     131        }
     132
     133        static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) {
     134                // Lock the monitor spinlock, lock_yield to reduce contention
     135                lock_yield( &this->lock DEBUG_CTX2 );
     136                thread_desc * thrd = this_thread;
     137
     138                LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
     139
     140
     141                if( !this->owner ) {
     142                        LIB_DEBUG_PRINT_SAFE("Kernel : Destroying free mon %p\n", this);
     143
     144                        // No one has the monitor, just take it
     145                        set_owner( this, thrd );
     146
     147                        unlock( &this->lock );
     148                        return;
     149                }
     150                else if( this->owner == thrd) {
     151                        // We already have the monitor... but we're about to destroy it so the nesting will fail
     152                        // Abort!
     153                        abortf("Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd);
     154                }
     155
     156                __lock_size_t count = 1;
     157                monitor_desc ** monitors = &this;
     158                __monitor_group_t group = { &this, 1, func };
     159                if( is_accepted( this, group) ) {
     160                        LIB_DEBUG_PRINT_SAFE("Kernel :  mon accepts dtor, block and signal it \n");
     161
     162                        // Wake the thread that is waiting for this
     163                        __condition_criterion_t * urgent = pop( this->signal_stack );
     164                        verify( urgent );
     165
     166                        // Reset mask
     167                        reset_mask( this );
     168
     169                        // Create the node specific to this wait operation
     170                        wait_ctx_primed( this_thread, 0 )
     171
     172                        // Someone else has the monitor, wait for it to finish and then run
     173                        BlockInternal( &this->lock, urgent->owner->waiting_thread );
     174
     175                        // Someone was waiting for us, enter
     176                        set_owner( this, thrd );
     177                }
     178                else {
     179                        LIB_DEBUG_PRINT_SAFE("Kernel :  blocking \n");
     180
     181                        wait_ctx( this_thread, 0 )
     182                        this->dtor_node = &waiter;
     183
     184                        // Someone else has the monitor, wait in line for it
     185                        append( this->entry_queue, thrd );
     186                        BlockInternal( &this->lock );
     187
     188                        // BlockInternal will unlock spinlock, no need to unlock ourselves
     189                        return;
     190                }
     191
     192                LIB_DEBUG_PRINT_SAFE("Kernel : Destroying %p\n", this);
     193
    115194        }
    116195
     
    120199                lock_yield( &this->lock DEBUG_CTX2 );
    121200
    122                 verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion );
     201                LIB_DEBUG_PRINT_SAFE("Kernel : %10p Leaving mon %p (%p)\n", this_thread, this, this->owner);
     202
     203                verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", this_thread, this->owner, this->recursion, this );
    123204
    124205                // Leaving a recursion level, decrement the counter
     
    128209                // it means we don't need to do anything
    129210                if( this->recursion != 0) {
     211                        LIB_DEBUG_PRINT_SAFE("Kernel :  recursion still %d\n", this->recursion);
    130212                        unlock( &this->lock );
    131213                        return;
     
    140222                //We need to wake-up the thread
    141223                WakeThread( new_owner );
     224        }
     225
     226        // Leave single monitor for the last time
     227        void __leave_dtor_monitor_desc( monitor_desc * this ) {
     228                LIB_DEBUG_DO(
     229                        if( this_thread != this->owner ) {
     230                                abortf("Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, this_thread, this->owner);
     231                        }
     232                        if( this->recursion != 1 ) {
     233                                abortf("Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
     234                        }
     235                )
    142236        }
    143237
     
    146240        // Should never return
    147241        void __leave_thread_monitor( thread_desc * thrd ) {
    148                 monitor_desc * this = &thrd->mon;
     242                monitor_desc * this = &thrd->self_mon;
    149243
    150244                // Lock the monitor now
     
    153247                disable_interrupts();
    154248
    155                 thrd->cor.state = Halted;
    156 
    157                 verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );
     249                thrd->self_cor.state = Halted;
     250
     251                verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
    158252
    159253                // Leaving a recursion level, decrement the counter
     
    178272// Enter multiple monitor
    179273// relies on the monitor array being sorted
    180 static inline void enter(monitor_desc ** monitors, int count, void (*func)() ) {
    181         for(int i = 0; i < count; i++) {
    182                 __enter_monitor_desc( monitors[i], monitors, count, func );
     274static inline void enter( __monitor_group_t monitors ) {
     275        for( __lock_size_t i = 0; i < monitors.size; i++) {
     276                __enter_monitor_desc( monitors.list[i], monitors );
    183277        }
    184278}
     
    186280// Leave multiple monitor
    187281// relies on the monitor array being sorted
    188 static inline void leave(monitor_desc ** monitors, int count) {
    189         for(int i = count - 1; i >= 0; i--) {
     282static inline void leave(monitor_desc * monitors [], __lock_size_t count) {
     283        for( __lock_size_t i = count - 1; i >= 0; i--) {
    190284                __leave_monitor_desc( monitors[i] );
    191285        }
     
    194288// Ctor for monitor guard
    195289// Sorts monitors before entering
    196 void ?{}( monitor_guard_t & this, monitor_desc ** m, int count, void (*func)() ) {
     290void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) {
    197291        // Store current array
    198292        this.m = m;
     
    200294
    201295        // Sort monitors based on address -> TODO use a sort specialized for small numbers
    202         qsort(this.m, count);
     296        __libcfa_small_sort(this.m, count);
    203297
    204298        // Save previous thread context
    205         this.prev_mntrs = this_thread->current_monitors;
    206         this.prev_count = this_thread->current_monitor_count;
    207         this.prev_func  = this_thread->current_monitor_func;
     299        this.[prev_mntrs, prev_count, prev_func] = this_thread->monitors.[list, size, func];
    208300
    209301        // Update thread context (needed for conditions)
    210         this_thread->current_monitors      = m;
    211         this_thread->current_monitor_count = count;
    212         this_thread->current_monitor_func  = func;
     302        this_thread->monitors.[list, size, func] = [m, count, func];
     303
     304        // LIB_DEBUG_PRINT_SAFE("MGUARD : enter %d\n", count);
    213305
    214306        // Enter the monitors in order
    215         enter( this.m, this.count, func );
     307        __monitor_group_t group = {this.m, this.count, func};
     308        enter( group );
     309
     310        // LIB_DEBUG_PRINT_SAFE("MGUARD : entered\n");
    216311}
    217312
     
    219314// Dtor for monitor guard
    220315void ^?{}( monitor_guard_t & this ) {
     316        // LIB_DEBUG_PRINT_SAFE("MGUARD : leaving %d\n", this.count);
     317
    221318        // Leave the monitors in order
    222319        leave( this.m, this.count );
    223320
     321        // LIB_DEBUG_PRINT_SAFE("MGUARD : left\n");
     322
    224323        // Restore thread context
    225         this_thread->current_monitors      = this.prev_mntrs;
    226         this_thread->current_monitor_count = this.prev_count;
    227         this_thread->current_monitor_func  = this.prev_func;
     324        this_thread->monitors.[list, size, func] = this.[prev_mntrs, prev_count, prev_func];
     325}
     326
     327// Ctor for monitor dtor guard
     328// Takes a single monitor, so no sorting is needed
     329void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) {
     330        // Store current array
     331        this.m = *m;
     332
     333        // Save previous thread context
     334        this.[prev_mntrs, prev_count, prev_func] = this_thread->monitors.[list, size, func];
     335
     336        // Update thread context (needed for conditions)
     337        this_thread->monitors.[list, size, func] = [m, 1, func];
     338
     339        __enter_monitor_dtor( this.m, func );
     340}
     341
     342// Dtor for monitor guard
     343void ^?{}( monitor_dtor_guard_t & this ) {
     344        // Leave the monitors in order
     345        __leave_dtor_monitor_desc( this.m );
     346
     347        // Restore thread context
     348        this_thread->monitors.[list, size, func] = this.[prev_mntrs, prev_count, prev_func];
    228349}
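
monitor_dtor_guard_t is the single-monitor variant used by mutex destructors, which need the separate __enter_monitor_dtor path so a destructor call can rendezvous with a waitfor on ^?{} inside the monitor. Sketch of the declaration that routes through it (type and body illustrative):

    monitor M { int refs; };

    // A destructor with a mutex parameter goes through
    // monitor_dtor_guard_t rather than monitor_guard_t.
    void ^?{}( M & mutex this ) {
        this.refs = 0;
    }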
    229350
    230351//-----------------------------------------------------------------------------
    231352// Internal scheduling types
    232 void ?{}(__condition_node_t & this, thread_desc * waiting_thread, unsigned short count, uintptr_t user_info ) {
     353void ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
    233354        this.waiting_thread = waiting_thread;
    234355        this.count = count;
     
    244365}
    245366
    246 void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t * owner ) {
     367void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t & owner ) {
    247368        this.ready  = false;
    248369        this.target = target;
    249         this.owner  = owner;
     370        this.owner  = &owner;
    250371        this.next   = NULL;
    251372}
     
    253374//-----------------------------------------------------------------------------
    254375// Internal scheduling
    255 void wait( condition * this, uintptr_t user_info = 0 ) {
     376void wait( condition & this, uintptr_t user_info = 0 ) {
    256377        brand_condition( this );
    257378
    258379        // Check that everything is as expected
    259         assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
    260         verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
    261         verifyf( this->monitor_count < 32u, "Excessive monitor count (%i)", this->monitor_count );
     380        assertf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );
     381        verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count );
     382        verifyf( this.monitor_count < 32u, "Excessive monitor count (%"PRIiFAST16")", this.monitor_count );
    262383
    263384        // Create storage for monitor context
    264         monitor_ctx( this->monitors, this->monitor_count );
     385        monitor_ctx( this.monitors, this.monitor_count );
    265386
    266387        // Create the node specific to this wait operation
     
    269390        // Append the current wait operation to the ones already queued on the condition
    270391        // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
    271         append( &this->blocked, &waiter );
    272 
    273         // Lock all monitors (aggregates the lock them as well)
     392        append( this.blocked, &waiter );
     393
     394        // Lock all monitors (aggregates the locks as well)
    274395        lock_all( monitors, locks, count );
    275396
    276         // DON'T unlock, ask the kernel to do it
    277 
    278         // Save monitor state
    279         save_recursion( monitors, recursions, count );
    280 
    281397        // Find the next thread(s) to run
    282         unsigned short thread_count = 0;
     398        __lock_size_t thread_count = 0;
    283399        thread_desc * threads[ count ];
    284         for(int i = 0; i < count; i++) {
    285                 threads[i] = 0;
    286         }
     400        __builtin_memset( threads, 0, sizeof( threads ) );
     401
     402        // Save monitor states
     403        monitor_save;
    287404
    288405        // Remove any duplicate threads
    289         for( int i = 0; i < count; i++) {
     406        for( __lock_size_t i = 0; i < count; i++) {
    290407                thread_desc * new_owner = next_thread( monitors[i] );
    291                 thread_count = insert_unique( threads, thread_count, new_owner );
     408                insert_unique( threads, thread_count, new_owner );
    292409        }
    293410
     
    295412        BlockInternal( locks, count, threads, thread_count );
    296413
    297 
    298         // WE WOKE UP
    299 
    300 
    301414        // We are back, restore the owners and recursions
    302         lock_all( locks, count );
    303         restore_recursion( monitors, recursions, count );
    304         unlock_all( locks, count );
    305 }
    306 
    307 bool signal( condition * this ) {
     415        monitor_restore;
     416}
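
wait() now takes the condition by reference; it atomically queues the caller, releases every held monitor (saving recursion counts and masks via monitor_save), and blocks until signalled. Consumer-side usage sketch with a hypothetical one-slot buffer:

    monitor buffer {
        condition not_empty;
        int slot;
        bool full;
    };

    int take( buffer & mutex this ) {
        if ( ! this.full ) wait( this.not_empty ); // releases the monitor and blocks
        this.full = false;
        return this.slot;
    }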
     417
     418bool signal( condition & this ) {
    308419        if( is_empty( this ) ) { return false; }
    309420
    310421        //Check that everything is as expected
    311         verify( this->monitors );
    312         verify( this->monitor_count != 0 );
     422        verify( this.monitors );
     423        verify( this.monitor_count != 0 );
    313424
    314425        //Some more checking in debug
    315426        LIB_DEBUG_DO(
    316427                thread_desc * this_thrd = this_thread;
    317                 if ( this->monitor_count != this_thrd->current_monitor_count ) {
    318                         abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count );
    319                 }
    320 
    321                 for(int i = 0; i < this->monitor_count; i++) {
    322                         if ( this->monitors[i] != this_thrd->current_monitors[i] ) {
    323                                 abortf( "Signal on condition %p made with different monitor, expected %p got %i", this, this->monitors[i], this_thrd->current_monitors[i] );
     428                if ( this.monitor_count != this_thrd->monitors.size ) {
     429                        abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", &this, this.monitor_count, this_thrd->monitors.size );
     430                }
     431
     432                for(int i = 0; i < this.monitor_count; i++) {
     433                        if ( this.monitors[i] != this_thrd->monitors.list[i] ) {
     434                                abortf( "Signal on condition %p made with different monitor, expected %p got %p", &this, this.monitors[i], this_thrd->monitors.list[i] );
    324435                        }
    325436                }
    326437        );
    327438
    328         unsigned short count = this->monitor_count;
     439        __lock_size_t count = this.monitor_count;
    329440
    330441        // Lock all monitors
    331         lock_all( this->monitors, NULL, count );
     442        lock_all( this.monitors, NULL, count );
    332443
    333444        //Pop the head of the waiting queue
    334         __condition_node_t * node = pop_head( &this->blocked );
     445        __condition_node_t * node = pop_head( this.blocked );
    335446
    336447        //Add the thread to the proper AS stack
     
    338449                __condition_criterion_t * crit = &node->criteria[i];
    339450                assert( !crit->ready );
    340                 push( &crit->target->signal_stack, crit );
     451                push( crit->target->signal_stack, crit );
    341452        }
    342453
    343454        //Release
    344         unlock_all( this->monitors, count );
     455        unlock_all( this.monitors, count );
    345456
    346457        return true;
    347458}
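
signal() is non-blocking: it pops the head waiter and pushes its criteria onto each monitor's signaller (AS) stack, so the waiter only resumes once the signaller has released the monitors. The producer half of the buffer sketch above:

    void put( buffer & mutex this, int v ) {
        this.slot = v;
        this.full = true;
        signal( this.not_empty );  // waiter runs after put() exits the monitor
    }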
    348459
    349 bool signal_block( condition * this ) {
    350         if( !this->blocked.head ) { return false; }
     460bool signal_block( condition & this ) {
     461        if( !this.blocked.head ) { return false; }
    351462
    352463        //Check that everything is as expected
    353         verifyf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
    354         verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
     464        verifyf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );
     465        verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count );
    355466
    356467        // Create storage for monitor context
    357         monitor_ctx( this->monitors, this->monitor_count );
     468        monitor_ctx( this.monitors, this.monitor_count );
    358469
    359470        // Lock all monitors (aggregates the locks as well)
     
    364475
    365476        //save contexts
    366         save_recursion( monitors, recursions, count );
     477        monitor_save;
    367478
    368479        //Find the thread to run
    369         thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
    370         for(int i = 0; i < count; i++) {
    371                 set_owner( monitors[i], signallee );
    372         }
     480        thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
     481        set_owner( monitors, count, signallee );
     482
     483        LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee );
    373484
    374485        //Everything is ready to go to sleep
     
    379490
    380491
    381         //We are back, restore the owners and recursions
    382         lock_all( locks, count );
    383         restore_recursion( monitors, recursions, count );
    384         unlock_all( locks, count );
     492        LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel :   signal_block returned\n" );
     493
     494        //We are back, restore the masks and recursions
     495        monitor_restore;
    385496
    386497        return true;
     
    388499
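
signal_block() inverts the hand-off: ownership of the monitors passes to the signalled thread immediately and the signaller parks until the waiter has run. Contrast with put() above (same hypothetical buffer):

    void hand_off( buffer & mutex this, int v ) {
        this.slot = v;
        this.full = true;
        signal_block( this.not_empty ); // consumer runs now, inside this call
        // by this point the waiter has already taken the value
    }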
    389500// Access the user_info of the thread waiting at the front of the queue
    390 uintptr_t front( condition * this ) {
     501uintptr_t front( condition & this ) {
    391502        verifyf( !is_empty(this),
    392503                "Attempt to access user data on an empty condition.\n"
    393504                "Possible cause is not checking if the condition is empty before reading stored data."
    394505        );
    395         return this->blocked.head->user_info;
     506        return this.blocked.head->user_info;
    396507}
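
front() exposes the user_info the head waiter passed to wait(), letting a signaller inspect why that thread is waiting before deciding to wake it. Sketch with a hypothetical ticket scheme on the same buffer:

    void take_ticket( buffer & mutex this, int ticket ) {
        wait( this.not_empty, (uintptr_t)ticket );   // ticket stored as user_info
    }

    void wake_ticket( buffer & mutex this, int ticket ) {
        if ( ! is_empty( this.not_empty )
             && front( this.not_empty ) == (uintptr_t)ticket ) {
            signal( this.not_empty );
        }
    }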
    397508
    398509//-----------------------------------------------------------------------------
    399 // Internal scheduling
    400 int __accept_internal( unsigned short acc_count, __acceptable_t * acceptables ) {
    401         thread_desc * thrd = this_thread;
     510// External scheduling
     511// cases to handle :
     512//      - target already there :
     513//              block and wake
     514//      - dtor already there
     515//              put thread on signaller stack
     516//      - non-blocking
     517//              execute else clause and return
     518//      - timeout
     519//              return timeout
     520//      - block
     521//              setup mask
     522//              block
     523void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {
     524        // This statement doesn't have a contiguous list of monitors...
     525        // Create one!
     526        __lock_size_t max = count_max( mask );
     527        monitor_desc * mon_storage[max];
     528        __builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
     529        __lock_size_t actual_count = aggregate( mon_storage, mask );
     530
     531        LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : waitfor %d (s: %d, m: %d)\n", actual_count, mask.size, (__lock_size_t)max);
     532
     533        if(actual_count == 0) return;
     534
     535        LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : waitfor internal proceeding\n");
    402536
    403537        // Create storage for monitor context
    404         monitor_ctx( acceptables->monitors, acceptables->count );
    405 
    406         // Lock all monitors (aggregates the lock them as well)
     538        monitor_ctx( mon_storage, actual_count );
     539
     540        // Lock all monitors (aggregates the locks as well)
    407541        lock_all( monitors, locks, count );
    408542
     543        {
     544                // Check if a matching call is already waiting in the entry queue
     545                thread_desc * next; int index;
     546                [next, index] = search_entry_queue( mask, monitors, count );
     547
     548                if( next ) {
     549                        *mask.accepted = index;
     550                        if( mask.clauses[index].is_dtor ) {
     551                                LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : dtor already there\n");
     552                                verifyf( mask.clauses[index].size == 1, "ERROR: Accepted dtor has more than 1 mutex parameter." );
     553
     554                                monitor_desc * mon2dtor = mask.clauses[index].list[0];
     555                                verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." );
     556
     557                                __condition_criterion_t * dtor_crit = mon2dtor->dtor_node->criteria;
     558                                push( mon2dtor->signal_stack, dtor_crit );
     559
     560                                unlock_all( locks, count );
     561                        }
     562                        else {
     563                                LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : thread present, baton-passing\n");
     564
     565                                // Create the node specific to this wait operation
     566                                wait_ctx_primed( this_thread, 0 );
     567
     568                                // Save monitor states
     569                                monitor_save;
     570
     571                                LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel :  baton of %d monitors : ", count );
     572                                #ifdef __CFA_DEBUG_PRINT__
     573                                        for( int i = 0; i < count; i++) {
     574                                                LIB_DEBUG_PRINT_BUFFER_LOCAL( "%p %p ", monitors[i], monitors[i]->signal_stack.top );
     575                                        }
     576                                #endif
     577                                LIB_DEBUG_PRINT_BUFFER_LOCAL( "\n");
     578
     579                                // Set the owners to be the next thread
     580                                set_owner( monitors, count, next );
     581
     582                                // Everything is ready to go to sleep
     583                                BlockInternal( locks, count, &next, 1 );
     584
     585                                // We are back, restore the owners and recursions
     586                                monitor_restore;
     587
     588                                LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : thread present, returned\n");
     589                        }
     590
     591                        LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : accepted %d\n", *mask.accepted);
     592
     593                        return;
     594                }
     595        }
     596
     597
     598        if( duration == 0 ) {
     599                LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : non-blocking, exiting\n");
     600
     601                unlock_all( locks, count );
     602
     603                LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : accepted %d\n", *mask.accepted);
     604                return;
     605        }
     606
     607
     608        verifyf( duration < 0, "Timeout on waitfor statements not supported yet.");
     609
     610        LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : blocking waitfor\n");
     611
    409612        // Create the node specific to this wait operation
    410         wait_ctx_primed( thrd, 0 );
    411 
    412         // Check if the entry queue
    413         thread_desc * next = search_entry_queue( acceptables, acc_count, monitors, count );
    414 
    415         LIB_DEBUG_PRINT_SAFE("Owner(s) :");
    416         for(int i = 0; i < count; i++) {
    417                 LIB_DEBUG_PRINT_SAFE(" %p", monitors[i]->owner );
    418         }
    419         LIB_DEBUG_PRINT_SAFE("\n");
    420 
    421         LIB_DEBUG_PRINT_SAFE("Passing mon to %p\n", next);
    422 
    423         if( !next ) {
    424                 // Update acceptables on the current monitors
    425                 for(int i = 0; i < count; i++) {
    426                         monitors[i]->acceptables = acceptables;
    427                         monitors[i]->acceptable_count = acc_count;
    428                 }
    429         }
    430         else {
    431                 for(int i = 0; i < count; i++) {
    432                         set_owner( monitors[i], next );
    433                 }
    434         }
    435 
    436 
    437         save_recursion( monitors, recursions, count );
    438 
    439 
    440         // Everything is ready to go to sleep
    441         BlockInternal( locks, count, &next, next ? 1 : 0 );
    442 
    443 
    444         //WE WOKE UP
    445 
    446 
    447         //We are back, restore the owners and recursions
    448         lock_all( locks, count );
    449         restore_recursion( monitors, recursions, count );
    450         int acc_idx = monitors[0]->accepted_index;
    451         unlock_all( locks, count );
    452 
    453         return acc_idx;
     613        wait_ctx_primed( this_thread, 0 );
     614
     615        monitor_save;
     616        set_mask( monitors, count, mask );
     617
     618        for( __lock_size_t i = 0; i < count; i++) {
     619                verify( monitors[i]->owner == this_thread );
     620        }
     621
     622        //Everything is ready to go to sleep
     623        BlockInternal( locks, count );
     624
     625
     626        // WE WOKE UP
     627
     628
     629        //We are back, restore the masks and recursions
     630        monitor_restore;
     631
     632        LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : exiting\n");
     633
     634        LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : accepted %d\n", *mask.accepted);
    454635}
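
__waitfor_internal is the runtime half of the waitfor statement: duration 0 corresponds to a non-blocking form with an else clause, a negative duration blocks unconditionally, and positive timeouts are rejected by the verifyf above. A hedged sketch of the language-level form this serves (syntax of the period):

    monitor M { int v; };
    void ping( M & mutex this ) { this.v += 1; }

    void run( M & mutex this ) {
        // Block until some thread calls ping( this ); if a matching call
        // is already queued, it is baton-passed without blocking.
        waitfor( ping, this );
    }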
    455636
     
    458639
    459640static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
     641        // LIB_DEBUG_PRINT_SAFE("Kernel :   Setting owner of %p to %p (was %p)\n", this, owner, this->owner );
     642
    460643        //Pass the monitor appropriately
    461644        this->owner = owner;
     
    465648}
    466649
     650static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
     651        monitors[0]->owner     = owner;
     652        monitors[0]->recursion = 1;
     653        for( __lock_size_t i = 1; i < count; i++ ) {
     654                monitors[i]->owner     = owner;
     655                monitors[i]->recursion = 0;
     656        }
     657}
     658
     659static inline void set_mask( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
     660        for( __lock_size_t i = 0; i < count; i++) {
     661                storage[i]->mask = mask;
     662        }
     663}
     664
     665static inline void reset_mask( monitor_desc * this ) {
     666        this->mask.accepted = NULL;
     667        this->mask.clauses = NULL;
     668        this->mask.size = 0;
     669}
     670
    467671static inline thread_desc * next_thread( monitor_desc * this ) {
    468672        //Check the signaller stack
    469         __condition_criterion_t * urgent = pop( &this->signal_stack );
     673        LIB_DEBUG_PRINT_SAFE("Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
     674        __condition_criterion_t * urgent = pop( this->signal_stack );
    470675        if( urgent ) {
    471676                //The signaller stack is not empty,
     
    479684        // No signaller thread
    480685        // Get the next thread in the entry_queue
    481         thread_desc * new_owner = pop_head( &this->entry_queue );
     686        thread_desc * new_owner = pop_head( this->entry_queue );
    482687        set_owner( this, new_owner );
    483688
     
    485690}
    486691
    487 static inline int is_accepted( thread_desc * owner, monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() ) {
    488         __acceptable_t* accs = this->acceptables; // Optim
    489         int acc_cnt = this->acceptable_count;
     692static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
     693        __acceptable_t * it = this->mask.clauses; // Optim
     694        __lock_size_t count = this->mask.size;
    490695
    491696        // Check if there are any acceptable functions
    492         if( !accs ) return -1;
     697        if( !it ) return false;
    493698
    494699        // If this isn't the first monitor to test this, there is no reason to repeat the test.
    495         if( this != group[0] ) return group[0]->accepted_index;
     700        if( this != group[0] ) return group[0]->mask.accepted >= 0;
    496701
    497702        // For all acceptable functions check if this is the current function.
    498         OUT_LOOP:
    499         for( int i = 0; i < acc_cnt; i++ ) {
    500                 __acceptable_t * acc = &accs[i];
    501 
    502                 // if function matches, check the monitors
    503                 if( acc->func == func ) {
    504 
    505                         // If the group count is different then it can't be a match
    506                         if( acc->count != group_cnt ) return -1;
    507 
    508                         // Check that all the monitors match
    509                         for( int j = 0; j < group_cnt; j++ ) {
    510                                 // If not a match, check next function
    511                                 if( acc->monitors[j] != group[j] ) continue OUT_LOOP;
    512                         }
    513 
    514                         // It's a complete match, accept the call
    515                         return i;
     703        for( __lock_size_t i = 0; i < count; i++, it++ ) {
     704                if( *it == group ) {
     705                        *this->mask.accepted = i;
     706                        return true;
    516707                }
    517708        }
    518709
    519710        // No function matched
    520         return -1;
    521 }
    522 
    523 static inline void init( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
    524         for(int i = 0; i < count; i++) {
     711        return false;
     712}
     713
     714static inline void init( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
     715        for( __lock_size_t i = 0; i < count; i++) {
    525716                (criteria[i]){ monitors[i], waiter };
    526717        }
    527718
    528         waiter->criteria = criteria;
    529 }
    530 
    531 static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
    532         for(int i = 0; i < count; i++) {
     719        waiter.criteria = criteria;
     720}
     721
     722static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
     723        for( __lock_size_t i = 0; i < count; i++) {
    533724                (criteria[i]){ monitors[i], waiter };
    534                 push( &criteria[i].target->signal_stack, &criteria[i] );
    535         }
    536 
    537         waiter->criteria = criteria;
    538 }
    539 
    540 static inline void lock_all( spinlock ** locks, unsigned short count ) {
    541         for( int i = 0; i < count; i++ ) {
     725                LIB_DEBUG_PRINT_SAFE( "Kernel :  target %p = %p\n", criteria[i].target, &criteria[i] );
     726                push( criteria[i].target->signal_stack, &criteria[i] );
     727        }
     728
     729        waiter.criteria = criteria;
     730}
     731
     732static inline void lock_all( spinlock * locks [], __lock_size_t count ) {
     733        for( __lock_size_t i = 0; i < count; i++ ) {
    542734                lock_yield( locks[i] DEBUG_CTX2 );
    543735        }
    544736}
    545737
    546 static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count ) {
    547         for( int i = 0; i < count; i++ ) {
     738static inline void lock_all( monitor_desc * source [], spinlock * /*out*/ locks [], __lock_size_t count ) {
     739        for( __lock_size_t i = 0; i < count; i++ ) {
    548740                spinlock * l = &source[i]->lock;
    549741                lock_yield( l DEBUG_CTX2 );
     
    552744}
    553745
    554 static inline void unlock_all( spinlock ** locks, unsigned short count ) {
    555         for( int i = 0; i < count; i++ ) {
     746static inline void unlock_all( spinlock * locks [], __lock_size_t count ) {
     747        for( __lock_size_t i = 0; i < count; i++ ) {
    556748                unlock( locks[i] );
    557749        }
    558750}
    559751
    560 static inline void unlock_all( monitor_desc ** locks, unsigned short count ) {
    561         for( int i = 0; i < count; i++ ) {
     752static inline void unlock_all( monitor_desc * locks [], __lock_size_t count ) {
     753        for( __lock_size_t i = 0; i < count; i++ ) {
    562754                unlock( &locks[i]->lock );
    563755        }
    564756}
    565757
    566 
    567 static inline void save_recursion   ( monitor_desc ** ctx, unsigned int * /*out*/ recursions, unsigned short count ) {
    568         for( int i = 0; i < count; i++ ) {
     758static inline void save(
     759        monitor_desc * ctx [],
     760        __lock_size_t count,
     761        __attribute((unused)) spinlock * locks [],
     762        unsigned int /*out*/ recursions [],
     763        __waitfor_mask_t /*out*/ masks []
     764) {
     765        for( __lock_size_t i = 0; i < count; i++ ) {
    569766                recursions[i] = ctx[i]->recursion;
    570         }
    571 }
    572 
    573 static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count ) {
    574         for( int i = 0; i < count; i++ ) {
     767                masks[i]      = ctx[i]->mask;
     768        }
     769}
     770
     771static inline void restore(
     772        monitor_desc * ctx [],
     773        __lock_size_t count,
     774        spinlock * locks [],
     775        unsigned int /*out*/ recursions [],
     776        __waitfor_mask_t /*out*/ masks []
     777) {
     778        lock_all( locks, count );
     779        for( __lock_size_t i = 0; i < count; i++ ) {
    575780                ctx[i]->recursion = recursions[i];
    576         }
     781                ctx[i]->mask      = masks[i];
     782        }
     783        unlock_all( locks, count );
    577784}
    578785
     
    599806        }
    600807
    601         // LIB_DEBUG_PRINT_SAFE( "Runing %i\n", ready2run );
     808        LIB_DEBUG_PRINT_SAFE( "Kernel :  Running %i (%p)\n", ready2run, ready2run ? node->waiting_thread : NULL );
    602809        return ready2run ? node->waiting_thread : NULL;
    603810}
    604811
    605 static inline void brand_condition( condition * this ) {
     812static inline void brand_condition( condition & this ) {
    606813        thread_desc * thrd = this_thread;
    607         if( !this->monitors ) {
     814        if( !this.monitors ) {
    608815                // LIB_DEBUG_PRINT_SAFE("Branding\n");
    609                 assertf( thrd->current_monitors != NULL, "No current monitor to brand condition %p", thrd->current_monitors );
    610                 this->monitor_count = thrd->current_monitor_count;
    611 
    612                 this->monitors = malloc( this->monitor_count * sizeof( *this->monitors ) );
    613                 for( int i = 0; i < this->monitor_count; i++ ) {
    614                         this->monitors[i] = thrd->current_monitors[i];
    615                 }
    616         }
    617 }
    618 
    619 static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val ) {
    620         if( !val ) return end;
    621 
    622         for(int i = 0; i <= end; i++) {
    623                 if( thrds[i] == val ) return end;
    624         }
    625 
    626         thrds[end] = val;
    627         return end + 1;
    628 }
    629 
    630 
    631 static inline bool match( __acceptable_t * acc, thread_desc * thrd ) {
    632         verify( thrd );
    633         verify( acc );
    634         if( acc->func != thrd->current_monitor_func ) return false;
    635 
    636         return true;
    637 }
    638 
    639 static inline thread_desc * search_entry_queue( __acceptable_t * acceptables, int acc_count, monitor_desc ** monitors, int count ) {
    640 
    641         __thread_queue_t * entry_queue = &monitors[0]->entry_queue;
     816                assertf( thrd->monitors.list != NULL, "No current monitor to brand condition %p", thrd->monitors.list );
     817                this.monitor_count = thrd->monitors.size;
     818
     819                this.monitors = malloc( this.monitor_count * sizeof( *this.monitors ) );
     820                for( int i = 0; i < this.monitor_count; i++ ) {
     821                        this.monitors[i] = thrd->monitors.list[i];
     822                }
     823        }
     824}
     825
     826static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc * monitors [], __lock_size_t count ) {
     827
     828        __thread_queue_t & entry_queue = monitors[0]->entry_queue;
    642829
    643830        // For each thread in the entry-queue
    644         for(    thread_desc ** thrd_it = &entry_queue->head;
     831        for(    thread_desc ** thrd_it = &entry_queue.head;
    645832                *thrd_it;
    646                 thrd_it = &(*thrd_it)->next)
    647         {
     833                thrd_it = &(*thrd_it)->next
     834        ) {
    648835                // For each acceptable check if it matches
    649                 __acceptable_t * acc_end = acceptables + acc_count;
    650                 for( __acceptable_t * acc_it = acceptables; acc_it != acc_end; acc_it++ ) {
     836                int i = 0;
     837                __acceptable_t * end = mask.clauses + mask.size;
     838                for( __acceptable_t * it = mask.clauses; it != end; it++, i++ ) {
    651839                        // Check if we have a match
    652                         if( match( acc_it, *thrd_it ) ) {
     840                        if( *it == (*thrd_it)->monitors ) {
    653841
    654842                                // If we have a match, return it
    655843                                // after removing it from the entry queue
    656                                 return remove( entry_queue, thrd_it );
     844                                return [remove( entry_queue, thrd_it ), i];
    657845                        }
    658846                }
    659847        }
    660848
    661         return NULL;
    662 }
     849        return [0, -1];
     850}
     851
     852forall(dtype T | sized( T ))
     853static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val ) {
     854        if( !val ) return size;
     855
     856        for( __lock_size_t i = 0; i <= size; i++) {
     857                if( array[i] == val ) return size;
     858        }
     859
     860        array[size] = val;
     861        size = size + 1;
     862        return size;
     863}
     864
     865static inline __lock_size_t count_max( const __waitfor_mask_t & mask ) {
     866        __lock_size_t max = 0;
     867        for( __lock_size_t i = 0; i < mask.size; i++ ) {
     868                max += mask.clauses[i].size;
     869        }
     870        return max;
     871}
     872
     873static inline __lock_size_t aggregate( monitor_desc * storage [], const __waitfor_mask_t & mask ) {
     874        __lock_size_t size = 0;
     875        for( __lock_size_t i = 0; i < mask.size; i++ ) {
     876                __libcfa_small_sort( mask.clauses[i].list, mask.clauses[i].size );
     877                for( __lock_size_t j = 0; j < mask.clauses[i].size; j++) {
     878                        insert_unique( storage, size, mask.clauses[i].list[j] );
     879                }
     880        }
     881        // TODO insertion sort instead of this
     882        __libcfa_small_sort( storage, size );
     883        return size;
     884}
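
count_max() over-approximates the union of all clause monitors (duplicates across clauses are not yet known) and sizes the flat storage; aggregate() then sorts each clause, deduplicates into that storage, and sorts the result. Worked example with hypothetical monitors A, B, C:

    // clauses   : { A, B } and { B, C }
    // count_max : 2 + 2 = 4            (worst-case storage size)
    // aggregate : storage = [ A, B, C ], returns 3
    //             (sorted by address, duplicate B removed)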
     885
    663886void ?{}( __condition_blocked_queue_t & this ) {
    664887        this.head = NULL;
     
    666889}
    667890
    668 void append( __condition_blocked_queue_t * this, __condition_node_t * c ) {
    669         verify(this->tail != NULL);
    670         *this->tail = c;
    671         this->tail = &c->next;
    672 }
    673 
    674 __condition_node_t * pop_head( __condition_blocked_queue_t * this ) {
    675         __condition_node_t * head = this->head;
     891void append( __condition_blocked_queue_t & this, __condition_node_t * c ) {
     892        verify(this.tail != NULL);
     893        *this.tail = c;
     894        this.tail = &c->next;
     895}
     896
     897__condition_node_t * pop_head( __condition_blocked_queue_t & this ) {
     898        __condition_node_t * head = this.head;
    676899        if( head ) {
    677                 this->head = head->next;
     900                this.head = head->next;
    678901                if( !head->next ) {
    679                         this->tail = &this->head;
     902                        this.tail = &this.head;
    680903                }
    681904                head->next = NULL;
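
The blocked queue uses the tail-of-next-pointer idiom: append never branches on emptiness, and pop_head only resets tail when removing the last node. The idiom in isolation (self-contained sketch, not the library types):

    struct node  { struct node * next; };
    struct queue { struct node * head; struct node ** tail; };

    static void init( struct queue * q ) { q->head = 0; q->tail = &q->head; }

    static void push_back( struct queue * q, struct node * n ) {
        n->next  = 0;
        *q->tail = n;          // same store for empty and non-empty queues
        q->tail  = &n->next;
    }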
  • src/libcfa/concurrency/preemption.c

    r78315272 r3f7e12cb  
    328328                siginfo_t info;
    329329                int sig = sigwaitinfo( &mask, &info );
     330
     331                if( sig < 0 ) {
     332                        //Error!
     333                        int err = errno;
     334                        switch( err ) {
     335                                case EAGAIN :
     336                                case EINTR :
     337                                        continue;
     338                                case EINVAL :
     339                                        abortf("Timeout was invalid.");
     340                                default:
     341                                        abortf("Unhandled error %d", err);
     342                        }
     343                }
    330344
    331345                // If another signal arrived, something went wrong
     
    366380
    367381        if ( sigaction( sig, &act, NULL ) == -1 ) {
    368                 LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO,
     382                LIB_DEBUG_PRINT_BUFFER_DECL(
    369383                        " __kernel_sigaction( sig:%d, handler:%p, flags:%d ), problem installing signal handler, error(%d) %s.\n",
    370384                        sig, handler, flags, errno, strerror( errno )
     
    383397
    384398        if ( sigaction( sig, &act, NULL ) == -1 ) {
    385                 LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO,
     399                LIB_DEBUG_PRINT_BUFFER_DECL(
    386400                        " __kernel_sigdefault( sig:%d ), problem resetting signal handler, error(%d) %s.\n",
    387401                        sig, errno, strerror( errno )
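
The added guard around sigwaitinfo() separates transient failures (EAGAIN, EINTR) from genuine errors instead of letting an interrupted wait fall through as a stray signal. The same retry pattern in isolation:

    #include <errno.h>
    #include <signal.h>

    // Retry sigwaitinfo on transient errors; real errors bubble up.
    static int wait_for_signal( const sigset_t * mask, siginfo_t * info ) {
        for ( ;; ) {
            int sig = sigwaitinfo( mask, info );
            if ( sig >= 0 ) return sig;                         // got a signal
            if ( errno == EAGAIN || errno == EINTR ) continue;  // spurious
            return -1;                                          // e.g. EINVAL
        }
    }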
  • src/libcfa/concurrency/thread

    r78315272 r3f7e12cb  
    3636forall( dtype T | is_thread(T) )
    3737static inline coroutine_desc* get_coroutine(T & this) {
    38         return &get_thread(this)->cor;
     38        return &get_thread(this)->self_cor;
    3939}
    4040
    4141forall( dtype T | is_thread(T) )
    4242static inline monitor_desc* get_monitor(T & this) {
    43         return &get_thread(this)->mon;
     43        return &get_thread(this)->self_mon;
    4444}
    4545
    4646static inline coroutine_desc* get_coroutine(thread_desc * this) {
    47         return &this->cor;
     47        return &this->self_cor;
    4848}
    4949
    5050static inline monitor_desc* get_monitor(thread_desc * this) {
    51         return &this->mon;
     51        return &this->self_mon;
    5252}
    5353
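
The accessors now reach the renamed self_cor/self_mon members while the trait-based interface stays unchanged, so user thread types are unaffected. Illustrative sketch:

    thread worker { int id; };

    void main( worker & this ) {
        // get_coroutine( this ) and get_monitor( this ) resolve through
        // is_thread(worker) to the renamed self_cor / self_mon fields.
    }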
  • src/libcfa/concurrency/thread.c

    r78315272 r3f7e12cb  
    3333
    3434void ?{}(thread_desc& this) {
    35         (this.cor){};
    36         this.cor.name = "Anonymous Coroutine";
    37         this.mon.owner = &this;
    38         this.mon.recursion = 1;
     35        (this.self_cor){};
     36        this.self_cor.name = "Anonymous Coroutine";
     37        this.self_mon.owner = &this;
     38        this.self_mon.recursion = 1;
     39        this.self_mon_p = &this.self_mon;
    3940        this.next = NULL;
    4041
    41         this.current_monitors      = &this.mon;
    42         this.current_monitor_count = 1;
     42        (this.monitors){ &this.self_mon_p, 1, (fptr_t)0 };
    4343}
    4444
    4545void ^?{}(thread_desc& this) {
    46         ^(this.cor){};
     46        ^(this.self_cor){};
    4747}
    4848