Changes in / [1d29d46:727cf70f]


Files: 1 added, 7 edited

Legend:

    ' '  Unmodified
    '+'  Added
    '-'  Removed
  • doc/proposals/concurrency/thePlan.md

    r1d29d46 r727cf70f  
     done - Multi monitors calls,
     done - Monitors as a language feature (not calling enter/leave by hand)
    +
    +_Phase 3_ : Monitor features
     Internal scheduling
    +External scheduling

    -_Phase 3_ : Kernel features
    +_Phase 4_ : Kernel features
     Preemption
     Detach thread
     Cluster migration
    -
    -_Phase 4_ : Monitor features
    -External scheduling

     _Phase 5_ : Performance
  • src/libcfa/concurrency/invoke.h

    r1d29d46 r727cf70f  
           };

    -      struct simple_thread_list {
    +      struct __thread_queue_t {
                 struct thread_desc * head;
                 struct thread_desc ** tail;
           };

    +      struct __thread_stack_t {
    +            struct thread_desc * top;
    +      };
    +
           #ifdef __CFORALL__
           extern "Cforall" {
    -            void ?{}( struct simple_thread_list * );
    -            void append( struct simple_thread_list *, struct thread_desc * );
    -            struct thread_desc * pop_head( struct simple_thread_list * );
    +            void ?{}( struct __thread_queue_t * );
    +            void append( struct __thread_queue_t *, struct thread_desc * );
    +            struct thread_desc * pop_head( struct __thread_queue_t * );
    +
    +            void ?{}( struct __thread_stack_t * );
    +            void push( struct __thread_stack_t *, struct thread_desc * );
    +            struct thread_desc * pop( struct __thread_stack_t * );

                 void ?{}(spinlock * this);
    ...

           struct coStack_t {
    -            unsigned int size;                  // size of stack
    -            void *storage;                      // pointer to stack
    -            void *limit;                        // stack grows towards stack limit
    -            void *base;                         // base of stack
    -            void *context;                      // address of cfa_context_t
    -            void *top;                          // address of top of storage
    -            bool userStack;                     // whether or not the user allocated the stack
    +            unsigned int size;                        // size of stack
    +            void *storage;                            // pointer to stack
    +            void *limit;                              // stack grows towards stack limit
    +            void *base;                               // base of stack
    +            void *context;                            // address of cfa_context_t
    +            void *top;                                // address of top of storage
    +            bool userStack;                           // whether or not the user allocated the stack
           };

    ...

           struct coroutine_desc {
    -            struct coStack_t stack;             // stack information of the coroutine
    -            const char *name;                   // textual name for coroutine/task, initialized by uC++ generated code
    -            int errno_;                         // copy of global UNIX variable errno
    -            enum coroutine_state state;         // current execution status for coroutine
    -            struct coroutine_desc *starter;     // first coroutine to resume this one
    -            struct coroutine_desc *last;        // last coroutine to resume this one
    +            struct coStack_t stack;                   // stack information of the coroutine
    +            const char *name;                         // textual name for coroutine/task, initialized by uC++ generated code
    +            int errno_;                               // copy of global UNIX variable errno
    +            enum coroutine_state state;               // current execution status for coroutine
    +            struct coroutine_desc *starter;           // first coroutine to resume this one
    +            struct coroutine_desc *last;              // last coroutine to resume this one
           };

           struct monitor_desc {
    -            struct spinlock lock;
    -            struct thread_desc * owner;
    -            struct simple_thread_list entry_queue;
    -            unsigned int recursion;
    +            struct spinlock lock;                     // spinlock to protect internal data
    +            struct thread_desc * owner;               // current owner of the monitor
    +            struct __thread_queue_t entry_queue;      // queue of threads that are blocked waiting for the monitor
    +            struct __thread_stack_t signal_stack;     // stack of threads to run next once we exit the monitor
    +            struct monitor_desc * stack_owner;        // if bulk acquiring was used we need to synchronize signals with an other monitor
    +            unsigned int recursion;                   // monitor routines can be called recursively, we need to keep track of that
           };

           struct thread_desc {
    -            struct coroutine_desc cor;          // coroutine body used to store context
    -            struct monitor_desc mon;            // monitor body used for mutual exclusion
    -            struct thread_desc * next;          // instrusive link field for threads
    +            struct coroutine_desc cor;                // coroutine body used to store context
    +            struct monitor_desc mon;                  // monitor body used for mutual exclusion
    +            struct thread_desc * next;                // instrusive link field for threads
    +            struct monitor_desc ** current_monitors;  // currently held monitors
    +            unsigned short current_monitor_count;     // number of currently held monitors
           };

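
    The two new thread_desc fields give every thread a record of the monitor set it
    currently holds: monitor_guard_t saves and restores them (see monitor.c below) and
    condition wait() reads them to know which monitors to release. A hypothetical sketch of
    reading that record (held_monitor_count is not part of the changeset, and this_thread()
    is assumed to be visible from the kernel headers):

        #include "invoke.h"

        // Hypothetical debugging aid: how many monitors does the running thread hold?
        // The fields are maintained by the monitor_guard_t constructor/destructor.
        static inline unsigned short held_monitor_count( void ) {
                struct thread_desc * thrd = this_thread();
                return thrd->current_monitors ? thrd->current_monitor_count : 0;
        }
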
  • src/libcfa/concurrency/kernel

    r1d29d46 r727cf70f  

    struct signal_once {
    -        volatile bool condition;
    +        volatile bool cond;
             struct spinlock lock;
    -        struct simple_thread_list blocked;
    +        struct __thread_queue_t blocked;
    };

    ...
    // Cluster
    struct cluster {
    -        simple_thread_list ready_queue;
    +        __thread_queue_t ready_queue;
             spinlock lock;
    };
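
    With the rename, signal_once reads as a one-shot latch: wait() parks the caller on
    blocked only while cond is still false, and once the flag has been posted (by the
    signalling routine, which is outside this hunk) every later wait() returns immediately.
    A minimal usage sketch; start_gate and worker_main are hypothetical:

        struct signal_once start_gate;        // shared latch, default-constructed with cond == false

        void worker_main( void ) {
                wait( &start_gate );          // blocks until the latch is posted; a no-op afterwards
                // ... safe to proceed: the one-time event has happened ...
        }
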
  • src/libcfa/concurrency/kernel.c

    r1d29d46 r727cf70f  
    // Scheduler routines
    void ScheduleThread( thread_desc * thrd ) {
    +        if( !thrd ) return;
    +
             assertf( thrd->next == NULL, "Expected null got %p", thrd->next );

    ...

    void ?{}( signal_once * this ) {
    -        this->condition = false;
    +        this->cond = false;
    }
    void ^?{}( signal_once * this ) {
    ...
    void wait( signal_once * this ) {
             lock( &this->lock );
    -        if( !this->condition ) {
    +        if( !this->cond ) {
                     append( &this->blocked, this_thread() );
                     ScheduleInternal( &this->lock );
    ...
             lock( &this->lock );
             {
    -                this->condition = true;
    +                this->cond = true;

                     thread_desc * it;
    ...
    //-----------------------------------------------------------------------------
    // Queues
    -void ?{}( simple_thread_list * this ) {
    +void ?{}( __thread_queue_t * this ) {
             this->head = NULL;
             this->tail = &this->head;
    }

    -void append( simple_thread_list * this, thread_desc * t ) {
    +void append( __thread_queue_t * this, thread_desc * t ) {
             assert(this->tail != NULL);
             *this->tail = t;
    ...
    }

    -thread_desc * pop_head( simple_thread_list * this ) {
    +thread_desc * pop_head( __thread_queue_t * this ) {
             thread_desc * head = this->head;
             if( head ) {
    ...
             return head;
    }
    +
    +void ?{}( __thread_stack_t * this ) {
    +        this->top = NULL;
    +}
    +
    +void push( __thread_stack_t * this, thread_desc * t ) {
    +        assert(t->next != NULL);
    +        t->next = this->top;
    +        this->top = t;
    +}
    +
    +thread_desc * pop( __thread_stack_t * this ) {
    +        thread_desc * top = this->top;
    +        if( top ) {
    +                this->top = top->next;
    +                top->next = NULL;
    +        }
    +        return top;
    +}
    // Local Variables: //
    // mode: c //
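
    The early return added to ScheduleThread lets callers hand it the result of a pop
    without testing it first, which is exactly how the monitor leave path below uses it.
    The loop below is a hypothetical helper showing the FIFO drain pattern these queue
    routines support (pop_head() returning NULL ends the loop):

        // Hypothetical helper: wake every thread currently parked on a queue, in
        // arrival order, and hand each one back to the scheduler.
        static void wake_all( __thread_queue_t * q ) {
                for( thread_desc * t = pop_head( q ); t; t = pop_head( q ) ) {
                        ScheduleThread( t );
                }
        }
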
  • src/libcfa/concurrency/monitor

    r1d29d46 r727cf70f  
    #define MONITOR_H

    +#include <stddef.h>
    +
    #include "assert"
    #include "invoke.h"
    ...

    static inline void ?{}(monitor_desc * this) {
    -        this->owner = 0;
    +        this->owner = NULL;
    +        this->stack_owner = NULL;
             this->recursion = 0;
    }
    -
    -//Array entering routine
    -void enter(monitor_desc **, int count);
    -void leave(monitor_desc **, int count);

    struct monitor_guard_t {
             monitor_desc ** m;
             int count;
    +        monitor_desc ** prev_mntrs;
    +        unsigned short  prev_count;
    };

    ...
    }

    -static inline void ?{}( monitor_guard_t * this, monitor_desc ** m, int count ) {
    -        this->m = m;
    -        this->count = count;
    -        qsort(this->m, count);
    -        enter( this->m, this->count );
    +void ?{}( monitor_guard_t * this, monitor_desc ** m, int count );
    +void ^?{}( monitor_guard_t * this );
    +
    +//-----------------------------------------------------------------------------
    +// Internal scheduling
    +struct condition {
    +        __thread_queue_t blocked;
    +        monitor_desc ** monitors;
    +        unsigned short monitor_count;
    +};
    +
    +static inline void ?{}( condition * this ) {
    +        this->monitors = NULL;
    +        this->monitor_count = 0;
    }

    -static inline void ^?{}( monitor_guard_t * this ) {
    -        leave( this->m, this->count );
    -}
    -
    -
    +void wait( condition * this );
    +void signal( condition * this );
    #endif //MONITOR_H
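
    Taken together, monitor_guard_t and condition give the usual monitor pattern: a guard
    declared at the top of a routine acquires the monitor set for the rest of the scope,
    wait() atomically releases those monitors and blocks, and signal() marks a waiter to run
    once the signaller leaves. A minimal sketch of that pattern; the gate monitor, tickets
    counter and take/put routines are hypothetical, and the monitor array is shared so that
    waiters and signallers agree on the same monitor set:

        #include "monitor"

        static monitor_desc gate_mon;
        static monitor_desc * gate_mons[] = { &gate_mon };    // shared by all routines below
        static int tickets = 0;
        static condition ready;                               // threads park here while tickets == 0

        void take( void ) {
                monitor_guard_t g = { gate_mons, 1 };         // enter the monitor for this scope
                while( tickets == 0 ) {
                        wait( &ready );                       // release the monitor and block
                }
                tickets -= 1;
        }                                                     // guard destructor leaves the monitor

        void put( void ) {
                monitor_guard_t g = { gate_mons, 1 };
                tickets += 1;
                signal( &ready );                             // the waiter runs once we leave
        }
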
  • src/libcfa/concurrency/monitor.c

    r1d29d46 r727cf70f  

    #include "kernel_private.h"
    +#include "libhdr.h"
    +
    +void set_owner( monitor_desc * this, thread_desc * owner ) {
    +        //Pass the monitor appropriately
    +        this->owner = owner;
    +
    +        //We are passing the monitor to someone else, which means recursion level is not 0
    +        this->recursion = owner ? 1 : 0;
    +}

    extern "C" {
    -        void __enter_monitor_desc(monitor_desc * this) {
    +        void __enter_monitor_desc(monitor_desc * this, monitor_desc * leader) {
                     lock( &this->lock );
                     thread_desc * thrd = this_thread();
    +
    +                //Update the stack owner
    +                this->stack_owner = leader;

                     if( !this->owner ) {
    ...

                     unlock( &this->lock );
    -        }
    -
    -        void __leave_monitor_desc(monitor_desc * this) {
    +                return;
    +        }
    +
    +        // leave pseudo code :
    +        //      decrement level
    +        //      leve == 0 ?
    +        //              no : done
    +        //              yes :
    +        //                      signal stack empty ?
    +        //                              has leader :
    +        //                                      bulk acquiring means we don't own the signal stack
    +        //                                      ignore it but don't release the monitor
    +        //                              yes :
    +        //                                      next in entry queue is new owner
    +        //                              no :
    +        //                                      top of the signal stack is the owner
    +        //                                      context switch to him right away
    +        //
    +        void __leave_monitor_desc(monitor_desc * this, monitor_desc * leader) {
                     lock( &this->lock );

                     thread_desc * thrd = this_thread();
    -                assert( thrd == this->owner );
    +                assert( thrd == this->owner || this->stack_owner );

                     //Leaving a recursion level, decrement the counter
                     this->recursion -= 1;

    -                //If we left the last level of recursion it means we are changing who owns the monitor
    +                //If we haven't left the last level of recursion
    +                //it means we don't need to do anything
    +                if( this->recursion != 0) {
    +                        this->stack_owner = leader;
    +                        unlock( &this->lock );
    +                        return;
    +                }
    +
    +                //If we don't own the signal stack then just leave it to the owner
    +                if( this->stack_owner ) {
    +                        this->stack_owner = leader;
    +                        unlock( &this->lock );
    +                        return;
    +                }
    +
    +                //We are the stack owner and have left the last recursion level.
    +                //We are in charge of passing the monitor
                     thread_desc * new_owner = 0;
    -                if( this->recursion == 0) {
    -                        //Get the next thread in the list
    -                        new_owner = this->owner = pop_head( &this->entry_queue );
    -
    -                        //We are passing the monitor to someone else, which means recursion level is not 0
    -                        this->recursion = new_owner ? 1 : 0;
    -                }
    -
    +
    +                //Check the signaller stack
    +                new_owner = pop( &this->signal_stack );
    +                if( new_owner ) {
    +                        //The signaller stack is not empty,
    +                        //transfer control immediately
    +                        set_owner( this, new_owner );
    +                        this->stack_owner = leader;
    +                        ScheduleInternal( &this->lock, new_owner );
    +                        return;
    +                }
    +
    +                // No signaller thread
    +                // Get the next thread in the entry_queue
    +                new_owner = pop_head( &this->entry_queue );
    +                set_owner( this, new_owner );
    +
    +                //Update the stack owner
    +                this->stack_owner = leader;
    +
    +                //We can now let other threads in safely
                     unlock( &this->lock );

    -                //If we have a new owner, we need to wake-up the thread
    -                if( new_owner ) {
    -                        ScheduleThread( new_owner );
    -                }
    -        }
    -}
    -
    -void enter(monitor_desc ** monitors, int count) {
    -        for(int i = 0; i < count; i++) {
    -                __enter_monitor_desc( monitors[i] );
    -        }
    -}
    -
    -void leave(monitor_desc ** monitors, int count) {
    +                //We need to wake-up the thread
    +                ScheduleThread( new_owner );
    +        }
    +}
    +
    +static inline void enter(monitor_desc ** monitors, int count) {
    +        __enter_monitor_desc( monitors[0], NULL );
    +        for(int i = 1; i < count; i++) {
    +                __enter_monitor_desc( monitors[i], monitors[0] );
    +        }
    +}
    +
    +static inline void leave(monitor_desc ** monitors, int count) {
    +        __leave_monitor_desc( monitors[0], NULL );
             for(int i = count - 1; i >= 0; i--) {
    -                __leave_monitor_desc( monitors[i] );
    -        }
    -}
    +                __leave_monitor_desc( monitors[i], monitors[0] );
    +        }
    +}
    +
    +void ?{}( monitor_guard_t * this, monitor_desc ** m, int count ) {
    +        this->m = m;
    +        this->count = count;
    +        qsort(this->m, count);
    +        enter( this->m, this->count );
    +
    +        this->prev_mntrs = this_thread()->current_monitors;
    +        this->prev_count = this_thread()->current_monitor_count;
    +
    +        this_thread()->current_monitors      = m;
    +        this_thread()->current_monitor_count = count;
    +}
    +
    +void ^?{}( monitor_guard_t * this ) {
    +        leave( this->m, this->count );
    +
    +        this_thread()->current_monitors      = this->prev_mntrs;
    +        this_thread()->current_monitor_count = this->prev_count;
    +}
    +
    +//-----------------------------------------------------------------------------
    +// Internal scheduling
    +void wait( condition * this ) {
    +        // LIB_DEBUG_FPRINTF("Waiting\n");
    +        thread_desc * this_thrd = this_thread();
    +
    +        if( !this->monitors ) {
    +                this->monitors = this_thrd->current_monitors;
    +                this->monitor_count = this_thrd->current_monitor_count;
    +        }
    +
    +        unsigned short count = this->monitor_count;
    +
    +        //Check that everything is as expected
    +        assert( this->monitors != NULL );
    +        assert( this->monitor_count != 0 );
    +
    +        unsigned int recursions[ count ];               //Save the current recursion levels to restore them later
    +        spinlock *   locks     [ count ];               //We need to pass-in an array of locks to ScheduleInternal
    +
    +        // LIB_DEBUG_FPRINTF("Getting ready to wait\n");
    +
    +        //Loop on all the monitors and release the owner
    +        for( unsigned int i = 0; i < count; i++ ) {
    +                monitor_desc * cur = this->monitors[i];
    +
    +                assert( cur );
    +
    +                // LIB_DEBUG_FPRINTF("cur %p lock %p\n", cur, &cur->lock);
    +
    +                //Store the locks for later
    +                locks[i] = &cur->lock;
    +
    +                //Protect the monitors
    +                lock( locks[i] );
    +                {
    +                        //Save the recursion levels
    +                        recursions[i] = cur->recursion;
    +
    +                        //Release the owner
    +                        cur->recursion = 0;
    +                        cur->owner = NULL;
    +                }
    +                //Release the monitor
    +                unlock( locks[i] );
    +        }
    +
    +        // LIB_DEBUG_FPRINTF("Waiting now\n");
    +
    +        //Everything is ready to go to sleep
    +        ScheduleInternal( locks, count );
    +
    +
    +        //WE WOKE UP
    +
    +
    +        //We are back, restore the owners and recursions
    +        for( unsigned int i = 0; i < count; i++ ) {
    +                monitor_desc * cur = this->monitors[i];
    +
    +                //Protect the monitors
    +                lock( locks[i] );
    +                {
    +                        //Release the owner
    +                        cur->owner = this_thrd;
    +                        cur->recursion = recursions[i];
    +                }
    +                //Release the monitor
    +                unlock( locks[i] );
    +        }
    +}
    +
    +static void __signal_internal( condition * this ) {
    +        if( !this->blocked.head ) return;
    +
    +        //Check that everything is as expected
    +        assert( this->monitors );
    +        assert( this->monitor_count != 0 );
    +
    +        LIB_DEBUG_DO(
    +                if ( this->monitors != this_thread()->current_monitors ) {
    +                        abortf( "Signal on condition %p made outside of the correct monitor(s)", this );
    +                } // if
    +        );
    +
    +        monitor_desc * owner = this->monitors[0];
    +        lock( &owner->lock );
    +        {
    +                thread_desc * unblock = pop_head( &this->blocked );
    +                push( &owner->signal_stack, unblock );
    +        }
    +        unlock( &owner->lock );
    +}
    +
    +void signal( condition * this ) {
    +        __signal_internal( this );
    +}
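
    The rewritten enter()/leave() are what make bulk acquiring coherent: monitors[0] is
    entered plain and then passed as the leader to every other monitor in the group, so
    signalling across the group is funnelled through a single signal stack, and the qsort in
    the guard constructor gives every bulk acquire the same global ordering. A hypothetical
    sketch of a routine relying on that:

        // Hypothetical routine holding two monitors at once through one guard.  The
        // guard sorts the array and enters both monitors (mons[0], after sorting,
        // acts as the leader), then leaves them again when it goes out of scope.
        void transfer( monitor_desc * from, monitor_desc * to, int amount ) {
                monitor_desc * mons[] = { from, to };
                monitor_guard_t g = { mons, 2 };

                // ... move 'amount' between the two protected resources ...
        }

    Because the guard sorts the monitor pointers, two threads that pass the same pair in
    opposite orders still acquire the monitors in the same order, which is the usual
    lock-ordering way of avoiding deadlock.
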
  • src/libcfa/concurrency/thread.c

    r1d29d46 r727cf70f  
             this->mon.recursion = 1;
             this->next = NULL;
    +
    +        this->current_monitors      = NULL;
    +        this->current_monitor_count = 0;
     }
