Timestamp: Jul 12, 2021, 1:44:35 PM
Author: caparsons <caparson@…>
Branches: ADT, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children: 605673f, 9345684
Parents: cf444b6, a953c2e3
Note: this is a merge changeset; the changes displayed below correspond to the merge itself, not to the full set of changes relative to either parent.
Message: Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

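The substantive change carried by this merge, visible throughout the diff below, is a mechanical rename of two internal concurrency types: $monitor becomes monitor$ and $thread becomes thread$, moving the '$' marker from prefix to suffix (the stale file-header comment is also corrected from "$monitor.c" to "monitor.cfa"). One forward declaration, taken verbatim from the diff, illustrates the pattern:

    // before the merge: '$'-prefixed internal types
    static inline $thread * next_thread( $monitor * this );

    // after the merge: '$'-suffixed internal types
    static inline thread$ * next_thread( monitor$ * this );
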
File: 1 edited

Legend: lines prefixed with "-" were removed and lines prefixed with "+" were added; unprefixed lines are unchanged context, and "…" marks elided unchanged regions.
  • libcfa/src/concurrency/monitor.cfa

    (diff from revision cf444b6 to revision 6ff08d8)

      // file "LICENCE" distributed with Cforall.
      //
    - // $monitor.c --
    + // monitor.cfa --
      //
      // Author           : Thierry Delisle
    …
      //-----------------------------------------------------------------------------
      // Forward declarations
    - static inline void __set_owner ( $monitor * this, $thread * owner );
    - static inline void __set_owner ( $monitor * storage [], __lock_size_t count, $thread * owner );
    - static inline void set_mask  ( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
    - static inline void reset_mask( $monitor * this );
    -
    - static inline $thread * next_thread( $monitor * this );
    - static inline bool is_accepted( $monitor * this, const __monitor_group_t & monitors );
    + static inline void __set_owner ( monitor$ * this, thread$ * owner );
    + static inline void __set_owner ( monitor$ * storage [], __lock_size_t count, thread$ * owner );
    + static inline void set_mask  ( monitor$ * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
    + static inline void reset_mask( monitor$ * this );
    +
    + static inline thread$ * next_thread( monitor$ * this );
    + static inline bool is_accepted( monitor$ * this, const __monitor_group_t & monitors );

      static inline void lock_all  ( __spinlock_t * locks [], __lock_size_t count );
    - static inline void lock_all  ( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
    + static inline void lock_all  ( monitor$ * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
      static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count );
    - static inline void unlock_all( $monitor * locks [], __lock_size_t count );
    -
    - static inline void save   ( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
    - static inline void restore( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
    -
    - static inline void init     ( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    - static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    -
    - static inline $thread *        check_condition   ( __condition_criterion_t * );
    + static inline void unlock_all( monitor$ * locks [], __lock_size_t count );
    +
    + static inline void save   ( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
    + static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
    +
    + static inline void init     ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    + static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    +
    + static inline thread$ *        check_condition   ( __condition_criterion_t * );
      static inline void                 brand_condition   ( condition & );
    - static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t &, $monitor * monitors [], __lock_size_t count );
    + static inline [thread$ *, int] search_entry_queue( const __waitfor_mask_t &, monitor$ * monitors [], __lock_size_t count );

      forall(T & | sized( T ))
      static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
      static inline __lock_size_t count_max    ( const __waitfor_mask_t & mask );
    - static inline __lock_size_t aggregate    ( $monitor * storage [], const __waitfor_mask_t & mask );
    + static inline __lock_size_t aggregate    ( monitor$ * storage [], const __waitfor_mask_t & mask );

      //-----------------------------------------------------------------------------
    …

      #define monitor_ctx( mons, cnt )                                /* Define that create the necessary struct for internal/external scheduling operations */ \
    -         $monitor ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
    +         monitor$ ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
              __lock_size_t count = cnt;                                /* Save the count to a local variable                                                  */ \
              unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
    …
      // Enter/Leave routines
      // Enter single monitor
    - static void __enter( $monitor * this, const __monitor_group_t & group ) {
    -         $thread * thrd = active_thread();
    + static void __enter( monitor$ * this, const __monitor_group_t & group ) {
    +         thread$ * thrd = active_thread();

              // Lock the monitor spinlock
    …
      }

    - static void __dtor_enter( $monitor * this, fptr_t func, bool join ) {
    -         $thread * thrd = active_thread();
    + static void __dtor_enter( monitor$ * this, fptr_t func, bool join ) {
    +         thread$ * thrd = active_thread();
              #if defined( __CFA_WITH_VERIFY__ )
                      bool is_thrd = this == &thrd->self_mon;
    …
              // because join will not release the monitor after it executed.
              // to avoid that it sets the owner to the special value thrd | 1p before exiting
    -         else if( this->owner == ($thread*)(1 | (uintptr_t)thrd) ) {
    +         else if( this->owner == (thread$*)(1 | (uintptr_t)thrd) ) {
                      // restore the owner and just return
                      __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
    …

              __lock_size_t count = 1;
    -         $monitor ** monitors = &this;
    +         monitor$ ** monitors = &this;
              __monitor_group_t group = { &this, 1, func };
              if( is_accepted( this, group) ) {
    …

      // Leave single monitor
    - void __leave( $monitor * this ) {
    + void __leave( monitor$ * this ) {
              // Lock the monitor spinlock
              lock( this->lock __cfaabi_dbg_ctx2 );
    …

              // Get the next thread, will be null on low contention monitor
    -         $thread * new_owner = next_thread( this );
    +         thread$ * new_owner = next_thread( this );

              // Check the new owner is consistent with who we wake-up
    …

      // Leave single monitor for the last time
    - void __dtor_leave( $monitor * this, bool join ) {
    + void __dtor_leave( monitor$ * this, bool join ) {
              __cfaabi_dbg_debug_do(
                      if( active_thread() != this->owner ) {
    …
              )

    -         this->owner = ($thread*)(1 | (uintptr_t)this->owner);
    - }
    -
    - void __thread_finish( $thread * thrd ) {
    -         $monitor * this = &thrd->self_mon;
    +         this->owner = (thread$*)(1 | (uintptr_t)this->owner);
    + }
    +
    + void __thread_finish( thread$ * thrd ) {
    +         monitor$ * this = &thrd->self_mon;

              // Lock the monitor now
    …
              /* paranoid */ verify( this->lock.lock );
              /* paranoid */ verify( thrd->context.SP );
    -         /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : $thread %p has been corrupted.\n StackPointer too large.\n", thrd );
    -         /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : $thread %p has been corrupted.\n StackPointer too small.\n", thrd );
    +         /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
    +         /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );
              /* paranoid */ verify( ! __preemption_enabled() );

    …

              // Fetch the next thread, can be null
    -         $thread * new_owner = next_thread( this );
    +         thread$ * new_owner = next_thread( this );

              // Mark the state as fully halted
    …
      // Leave multiple monitor
      // relies on the monitor array being sorted
    - static inline void leave($monitor * monitors [], __lock_size_t count) {
    + static inline void leave(monitor$ * monitors [], __lock_size_t count) {
              for( __lock_size_t i = count - 1; i >= 0; i--) {
                      __leave( monitors[i] );
    …
      // Ctor for monitor guard
      // Sorts monitors before entering
    - void ?{}( monitor_guard_t & this, $monitor * m [], __lock_size_t count, fptr_t func ) {
    -         $thread * thrd = active_thread();
    + void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) {
    +         thread$ * thrd = active_thread();

              // Store current array
    …
      // Ctor for monitor guard
      // Sorts monitors before entering
    - void ?{}( monitor_dtor_guard_t & this, $monitor * m [], fptr_t func, bool join ) {
    + void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) {
              // optimization
    -         $thread * thrd = active_thread();
    +         thread$ * thrd = active_thread();

              // Store current array
    …
      //-----------------------------------------------------------------------------
      // Internal scheduling types
    - void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
    + void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
              this.waiting_thread = waiting_thread;
              this.count = count;
    …
      }

    - void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t & owner ) {
    + void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
              this.ready  = false;
              this.target = target;
    …
              // Find the next thread(s) to run
              __lock_size_t thread_count = 0;
    -         $thread * threads[ count ];
    +         thread$ * threads[ count ];
              __builtin_memset( threads, 0, sizeof( threads ) );

    …
              // Remove any duplicate threads
              for( __lock_size_t i = 0; i < count; i++) {
    -                 $thread * new_owner = next_thread( monitors[i] );
    +                 thread$ * new_owner = next_thread( monitors[i] );
                      insert_unique( threads, thread_count, new_owner );
              }
    …
              //Some more checking in debug
              __cfaabi_dbg_debug_do(
    -                 $thread * this_thrd = active_thread();
    +                 thread$ * this_thrd = active_thread();
                      if ( this.monitor_count != this_thrd->monitors.size ) {
                              abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size );
    …

              //Find the thread to run
    -         $thread * signallee = pop_head( this.blocked )->waiting_thread;
    +         thread$ * signallee = pop_head( this.blocked )->waiting_thread;
              __set_owner( monitors, count, signallee );

    …
              // Create one!
              __lock_size_t max = count_max( mask );
    -         $monitor * mon_storage[max];
    +         monitor$ * mon_storage[max];
              __builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
              __lock_size_t actual_count = aggregate( mon_storage, mask );
    …
              {
                      // Check if the entry queue
    -                 $thread * next; int index;
    +                 thread$ * next; int index;
                      [next, index] = search_entry_queue( mask, monitors, count );

    …
                                      verifyf( accepted.size == 1,  "ERROR: Accepted dtor has more than 1 mutex parameter." );

    -                                 $monitor * mon2dtor = accepted[0];
    +                                 monitor$ * mon2dtor = accepted[0];
                                      verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." );

    …
      // Utilities

    - static inline void __set_owner( $monitor * this, $thread * owner ) {
    + static inline void __set_owner( monitor$ * this, thread$ * owner ) {
              /* paranoid */ verify( this->lock.lock );

    …
      }

    - static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread * owner ) {
    + static inline void __set_owner( monitor$ * monitors [], __lock_size_t count, thread$ * owner ) {
              /* paranoid */ verify ( monitors[0]->lock.lock );
              /* paranoid */ verifyf( monitors[0]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[0]->owner, monitors[0]->recursion, monitors[0] );
    …
      }

    - static inline void set_mask( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
    + static inline void set_mask( monitor$ * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
              for( __lock_size_t i = 0; i < count; i++) {
                      storage[i]->mask = mask;
    …
      }

    - static inline void reset_mask( $monitor * this ) {
    + static inline void reset_mask( monitor$ * this ) {
              this->mask.accepted = 0p;
              this->mask.data = 0p;
    …
      }

    - static inline $thread * next_thread( $monitor * this ) {
    + static inline thread$ * next_thread( monitor$ * this ) {
              //Check the signaller stack
              __cfaabi_dbg_print_safe( "Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
    …
              // No signaller thread
              // Get the next thread in the entry_queue
    -         $thread * new_owner = pop_head( this->entry_queue );
    +         thread$ * new_owner = pop_head( this->entry_queue );
              /* paranoid */ verifyf( !this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
              /* paranoid */ verify( !new_owner || new_owner->link.next == 0p );
    …
      }

    - static inline bool is_accepted( $monitor * this, const __monitor_group_t & group ) {
    + static inline bool is_accepted( monitor$ * this, const __monitor_group_t & group ) {
              __acceptable_t * it = this->mask.data; // Optim
              __lock_size_t count = this->mask.size;
    …
      }

    - static inline void init( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
    + static inline void init( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
              for( __lock_size_t i = 0; i < count; i++) {
                      (criteria[i]){ monitors[i], waiter };
    …
      }

    - static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
    + static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
              for( __lock_size_t i = 0; i < count; i++) {
                      (criteria[i]){ monitors[i], waiter };
    …
      }

    - static inline void lock_all( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
    + static inline void lock_all( monitor$ * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
              for( __lock_size_t i = 0; i < count; i++ ) {
                      __spinlock_t * l = &source[i]->lock;
    …
      }

    - static inline void unlock_all( $monitor * locks [], __lock_size_t count ) {
    + static inline void unlock_all( monitor$ * locks [], __lock_size_t count ) {
              for( __lock_size_t i = 0; i < count; i++ ) {
                      unlock( locks[i]->lock );
    …

      static inline void save(
    -         $monitor * ctx [],
    +         monitor$ * ctx [],
              __lock_size_t count,
              __attribute((unused)) __spinlock_t * locks [],
    …

      static inline void restore(
    -         $monitor * ctx [],
    +         monitor$ * ctx [],
              __lock_size_t count,
              __spinlock_t * locks [],
    …
      // 2 - Checks if all the monitors are ready to run
      //     if so return the thread to run
    - static inline $thread * check_condition( __condition_criterion_t * target ) {
    + static inline thread$ * check_condition( __condition_criterion_t * target ) {
              __condition_node_t * node = target->owner;
              unsigned short count = node->count;
    …

      static inline void brand_condition( condition & this ) {
    -         $thread * thrd = active_thread();
    +         thread$ * thrd = active_thread();
              if( !this.monitors ) {
                      // __cfaabi_dbg_print_safe( "Branding\n" );
    …
                      this.monitor_count = thrd->monitors.size;

    -                 this.monitors = ($monitor **)malloc( this.monitor_count * sizeof( *this.monitors ) );
    +                 this.monitors = (monitor$ **)malloc( this.monitor_count * sizeof( *this.monitors ) );
                      for( int i = 0; i < this.monitor_count; i++ ) {
                              this.monitors[i] = thrd->monitors[i];
    …
      }

    - static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t & mask, $monitor * monitors [], __lock_size_t count ) {
    -
    -         __queue_t($thread) & entry_queue = monitors[0]->entry_queue;
    + static inline [thread$ *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor$ * monitors [], __lock_size_t count ) {
    +
    +         __queue_t(thread$) & entry_queue = monitors[0]->entry_queue;

              // For each thread in the entry-queue
    -         for(    $thread ** thrd_it = &entry_queue.head;
    +         for(    thread$ ** thrd_it = &entry_queue.head;
                      (*thrd_it) != 1p;
                      thrd_it = &(*thrd_it)->link.next
    …
      }

    - static inline __lock_size_t aggregate( $monitor * storage [], const __waitfor_mask_t & mask ) {
    + static inline __lock_size_t aggregate( monitor$ * storage [], const __waitfor_mask_t & mask ) {
              __lock_size_t size = 0;
              for( __lock_size_t i = 0; i < mask.size; i++ ) {
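
One implementation detail worth noting in the diff: __dtor_leave marks a monitor whose owner has finished by setting the low bit of the owner pointer, this->owner = (thread$*)(1 | (uintptr_t)this->owner), and __dtor_enter later tests for exactly that tagged value. Below is a minimal, self-contained C sketch of this low-bit pointer-tagging idiom; the names tag_owner, is_tagged, and untag_owner are hypothetical illustrations rather than CFA runtime functions, and the sketch assumes the pointee is at least 2-byte aligned so the low bit of a valid pointer is otherwise zero.

    #include <stdint.h>
    #include <stdio.h>

    typedef struct thread_t thread_t;   /* hypothetical stand-in for thread$ */

    /* Set the least-significant bit to mark the pointer; safe only when the
       pointee's alignment guarantees that bit is zero in any valid pointer. */
    static inline thread_t * tag_owner( thread_t * p ) {
            return (thread_t *)(1 | (uintptr_t)p);
    }

    /* Report whether the mark bit is set. */
    static inline int is_tagged( const thread_t * p ) {
            return (int)((uintptr_t)p & 1);
    }

    /* Clear the mark bit to recover the original pointer. */
    static inline thread_t * untag_owner( thread_t * p ) {
            return (thread_t *)((uintptr_t)p & ~(uintptr_t)1);
    }

    int main( void ) {
            thread_t * t = (thread_t *)0x1000;   /* aligned stand-in address */
            thread_t * owner = tag_owner( t );
            printf( "tagged: %d, restored: %p\n", is_tagged( owner ), (void *)untag_owner( owner ) );
            return 0;
    }

The tag lets the runtime distinguish "owner thread has already finished" from ordinary ownership without an extra field. (Separately, the [thread$ *, int] return type and the [next, index] = search_entry_queue(...) assignment seen above are CFA's built-in tuple syntax, not typos.)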