Timestamp: Feb 24, 2020, 2:21:03 PM
Author: Thierry Delisle <tdelisle@…>
Branches: arm-eh, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr
Children: 959f6ad
Parents: 0f2c555

Message:

Changed the concurrency descriptors to use a $ prefix instead of a trailing _desc.
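The rename is purely mechanical: every declaration keeps its shape, and only the descriptor type names change. As a minimal before/after sketch, here is one forward declaration taken from the diff below (CFA compiles through gcc, which as an extension accepts $ in identifiers, so the new names remain legal):

    // before: descriptor types named with a trailing _desc
    static inline void __set_owner ( monitor_desc * this, thread_desc * owner );

    // after: the same declaration with the $-prefixed descriptor types
    static inline void __set_owner ( $monitor * this, $thread * owner );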

File: 1 edited

  • libcfa/src/concurrency/monitor.cfa

--- libcfa/src/concurrency/monitor.cfa (r0f2c555)
+++ libcfa/src/concurrency/monitor.cfa (rac2b598)
@@ -5,5 +5,5 @@
 // file "LICENCE" distributed with Cforall.
 //
-// monitor_desc.c --
+// $monitor.c --
 //
 // Author           : Thierry Delisle
@@ -27,31 +27,31 @@
 //-----------------------------------------------------------------------------
 // Forward declarations
-static inline void __set_owner ( monitor_desc * this, thread_desc * owner );
-static inline void __set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
-static inline void set_mask  ( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
-static inline void reset_mask( monitor_desc * this );
-
-static inline thread_desc * next_thread( monitor_desc * this );
-static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );
+static inline void __set_owner ( $monitor * this, $thread * owner );
+static inline void __set_owner ( $monitor * storage [], __lock_size_t count, $thread * owner );
+static inline void set_mask  ( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
+static inline void reset_mask( $monitor * this );
+
+static inline $thread * next_thread( $monitor * this );
+static inline bool is_accepted( $monitor * this, const __monitor_group_t & monitors );
 
 static inline void lock_all  ( __spinlock_t * locks [], __lock_size_t count );
-static inline void lock_all  ( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
+static inline void lock_all  ( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
 static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count );
-static inline void unlock_all( monitor_desc * locks [], __lock_size_t count );
-
-static inline void save   ( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
-static inline void restore( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
-
-static inline void init     ( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
-static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
-
-static inline thread_desc *        check_condition   ( __condition_criterion_t * );
+static inline void unlock_all( $monitor * locks [], __lock_size_t count );
+
+static inline void save   ( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
+static inline void restore( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
+
+static inline void init     ( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
+static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
+
+static inline $thread *        check_condition   ( __condition_criterion_t * );
 static inline void                 brand_condition   ( condition & );
-static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc * monitors [], __lock_size_t count );
+static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t &, $monitor * monitors [], __lock_size_t count );
 
 forall(dtype T | sized( T ))
 static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
 static inline __lock_size_t count_max    ( const __waitfor_mask_t & mask );
-static inline __lock_size_t aggregate    ( monitor_desc * storage [], const __waitfor_mask_t & mask );
+static inline __lock_size_t aggregate    ( $monitor * storage [], const __waitfor_mask_t & mask );
 
 //-----------------------------------------------------------------------------
@@ -68,5 +68,5 @@
 
 #define monitor_ctx( mons, cnt )                                /* Define that create the necessary struct for internal/external scheduling operations */ \
-        monitor_desc ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
+        $monitor ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
         __lock_size_t count = cnt;                                /* Save the count to a local variable                                                  */ \
         unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
@@ -81,9 +81,9 @@
 // Enter/Leave routines
 // Enter single monitor
-static void __enter( monitor_desc * this, const __monitor_group_t & group ) {
+static void __enter( $monitor * this, const __monitor_group_t & group ) {
         // Lock the monitor spinlock
         lock( this->lock __cfaabi_dbg_ctx2 );
         // Interrupts disable inside critical section
-        thread_desc * thrd = kernelTLS.this_thread;
+        $thread * thrd = kernelTLS.this_thread;
 
         __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
@@ -137,9 +137,9 @@
 }
 
-static void __dtor_enter( monitor_desc * this, fptr_t func ) {
+static void __dtor_enter( $monitor * this, fptr_t func ) {
         // Lock the monitor spinlock
         lock( this->lock __cfaabi_dbg_ctx2 );
         // Interrupts disable inside critical section
-        thread_desc * thrd = kernelTLS.this_thread;
+        $thread * thrd = kernelTLS.this_thread;
 
         __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
@@ -164,5 +164,5 @@
 
         __lock_size_t count = 1;
-        monitor_desc ** monitors = &this;
+        $monitor ** monitors = &this;
         __monitor_group_t group = { &this, 1, func };
         if( is_accepted( this, group) ) {
@@ -216,5 +216,5 @@
 
 // Leave single monitor
-void __leave( monitor_desc * this ) {
+void __leave( $monitor * this ) {
         // Lock the monitor spinlock
         lock( this->lock __cfaabi_dbg_ctx2 );
@@ -236,5 +236,5 @@
 
         // Get the next thread, will be null on low contention monitor
-        thread_desc * new_owner = next_thread( this );
+        $thread * new_owner = next_thread( this );
 
         // Check the new owner is consistent with who we wake-up
@@ -251,5 +251,5 @@
 
 // Leave single monitor for the last time
-void __dtor_leave( monitor_desc * this ) {
+void __dtor_leave( $monitor * this ) {
         __cfaabi_dbg_debug_do(
                 if( TL_GET( this_thread ) != this->owner ) {
@@ -267,6 +267,6 @@
         // Should never return
         void __cfactx_thrd_leave() {
-                thread_desc * thrd = TL_GET( this_thread );
-                monitor_desc * this = &thrd->self_mon;
+                $thread * thrd = TL_GET( this_thread );
+                $monitor * this = &thrd->self_mon;
 
                 // Lock the monitor now
@@ -287,5 +287,5 @@
 
                 // Fetch the next thread, can be null
-                thread_desc * new_owner = next_thread( this );
+                $thread * new_owner = next_thread( this );
 
                 // Release the monitor lock
@@ -317,5 +317,5 @@
 // Leave multiple monitor
 // relies on the monitor array being sorted
-static inline void leave(monitor_desc * monitors [], __lock_size_t count) {
+static inline void leave($monitor * monitors [], __lock_size_t count) {
         for( __lock_size_t i = count - 1; i >= 0; i--) {
                 __leave( monitors[i] );
@@ -325,6 +325,6 @@
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) {
-        thread_desc * thrd = TL_GET( this_thread );
+void ?{}( monitor_guard_t & this, $monitor * m [], __lock_size_t count, fptr_t func ) {
+        $thread * thrd = TL_GET( this_thread );
 
         // Store current array
@@ -366,7 +366,7 @@
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) {
+void ?{}( monitor_dtor_guard_t & this, $monitor * m [], fptr_t func ) {
         // optimization
-        thread_desc * thrd = TL_GET( this_thread );
+        $thread * thrd = TL_GET( this_thread );
 
         // Store current array
@@ -393,5 +393,5 @@
 //-----------------------------------------------------------------------------
 // Internal scheduling types
-void ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
+void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
         this.waiting_thread = waiting_thread;
         this.count = count;
@@ -407,5 +407,5 @@
 }
 
-void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t & owner ) {
+void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t & owner ) {
         this.ready  = false;
         this.target = target;
@@ -441,5 +441,5 @@
         // Find the next thread(s) to run
         __lock_size_t thread_count = 0;
-        thread_desc * threads[ count ];
+        $thread * threads[ count ];
         __builtin_memset( threads, 0, sizeof( threads ) );
 
@@ -449,5 +449,5 @@
         // Remove any duplicate threads
         for( __lock_size_t i = 0; i < count; i++) {
-                thread_desc * new_owner = next_thread( monitors[i] );
+                $thread * new_owner = next_thread( monitors[i] );
                 insert_unique( threads, thread_count, new_owner );
         }
@@ -479,5 +479,5 @@
         //Some more checking in debug
         __cfaabi_dbg_debug_do(
-                thread_desc * this_thrd = TL_GET( this_thread );
+                $thread * this_thrd = TL_GET( this_thread );
                 if ( this.monitor_count != this_thrd->monitors.size ) {
                         abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size );
@@ -533,5 +533,5 @@
 
         //Find the thread to run
-        thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
+        $thread * signallee = pop_head( this.blocked )->waiting_thread;
         /* paranoid */ verify( signallee->next == 0p );
         __set_owner( monitors, count, signallee );
@@ -587,5 +587,5 @@
         // Create one!
         __lock_size_t max = count_max( mask );
-        monitor_desc * mon_storage[max];
+        $monitor * mon_storage[max];
         __builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
         __lock_size_t actual_count = aggregate( mon_storage, mask );
@@ -605,5 +605,5 @@
         {
                 // Check if the entry queue
-                thread_desc * next; int index;
+                $thread * next; int index;
                 [next, index] = search_entry_queue( mask, monitors, count );
 
@@ -615,5 +615,5 @@
                                 verifyf( accepted.size == 1,  "ERROR: Accepted dtor has more than 1 mutex parameter." );
 
-                                monitor_desc * mon2dtor = accepted[0];
+                                $monitor * mon2dtor = accepted[0];
                                 verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." );
 
@@ -709,5 +709,5 @@
 // Utilities
 
-static inline void __set_owner( monitor_desc * this, thread_desc * owner ) {
+static inline void __set_owner( $monitor * this, $thread * owner ) {
         /* paranoid */ verify( this->lock.lock );
 
@@ -719,5 +719,5 @@
 }
 
-static inline void __set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
+static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread * owner ) {
         /* paranoid */ verify ( monitors[0]->lock.lock );
         /* paranoid */ verifyf( monitors[0]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[0]->owner, monitors[0]->recursion, monitors[0] );
@@ -732,5 +732,5 @@
 }
 
-static inline void set_mask( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
+static inline void set_mask( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
         for( __lock_size_t i = 0; i < count; i++) {
                 storage[i]->mask = mask;
@@ -738,5 +738,5 @@
 }
 
-static inline void reset_mask( monitor_desc * this ) {
+static inline void reset_mask( $monitor * this ) {
         this->mask.accepted = 0p;
         this->mask.data = 0p;
@@ -744,5 +744,5 @@
 }
 
-static inline thread_desc * next_thread( monitor_desc * this ) {
+static inline $thread * next_thread( $monitor * this ) {
         //Check the signaller stack
         __cfaabi_dbg_print_safe( "Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
@@ -760,5 +760,5 @@
         // No signaller thread
         // Get the next thread in the entry_queue
-        thread_desc * new_owner = pop_head( this->entry_queue );
+        $thread * new_owner = pop_head( this->entry_queue );
         /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
         /* paranoid */ verify( !new_owner || new_owner->next == 0p );
@@ -768,5 +768,5 @@
 }
 
-static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
+static inline bool is_accepted( $monitor * this, const __monitor_group_t & group ) {
         __acceptable_t * it = this->mask.data; // Optim
         __lock_size_t count = this->mask.size;
@@ -790,5 +790,5 @@
 }
 
-static inline void init( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
+static inline void init( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
         for( __lock_size_t i = 0; i < count; i++) {
                 (criteria[i]){ monitors[i], waiter };
@@ -798,5 +798,5 @@
 }
 
-static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
+static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
         for( __lock_size_t i = 0; i < count; i++) {
                 (criteria[i]){ monitors[i], waiter };
@@ -814,5 +814,5 @@
 }
 
-static inline void lock_all( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
+static inline void lock_all( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
         for( __lock_size_t i = 0; i < count; i++ ) {
                 __spinlock_t * l = &source[i]->lock;
@@ -828,5 +828,5 @@
 }
 
-static inline void unlock_all( monitor_desc * locks [], __lock_size_t count ) {
+static inline void unlock_all( $monitor * locks [], __lock_size_t count ) {
         for( __lock_size_t i = 0; i < count; i++ ) {
                 unlock( locks[i]->lock );
@@ -835,5 +835,5 @@
 
 static inline void save(
-        monitor_desc * ctx [],
+        $monitor * ctx [],
         __lock_size_t count,
         __attribute((unused)) __spinlock_t * locks [],
@@ -848,5 +848,5 @@
 
 static inline void restore(
-        monitor_desc * ctx [],
+        $monitor * ctx [],
         __lock_size_t count,
         __spinlock_t * locks [],
@@ -866,5 +866,5 @@
 // 2 - Checks if all the monitors are ready to run
 //     if so return the thread to run
-static inline thread_desc * check_condition( __condition_criterion_t * target ) {
+static inline $thread * check_condition( __condition_criterion_t * target ) {
         __condition_node_t * node = target->owner;
         unsigned short count = node->count;
@@ -889,5 +889,5 @@
 
 static inline void brand_condition( condition & this ) {
-        thread_desc * thrd = TL_GET( this_thread );
+        $thread * thrd = TL_GET( this_thread );
         if( !this.monitors ) {
                 // __cfaabi_dbg_print_safe( "Branding\n" );
@@ -895,5 +895,5 @@
                 this.monitor_count = thrd->monitors.size;
 
-                this.monitors = (monitor_desc **)malloc( this.monitor_count * sizeof( *this.monitors ) );
+                this.monitors = ($monitor **)malloc( this.monitor_count * sizeof( *this.monitors ) );
                 for( int i = 0; i < this.monitor_count; i++ ) {
                         this.monitors[i] = thrd->monitors[i];
@@ -902,10 +902,10 @@
 }
 
-static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc * monitors [], __lock_size_t count ) {
-
-        __queue_t(thread_desc) & entry_queue = monitors[0]->entry_queue;
+static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t & mask, $monitor * monitors [], __lock_size_t count ) {
+
+        __queue_t($thread) & entry_queue = monitors[0]->entry_queue;
 
         // For each thread in the entry-queue
-        for(    thread_desc ** thrd_it = &entry_queue.head;
+        for(    $thread ** thrd_it = &entry_queue.head;
                 *thrd_it != 1p;
                 thrd_it = &(*thrd_it)->next
@@ -951,5 +951,5 @@
 }
 
-static inline __lock_size_t aggregate( monitor_desc * storage [], const __waitfor_mask_t & mask ) {
+static inline __lock_size_t aggregate( $monitor * storage [], const __waitfor_mask_t & mask ) {
         __lock_size_t size = 0;
         for( __lock_size_t i = 0; i < mask.size; i++ ) {
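One detail worth surfacing from the hunks above: the multi-monitor leave walks the array in reverse, and its comment notes that it relies on the monitors being sorted; releasing in the opposite order of a sorted acquisition is the standard ordered-locking discipline for deadlock avoidance. Reassembled here with the renamed types for easy reading (the closing braces are inferred from context, the rest is verbatim from this changeset):

    // Leave multiple monitor
    // relies on the monitor array being sorted
    static inline void leave($monitor * monitors [], __lock_size_t count) {
            for( __lock_size_t i = count - 1; i >= 0; i--) {
                    __leave( monitors[i] );
            }
    }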