Changeset 6ff4507


Timestamp:
Sep 21, 2017, 12:31:48 PM (7 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
Children:
a843067
Parents:
7453a68
Message:

Disable -Werror since new warnings appeared
Aesthetic refactoring in monitor.c
Monitors are now properly aggregated in waitfor
Monitor masks are now always saved and restored (TODO: check if this is too much)
Insert_unique is now generic
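
For context on the mask save/restore item above: the monitor.c hunks below factor the repeated save / block / restore sequence into the monitor_save, monitor_restore and blockAndWake macros. A minimal plain-C sketch of that pattern, using simplified stand-in types and names (waitfor_mask_t, monitor_t, monitors_save and monitors_restore are illustrative, not the libcfa declarations):

    /* Sketch only: simplified stand-ins for __waitfor_mask_t and monitor_desc. */
    typedef struct { int size; } waitfor_mask_t;
    typedef struct {
        unsigned int   recursion;   /* nesting level of the current owner        */
        waitfor_mask_t mask;        /* waitfor clauses currently being accepted  */
    } monitor_t;

    /* Before blocking: remember each monitor's recursion level and waitfor mask. */
    static void monitors_save( monitor_t ** mons, int count,
                               unsigned int * recursions, waitfor_mask_t * masks ) {
        for( int i = 0; i < count; i++ ) {
            recursions[i] = mons[i]->recursion;
            masks[i]      = mons[i]->mask;
        }
    }

    /* After waking up: put the saved recursion levels and masks back. */
    static void monitors_restore( monitor_t ** mons, int count,
                                  unsigned int * recursions, waitfor_mask_t * masks ) {
        for( int i = 0; i < count; i++ ) {
            mons[i]->recursion = recursions[i];
            mons[i]->mask      = masks[i];
        }
    }

In the actual diff the restore side also re-acquires and releases the monitors' spinlocks around the loop (see restore() in monitor.c); the sketch above omits that locking.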

Location:
src/libcfa
Files:
3 edited

  • src/libcfa/Makefile.am

    r7453a68 r6ff4507  
    3636         ${AM_V_GEN}@BACKEND_CC@ @CFA_FLAGS@ -D__CFA_DEBUG__ -O0 -c -o $@ $<
    3737
    38 EXTRA_FLAGS = -g -Wall -Werror -Wno-unused-function -imacros libcfa-prelude.c @CFA_FLAGS@
     38EXTRA_FLAGS = -g -Wall -Wno-unused-function -imacros libcfa-prelude.c @CFA_FLAGS@
    3939
    4040AM_CCASFLAGS = @CFA_FLAGS@
  • src/libcfa/Makefile.in

    r7453a68 r6ff4507  
    416416ARFLAGS = cr
    417417lib_LIBRARIES = $(am__append_1) $(am__append_2)
    418 EXTRA_FLAGS = -g -Wall -Werror -Wno-unused-function -imacros libcfa-prelude.c @CFA_FLAGS@
     418EXTRA_FLAGS = -g -Wall -Wno-unused-function -imacros libcfa-prelude.c @CFA_FLAGS@
    419419AM_CCASFLAGS = @CFA_FLAGS@
    420420headers = fstream iostream iterator limits rational stdlib \
  • src/libcfa/concurrency/monitor.c

    r7453a68 r6ff4507  
    2424// Forward declarations
    2525static inline void set_owner( monitor_desc * this, thread_desc * owner );
     26static inline void set_owner( monitor_desc ** storage, short count, thread_desc * owner );
     27static inline void set_mask ( monitor_desc ** storage, short count, const __waitfor_mask_t & mask );
     28
    2629static inline thread_desc * next_thread( monitor_desc * this );
    2730static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );
     
    3235static inline void unlock_all( monitor_desc ** locks, unsigned short count );
    3336
    34 static inline void save_recursion   ( monitor_desc ** ctx, unsigned int * /*out*/ recursions, unsigned short count );
    35 static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count );
     37static inline void save   ( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks );
     38static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*in */ recursions, __waitfor_mask_t * /*in */ masks );
    3639
    3740static inline void init     ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
    3841static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
    3942
    40 static inline thread_desc * check_condition( __condition_criterion_t * );
    41 static inline void brand_condition( condition * );
    42 static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val );
    43 
     43static inline thread_desc *        check_condition   ( __condition_criterion_t * );
     44static inline void                 brand_condition   ( condition * );
    4445static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc ** monitors, int count );
    4546
    46 static inline short count_max( const __waitfor_mask_t & mask );
    47 static inline short aggregate( monitor_desc ** storage, const __waitfor_mask_t & mask );
    48 static inline void  set_mask ( monitor_desc ** storage, short count, const __waitfor_mask_t & mask );
     47forall(dtype T | sized( T ))
     48static inline short insert_unique( T ** array, short & size, T * val );
     49static inline short count_max    ( const __waitfor_mask_t & mask );
     50static inline short aggregate    ( monitor_desc ** storage, const __waitfor_mask_t & mask );
    4951
    5052//-----------------------------------------------------------------------------
    5153// Useful defines
    52 #define wait_ctx(thrd, user_info)                               /* Create the necessary information to use the signaller stack       */ \
    53         __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                   */ \
    54         __condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up          */ \
    55         init( count, monitors, &waiter, criteria );               /* Link everything together                                          */ \
    56 
    57 #define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack       */ \
    58         __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                   */ \
    59         __condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up          */ \
    60         init_push( count, monitors, &waiter, criteria );          /* Link everything together and push it to the AS-Stack              */ \
    61 
    62 #define monitor_ctx( mons, cnt )              /* Define that create the necessary struct for internal/external scheduling operations */ \
    63         monitor_desc ** monitors = mons;        /* Save the targeted monitors                                                          */ \
    64         unsigned short count = cnt;             /* Save the count to a local variable                                                  */ \
    65         unsigned int recursions[ count ];       /* Save the current recursion levels to restore them later                             */ \
    66         spinlock *   locks     [ count ];       /* We need to pass-in an array of locks to BlockInternal                               */ \
     54#define wait_ctx(thrd, user_info)                               /* Create the necessary information to use the signaller stack                         */ \
     55        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
     56        __condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up                            */ \
     57        init( count, monitors, &waiter, criteria );               /* Link everything together                                                            */ \
     58
     59#define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack                         */ \
     60        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
     61        __condition_criterion_t criteria[count];                  /* Create the creteria this wait operation needs to wake up                            */ \
     62        init_push( count, monitors, &waiter, criteria );          /* Link everything together and push it to the AS-Stack                                */ \
     63
     64#define monitor_ctx( mons, cnt )                                /* Define that create the necessary struct for internal/external scheduling operations */ \
     65        monitor_desc ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
     66        unsigned short count = cnt;                               /* Save the count to a local variable                                                  */ \
     67        unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
     68        __waitfor_mask_t masks[ count ];                          /* Save the current waitfor masks to restore them later                                */ \
     69        spinlock *   locks     [ count ];                         /* We need to pass-in an array of locks to BlockInternal                               */ \
     70
     71#define monitor_save    save   ( monitors, count, locks, recursions, masks )
     72#define monitor_restore restore( monitors, count, locks, recursions, masks )
     73
     74#define blockAndWake( thrd, cnt )                               /* Create the necessary information to use the signaller stack                         */ \
     75        monitor_save;                                             /* Save monitor states                                                                 */ \
     76        BlockInternal( locks, count, thrd, cnt );                 /* Everything is ready to go to sleep                                                  */ \
     77        monitor_restore;                                          /* We are back, restore the owners and recursions                                      */ \
     78
    6779
    6880//-----------------------------------------------------------------------------
     
    277289        append( &this->blocked, &waiter );
    278290
    279         // Lock all monitors (aggregates the lock them as well)
     291        // Lock all monitors (aggregates the locks as well)
    280292        lock_all( monitors, locks, count );
    281293
    282294        // Find the next thread(s) to run
    283         unsigned short thread_count = 0;
     295        short thread_count = 0;
    284296        thread_desc * threads[ count ];
    285297        for(int i = 0; i < count; i++) {
     
    290302        for( int i = 0; i < count; i++) {
    291303                thread_desc * new_owner = next_thread( monitors[i] );
    292                 thread_count = insert_unique( threads, thread_count, new_owner );
    293         }
    294 
    295         // Save monitor state
    296         save_recursion( monitors, recursions, count );
    297 
    298         // Everything is ready to go to sleep
    299         BlockInternal( locks, count, threads, thread_count );
    300 
    301 
    302         // WE WOKE UP
    303 
    304 
    305         // We are back, restore the owners and recursions
    306         lock_all( locks, count );
    307         restore_recursion( monitors, recursions, count );
    308         unlock_all( locks, count );
     304                insert_unique( threads, thread_count, new_owner );
     305        }
     306
     307        blockAndWake( threads, thread_count );
    309308}
    310309
     
    368367
    369368        //save contexts
    370         save_recursion( monitors, recursions, count );
     369        monitor_save;
    371370
    372371        //Find the thread to run
    373372        thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
    374         for(int i = 0; i < count; i++) {
    375                 set_owner( monitors[i], signallee );
    376         }
     373        set_owner( monitors, count, signallee );
    377374
    378375        //Everything is ready to go to sleep
     
    383380
    384381
    385         //We are back, restore the owners and recursions
    386         lock_all( locks, count );
    387         restore_recursion( monitors, recursions, count );
    388         unlock_all( locks, count );
     382        //We are back, restore the masks and recursions
     383        monitor_restore;
    389384
    390385        return true;
     
    424419        monitor_ctx( mon_storage, actual_count );
    425420
    426         // Lock all monitors (aggregates the lock them as well)
     421        // Lock all monitors (aggregates the locks as well)
    427422        lock_all( monitors, locks, count );
    428423
     
    437432                        }
    438433                        else {
    439                                 save_recursion( monitors, recursions, count );
    440 
    441                                 // Everything is ready to go to sleep
    442                                 BlockInternal( locks, count, &next, 1 );
    443 
    444 
    445                                 //WE WOKE UP
    446 
    447 
    448                                 //We are back, restore the owners and recursions
    449                                 lock_all( locks, count );
    450                                 restore_recursion( monitors, recursions, count );
    451                                 unlock_all( locks, count );
     434                                blockAndWake( &next, 1 );
    452435                        }
    453436
     
    463446
    464447
    465         save_recursion( monitors, recursions, count );
     448        monitor_save;
    466449        set_mask( monitors, count, mask );
    467450
    468 
    469         // Everything is ready to go to sleep
    470         BlockInternal( locks, count );
    471 
    472 
     451        BlockInternal( locks, count );       // Everything is ready to go to sleep
    473452        //WE WOKE UP
    474 
    475 
    476         //We are back, restore the owners and recursions
    477         lock_all( locks, count );
    478         restore_recursion( monitors, recursions, count );
    479         unlock_all( locks, count );
    480 
    481         return mask.accepted;
     453        monitor_restore;                     //We are back, restore the masks and recursions
    482454}
    483455
     
    491463        //We are passing the monitor to someone else, which means recursion level is not 0
    492464        this->recursion = owner ? 1 : 0;
     465}
     466
     467static inline void set_owner( monitor_desc ** monitors, short count, thread_desc * owner ) {
     468        for( int i = 0; i < count; i++ ) {
     469                set_owner( monitors[i], owner );
     470        }
     471}
     472
     473static inline void set_mask( monitor_desc ** storage, short count, const __waitfor_mask_t & mask ) {
     474        for(int i = 0; i < count; i++) {
     475                storage[i]->mask = mask;
     476        }
    493477}
    494478
     
    513497}
    514498
     499static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
     500        __acceptable_t * it = this->mask.clauses; // Optim
     501        int count = this->mask.size;
     502
     503        // Check if there are any acceptable functions
     504        if( !it ) return -1;
     505
     506        // If this isn't the first monitor to test this, there is no reason to repeat the test.
     507        if( this != group[0] ) return group[0]->mask.accepted >= 0;
     508
     509        // For all acceptable functions check if this is the current function.
     510        for( short i = 0; i < count; i++, it++ ) {
     511                if( *it == group ) {
     512                        *this->mask.accepted = i;
     513                        return true;
     514                }
     515        }
     516
     517        // No function matched
     518        return false;
     519}
     520
    515521static inline void init( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
    516522        for(int i = 0; i < count; i++) {
     
    556562}
    557563
    558 
    559 static inline void save_recursion   ( monitor_desc ** ctx, unsigned int * /*out*/ recursions, unsigned short count ) {
     564static inline void save   ( monitor_desc ** ctx, short count, __attribute((unused)) spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
    560565        for( int i = 0; i < count; i++ ) {
    561566                recursions[i] = ctx[i]->recursion;
    562         }
    563 }
    564 
    565 static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count ) {
     567                masks[i]      = ctx[i]->mask;
     568        }
     569}
     570
     571static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
     572        lock_all( locks, count );
    566573        for( int i = 0; i < count; i++ ) {
    567574                ctx[i]->recursion = recursions[i];
    568         }
     575                ctx[i]->mask      = masks[i];
     576        }
     577        unlock_all( locks, count );
    569578}
    570579
     
    607616                }
    608617        }
    609 }
    610 
    611 static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val ) {
    612         if( !val ) return end;
    613 
    614         for(int i = 0; i <= end; i++) {
    615                 if( thrds[i] == val ) return end;
    616         }
    617 
    618         thrds[end] = val;
    619         return end + 1;
    620 }
    621 
    622 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
    623         __acceptable_t * it = this->mask.clauses; // Optim
    624         int count = this->mask.size;
    625 
    626         // Check if there are any acceptable functions
    627         if( !it ) return -1;
    628 
    629         // If this isn't the first monitor to test this, there is no reason to repeat the test.
    630         if( this != group[0] ) return group[0]->mask.accepted >= 0;
    631 
    632         // For all acceptable functions check if this is the current function.
    633         for( short i = 0; i < count; i++, it++ ) {
    634                 if( *it == group ) {
    635                         *this->mask.accepted = i;
    636                         return true;
    637                 }
    638         }
    639 
    640         // No function matched
    641         return false;
    642618}
    643619
     
    668644}
    669645
     646forall(dtype T | sized( T ))
     647static inline short insert_unique( T ** array, short & size, T * val ) {
     648        if( !val ) return size;
     649
     650        for(int i = 0; i <= size; i++) {
     651                if( array[i] == val ) return size;
     652        }
     653
     654        array[size] = val;
     655        size = size + 1;
     656        return size;
     657}
     658
    670659static inline short count_max( const __waitfor_mask_t & mask ) {
    671660        short max = 0;
     
    677666
    678667static inline short aggregate( monitor_desc ** storage, const __waitfor_mask_t & mask ) {
    679         #warning function not implemented
    680         return 0;
    681 }
    682 
    683 static inline void set_mask( monitor_desc ** storage, short count, const __waitfor_mask_t & mask ) {
    684         for(int i = 0; i < count; i++) {
    685                 storage[i]->mask = mask;
    686         }
    687 }
    688 
     668        short size = 0;
     669        for( int i = 0; i < mask.size; i++ ) {
     670                for( int j = 0; j < mask.clauses[i].size; j++) {
     671                        insert_unique( storage, size, mask.clauses[i].list[j] );
     672                }
     673        }
     674        qsort( storage, size );
     675        return size;
     676}
    689677
    690678void ?{}( __condition_blocked_queue_t & this ) {
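
For readers skimming the monitor.c hunks above: insert_unique, previously specific to thread_desc, is now a forall-generic helper, and the previously unimplemented aggregate() uses it to flatten a waitfor mask into a deduplicated, sorted array of monitors. A rough plain-C equivalent, with hypothetical stand-in types (monitor_t, clause_t, mask_t are illustrative, not the libcfa API):

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct monitor monitor_t;        /* opaque stand-in for monitor_desc */

    typedef struct {
        monitor_t ** list;                   /* monitors named by one waitfor clause */
        int          size;
    } clause_t;                              /* stand-in for __acceptable_t */

    typedef struct {
        clause_t * clauses;
        int        size;
    } mask_t;                                /* stand-in for __waitfor_mask_t */

    /* Append val only if it is non-null and not already present; returns the new size. */
    static short insert_unique( monitor_t ** array, short * size, monitor_t * val ) {
        if( !val ) return *size;
        for( int i = 0; i < *size; i++ ) {
            if( array[i] == val ) return *size;
        }
        array[ (*size)++ ] = val;
        return *size;
    }

    /* Compare monitors by address; used only to give the array a stable order. */
    static int cmp_monitor( const void * a, const void * b ) {
        uintptr_t lhs = (uintptr_t)*(monitor_t * const *)a;
        uintptr_t rhs = (uintptr_t)*(monitor_t * const *)b;
        return (lhs > rhs) - (lhs < rhs);
    }

    /* Flatten every monitor named by the mask into storage, deduplicated and sorted.
     * The caller must size storage for the worst case (cf. count_max in the diff). */
    static short aggregate( monitor_t ** storage, const mask_t * mask ) {
        short size = 0;
        for( int i = 0; i < mask->size; i++ ) {
            for( int j = 0; j < mask->clauses[i].size; j++ ) {
                insert_unique( storage, &size, mask->clauses[i].list[j] );
            }
        }
        qsort( storage, size, sizeof(monitor_t *), cmp_monitor );
        return size;
    }

Sorting by address mirrors the qsort( storage, size ) call in the diff and is the usual way to give multi-monitor acquisition a consistent order; whether libcfa's polymorphic qsort applies exactly that ordering here is an assumption of this sketch.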