Timestamp:
Apr 25, 2021, 10:11:27 PM
Author:
Peter A. Buhr <pabuhr@…>
Branches:
arm-eh, jacob/cs343-translation, master, new-ast-unique-expr
Children:
24711a3, 5456537
Parents:
9b71679 (diff), c323837 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

File:
1 edited

Legend:

  unchanged lines: leading space
  removed lines:   prefixed with '-'
  added lines:     prefixed with '+'
  • libcfa/src/concurrency/kernel.cfa

--- r9b71679
+++ raec68b6
@@ -110,4 +110,5 @@
 static $thread * __next_thread(cluster * this);
 static $thread * __next_thread_slow(cluster * this);
+static inline bool __must_unpark( $thread * thrd ) __attribute((nonnull(1)));
 static void __run_thread(processor * this, $thread * dst);
 static void __wake_one(cluster * cltr);
@@ -118,11 +119,23 @@
 
 extern void __cfa_io_start( processor * );
-extern void __cfa_io_drain( processor * );
+extern bool __cfa_io_drain( processor * );
 extern void __cfa_io_flush( processor * );
 extern void __cfa_io_stop ( processor * );
-static inline void __maybe_io_drain( processor * );
+static inline bool __maybe_io_drain( processor * );
 
 extern void __disable_interrupts_hard();
 extern void __enable_interrupts_hard();
+
+static inline void __disable_interrupts_checked() {
+	/* paranoid */ verify( __preemption_enabled() );
+	disable_interrupts();
+	/* paranoid */ verify( ! __preemption_enabled() );
+}
+
+static inline void __enable_interrupts_checked( bool poll = true ) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	enable_interrupts( poll );
+	/* paranoid */ verify( __preemption_enabled() );
+}
 
 //=============================================================================================
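The hunk above introduces __disable_interrupts_checked and __enable_interrupts_checked, which fold the paranoid preemption-state assertions into the interrupt toggles so that later call sites (park, force_yield, the idle-wake path) no longer repeat them by hand. The following is only an illustrative, self-contained sketch of that wrapper pattern in plain C, with assert() standing in for the runtime's verify() and hypothetical extern declarations standing in for the runtime's primitives:

  #include <assert.h>
  #include <stdbool.h>

  /* Hypothetical stand-ins for the runtime's primitives. */
  extern bool preemption_enabled( void );
  extern void raw_disable_interrupts( void );
  extern void raw_enable_interrupts( bool poll );

  /* Check the expected preemption state on both sides of the toggle. */
  static inline void disable_interrupts_checked( void ) {
  	assert( preemption_enabled() );     /* must enter with preemption on  */
  	raw_disable_interrupts();
  	assert( ! preemption_enabled() );   /* must leave with preemption off */
  }

  static inline void enable_interrupts_checked( bool poll ) {
  	assert( ! preemption_enabled() );
  	raw_enable_interrupts( poll );
  	assert( preemption_enabled() );
  }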
     
@@ -336,5 +349,5 @@
 		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
 			// The thread was preempted, reschedule it and reset the flag
-			__schedule_thread( thrd_dst );
+			schedule_thread$( thrd_dst );
 			break RUNNING;
 		}
     
@@ -426,4 +439,5 @@
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( kernelTLS().this_proc_id );
+	/* paranoid */ verify( ready_schedule_islocked());
 	/* paranoid */ verify( thrd );
 	/* paranoid */ verify( thrd->state != Halted );
     
@@ -444,13 +458,12 @@
 	struct cluster * cl = thrd->curr_cluster;
 
-	ready_schedule_lock();
-		// push the thread to the cluster ready-queue
-		push( cl, thrd );
-
-		// variable thrd is no longer safe to use
-
-		// wake the cluster using the save variable.
-		__wake_one( cl );
-	ready_schedule_unlock();
+	// push the thread to the cluster ready-queue
+	push( cl, thrd );
+
+	// variable thrd is no longer safe to use
+	thrd = 0xdeaddeaddeaddeadp;
+
+	// wake the cluster using the save variable.
+	__wake_one( cl );
 
 	#if !defined(__CFA_NO_STATISTICS__)
     
@@ -465,5 +478,12 @@
 	#endif
 
-	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify( ready_schedule_islocked());
+	/* paranoid */ verify( ! __preemption_enabled() );
+}
+
+void schedule_thread$( $thread * thrd ) {
+	ready_schedule_lock();
+		__schedule_thread( thrd );
+	ready_schedule_unlock();
 }
 
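These two hunks pull the ready-queue locking out of __schedule_thread: the inner routine now asserts that the ready-scheduler lock is already held (verify( ready_schedule_islocked() )) and poisons the thrd pointer after pushing it, while the new schedule_thread$ wrapper takes and releases the lock around the call. A sketch of the same "caller-holds-lock inner function plus locking wrapper" layering, with hypothetical toy names and a pthread mutex standing in for the runtime's ready-scheduler lock:

  #include <pthread.h>

  struct toy_thread;                     /* opaque for this sketch          */
  static pthread_mutex_t ready_lock = PTHREAD_MUTEX_INITIALIZER;
  extern void push_ready( struct toy_thread * );
  extern void wake_one( void );

  /* Inner routine: the caller must already hold ready_lock. */
  static void schedule_locked( struct toy_thread * thrd ) {
  	push_ready( thrd );
  	/* poison the local pointer, mirroring thrd = 0xdeaddeaddeaddeadp above */
  	thrd = (struct toy_thread *)0xdeaddeaddeaddead;
  	wake_one();
  }

  /* Public wrapper: owns the lock acquire/release. */
  void schedule( struct toy_thread * thrd ) {
  	pthread_mutex_lock( &ready_lock );
  	schedule_locked( thrd );
  	pthread_mutex_unlock( &ready_lock );
  }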
     
@@ -496,32 +516,14 @@
 }
 
-void unpark( $thread * thrd ) {
-	if( !thrd ) return;
-
+static inline bool __must_unpark( $thread * thrd ) {
 	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
 	switch(old_ticket) {
 		case TICKET_RUNNING:
 			// Wake won the race, the thread will reschedule/rerun itself
-			break;
+			return false;
 		case TICKET_BLOCKED:
 			/* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
 			/* paranoid */ verify( thrd->state == Blocked );
-
-			{
-				/* paranoid */ verify( publicTLS_get(this_proc_id) );
-				disable_interrupts();
-
-				/* paranoid */ verify( ! __preemption_enabled() );
-
-				// Wake lost the race,
-				__schedule_thread( thrd );
-
-				/* paranoid */ verify( ! __preemption_enabled() );
-
-				enable_interrupts_noPoll();
-				/* paranoid */ verify( publicTLS_get(this_proc_id) );
-			}
-
-			break;
+			return true;
 		default:
 			// This makes no sense, something is wrong abort
     
@@ -530,15 +532,20 @@
 }
 
+void unpark( $thread * thrd ) {
+	if( !thrd ) return;
+
+	if(__must_unpark(thrd)) {
+		disable_interrupts();
+			// Wake lost the race,
+			schedule_thread$( thrd );
+		enable_interrupts(false);
+	}
+}
+
 void park( void ) {
-	/* paranoid */ verify( __preemption_enabled() );
-	disable_interrupts();
-	/* paranoid */ verify( ! __preemption_enabled() );
-	/* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
-
-	returnToKernel();
-
-	/* paranoid */ verify( ! __preemption_enabled() );
-	enable_interrupts( __cfaabi_dbg_ctx );
-	/* paranoid */ verify( __preemption_enabled() );
+	__disable_interrupts_checked();
+		/* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
+		returnToKernel();
+	__enable_interrupts_checked();
 
 }
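These two hunks split unpark in two: __must_unpark performs the atomic ticket handshake and only reports whether the waker lost the race (TICKET_BLOCKED), while unpark itself does the rescheduling inside a disable/enable-interrupts window, and park shrinks to the checked-wrapper form. Below is a simplified, self-contained illustration of the same fetch-and-add ticket race using C11 atomics; the toy types, the enum encoding, and the function name are hypothetical, not the CFA runtime:

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdlib.h>

  enum { TICKET_RUNNING = 0, TICKET_BLOCKED = 1 };   /* toy encoding */

  struct toy_thread {
  	atomic_int ticket;                             /* park/unpark handshake */
  	/* ... intrusive links, state, etc. ... */
  };

  /* Returns true only when the target already blocked and must be rescheduled. */
  static bool must_unpark( struct toy_thread * t ) {
  	int old = atomic_fetch_add( &t->ticket, 1 );
  	switch( old ) {
  	case TICKET_RUNNING:   /* wake won the race: the thread never blocks  */
  		return false;
  	case TICKET_BLOCKED:   /* wake lost the race: caller reschedules it   */
  		return true;
  	default:               /* double unpark or corrupted ticket           */
  		abort();
  	}
  }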
     
@@ -580,26 +587,19 @@
 // KERNEL ONLY
 bool force_yield( __Preemption_Reason reason ) {
-	/* paranoid */ verify( __preemption_enabled() );
-	disable_interrupts();
-	/* paranoid */ verify( ! __preemption_enabled() );
-
-	$thread * thrd = kernelTLS().this_thread;
-	/* paranoid */ verify(thrd->state == Active);
-
-	// SKULLDUGGERY: It is possible that we are preempting this thread just before
-	// it was going to park itself. If that is the case and it is already using the
-	// intrusive fields then we can't use them to preempt the thread
-	// If that is the case, abandon the preemption.
-	bool preempted = false;
-	if(thrd->link.next == 0p) {
-		preempted = true;
-		thrd->preempted = reason;
-		returnToKernel();
-	}
-
-	/* paranoid */ verify( ! __preemption_enabled() );
-	enable_interrupts_noPoll();
-	/* paranoid */ verify( __preemption_enabled() );
-
+	__disable_interrupts_checked();
+		$thread * thrd = kernelTLS().this_thread;
+		/* paranoid */ verify(thrd->state == Active);
+
+		// SKULLDUGGERY: It is possible that we are preempting this thread just before
+		// it was going to park itself. If that is the case and it is already using the
+		// intrusive fields then we can't use them to preempt the thread
+		// If that is the case, abandon the preemption.
+		bool preempted = false;
+		if(thrd->link.next == 0p) {
+			preempted = true;
+			thrd->preempted = reason;
+			returnToKernel();
+		}
+	__enable_interrupts_checked( false );
 	return preempted;
 }
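The force_yield body is rewrapped with the new checked helpers; the pre-existing guard that abandons the preemption when thrd->link.next is non-null relies on the intrusive-list idiom that a free node carries a null link. A tiny illustration of that idiom, with hypothetical toy types rather than the runtime's thread links:

  #include <stdbool.h>
  #include <stddef.h>

  struct toy_node {
  	struct toy_node * next;   /* intrusive link: NULL while not enqueued */
  };

  /* Safe to reuse the link for a preemption enqueue only when it is free. */
  static bool link_is_free( const struct toy_node * n ) {
  	return n->next == NULL;
  }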
     
@@ -646,10 +646,10 @@
 	__cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
 
-	disable_interrupts();
+	__disable_interrupts_checked();
 		/* paranoid */ verify( ! __preemption_enabled() );
 		eventfd_t val;
 		val = 1;
 		eventfd_write( this->idle, val );
-	enable_interrupts( __cfaabi_dbg_ctx );
+	__enable_interrupts_checked();
 }
 
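The idle-wake hunk keeps the eventfd_write wakeup and only swaps the manual interrupt toggles for the checked wrappers. For reference, a minimal standalone example of the Linux eventfd wake/wait pair this path relies on (single process, error handling elided):

  #include <sys/eventfd.h>
  #include <unistd.h>

  int main( void ) {
  	int efd = eventfd( 0, 0 );        /* counter starts at zero             */

  	eventfd_t val = 1;
  	eventfd_write( efd, val );        /* waker side: bump the counter       */

  	eventfd_read( efd, &val );        /* sleeper side: blocks until nonzero */
  	close( efd );
  	return 0;
  }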
     
@@ -743,5 +743,5 @@
 #endif
 
-static inline void __maybe_io_drain( processor * proc ) {
+static inline bool __maybe_io_drain( processor * proc ) {
 	#if defined(CFA_HAVE_LINUX_IO_URING_H)
 		__cfadbg_print_safe(runtime_core, "Kernel : core %p checking io for ring %d\n", proc, proc->io.ctx->fd);
@@ -751,5 +751,6 @@
 		unsigned head = *ctx->cq.head;
 		unsigned tail = *ctx->cq.tail;
-		if(head != tail) __cfa_io_drain( proc );
+		if(head == tail) return false;
+		return __cfa_io_drain( proc );
 	#endif
 }
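__maybe_io_drain now reports whether anything was actually drained; its early-out compares the completion queue's head and tail indices, the usual io_uring-style emptiness test. A sketch of that test with hypothetical toy fields mirroring *ctx->cq.head and *ctx->cq.tail:

  #include <stdbool.h>

  struct toy_cq { unsigned head, tail; };

  /* Equal indices mean an empty ring: nothing to drain, no work found. */
  static bool has_completions( const struct toy_cq * cq ) {
  	return cq->head != cq->tail;
  }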