Timestamp:
Apr 19, 2022, 3:00:04 PM
Author:
m3zulfiq <m3zulfiq@…>
Branches:
ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
Children:
5b84a321
Parents:
ba897d21, bb7c77d
Note: this is a merge changeset; the changes displayed below correspond to the merge itself, not to the full set of changes relative to either parent (see the parent revisions above for those).
Message:

added benchmark and evaluations chapter to thesis

File:
1 edited

Legend:

  (no prefix) unmodified
  +           added
  -           removed
  • libcfa/src/concurrency/kernel.cfa

--- libcfa/src/concurrency/kernel.cfa (ba897d21)
+++ libcfa/src/concurrency/kernel.cfa (2e9b59b)

 // #define __CFA_DEBUG_PRINT_RUNTIME_CORE__

+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
+
 //C Includes
 #include <errno.h>
…
 #include <signal.h>
 #include <unistd.h>
+
 extern "C" {
         #include <sys/eventfd.h>
…

 //CFA Includes
-#include "kernel_private.hfa"
+#include "kernel/private.hfa"
 #include "preemption.hfa"
 #include "strstream.hfa"
…
 #define __CFA_INVOKE_PRIVATE__
 #include "invoke.h"
+#pragma GCC diagnostic pop

 #if !defined(__CFA_NO_STATISTICS__)
…
 static void __wake_one(cluster * cltr);

-static void idle_sleep(processor * proc, io_future_t & future, iovec & iov);
+static void idle_sleep(processor * proc);
 static bool mark_idle (__cluster_proc_list & idles, processor & proc);
 static void mark_awake(__cluster_proc_list & idles, processor & proc);

-extern void __cfa_io_start( processor * );
-extern bool __cfa_io_drain( processor * );
-extern bool __cfa_io_flush( processor *, int min_comp );
-extern void __cfa_io_stop ( processor * );
-static inline bool __maybe_io_drain( processor * );
+extern bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1)));
+extern bool __cfa_io_flush( processor * ) __attribute__((nonnull (1)));
+extern void __cfa_io_idle( processor * ) __attribute__((nonnull (1)));

 #if defined(CFA_WITH_IO_URING_IDLE)
…
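The replacement declarations above tag their processor argument with __attribute__((nonnull (1))), so GCC can flag NULL call sites at compile time and optimize the callee assuming a valid pointer. A minimal sketch of the mechanism in plain C (the function name is hypothetical, not part of the CFA runtime):

#include <stddef.h>

/* Declaring parameter 1 nonnull lets GCC warn at call sites that pass NULL
   (with -Wnonnull, on by default) and optimize the body assuming the pointer
   is valid. */
extern void ring_drain(int * ring) __attribute__((nonnull (1)));

void ring_drain(int * ring) {
        *ring = 0;   /* safe to dereference: callers are not allowed to pass NULL */
}

/* ring_drain(NULL);  // GCC: warning: argument 1 null where non-null expected */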
         verify(this);

-        io_future_t future; // used for idle sleep when io_uring is present
-        future.self.ptr = 1p;  // mark it as already fulfilled so we know if there is a pending request or not
-        eventfd_t idle_val;
-        iovec idle_iovec = { &idle_val, sizeof(idle_val) };
-
-        __cfa_io_start( this );
+        /* paranoid */ verify( this->idle_wctx.ftr   != 0p );
+        /* paranoid */ verify( this->idle_wctx.rdbuf != 0p );
+
+        // used for idle sleep when io_uring is present
+        // mark it as already fulfilled so we know if there is a pending request or not
+        this->idle_wctx.ftr->self.ptr = 1p;

         __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
…
                 for() {
                         // Check if there is pending io
-                        __maybe_io_drain( this );
+                        __cfa_io_drain( this );

                         // Try to get the next thread
…

                         if( !readyThread ) {
+                                // there is no point in holding submissions if we are idle
                                 __IO_STATS__(true, io.flush.idle++; )
-                                __cfa_io_flush( this, 0 );
+                                __cfa_io_flush( this );
+
+                                // drain again in case something showed up
+                                __cfa_io_drain( this );

                                 readyThread = __next_thread( this->cltr );
…

                         if( !readyThread ) for(5) {
+                                readyThread = __next_thread_slow( this->cltr );
+
+                                if( readyThread ) break;
+
+                                // It's unlikely we still have I/O to submit, but the arbiter could
                                 __IO_STATS__(true, io.flush.idle++; )
-
-                                readyThread = __next_thread_slow( this->cltr );
-
-                                if( readyThread ) break;
-
-                                __cfa_io_flush( this, 0 );
+                                __cfa_io_flush( this );
+
+                                // drain again in case something showed up
+                                __cfa_io_drain( this );
                         }

…
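The hunks above rework the scheduler's idle path: completions are drained first, and when no thread is ready the loop flushes pending submissions (no point holding them while idle), then drains again before retrying. A compilable C sketch of that shape, with stub functions standing in for the CFA runtime internals (all names here are illustrative):

#include <stddef.h>

/* Stubs standing in for the runtime internals (hypothetical names). */
static void drain_completions(void) {}
static void flush_submissions(void) {}
static void * next_thread(void) { return NULL; }

static void * find_work(void) {
        drain_completions();              /* check for pending io first        */
        void * t = next_thread();
        if( !t ) {
                flush_submissions();      /* no point holding them while idle  */
                drain_completions();      /* something may have completed      */
                t = next_thread();
        }
        return t;
}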
                                 }

-                                idle_sleep( this, future, idle_iovec );
+                                idle_sleep( this );

                                 // We were woken up, remove self from idle
…
                         if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

-                        if(this->io.pending && !this->io.dirty) {
+                        if(__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
                                 __IO_STATS__(true, io.flush.dirty++; )
-                                __cfa_io_flush( this, 0 );
+                                __cfa_io_flush( this );
                         }
                 }
…
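The pending/dirty test above is rewritten from plain field reads into explicit __atomic_load_n(..., __ATOMIC_RELAXED) calls; relaxed ordering is enough because a stale answer only delays or repeats a flush. A standalone C sketch of the same pattern (struct and field names are illustrative; exact flag semantics in the runtime are omitted):

#include <stdbool.h>

struct io_state {
        bool pending;   /* maintained elsewhere by the runtime */
        bool dirty;     /* ditto                               */
};

/* Both flags can be written concurrently by other threads, so read them with
   atomic builtins; relaxed ordering suffices for a heuristic flush decision. */
static bool should_flush( struct io_state * io ) {
        bool pending = __atomic_load_n( &io->pending, __ATOMIC_RELAXED );
        bool dirty   = __atomic_load_n( &io->dirty,   __ATOMIC_RELAXED );
        return pending && !dirty;
}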
                 __cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
         }
-
-        for(int i = 0; !available(future); i++) {
-                if(i > 1000) __cfaabi_dbg_write( "ERROR: kernel has bin spinning on a flush after exit loop.\n", 60);
-                __cfa_io_flush( this, 1 );
-        }
-
-        __cfa_io_stop( this );

         post( this->terminated );
…

         int fd = 1;
-        if( __atomic_load_n(&fdp->fd, __ATOMIC_SEQ_CST) != 1 ) {
-                fd = __atomic_exchange_n(&fdp->fd, 1, __ATOMIC_RELAXED);
+        if( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
+                fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED);
         }

         switch(fd) {
+                __attribute__((unused)) int ret;
         case 0:
                 // If the processor isn't ready to sleep then the exchange will already wake it up
…
                 // If the processor was ready to sleep, we need to wake it up with an actual write
                 val = 1;
-                eventfd_write( fd, val );
+                ret = eventfd_write( fd, val );
+                /* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );

         #if !defined(__CFA_NO_STATISTICS__)
…
         __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);

-        this->idle_wctx.fd = 1;
+        this->idle_wctx.sem = 1;
+
+        this->idle_wctx.wake__time = rdtscl();

         eventfd_t val;
         val = 1;
-        eventfd_write( this->idle_fd, val );
-
-        /* paranoid */ verify( ! __preemption_enabled() );
-}
-
-static void idle_sleep(processor * this, io_future_t & future, iovec & iov) {
+        __attribute__((unused)) int ret = eventfd_write( this->idle_wctx.evfd, val );
+
+        /* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );
+        /* paranoid */ verify( ! __preemption_enabled() );
+}
+
+static void idle_sleep(processor * this) {
+        /* paranoid */ verify( this->idle_wctx.evfd != 1 );
+        /* paranoid */ verify( this->idle_wctx.evfd != 2 );
+
         // Tell everyone we are ready to go do sleep
         for() {
-                int expected = this->idle_wctx.fd;
+                int expected = this->idle_wctx.sem;

                 // Someone already told us to wake-up! No time for a nap.
…

                 // Try to mark that we are going to sleep
-                if(__atomic_compare_exchange_n(&this->idle_wctx.fd, &expected, this->idle_fd, false,  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
+                if(__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false,  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
                         // Every one agreed, taking a nap
                         break;
…
                 {
                         eventfd_t val;
-                        ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
+                        ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
                         if(ret < 0) {
                                 switch((int)errno) {
…
                 #endif
         #else
-                // Do we already have a pending read
-                if(available(future)) {
-                        // There is no pending read, we need to add one
-                        reset(future);
-
-                        __kernel_read(this, future, iov, this->idle_fd );
-                }
-
-                __cfa_io_flush( this, 1 );
+                __cfa_io_idle( this );
         #endif
 }
…
         __STATS__(true, ready.sleep.halts++; )

-        proc.idle_wctx.fd = 0;
+        proc.idle_wctx.sem = 0;

         /* paranoid */ verify( ! __preemption_enabled() );
…
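Taken together, the hunks above rename idle_wctx.fd to idle_wctx.sem and route the actual event-fd accesses through idle_wctx.evfd: the sem word holds 0 while running, 1 once a wake-up has been requested, or the sleeper's eventfd while it is parked. A self-contained C sketch of that handshake (the wctx/park/unpark names are illustrative and error handling is elided; this is a simplification, not the runtime's exact code):

#include <stdbool.h>
#include <unistd.h>
#include <sys/eventfd.h>

struct wctx {
        int sem;    /* 0 = running, 1 = wake requested, otherwise = eventfd of a parked sleeper */
        int evfd;   /* this processor's eventfd, created once with eventfd(0, 0) */
};

static void park( struct wctx * w ) {
        for(;;) {
                int expected = w->sem;
                if( expected == 1 ) break;   /* someone already told us to wake up: no nap */

                /* publish our eventfd so a waker knows a real write is needed */
                if( __atomic_compare_exchange_n( &w->sem, &expected, w->evfd, false,
                                                 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
                        eventfd_t val;
                        ssize_t r = read( w->evfd, &val, sizeof(val) );  /* blocks until unpark writes */
                        (void)r;
                        break;
                }
        }
        w->sem = 0;   /* back to running */
}

static void unpark( struct wctx * w ) {
        int fd = 1;
        if( __atomic_load_n( &w->sem, __ATOMIC_SEQ_CST ) != 1 )
                fd = __atomic_exchange_n( &w->sem, 1, __ATOMIC_RELAXED );

        /* 0: the sleeper had not committed, the exchange alone wakes it;
           1: someone else already woke it; otherwise fd is the parked eventfd */
        if( fd != 0 && fd != 1 )
                (void)eventfd_write( fd, 1 );
}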
 #endif

-static inline bool __maybe_io_drain( processor * proc ) {
-        bool ret = false;
-        #if defined(CFA_HAVE_LINUX_IO_URING_H)
-                __cfadbg_print_safe(runtime_core, "Kernel : core %p checking io for ring %d\n", proc, proc->io.ctx->fd);
-
-                // Check if we should drain the queue
-                $io_context * ctx = proc->io.ctx;
-                unsigned head = *ctx->cq.head;
-                unsigned tail = *ctx->cq.tail;
-                if(head == tail) return false;
-                ready_schedule_lock();
-                ret = __cfa_io_drain( proc );
-                ready_schedule_unlock();
-        #endif
-        return ret;
-}
+

 //-----------------------------------------------------------------------------
…
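The deleted __maybe_io_drain wrapper existed to skip taking the scheduler lock when the io_uring completion queue was empty; that head/tail test presumably now lives inside __cfa_io_drain itself. A minimal C sketch of the emptiness check (field layout simplified from the mmap'd ring; names are illustrative):

#include <stdbool.h>

/* The kernel publishes the producer tail and the consumer advances the head;
   both indices live in memory shared with the kernel, hence the volatile loads. */
struct cq_ring {
        volatile unsigned * head;
        volatile unsigned * tail;
};

static bool cq_has_completions( struct cq_ring * cq ) {
        return *cq->head != *cq->tail;   /* equal indices mean nothing to drain */
}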
         void print_stats_now( cluster & this, int flags ) {
                 crawl_cluster_stats( this );
-                __print_stats( this.stats, this.print_stats, "Cluster", this.name, (void*)&this );
+                __print_stats( this.stats, flags, "Cluster", this.name, (void*)&this );
         }
 #endif