Changeset 4069faad for libcfa


Timestamp: May 1, 2020, 12:37:30 PM
Author: Thierry Delisle <tdelisle@…>
Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children: d45ed83
Parents: 9987d79
Message:

Fix error in benchmark where the wrong fd was used.
Changed benchmark to use a separate cluster for I/O.
Changed some debug prints to use the new versions with groups.
Fixed halting race condition leading to deadlock.

Location: libcfa
Files: 4 edited

Legend: unmodified lines are shown as unprefixed context; lines removed in r4069faad are prefixed with '-', lines added are prefixed with '+'.
  • libcfa/prelude/defines.hfa.in

    r9987d79 → r4069faad

 #undef HAVE_LINUX_IO_URING_H

-#define __CFA_IO_POLLING_USER__
+// #define __CFA_IO_POLLING_USER__
 // #define __CFA_IO_POLLING_KERNEL__
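
The default is flipped here: the user-level polling mode is commented out, so builds fall back to the slow, pthread-based poller unless they opt back in. These defines are consumed through #if defined(...) blocks like the ones visible in io.cfa below. The following plain-C sketch is illustrative only (the helper name and messages are invented); it shows the general shape of such a compile-time switch:

    #include <stdio.h>

    /* Build-time switches, normally set in defines.hfa.in:                    */
    /* #define __CFA_IO_POLLING_USER__    -- enable the user-level fast poller */
    /* #define __CFA_IO_POLLING_KERNEL__  -- kernel-side polling variant       */

    static void start_io_pollers(void) {        /* hypothetical helper */
    #if defined(__CFA_IO_POLLING_USER__)
        /* Fast user-level poller, with the slow pthread poller as a fallback. */
        printf("starting fast (user-level) and slow (pthread) pollers\n");
    #else
        /* Default after this changeset: only the slow, dedicated pthread poller. */
        printf("starting slow (pthread) poller only\n");
    #endif
    }

    int main(void) {
        start_io_pollers();
        return 0;
    }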
  • libcfa/src/bits/locks.hfa

    r9987d79 → r4069faad

     struct __bin_sem_t {
-        bool                    signaled;
         pthread_mutex_t         lock;
         pthread_cond_t          cond;
+        int                     val;
     };

     static inline void ?{}(__bin_sem_t & this) with( this ) {
-        signaled = false;
         pthread_mutex_init(&lock, NULL);
         pthread_cond_init (&cond, NULL);
+        val = 0;
     }

…

         verify(__cfaabi_dbg_in_kernel());
         pthread_mutex_lock(&lock);
-            if(!signaled) {   // this must be a loop, not if!
+            while(val < 1) {
                 pthread_cond_wait(&cond, &lock);
             }
-            signaled = false;
+            val -= 1;
         pthread_mutex_unlock(&lock);
     }

     static inline bool post(__bin_sem_t & this) with( this ) {
+        bool needs_signal = false;
+
         pthread_mutex_lock(&lock);
-            bool needs_signal = !signaled;
-            signaled = true;
+            if(val < 1) {
+                val += 1;
+                pthread_cond_signal(&cond);
+                needs_signal = true;
+            }
         pthread_mutex_unlock(&lock);
-
-        if (needs_signal) pthread_cond_signal(&cond);

         return needs_signal;
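
The change above replaces the boolean signaled flag with an integer count, turns the wait into a while loop (so a spurious wakeup from pthread_cond_wait can no longer be mistaken for a post), and issues the pthread_cond_signal while the mutex is still held instead of after releasing it. For reference, a minimal self-contained C sketch of the resulting pattern (the type and function names are illustrative, not the libcfa API):

    #include <pthread.h>
    #include <stdbool.h>

    struct bin_sem {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             val;
    };

    void bin_sem_init(struct bin_sem * s) {
        pthread_mutex_init(&s->lock, NULL);
        pthread_cond_init (&s->cond, NULL);
        s->val = 0;
    }

    void bin_sem_wait(struct bin_sem * s) {
        pthread_mutex_lock(&s->lock);
        while (s->val < 1) {                    /* loop, not if: tolerates spurious wakeups */
            pthread_cond_wait(&s->cond, &s->lock);
        }
        s->val -= 1;
        pthread_mutex_unlock(&s->lock);
    }

    bool bin_sem_post(struct bin_sem * s) {
        bool needs_signal = false;
        pthread_mutex_lock(&s->lock);
        if (s->val < 1) {                       /* saturate at 1: binary semaphore */
            s->val += 1;
            pthread_cond_signal(&s->cond);      /* signal while still holding the lock */
            needs_signal = true;
        }
        pthread_mutex_unlock(&s->lock);
        return needs_signal;
    }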
  • libcfa/src/concurrency/io.cfa

    r9987d79 → r4069faad

 //

+// #define __CFA_DEBUG_PRINT_IO__
+
 #include "kernel.hfa"

…

     void __kernel_io_finish_start( cluster & this ) {
         #if defined(__CFA_IO_POLLING_USER__)
-            (this.io.poller.fast){ this };
+            __cfadbg_print_safe(io, "Kernel I/O : Creating fast poller for cluter %p\n", &this);
+            (this.io.poller.fast){ "Fast IO Poller", this };
             __thrd_start( this.io.poller.fast, main );
         #endif

         // Create the poller thread
+        __cfadbg_print_safe(io, "Kernel I/O : Creating slow poller for cluter %p\n", &this);
         this.io.poller.slow.stack = __create_pthread( &this.io.poller.slow.kthrd, __io_poller_slow, &this );
     }

     void __kernel_io_prepare_stop( cluster & this ) {
+        __cfadbg_print_safe(io, "Kernel I/O : Stopping pollers for cluster\n", &this);
         // Notify the poller thread of the shutdown
         __atomic_store_n(&this.io.done, true, __ATOMIC_SEQ_CST);

…

         free( this.io.poller.slow.stack );

+        __cfadbg_print_safe(io, "Kernel I/O : Slow poller stopped for cluster\n", &this);
+
         #if defined(__CFA_IO_POLLING_USER__)
             // unpark the fast io_poller
…

             ^(this.io.poller.fast){};
+
+            __cfadbg_print_safe(io, "Kernel I/O : Fast poller stopped for cluster\n", &this);
         #endif
     }

…

             struct io_user_data * data = (struct io_user_data *)cqe.user_data;
-            // __cfaabi_bits_print_safe( STDERR_FILENO, "Performed reading io cqe %p, result %d for %p\n", data, cqe.res, data->thrd );
+            __cfadbg_print_safe( io, "Kernel I/O : Performed reading io cqe %p, result %d for %p\n", data, cqe.res, data->thrd );

             data->result = cqe.res;

…

                 int count = __drain_io( ring, &mask, 1, true );
                 if(count > 0) {
+                    __cfadbg_print_safe(io, "Kernel I/O : Moving to ring %p to fast poller\n", &ring);
                     __unpark( &ring.poller.fast.thrd __cfaabi_dbg_ctx2 );
                     wait( ring.poller.sem );

…

                 else {
                     // We didn't get anything baton pass to the slow poller
+                    __cfadbg_print_safe(io, "Kernel I/O : Moving to ring %p to slow poller\n", &this.ring);
                     post( this.ring->poller.sem );
                     park( __cfaabi_dbg_ctx );

…

         // Submit however, many entries need to be submitted
         int ret = syscall( __NR_io_uring_enter, ring.fd, 1, 0, 0, 0p, 0);
-        // __cfaabi_bits_print_safe( STDERR_FILENO, "Performed io_submit, returned %d\n", ret );
         if( ret < 0 ) {
             switch((int)errno) {

…

         // Make sure that idx was submitted
         // Be careful to not get false positive if we cycled the entire list or that someone else submitted for us
+        __cfadbg_print_safe( io, "Kernel I/O : Performed io_submit for %p, returned %d\n", active_thread(), ret );
     }
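
Throughout io.cfa the ad-hoc __cfaabi_bits_print_safe(STDERR_FILENO, ...) calls are replaced by __cfadbg_print_safe(io, ...), where the first argument names a debug group that each file opts into with a define such as __CFA_DEBUG_PRINT_IO__ near its top. The real macro definition is not part of this changeset, so the following is only a guess at how a group-gated debug print can be assembled; the macro and switch names below are hypothetical:

    #include <stdio.h>

    /* One opt-in switch per group, mirroring __CFA_DEBUG_PRINT_IO__ and
       __CFA_DEBUG_PRINT_RUNTIME_CORE__ in the diffs above.                */
    #if defined(DEBUG_PRINT_IO)
        #define DBG_GROUP_io 1
    #else
        #define DBG_GROUP_io 0
    #endif

    #if defined(DEBUG_PRINT_RUNTIME_CORE)
        #define DBG_GROUP_runtime_core 1
    #else
        #define DBG_GROUP_runtime_core 0
    #endif

    /* The group name is pasted into the guard, so prints for disabled groups
       are optimized away while their format strings stay type-checked.     */
    #define dbg_print_safe(group, ...) \
        do { if (DBG_GROUP_##group) fprintf(stderr, __VA_ARGS__); } while (0)

    int main(void) {
        dbg_print_safe(io, "Kernel I/O : example message %d\n", 42);
        dbg_print_safe(runtime_core, "Kernel : example message\n");
        return 0;
    }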
  • libcfa/src/concurrency/kernel.cfa

    r9987d79 → r4069faad

 #define __cforall_thread__
+// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__

 //C Includes

…

 #include "invoke.h"

+
 //-----------------------------------------------------------------------------
 // Some assembly required

…

     idle{};

-    __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this);
+    __cfadbg_print_safe(runtime_core, "Kernel : Starting core %p\n", &this);

     this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );

-    __cfaabi_dbg_print_safe("Kernel : core %p started\n", &this);
+    __cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this);
 }

 void ^?{}(processor & this) with( this ){
     if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
-        __cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);
+        __cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this);

         __atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);

…

     verify(this);

-    __cfaabi_dbg_print_safe("Kernel : core %p starting\n", this);
+    __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);

     doregister(this->cltr, this);

…

         preemption_scope scope = { this };

-        __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);
+        __cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);

         $thread * readyThread = 0p;

…

         }

-        __cfaabi_dbg_print_safe("Kernel : core %p stopping\n", this);
+        __cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
     }

…

     V( this->terminated );

-    __cfaabi_dbg_print_safe("Kernel : core %p terminated\n", this);
+    __cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);

     // HACK : the coroutine context switch expects this_thread to be set

…

     //We now have a proper context from which to schedule threads
-    __cfaabi_dbg_print_safe("Kernel : core %p created (%p, %p)\n", proc, &proc->runner, &ctx);
+    __cfadbg_print_safe(runtime_core, "Kernel : core %p created (%p, %p)\n", proc, &proc->runner, &ctx);

     // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't

…

     // Main routine of the core returned, the core is now fully terminated
-    __cfaabi_dbg_print_safe("Kernel : core %p main ended (%p)\n", proc, &proc->runner);
+    __cfadbg_print_safe(runtime_core, "Kernel : core %p main ended (%p)\n", proc, &proc->runner);

     return 0p;

…

 static void __kernel_startup(void) {
     verify( ! kernelTLS.preemption_state.enabled );
-    __cfaabi_dbg_print_safe("Kernel : Starting\n");
+    __cfadbg_print_safe(runtime_core, "Kernel : Starting\n");

     __page_size = sysconf( _SC_PAGESIZE );

…

     (*mainCluster){"Main Cluster"};

-    __cfaabi_dbg_print_safe("Kernel : Main cluster ready\n");
+    __cfadbg_print_safe(runtime_core, "Kernel : Main cluster ready\n");

     // Start by initializing the main thread

…

     (*mainThread){ &info };

-    __cfaabi_dbg_print_safe("Kernel : Main thread ready\n");
+    __cfadbg_print_safe(runtime_core, "Kernel : Main thread ready\n");

…

         runner{ &this };
-        __cfaabi_dbg_print_safe("Kernel : constructed main processor context %p\n", &runner);
+        __cfadbg_print_safe(runtime_core, "Kernel : constructed main processor context %p\n", &runner);
     }

…

     ^(__cfa_dbg_global_clusters.lock){};

-    __cfaabi_dbg_print_safe("Kernel : Shutdown complete\n");
+    __cfadbg_print_safe(runtime_core, "Kernel : Shutdown complete\n");
 }

…

     // We are ready to sleep
-    __cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);
+    __cfadbg_print_safe(runtime_core, "Kernel : Processor %p ready to sleep\n", this);
     wait( idle );

     // We have woken up
-    __cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);
+    __cfadbg_print_safe(runtime_core, "Kernel : Processor %p woke up and ready to run\n", this);

     // Get ourself off the idle list

…

 static bool __wake_one(cluster * this, __attribute__((unused)) bool force) {
     // if we don't want to force check if we know it's false
-    if( !this->idles.head && !force ) return false;
+    // if( !this->idles.head && !force ) return false;

     // First, lock the cluster idle

…

     // Wake them up
+    __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this->idles.head);
     post( this->idles.head->idle );

…

 // Unconditionnaly wake a thread
 static bool __wake_proc(processor * this) {
+    __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
     return post( this->idle );
 }
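
In __wake_one the unlocked early exit on this->idles.head is commented out, so the function now always takes the cluster idle lock before deciding whether there is a processor to wake. One plausible reading, consistent with the "halting race condition leading to deadlock" in the commit message, is that the unlocked check could observe an empty idle list just before a processor finished enqueuing itself and going to sleep, so the wakeup was skipped and that processor slept indefinitely. A small illustrative C sketch of the locked version of that decision (the types and helper are invented, not the libcfa scheduler):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct proc { struct proc * next; /* plus a per-processor idle semaphore */ };

    struct cluster_idle {
        pthread_mutex_t lock;
        struct proc *   head;           /* idle list, protected by lock */
    };

    bool wake_one(struct cluster_idle * idles, bool force) {
        (void)force;                    /* kept for signature compatibility */
        /* Racy variant removed by the changeset:
           if (!idles->head && !force) return false;
           It can miss a processor that is about to enqueue itself.        */
        bool woke = false;
        pthread_mutex_lock(&idles->lock);
        if (idles->head) {
            idles->head = idles->head->next;    /* pop and post its idle semaphore here */
            woke = true;
        }
        pthread_mutex_unlock(&idles->lock);
        return woke;
    }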