Changeset 78a580d

Timestamp: Mar 22, 2022, 3:28:29 PM (2 years ago)
Author:    Thierry Delisle <tdelisle@…>
Branches:  ADT, ast-experimental, enum, master, pthread-emulation, qualifiedEnum
Children:  ff7b2de
Parents:   8f01ad71

Message:

I/O now updates the timestamps when draining.
Timestamps are not used yet.
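
The change follows one pattern, condensed here from the io.cfa hunks below: remember when the completion queue was last drained, stamp it again after draining, and feed both timestamps into the cluster's per-queue timestamp table. The elided parts (…) are untouched by this changeset; all names come from the diff itself.

    bool __cfa_io_drain( $io_context * ctx, cluster * cltr ) {
        …
        unsigned long long ts_prev = ctx->cq.ts;              // when this cq was last drained
        // … process the available completions …
        unsigned long long ts_next = ctx->cq.ts = rdtscl();   // stamp this drain
        // … publish the new head to the kernel, unlock the cq …
        touch_tsc( cltr->sched.io.tscs, ctx->cq.id, ts_prev, ts_next );
        return true;
    }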

Location: libcfa/src/concurrency
Files:    9 edited

  • libcfa/src/concurrency/io.cfa

     #include "kernel/fwd.hfa"
     #include "kernel/private.hfa"
+    #include "kernel/cluster.hfa"
     #include "io/types.hfa"
…
     extern void __kernel_unpark( thread$ * thrd, unpark_hint );

-    bool __cfa_io_drain( $io_context * ctx ) {
+    bool __cfa_io_drain( $io_context * ctx, cluster * cltr ) {
         /* paranoid */ verify( ! __preemption_enabled() );
         /* paranoid */ verify( ready_schedule_islocked() );
…
         }

+        unsigned long long ts_prev = ctx->cq.ts;
+
         for(i; count) {
             unsigned idx = (head + i) & mask;
…
         __cfadbg_print_safe(io, "Kernel I/O : %u completed\n", count);
+        unsigned long long ts_next = ctx->cq.ts = rdtscl();

         // Mark to the kernel that the cqe has been seen
…
         __atomic_unlock(&ctx->cq.lock);
+
+        touch_tsc( cltr->sched.io.tscs, ctx->cq.id, ts_prev, ts_next );

         return true;
…
         /* paranoid */ verify( proc->io.ctx );

-        __attribute__((unused)) cluster * cltr = proc->cltr;
+        cluster * cltr = proc->cltr;
         $io_context & ctx = *proc->io.ctx;
…
         ready_schedule_lock();
-        bool ret = __cfa_io_drain( &ctx );
+        bool ret = __cfa_io_drain( &ctx, cltr );
         ready_schedule_unlock();
         return ret;
  • libcfa/src/concurrency/io/setup.cfa

     #include "fstream.hfa"
     #include "kernel/private.hfa"
+    #include "limits.hfa"
     #include "thread.hfa"
 #pragma GCC diagnostic pop
…
         // completion queue
         cq.lock      = 0;
+        cq.id        = MAX;
+        cq.ts        = rdtscl();
         cq.head      = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.head);
         cq.tail      = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.tail);
  • libcfa/src/concurrency/io/types.hfa

     struct __cmp_ring_t {
         volatile bool lock;
+
+        unsigned id;
+
+        unsigned long long ts;

         // Head and tail of the ring
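
The new fields are added to the header without comments; the annotated sketch below spells out their roles as this changeset uses them (id is assigned in kernel/cluster.cfa and indexes cltr->sched.io.tscs, ts is read and re-stamped by __cfa_io_drain). The comments are editorial, not part of the source.

    struct __cmp_ring_t {
        volatile bool lock;

        // Slot of this completion queue in the cluster's I/O arrays;
        // MAX until the cluster assigns one (see io/setup.cfa and kernel/cluster.cfa).
        unsigned id;

        // rdtscl() timestamp of the last drain of this completion queue.
        unsigned long long ts;

        // Head and tail of the ring
        …
    };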
  • libcfa/src/concurrency/kernel.cfa

 static void mark_awake(__cluster_proc_list & idles, processor & proc);

-extern bool __cfa_io_drain( $io_context * );
+extern bool __cfa_io_drain( $io_context *, cluster * cltr );
 extern bool __cfa_io_flush( processor *, int min_comp );
 static inline bool __maybe_io_drain( processor * );
…
         if(head == tail) return false;
         ready_schedule_lock();
-        ret = __cfa_io_drain( ctx );
+        ret = __cfa_io_drain( ctx, proc->cltr );
         ready_schedule_unlock();
     #endif
  • libcfa/src/concurrency/kernel.hfa

     struct {
         $io_context * ctx;
-        unsigned id;
         unsigned target;
         volatile bool pending;
  • libcfa/src/concurrency/kernel/cluster.cfa

 #include "ready_subqueue.hfa"
+#include "io/types.hfa"

 #include <errno.h>
…
         it->rdq.id = valrq;
         it->rdq.target = MAX;
-        it->io.id = valio;
+        it->io.ctx->cq.id = valio;
         it->io.target = MAX;
         valrq += __shard_factor.readyq;
…
     while(it) {
         /* paranoid */ verifyf( it, "Unexpected null iterator\n");
-        /* paranoid */ verifyf( it->io.id < count, "Processor %p has id %u above count %zu\n", it, it->rdq.id, count);
-        data[it->io.id] = it->io.ctx;
+        /* paranoid */ verifyf( it->io.ctx->cq.id < count, "Processor %p has id %u above count %zu\n", it, it->rdq.id, count);
+        data[it->io.ctx->cq.id] = it->io.ctx;
         it = &(*it)`next;
     }
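
The hunks above are a mechanical move of the slot index from the processor (it->io.id, removed from kernel.hfa in this changeset) onto the completion queue itself. The resulting relationship, with names taken from this changeset (the shape of data is inferred from its use here), is roughly:

    it->io.ctx->cq.id = valio;                 // registration: each cq gets a slot index
    data[it->io.ctx->cq.id] = it->io.ctx;      // slot -> $io_context back-pointer
    // later, in __cfa_io_drain (io.cfa): the same slot indexes the timestamp table
    touch_tsc( cltr->sched.io.tscs, ctx->cq.id, ts_prev, ts_next );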
  • libcfa/src/concurrency/kernel/cluster.hfa

 #pragma once

+#include "device/cpu.hfa"
 #include "kernel/private.hfa"
+
+#include "limits.hfa"

 //-----------------------------------------------------------------------
…
     const unsigned long long ret = ((new_weight * new_val) + (old_weight * old_avg)) / total_weight;
     return ret;
+}
+
+static inline void touch_tsc(__timestamp_t * tscs, size_t idx, unsigned long long ts_prev, unsigned long long ts_next) {
+    if (ts_next == MAX) return;
+    unsigned long long now = rdtscl();
+    unsigned long long pma = __atomic_load_n(&tscs[ idx ].ma, __ATOMIC_RELAXED);
+    __atomic_store_n(&tscs[ idx ].tv, ts_next, __ATOMIC_RELAXED);
+    __atomic_store_n(&tscs[ idx ].ma, moving_average(now, ts_prev, pma), __ATOMIC_RELAXED);
 }
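
Only the closing lines of moving_average are visible in the hunk above; its weights and the derivation of new_val from the two raw timestamps sit above the hunk. Purely to illustrate the arithmetic of the one line that is shown, with made-up weights:

    // Illustration only: hypothetical weights, not the ones defined in cluster.hfa.
    static inline unsigned long long example_moving_average(
            unsigned long long new_val, unsigned long long old_avg ) {
        const unsigned long long new_weight = 4, old_weight = 12, total_weight = 16;
        return ((new_weight * new_val) + (old_weight * old_avg)) / total_weight;
    }
    // example_moving_average( 2000, 1000 ) == (8000 + 12000) / 16 == 1250:
    // the stored average moves a quarter of the way toward the new sample.

The new touch_tsc helper wraps this arithmetic: it returns early for queues without a timestamp (ts_next == MAX), stores the raw timestamp in tv, and folds the new sample (derived from now and ts_prev) into the moving average in ma, using relaxed atomics.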
  • libcfa/src/concurrency/kernel/startup.cfa

     /* paranoid */ verify( sizeof(storage_mainIdleEventFd) == sizeof(eventfd_t) );

+    __cfa_io_start( mainProcessor );
     register_tls( mainProcessor );
-    __cfa_io_start( mainProcessor );

     // Start by initializing the main thread
…
     mainProcessor->local_data = 0p;

+    unregister_tls( mainProcessor );
     __cfa_io_stop( mainProcessor );
-    unregister_tls( mainProcessor );

     // Destroy the main processor and its context in reverse order of construction
…
     proc->local_data = &__cfaabi_tls;

+    __cfa_io_start( proc );
     register_tls( proc );
-
-    __cfa_io_start( proc );

     // used for idle sleep when io_uring is present
…
     // Main routine of the core returned, the core is now fully terminated
     __cfadbg_print_safe(runtime_core, "Kernel : core %p main ended (%p)\n", proc, &proc->runner);
-
-    __cfa_io_stop( proc );

     #if !defined(__CFA_NO_STATISTICS__)
…
     unregister_tls( proc );
+    __cfa_io_stop( proc );

     return 0p;
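
Taken together, these reorderings make processor bring-up and tear-down symmetric: the I/O context's lifetime now encloses TLS registration, for both the main processor and secondary processors. A sketch of the resulting order, using the calls shown above:

    __cfa_io_start( proc );     // I/O context comes up first ...
    register_tls( proc );       // ... then the processor registers its TLS

    // ... the processor runs ...

    unregister_tls( proc );     // TLS is unregistered first ...
    __cfa_io_stop( proc );      // ... then the I/O context is torn down last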
  • libcfa/src/concurrency/ready_queue.cfa

 #include "kernel/cluster.hfa"
 #include "kernel/private.hfa"
-
-#include "limits.hfa"

 // #include <errno.h>
…
     // Actually pop the list
     struct thread$ * thrd;
-    unsigned long long tsc_before = ts(lane);
-    unsigned long long tsv;
-    [thrd, tsv] = pop(lane);
+    unsigned long long ts_prev = ts(lane);
+    unsigned long long ts_next;
+    [thrd, ts_next] = pop(lane);

     /* paranoid */ verify(thrd);
-    /* paranoid */ verify(tsv);
+    /* paranoid */ verify(ts_next);
     /* paranoid */ verify(lane.lock);
…
     __STATS( stats.success++; )

-    if (tsv != MAX) {
-        unsigned long long now = rdtscl();
-        unsigned long long pma = __atomic_load_n(&readyQ.tscs[w].ma, __ATOMIC_RELAXED);
-        __atomic_store_n(&readyQ.tscs[w].tv, tsv, __ATOMIC_RELAXED);
-        __atomic_store_n(&readyQ.tscs[w].ma, moving_average(now, tsc_before, pma), __ATOMIC_RELAXED);
-    }
+    touch_tsc(readyQ.tscs, w, ts_prev, ts_next);

     thrd->preferred = w / __shard_factor.readyq;
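
Reading the new helper against the block it replaces shows the refactor is behaviour-preserving: with the renamed variables, the single call above expands (per touch_tsc in kernel/cluster.hfa, added in this changeset) to the same guarded update that used to be written out inline.

    // touch_tsc(readyQ.tscs, w, ts_prev, ts_next) is equivalent to:
    if (ts_next != MAX) {
        unsigned long long now = rdtscl();
        unsigned long long pma = __atomic_load_n(&readyQ.tscs[w].ma, __ATOMIC_RELAXED);
        __atomic_store_n(&readyQ.tscs[w].tv, ts_next, __ATOMIC_RELAXED);
        __atomic_store_n(&readyQ.tscs[w].ma, moving_average(now, ts_prev, pma), __ATOMIC_RELAXED);
    }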