Changeset 8fc652e0


Ignore:
Timestamp:
Nov 6, 2020, 11:22:57 AM (13 months ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
arm-eh, jacob/cs343-translation, master, new-ast-unique-expr
Children:
54dcab1
Parents:
be5e34b
Message:

Change usage of TLS to more strongly segregate in-kernel and out-of-kernel usage.

Location:
libcfa/src/concurrency
Files:
10 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/io.cfa

    rbe5e34b r8fc652e0  
    7676
    7777        static inline bool next( __leaderlock_t & this ) {
    78                 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     78                /* paranoid */ verify( ! __preemption_enabled() );
    7979                struct $thread * nextt;
    8080                for() {
     
    168168        // This is NOT thread-safe
    169169        static [int, bool] __drain_io( & struct __io_data ring ) {
    170                 /* paranoid */ verify( !kernelTLS.preemption_state.enabled );
     170                /* paranoid */ verify( ! __preemption_enabled() );
    171171
    172172                unsigned to_submit = 0;
     
    404404                                        return;
    405405                                }
    406                                 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     406                                /* paranoid */ verify( ! __preemption_enabled() );
    407407                                __STATS__( true,
    408408                                        io.submit_q.leader += 1;
     
    442442
    443443                        #if defined(LEADER_LOCK)
    444                                 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     444                                /* paranoid */ verify( ! __preemption_enabled() );
    445445                                next(ring.submit_q.submit_lock);
    446446                        #else
  • libcfa/src/concurrency/io/setup.cfa

    rbe5e34b r8fc652e0  
    149149                id.full_proc = false;
    150150                id.id = doregister(&id);
    151                 kernelTLS.this_proc_id = &id;
     151                __cfaabi_tls.this_proc_id = &id;
    152152                __cfaabi_dbg_print_safe( "Kernel : IO poller thread starting\n" );
    153153
     
    179179                                __cfadbg_print_safe(io_core, "Kernel I/O : Unparking io poller %p\n", io_ctx);
    180180                                #if !defined( __CFA_NO_STATISTICS__ )
    181                                         kernelTLS.this_stats = io_ctx->self.curr_cluster->stats;
     181                                        __cfaabi_tls.this_stats = io_ctx->self.curr_cluster->stats;
    182182                                #endif
    183183                                post( io_ctx->sem );
  • libcfa/src/concurrency/kernel.cfa

    rbe5e34b r8fc652e0  
    122122        // Because of a bug, we couldn't initialized the seed on construction
    123123        // Do it here
    124         kernelTLS.rand_seed ^= rdtscl();
    125         kernelTLS.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
     124        __cfaabi_tls.rand_seed ^= rdtscl();
     125        __cfaabi_tls.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
    126126        __tls_rand_advance_bck();
    127127
     
    217217                // and it make sense for it to be set in all other cases except here
    218218                // fake it
    219                 kernelTLS.this_thread = mainThread;
     219                __cfaabi_tls.this_thread = mainThread;
    220220        }
    221221
     
    230230// from the processor coroutine to the target thread
    231231static void __run_thread(processor * this, $thread * thrd_dst) {
    232         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     232        /* paranoid */ verify( ! __preemption_enabled() );
    233233        /* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
    234234        /* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next );
     
    247247
    248248                // Update global state
    249                 kernelTLS.this_thread = thrd_dst;
    250 
    251                 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    252                 /* paranoid */ verify( kernelTLS.this_thread == thrd_dst );
     249                kernelTLS().this_thread = thrd_dst;
     250
     251                /* paranoid */ verify( ! __preemption_enabled() );
     252                /* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
    253253                /* paranoid */ verify( thrd_dst->context.SP );
    254254                /* paranoid */ verify( thrd_dst->state != Halted );
     
    267267                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
    268268                /* paranoid */ verify( thrd_dst->context.SP );
    269                 /* paranoid */ verify( kernelTLS.this_thread == thrd_dst );
    270                 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     269                /* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
     270                /* paranoid */ verify( ! __preemption_enabled() );
    271271
    272272                // Reset global state
    273                 kernelTLS.this_thread = 0p;
     273                kernelTLS().this_thread = 0p;
    274274
    275275                // We just finished running a thread, there are a few things that could have happened.
     
    315315        proc_cor->state = Active;
    316316
    317         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     317        /* paranoid */ verify( ! __preemption_enabled() );
    318318}
    319319
    320320// KERNEL_ONLY
    321321void returnToKernel() {
    322         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    323         $coroutine * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
    324         $thread * thrd_src = kernelTLS.this_thread;
     322        /* paranoid */ verify( ! __preemption_enabled() );
     323        $coroutine * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
     324        $thread * thrd_src = kernelTLS().this_thread;
    325325
    326326        #if !defined(__CFA_NO_STATISTICS__)
    327                 struct processor * last_proc = kernelTLS.this_processor;
     327                struct processor * last_proc = kernelTLS().this_processor;
    328328        #endif
    329329
     
    345345
    346346        #if !defined(__CFA_NO_STATISTICS__)
    347                 if(last_proc != kernelTLS.this_processor) {
     347                if(last_proc != kernelTLS().this_processor) {
    348348                        __tls_stats()->ready.threads.migration++;
    349349                }
    350350        #endif
    351351
    352         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     352        /* paranoid */ verify( ! __preemption_enabled() );
    353353        /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );
    354354        /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too large.\n", thrd_src );
     
    359359// KERNEL ONLY
    360360void __schedule_thread( $thread * thrd ) {
     361        /* paranoid */ verify( ! __preemption_enabled() );
    361362        /* paranoid */ verify( thrd );
    362363        /* paranoid */ verify( thrd->state != Halted );
    363         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    364         /* paranoid */ verify( kernelTLS.this_proc_id );
     364        /* paranoid */ verify( kernelTLS().this_proc_id );
    365365        /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
    366366        /* paranoid */  if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
     
    380380        ready_schedule_unlock();
    381381
    382         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     382        /* paranoid */ verify( ! __preemption_enabled() );
    383383}
    384384
    385385// KERNEL ONLY
    386386static inline $thread * __next_thread(cluster * this) with( *this ) {
    387         /* paranoid */ verify( kernelTLS.this_proc_id );
    388         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     387        /* paranoid */ verify( ! __preemption_enabled() );
     388        /* paranoid */ verify( kernelTLS().this_proc_id );
    389389
    390390        ready_schedule_lock();
     
    392392        ready_schedule_unlock();
    393393
    394         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    395         /* paranoid */ verify( kernelTLS.this_proc_id );
     394        /* paranoid */ verify( kernelTLS().this_proc_id );
     395        /* paranoid */ verify( ! __preemption_enabled() );
    396396        return thrd;
    397397}
     
    399399// KERNEL ONLY
    400400static inline $thread * __next_thread_slow(cluster * this) with( *this ) {
    401         /* paranoid */ verify( kernelTLS.this_proc_id );
    402         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     401        /* paranoid */ verify( ! __preemption_enabled() );
     402        /* paranoid */ verify( kernelTLS().this_proc_id );
    403403
    404404        ready_schedule_lock();
     
    406406        ready_schedule_unlock();
    407407
    408         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    409         /* paranoid */ verify( kernelTLS.this_proc_id );
     408        /* paranoid */ verify( kernelTLS().this_proc_id );
     409        /* paranoid */ verify( ! __preemption_enabled() );
    410410        return thrd;
    411411}
     
    414414        if( !thrd ) return;
    415415
    416         /* paranoid */ verify( kernelTLS.this_proc_id );
    417         bool full = kernelTLS.this_proc_id->full_proc;
    418         if(full) disable_interrupts();
    419 
    420         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    421416        int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
    422417        switch(old_ticket) {
     
    428423                        /* paranoid */ verify( thrd->state == Blocked );
    429424
    430                         // Wake lost the race,
    431                         __schedule_thread( thrd );
     425                        {
     426                                /* paranoid */ verify( publicTLS_get(this_proc_id) );
     427                                bool full = publicTLS_get(this_proc_id)->full_proc;
     428                                if(full) disable_interrupts();
     429
     430                                /* paranoid */ verify( ! __preemption_enabled() );
     431
     432                                // Wake lost the race,
     433                                __schedule_thread( thrd );
     434
     435                                /* paranoid */ verify( ! __preemption_enabled() );
     436
     437                                if(full) enable_interrupts( __cfaabi_dbg_ctx );
     438                                /* paranoid */ verify( publicTLS_get(this_proc_id) );
     439                        }
     440
    432441                        break;
    433442                default:
     
    435444                        abort("Thread %p (%s) has mismatch park/unpark\n", thrd, thrd->self_cor.name);
    436445        }
    437         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    438 
    439         if(full) enable_interrupts( __cfaabi_dbg_ctx );
    440         /* paranoid */ verify( kernelTLS.this_proc_id );
    441446}
    442447
    443448void park( void ) {
    444         /* paranoid */ verify( kernelTLS.preemption_state.enabled );
     449        /* paranoid */ verify( __preemption_enabled() );
    445450        disable_interrupts();
    446         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    447         /* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION );
     451        /* paranoid */ verify( ! __preemption_enabled() );
     452        /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
    448453
    449454        returnToKernel();
    450455
    451         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     456        /* paranoid */ verify( ! __preemption_enabled() );
    452457        enable_interrupts( __cfaabi_dbg_ctx );
    453         /* paranoid */ verify( kernelTLS.preemption_state.enabled );
     458        /* paranoid */ verify( __preemption_enabled() );
    454459
    455460}
     
    460465        // Should never return
    461466        void __cfactx_thrd_leave() {
    462                 $thread * thrd = TL_GET( this_thread );
     467                $thread * thrd = active_thread();
    463468                $monitor * this = &thrd->self_mon;
    464469
     
    473478
    474479                // Leave the thread
    475                 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     480                /* paranoid */ verify( ! __preemption_enabled() );
    476481                returnToKernel();
    477482                abort();
     
    483488// KERNEL ONLY
    484489bool force_yield( __Preemption_Reason reason ) {
    485         /* paranoid */ verify( kernelTLS.preemption_state.enabled );
     490        /* paranoid */ verify( __preemption_enabled() );
    486491        disable_interrupts();
    487         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    488 
    489         $thread * thrd = kernelTLS.this_thread;
     492        /* paranoid */ verify( ! __preemption_enabled() );
     493
     494        $thread * thrd = kernelTLS().this_thread;
    490495        /* paranoid */ verify(thrd->state == Active);
    491496
     
    501506        }
    502507
    503         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     508        /* paranoid */ verify( ! __preemption_enabled() );
    504509        enable_interrupts_noPoll();
    505         /* paranoid */ verify( kernelTLS.preemption_state.enabled );
     510        /* paranoid */ verify( __preemption_enabled() );
    506511
    507512        return preempted;
     
    513518// Wake a thread from the front if there are any
    514519static void __wake_one(cluster * this) {
    515         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     520        /* paranoid */ verify( ! __preemption_enabled() );
    516521        /* paranoid */ verify( ready_schedule_islocked() );
    517522
     
    533538
    534539        /* paranoid */ verify( ready_schedule_islocked() );
    535         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     540        /* paranoid */ verify( ! __preemption_enabled() );
    536541
    537542        return;
     
    543548
    544549        disable_interrupts();
    545                 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     550                /* paranoid */ verify( ! __preemption_enabled() );
    546551                post( this->idle );
    547552        enable_interrupts( __cfaabi_dbg_ctx );
     
    549554
    550555static void push  (__cluster_idles & this, processor & proc) {
    551         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     556        /* paranoid */ verify( ! __preemption_enabled() );
    552557        lock( this );
    553558                this.idle++;
     
    556561                insert_first(this.list, proc);
    557562        unlock( this );
    558         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     563        /* paranoid */ verify( ! __preemption_enabled() );
    559564}
    560565
    561566static void remove(__cluster_idles & this, processor & proc) {
    562         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     567        /* paranoid */ verify( ! __preemption_enabled() );
    563568        lock( this );
    564569                this.idle--;
     
    567572                remove(proc);
    568573        unlock( this );
    569         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     574        /* paranoid */ verify( ! __preemption_enabled() );
    570575}
    571576
     
    611616        }
    612617
    613         return kernelTLS.this_thread;
     618        return __cfaabi_tls.this_thread;
    614619}
    615620
     
    636641
    637642int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
    638         return get_coroutine(kernelTLS.this_thread) == get_coroutine(mainThread) ? 4 : 2;
     643        return get_coroutine(kernelTLS().this_thread) == get_coroutine(mainThread) ? 4 : 2;
    639644}
    640645
     
    668673        if ( count < 0 ) {
    669674                // queue current task
    670                 append( waiting, kernelTLS.this_thread );
     675                append( waiting, active_thread() );
    671676
    672677                // atomically release spin lock and block
     
    718723                void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]) {
    719724                        this.prev_name = prev_name;
    720                         this.prev_thrd = kernelTLS.this_thread;
     725                        this.prev_thrd = kernelTLS().this_thread;
    721726                }
    722727        }
  • libcfa/src/concurrency/kernel.hfa

    rbe5e34b r8fc652e0  
    275275static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }
    276276
    277 static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE
    278 static inline struct cluster   * active_cluster  () { return TL_GET( this_processor )->cltr; }
     277static inline struct processor * active_processor() { return publicTLS_get( this_processor ); } // UNSAFE
     278static inline struct cluster   * active_cluster  () { return publicTLS_get( this_processor )->cltr; }
    279279
    280280#if !defined(__CFA_NO_STATISTICS__)
     281        void print_stats_now( cluster & this, int flags );
     282
    281283        static inline void print_stats_at_exit( cluster & this, int flags ) {
    282284                this.print_stats |= flags;
  • libcfa/src/concurrency/kernel/fwd.hfa

    rbe5e34b r8fc652e0  
    5555                                uint64_t bck_seed;
    5656                        } ready_rng;
    57                 } kernelTLS __attribute__ ((tls_model ( "initial-exec" )));
     57                } __cfaabi_tls __attribute__ ((tls_model ( "initial-exec" )));
    5858
     59                extern bool __preemption_enabled();
    5960
     61                static inline KernelThreadData & kernelTLS( void ) {
     62                        /* paranoid */ verify( ! __preemption_enabled() );
     63                        return __cfaabi_tls;
     64                }
     65
     66                extern uintptr_t __cfatls_get( unsigned long int member );
     67                // #define publicTLS_get( member ) ((typeof(__cfaabi_tls.member))__cfatls_get( __builtin_offsetof(KernelThreadData, member) ))
     68                #define publicTLS_get( member ) (__cfaabi_tls.member)
     69                // extern forall(otype T) T __cfatls_get( T * member, T value );
     70                // #define publicTLS_set( member, value ) __cfatls_set( (typeof(member)*)__builtin_offsetof(KernelThreadData, member), value );
    6071
    6172                static inline uint64_t __tls_rand() {
    6273                        #if defined(__SIZEOF_INT128__)
    63                                 return __lehmer64( kernelTLS.rand_seed );
     74                                return __lehmer64( kernelTLS().rand_seed );
    6475                        #else
    65                                 return __xorshift64( kernelTLS.rand_seed );
     76                                return __xorshift64( kernelTLS().rand_seed );
    6677                        #endif
    6778                }
     
    7586                static inline unsigned __tls_rand_fwd() {
    7687
    77                         kernelTLS.ready_rng.fwd_seed = (A * kernelTLS.ready_rng.fwd_seed + C) & (M - 1);
    78                         return kernelTLS.ready_rng.fwd_seed >> D;
     88                        kernelTLS().ready_rng.fwd_seed = (A * kernelTLS().ready_rng.fwd_seed + C) & (M - 1);
     89                        return kernelTLS().ready_rng.fwd_seed >> D;
    7990                }
    8091
    8192                static inline unsigned __tls_rand_bck() {
    82                         unsigned int r = kernelTLS.ready_rng.bck_seed >> D;
    83                         kernelTLS.ready_rng.bck_seed = AI * (kernelTLS.ready_rng.bck_seed - C) & (M - 1);
     93                        unsigned int r = kernelTLS().ready_rng.bck_seed >> D;
     94                        kernelTLS().ready_rng.bck_seed = AI * (kernelTLS().ready_rng.bck_seed - C) & (M - 1);
    8495                        return r;
    8596                }
     
    92103
    93104                static inline void __tls_rand_advance_bck(void) {
    94                         kernelTLS.ready_rng.bck_seed = kernelTLS.ready_rng.fwd_seed;
     105                        kernelTLS().ready_rng.bck_seed = kernelTLS().ready_rng.fwd_seed;
    95106                }
    96107        }
    97108
    98         #if 0 // def __ARM_ARCH
    99                 // function prototypes are only really used by these macros on ARM
    100                 void disable_global_interrupts();
    101                 void enable_global_interrupts();
    102109
    103                 #define TL_GET( member ) ( { __typeof__( kernelTLS.member ) target; \
    104                         disable_global_interrupts(); \
    105                         target = kernelTLS.member; \
    106                         enable_global_interrupts(); \
    107                         target; } )
    108                 #define TL_SET( member, value ) disable_global_interrupts(); \
    109                         kernelTLS.member = value; \
    110                         enable_global_interrupts();
    111         #else
    112                 #define TL_GET( member ) kernelTLS.member
    113                 #define TL_SET( member, value ) kernelTLS.member = value;
    114         #endif
    115110
    116111        extern void disable_interrupts();
     
    121116                extern void park( void );
    122117                extern void unpark( struct $thread * this );
    123                 static inline struct $thread * active_thread () { return TL_GET( this_thread ); }
     118                static inline struct $thread * active_thread () {
     119                        struct $thread * t = publicTLS_get( this_thread );
     120                        /* paranoid */ verify( t );
     121                        return t;
     122                }
    124123
    125124                extern bool force_yield( enum __Preemption_Reason );
     
    140139                #if !defined(__CFA_NO_STATISTICS__)
    141140                        static inline struct __stats_t * __tls_stats() {
    142                                 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    143                                 /* paranoid */ verify( kernelTLS.this_stats );
    144                                 return kernelTLS.this_stats;
     141                                /* paranoid */ verify( ! __preemption_enabled() );
     142                                /* paranoid */ verify( kernelTLS().this_stats );
     143                                return kernelTLS().this_stats;
    145144                        }
    146145
  • libcfa/src/concurrency/kernel/startup.cfa

    rbe5e34b r8fc652e0  
    118118//-----------------------------------------------------------------------------
    119119// Global state
    120 thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) @= {
     120thread_local struct KernelThreadData __cfaabi_tls __attribute__ ((tls_model ( "initial-exec" ))) @= {
    121121        NULL,                                                                                           // cannot use 0p
    122122        NULL,
     
    156156// Kernel boot procedures
    157157static void __kernel_startup(void) {
    158         verify( ! kernelTLS.preemption_state.enabled );
     158        /* paranoid */ verify( ! __preemption_enabled() );
    159159        __cfadbg_print_safe(runtime_core, "Kernel : Starting\n");
    160160
     
    212212
    213213        //initialize the global state variables
    214         kernelTLS.this_processor = mainProcessor;
    215         kernelTLS.this_proc_id   = (__processor_id_t*)mainProcessor;
    216         kernelTLS.this_thread    = mainThread;
     214        __cfaabi_tls.this_processor = mainProcessor;
     215        __cfaabi_tls.this_proc_id   = (__processor_id_t*)mainProcessor;
     216        __cfaabi_tls.this_thread    = mainThread;
    217217
    218218        #if !defined( __CFA_NO_STATISTICS__ )
    219                 kernelTLS.this_stats = (__stats_t *)& storage_mainProcStats;
    220                 __init_stats( kernelTLS.this_stats );
     219                __cfaabi_tls.this_stats = (__stats_t *)& storage_mainProcStats;
     220                __init_stats( __cfaabi_tls.this_stats );
    221221        #endif
    222222
     
    234234        // context. Hence, the main thread does not begin through __cfactx_invoke_thread, like all other threads. The trick here is that
    235235        // mainThread is on the ready queue when this call is made.
    236         __kernel_first_resume( kernelTLS.this_processor );
     236        __kernel_first_resume( __cfaabi_tls.this_processor );
    237237
    238238
     
    251251        __cfadbg_print_safe(runtime_core, "Kernel : Started\n--------------------------------------------------\n\n");
    252252
    253         verify( ! kernelTLS.preemption_state.enabled );
     253        /* paranoid */ verify( ! __preemption_enabled() );
    254254        enable_interrupts( __cfaabi_dbg_ctx );
    255         verify( TL_GET( preemption_state.enabled ) );
     255        /* paranoid */ verify( __preemption_enabled() );
     256
    256257}
    257258
     
    262263        mainCluster->io.ctxs = 0p;
    263264
    264         /* paranoid */ verify( TL_GET( preemption_state.enabled ) );
     265        /* paranoid */ verify( __preemption_enabled() );
    265266        disable_interrupts();
    266         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     267        /* paranoid */ verify( ! __preemption_enabled() );
    267268
    268269        __cfadbg_print_safe(runtime_core, "\n--------------------------------------------------\nKernel : Shutting down\n");
     
    272273        // which is currently here
    273274        __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
    274         __kernel_last_resume( kernelTLS.this_processor );
     275        __kernel_last_resume( __cfaabi_tls.this_processor );
    275276        mainThread->self_cor.state = Halted;
    276277
     
    321322                __stats_t local_stats;
    322323                __init_stats( &local_stats );
    323                 kernelTLS.this_stats = &local_stats;
     324                __cfaabi_tls.this_stats = &local_stats;
    324325        #endif
    325326
    326327        processor * proc = (processor *) arg;
    327         kernelTLS.this_processor = proc;
    328         kernelTLS.this_proc_id   = (__processor_id_t*)proc;
    329         kernelTLS.this_thread    = 0p;
    330         kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
     328        __cfaabi_tls.this_processor = proc;
     329        __cfaabi_tls.this_proc_id   = (__processor_id_t*)proc;
     330        __cfaabi_tls.this_thread    = 0p;
     331        __cfaabi_tls.preemption_state.[enabled, disable_count] = [false, 1];
    331332        // SKULLDUGGERY: We want to create a context for the processor coroutine
    332333        // which is needed for the 2-step context switch. However, there is no reason
     
    340341
    341342        //Set global state
    342         kernelTLS.this_thread = 0p;
     343        __cfaabi_tls.this_thread = 0p;
    343344
    344345        //We now have a proper context from which to schedule threads
     
    370371        $coroutine * dst = get_coroutine(this->runner);
    371372
    372         verify( ! kernelTLS.preemption_state.enabled );
    373 
    374         kernelTLS.this_thread->curr_cor = dst;
     373        /* paranoid */ verify( ! __preemption_enabled() );
     374
     375        __cfaabi_tls.this_thread->curr_cor = dst;
    375376        __stack_prepare( &dst->stack, 65000 );
    376377        __cfactx_start(main, dst, this->runner, __cfactx_invoke_coroutine);
    377378
    378         verify( ! kernelTLS.preemption_state.enabled );
     379        /* paranoid */ verify( ! __preemption_enabled() );
    379380
    380381        dst->last = &src->self_cor;
     
    394395        /* paranoid */ verify(src->state == Active);
    395396
    396         verify( ! kernelTLS.preemption_state.enabled );
     397        /* paranoid */ verify( ! __preemption_enabled() );
    397398}
    398399
     
    402403        $coroutine * dst = get_coroutine(this->runner);
    403404
    404         verify( ! kernelTLS.preemption_state.enabled );
    405         verify( dst->starter == src );
    406         verify( dst->context.SP );
     405        /* paranoid */ verify( ! __preemption_enabled() );
     406        /* paranoid */ verify( dst->starter == src );
     407        /* paranoid */ verify( dst->context.SP );
    407408
    408409        // SKULLDUGGERY in debug the processors check that the
     
    546547
    547548                P( terminated );
    548                 verify( kernelTLS.this_processor != &this);
     549                /* paranoid */ verify( active_processor() != &this);
    549550        }
    550551
     
    696697#if defined(__CFA_WITH_VERIFY__)
    697698static bool verify_fwd_bck_rng(void) {
    698         kernelTLS.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&verify_fwd_bck_rng);
     699        __cfaabi_tls.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&verify_fwd_bck_rng);
    699700
    700701        unsigned values[10];
  • libcfa/src/concurrency/kernel_private.hfa

    rbe5e34b r8fc652e0  
    3838#endif
    3939;
     40
     41extern bool __preemption_enabled();
    4042
    4143//release/wake-up the following resources
     
    181183//  creating/destroying queues
    182184static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
    183         /*paranoid*/ verify( kernelTLS.this_proc_id );
    184 
    185         unsigned iproc = kernelTLS.this_proc_id->id;
    186         /*paranoid*/ verify(data[iproc].handle == kernelTLS.this_proc_id);
     185        /* paranoid */ verify( ! __preemption_enabled() );
     186        /* paranoid */ verify( kernelTLS().this_proc_id );
     187
     188        unsigned iproc = kernelTLS().this_proc_id->id;
     189        /*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
    187190        /*paranoid*/ verify(iproc < ready);
    188191
     
    207210
    208211static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
    209         /*paranoid*/ verify( kernelTLS.this_proc_id );
    210 
    211         unsigned iproc = kernelTLS.this_proc_id->id;
    212         /*paranoid*/ verify(data[iproc].handle == kernelTLS.this_proc_id);
     212        /* paranoid */ verify( ! __preemption_enabled() );
     213        /* paranoid */ verify( kernelTLS().this_proc_id );
     214
     215        unsigned iproc = kernelTLS().this_proc_id->id;
     216        /*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
    213217        /*paranoid*/ verify(iproc < ready);
    214218        /*paranoid*/ verify(data[iproc].lock);
     
    223227#ifdef __CFA_WITH_VERIFY__
    224228        static inline bool ready_schedule_islocked(void) {
    225                 /*paranoid*/ verify( kernelTLS.this_proc_id );
    226                 __processor_id_t * proc = kernelTLS.this_proc_id;
     229                /* paranoid */ verify( ! __preemption_enabled() );
     230                /*paranoid*/ verify( kernelTLS().this_proc_id );
     231                __processor_id_t * proc = kernelTLS().this_proc_id;
    227232                return __scheduler_lock->data[proc->id].owned;
    228233        }
  • libcfa/src/concurrency/monitor.cfa

    rbe5e34b r8fc652e0  
    8282// Enter single monitor
    8383static void __enter( $monitor * this, const __monitor_group_t & group ) {
     84        $thread * thrd = active_thread();
     85
    8486        // Lock the monitor spinlock
    8587        lock( this->lock __cfaabi_dbg_ctx2 );
    86         // Interrupts disable inside critical section
    87         $thread * thrd = kernelTLS.this_thread;
    8888
    8989        __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
     
    126126                __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
    127127
    128                 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     128                /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
    129129                return;
    130130        }
     
    132132        __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
    133133
    134         /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     134        /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
    135135        /* paranoid */ verify( this->lock.lock );
    136136
     
    141141
    142142static void __dtor_enter( $monitor * this, fptr_t func, bool join ) {
     143        $thread * thrd = active_thread();
     144
    143145        // Lock the monitor spinlock
    144146        lock( this->lock __cfaabi_dbg_ctx2 );
    145         // Interrupts disable inside critical section
    146         $thread * thrd = kernelTLS.this_thread;
    147147
    148148        __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
     
    155155                __set_owner( this, thrd );
    156156
    157                 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     157                verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
    158158
    159159                unlock( this->lock );
     
    174174                this->owner = thrd;
    175175
    176                 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     176                verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
    177177
    178178                unlock( this->lock );
     
    200200
    201201                // Release the next thread
    202                 /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     202                /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
    203203                unpark( urgent->owner->waiting_thread );
    204204
     
    207207
    208208                // Some one was waiting for us, enter
    209                 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     209                /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
    210210        }
    211211        else {
     
    224224                park();
    225225
    226                 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     226                /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
    227227                return;
    228228        }
     
    237237        lock( this->lock __cfaabi_dbg_ctx2 );
    238238
    239         __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
    240 
    241         /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     239        __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", active_thread(), this, this->owner);
     240
     241        /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
    242242
    243243        // Leaving a recursion level, decrement the counter
     
    270270void __dtor_leave( $monitor * this, bool join ) {
    271271        __cfaabi_dbg_debug_do(
    272                 if( TL_GET( this_thread ) != this->owner ) {
    273                         abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner);
     272                if( active_thread() != this->owner ) {
     273                        abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, active_thread(), this->owner);
    274274                }
    275275                if( this->recursion != 1  && !join ) {
     
    287287        /* paranoid */ verify( this->lock.lock );
    288288        /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
    289         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     289        /* paranoid */ verify( ! __preemption_enabled() );
    290290        /* paranoid */ verify( thrd->state == Halted );
    291291        /* paranoid */ verify( this->recursion == 1 );
     
    303303        // Unpark the next owner if needed
    304304        /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
    305         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     305        /* paranoid */ verify( ! __preemption_enabled() );
    306306        /* paranoid */ verify( thrd->state == Halted );
    307307        unpark( new_owner );
     
    327327// Sorts monitors before entering
    328328void ?{}( monitor_guard_t & this, $monitor * m [], __lock_size_t count, fptr_t func ) {
    329         $thread * thrd = TL_GET( this_thread );
     329        $thread * thrd = active_thread();
    330330
    331331        // Store current array
     
    362362
    363363        // Restore thread context
    364         TL_GET( this_thread )->monitors = this.prev;
     364        active_thread()->monitors = this.prev;
    365365}
    366366
     
    369369void ?{}( monitor_dtor_guard_t & this, $monitor * m [], fptr_t func, bool join ) {
    370370        // optimization
    371         $thread * thrd = TL_GET( this_thread );
     371        $thread * thrd = active_thread();
    372372
    373373        // Store current array
     
    392392
    393393        // Restore thread context
    394         TL_GET( this_thread )->monitors = this.prev;
     394        active_thread()->monitors = this.prev;
    395395}
    396396
     
    432432
    433433        // Create the node specific to this wait operation
    434         wait_ctx( TL_GET( this_thread ), user_info );
     434        wait_ctx( active_thread(), user_info );
    435435
    436436        // Append the current wait operation to the ones already queued on the condition
     
    483483        //Some more checking in debug
    484484        __cfaabi_dbg_debug_do(
    485                 $thread * this_thrd = TL_GET( this_thread );
     485                $thread * this_thrd = active_thread();
    486486                if ( this.monitor_count != this_thrd->monitors.size ) {
    487487                        abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size );
     
    531531
    532532        // Create the node specific to this wait operation
    533         wait_ctx_primed( kernelTLS.this_thread, 0 )
     533        wait_ctx_primed( active_thread(), 0 )
    534534
    535535        //save contexts
     
    630630
    631631                                // Create the node specific to this wait operation
    632                                 wait_ctx_primed( kernelTLS.this_thread, 0 );
     632                                wait_ctx_primed( active_thread(), 0 );
    633633
    634634                                // Save monitor states
     
    682682
    683683        // Create the node specific to this wait operation
    684         wait_ctx_primed( kernelTLS.this_thread, 0 );
     684        wait_ctx_primed( active_thread(), 0 );
    685685
    686686        monitor_save;
     
    688688
    689689        for( __lock_size_t i = 0; i < count; i++) {
    690                 verify( monitors[i]->owner == kernelTLS.this_thread );
     690                verify( monitors[i]->owner == active_thread() );
    691691        }
    692692
     
    724724static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread * owner ) {
    725725        /* paranoid */ verify ( monitors[0]->lock.lock );
    726         /* paranoid */ verifyf( monitors[0]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[0]->owner, monitors[0]->recursion, monitors[0] );
     726        /* paranoid */ verifyf( monitors[0]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[0]->owner, monitors[0]->recursion, monitors[0] );
    727727        monitors[0]->owner        = owner;
    728728        monitors[0]->recursion    = 1;
    729729        for( __lock_size_t i = 1; i < count; i++ ) {
    730730                /* paranoid */ verify ( monitors[i]->lock.lock );
    731                 /* paranoid */ verifyf( monitors[i]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[i]->owner, monitors[i]->recursion, monitors[i] );
     731                /* paranoid */ verifyf( monitors[i]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[i]->owner, monitors[i]->recursion, monitors[i] );
    732732                monitors[i]->owner        = owner;
    733733                monitors[i]->recursion    = 0;
     
    755755                //regardless of if we are ready to baton pass,
    756756                //we need to set the monitor as in use
    757                 /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     757                /* paranoid */ verifyf( !this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
    758758                __set_owner( this,  urgent->owner->waiting_thread );
    759759
     
    764764        // Get the next thread in the entry_queue
    765765        $thread * new_owner = pop_head( this->entry_queue );
    766         /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     766        /* paranoid */ verifyf( !this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
    767767        /* paranoid */ verify( !new_owner || new_owner->link.next == 0p );
    768768        __set_owner( this, new_owner );
     
    892892
    893893static inline void brand_condition( condition & this ) {
    894         $thread * thrd = TL_GET( this_thread );
     894        $thread * thrd = active_thread();
    895895        if( !this.monitors ) {
    896896                // __cfaabi_dbg_print_safe( "Branding\n" );
  • libcfa/src/concurrency/preemption.cfa

    rbe5e34b r8fc652e0  
    164164//=============================================================================================
    165165
     166//----------
     167// special case for preemption since used often
     168bool __preemption_enabled() {
     169        // create a assembler label before
     170        // marked as clobber all to avoid movement
     171        asm volatile("__cfaasm_check_before:":::"memory");
     172
     173        // access tls as normal
     174        bool enabled = __cfaabi_tls.preemption_state.enabled;
     175
     176        // create a assembler label after
     177        // marked as clobber all to avoid movement
     178        asm volatile("__cfaasm_check_after:":::"memory");
     179        return enabled;
     180}
     181
     182//----------
     183// Get data from the TLS block
     184uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__)); //no inline to avoid problems
     185uintptr_t __cfatls_get( unsigned long int offset ) {
     186        // create a assembler label before
     187        // marked as clobber all to avoid movement
     188        asm volatile("__cfaasm_get_before:":::"memory");
     189
     190        // access tls as normal (except for pointer arithmetic)
     191        uintptr_t val = *(uintptr_t*)((uintptr_t)&__cfaabi_tls + offset);
     192
     193        // create a assembler label after
     194        // marked as clobber all to avoid movement
     195        asm volatile("__cfaasm_get_after:":::"memory");
     196        return val;
     197}
     198
     199// //----------
     200// // Write data to the TLS block
     201// // sadly it looses the type information and can only write 1 word at a time
     202// // use with __builtin_offsetof
     203// void __cfatls_set(uintptr_t offset, void * value) __attribute__((__noinline__));
     204// void __cfatls_set(uintptr_t offset, void * value) {
     205//     // create a assembler label before
     206//     // marked as clobber all to avoid movement
     207//     asm volatile("__cfaasm_set_before:":::"memory");
     208
     209//     // access tls as normal (except for type information)
     210//     *(void**)(offset + (uintptr_t)&my_tls) = value;
     211
     212//     // create a assembler label after
     213//     // marked as clobber all to avoid movement
     214//     asm volatile("__cfaasm_set_after:":::"memory");
     215// }
     216
     217// //----------
     218// #include <stdio.h>
     219// int main() {
     220//     // Get the information
     221//     // Must use inline assembly to get access to label
     222//     // C is annoying here because this could easily be a static const but "initializer element is not a compile-time constant"
     223//     // The big advantage of this approach is that there is 0 overhead for the read and writes function
     224//     void * __cfaasm_addr_get_before = ({ void * value; asm("movq $__cfaasm_get_before, %[v]\n\t" : [v]"=r"(value) ); value; });
     225//     void * __cfaasm_addr_get_after  = ({ void * value; asm("movq $__cfaasm_get_after , %[v]\n\t" : [v]"=r"(value) ); value; });
     226//     void * __cfaasm_addr_set_before = ({ void * value; asm("movq $__cfaasm_set_before, %[v]\n\t" : [v]"=r"(value) ); value; });
     227//     void * __cfaasm_addr_set_after  = ({ void * value; asm("movq $__cfaasm_set_after , %[v]\n\t" : [v]"=r"(value) ); value; });
     228
     229//     printf("%p to %p\n", __cfaasm_addr_get_before, __cfaasm_addr_get_after);
     230//     printf("%p to %p\n", __cfaasm_addr_set_before, __cfaasm_addr_set_after);
     231//     return 0;
     232// }
     233
    166234__cfaabi_dbg_debug_do( static thread_local void * last_interrupt = 0; )
    167235
     
    169237        // Disable interrupts by incrementing the counter
    170238        void disable_interrupts() {
    171                 with( kernelTLS.preemption_state ) {
     239                // create a assembler label before
     240                // marked as clobber all to avoid movement
     241                asm volatile("__cfaasm_disable_before:":::"memory");
     242
     243                with( __cfaabi_tls.preemption_state ) {
    172244                        #if GCC_VERSION > 50000
    173245                        static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
     
    186258                        verify( new_val < 65_000u );              // If this triggers someone is disabling interrupts without enabling them
    187259                }
     260
     261                // create a assembler label after
     262                // marked as clobber all to avoid movement
     263                asm volatile("__cfaasm_disable_after:":::"memory");
    188264        }
    189265
     
    191267        // If counter reaches 0, execute any pending __cfactx_switch
    192268        void enable_interrupts( __cfaabi_dbg_ctx_param ) {
    193                 processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
     269                // create a assembler label before
     270                // marked as clobber all to avoid movement
     271                asm volatile("__cfaasm_enable_before:":::"memory");
     272
     273                processor   * proc = __cfaabi_tls.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
    194274                /* paranoid */ verify( proc );
    195275
    196                 with( kernelTLS.preemption_state ){
     276                with( __cfaabi_tls.preemption_state ){
    197277                        unsigned short prev = disable_count;
    198278                        disable_count -= 1;
     
    221301                // For debugging purposes : keep track of the last person to enable the interrupts
    222302                __cfaabi_dbg_debug_do( proc->last_enable = caller; )
     303
     304                // create a assembler label after
     305                // marked as clobber all to avoid movement
     306                asm volatile("__cfaasm_enable_after:":::"memory");
    223307        }
    224308
     
    226310        // Don't execute any pending __cfactx_switch even if counter reaches 0
    227311        void enable_interrupts_noPoll() {
    228                 unsigned short prev = kernelTLS.preemption_state.disable_count;
    229                 kernelTLS.preemption_state.disable_count -= 1;
     312                // create a assembler label before
     313                // marked as clobber all to avoid movement
     314                asm volatile("__cfaasm_nopoll_before:":::"memory");
     315
     316                unsigned short prev = __cfaabi_tls.preemption_state.disable_count;
     317                __cfaabi_tls.preemption_state.disable_count -= 1;
    230318                verifyf( prev != 0u, "Incremented from %u\n", prev );                     // If this triggers someone is enabled already enabled interrupts
    231319                if( prev == 1 ) {
    232320                        #if GCC_VERSION > 50000
    233                         static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free");
     321                        static_assert(__atomic_always_lock_free(sizeof(__cfaabi_tls.preemption_state.enabled), &__cfaabi_tls.preemption_state.enabled), "Must be lock-free");
    234322                        #endif
    235323                        // Set enabled flag to true
    236324                        // should be atomic to avoid preemption in the middle of the operation.
    237325                        // use memory order RELAXED since there is no inter-thread on this variable requirements
    238                         __atomic_store_n(&kernelTLS.preemption_state.enabled, true, __ATOMIC_RELAXED);
     326                        __atomic_store_n(&__cfaabi_tls.preemption_state.enabled, true, __ATOMIC_RELAXED);
    239327
    240328                        // Signal the compiler that a fence is needed but only for signal handlers
    241329                        __atomic_signal_fence(__ATOMIC_RELEASE);
    242330                }
     331
     332                // create a assembler label after
     333                // marked as clobber all to avoid movement
     334                asm volatile("__cfaasm_nopoll_after:":::"memory");
    243335        }
    244336}
     
    275367static void timeout( $thread * this ) {
    276368        #if !defined( __CFA_NO_STATISTICS__ )
    277                 kernelTLS.this_stats = this->curr_cluster->stats;
     369                kernelTLS().this_stats = this->curr_cluster->stats;
    278370        #endif
    279371        unpark( this );
     
    286378static inline bool preemption_ready() {
    287379        // Check if preemption is safe
    288         bool ready = kernelTLS.preemption_state.enabled && ! kernelTLS.preemption_state.in_progress;
     380        bool ready = __cfaabi_tls.preemption_state.enabled && ! __cfaabi_tls.preemption_state.in_progress;
    289381
    290382        // Adjust the pending flag accordingly
    291         kernelTLS.this_processor->pending_preemption = !ready;
     383        __cfaabi_tls.this_processor->pending_preemption = !ready;
    292384        return ready;
    293385}
     
    303395
    304396        // Start with preemption disabled until ready
    305         kernelTLS.preemption_state.enabled = false;
    306         kernelTLS.preemption_state.disable_count = 1;
     397        __cfaabi_tls.preemption_state.enabled = false;
     398        __cfaabi_tls.preemption_state.disable_count = 1;
    307399
    308400        // Initialize the event kernel
     
    362454// Kernel Signal Handlers
    363455//=============================================================================================
     456struct asm_region {
     457        void * before;
     458        void * after;
     459};
     460
     461//-----------------------------------------------------------------------------
     462// Some assembly required
     463#if defined( __i386 )
     464        #define __cfaasm_label( label ) \
     465                ({ \
     466                        struct asm_region region; \
     467                        asm( \
     468                                "movl $__cfaasm_" #label "_before, %[vb]\n\t" \
     469                                "movl $__cfaasm_" #label "_after , %[va]\n\t" \
     470                                 : [vb]"=r"(region.before), [vb]"=r"(region.before) \
     471                        ); \
     472                        region; \
     473                });
     474#elif defined( __x86_64 )
     475        #ifdef __PIC__
     476                #define PLT "@PLT"
     477        #else
     478                #define PLT ""
     479        #endif
     480        #define __cfaasm_label( label ) \
     481                ({ \
     482                        struct asm_region region; \
     483                        asm( \
     484                                "movq $__cfaasm_" #label "_before" PLT ", %[vb]\n\t" \
     485                                "movq $__cfaasm_" #label "_after"  PLT ", %[va]\n\t" \
     486                                 : [vb]"=r"(region.before), [va]"=r"(region.after) \
     487                        ); \
     488                        region; \
     489                });
     490#elif defined( __aarch64__ )
     491        #error __cfaasm_label undefined for arm
     492#else
     493        #error unknown hardware architecture
     494#endif
    364495
    365496// Context switch signal handler
    366497// Receives SIGUSR1 signal and causes the current thread to yield
    367498static void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) {
    368         __cfaabi_dbg_debug_do( last_interrupt = (void *)(cxt->uc_mcontext.CFA_REG_IP); )
     499        void * ip = (void *)(cxt->uc_mcontext.CFA_REG_IP);
     500        __cfaabi_dbg_debug_do( last_interrupt = ip; )
    369501
    370502        // SKULLDUGGERY: if a thread creates a processor and the immediately deletes it,
     
    372504        // before the kernel thread has even started running. When that happens, an interrupt
    373505        // with a null 'this_processor' will be caught, just ignore it.
    374         if(! kernelTLS.this_processor ) return;
     506        if(! __cfaabi_tls.this_processor ) return;
    375507
    376508        choose(sfp->si_value.sival_int) {
    377509                case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
    378                 case PREEMPT_TERMINATE: verify( __atomic_load_n( &kernelTLS.this_processor->do_terminate, __ATOMIC_SEQ_CST ) );
     510                case PREEMPT_TERMINATE: verify( __atomic_load_n( &__cfaabi_tls.this_processor->do_terminate, __ATOMIC_SEQ_CST ) );
    379511                default:
    380512                        abort( "internal error, signal value is %d", sfp->si_value.sival_int );
     
    384516        if( !preemption_ready() ) { return; }
    385517
    386         __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", kernelTLS.this_processor, kernelTLS.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) );
     518        struct asm_region region;
     519        region = __cfaasm_label( get     ); if( ip >= region.before && ip <= region.after ) return;
     520        region = __cfaasm_label( check   ); if( ip >= region.before && ip <= region.after ) return;
     521        region = __cfaasm_label( disable ); if( ip >= region.before && ip <= region.after ) return;
     522        region = __cfaasm_label( enable  ); if( ip >= region.before && ip <= region.after ) return;
     523        region = __cfaasm_label( nopoll  ); if( ip >= region.before && ip <= region.after ) return;
     524
     525        __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", __cfaabi_tls.this_processor, __cfaabi_tls.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) );
    387526
    388527        // Sync flag : prevent recursive calls to the signal handler
    389         kernelTLS.preemption_state.in_progress = true;
     528        __cfaabi_tls.preemption_state.in_progress = true;
    390529
    391530        // Clear sighandler mask before context switching.
     
    397536        }
    398537
    399         // TODO: this should go in finish action
    400538        // Clear the in progress flag
    401         kernelTLS.preemption_state.in_progress = false;
     539        __cfaabi_tls.preemption_state.in_progress = false;
    402540
    403541        // Preemption can occur here
     
    416554        id.full_proc = false;
    417555        id.id = doregister(&id);
    418         kernelTLS.this_proc_id = &id;
     556        __cfaabi_tls.this_proc_id = &id;
    419557
    420558        // Block sigalrms to control when they arrive
     
    484622
    485623void __cfaabi_check_preemption() {
    486         bool ready = kernelTLS.preemption_state.enabled;
     624        bool ready = __preemption_enabled();
    487625        if(!ready) { abort("Preemption should be ready"); }
    488626
     
    507645#ifdef __CFA_WITH_VERIFY__
    508646bool __cfaabi_dbg_in_kernel() {
    509         return !kernelTLS.preemption_state.enabled;
     647        return !__preemption_enabled();
    510648}
    511649#endif
  • libcfa/src/concurrency/ready_queue.cfa

    rbe5e34b r8fc652e0  
    150150//  queues or removing them.
    151151uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
    152         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     152        /* paranoid */ verify( ! __preemption_enabled() );
    153153
    154154        // Step 1 : lock global lock
     
    166166        }
    167167
    168         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     168        /* paranoid */ verify( ! __preemption_enabled() );
    169169        return s;
    170170}
    171171
    172172void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
    173         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     173        /* paranoid */ verify( ! __preemption_enabled() );
    174174
    175175        // Step 1 : release local locks
     
    188188        __atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);
    189189
    190         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     190        /* paranoid */ verify( ! __preemption_enabled() );
    191191}
    192192
     
    252252                preferred =
    253253                        //*
    254                         kernelTLS.this_processor ? kernelTLS.this_processor->id * 4 : -1;
     254                        kernelTLS().this_processor ? kernelTLS().this_processor->id * 4 : -1;
    255255                        /*/
    256256                        thrd->link.preferred * 4;
     
    331331                // Don't bother trying locally too much
    332332                int local_tries = 8;
    333                 preferred = kernelTLS.this_processor->id * 4;
     333                preferred = kernelTLS().this_processor->id * 4;
    334334        #endif
    335335
Note: See TracChangeset for help on using the changeset viewer.