Timestamp:
Nov 6, 2020, 11:22:57 AM
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
54dcab1
Parents:
be5e34b
Message:

Change usage of TLS to more strongly segregate in-kernel and out-of-kernel usage.
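
The change brackets the kernel's TLS accesses with named assembler labels and full memory clobbers, so the preemption signal handler can later tell whether it interrupted a TLS access. A minimal sketch of the bracketing pattern, using a hypothetical `my_tls` variable in place of the real `__cfaabi_tls` block:

    #include <stdbool.h>

    // Hypothetical stand-in for the real __cfaabi_tls block.
    static __thread struct { volatile bool enabled; } my_tls;

    // noinline: inlining would duplicate the asm labels at every call site.
    __attribute__((noinline)) static bool bracketed_read(void) {
        // label before the access; the "memory" clobber prevents the
        // compiler from moving the TLS read across the label
        asm volatile("__cfaasm_example_before:":::"memory");

        // access TLS as normal
        bool enabled = my_tls.enabled;

        // label after the access, same clobber
        asm volatile("__cfaasm_example_after:":::"memory");
        return enabled;
    }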

File:
1 edited

  • libcfa/src/concurrency/preemption.cfa

    rbe5e34b r8fc652e0  
    164164//=============================================================================================
    165165
     166//----------
      167// special case for preemption since it is used often
     168bool __preemption_enabled() {
     169        // create an assembler label before
     170        // marked as clobbering all memory to prevent code motion
     171        asm volatile("__cfaasm_check_before:":::"memory");
     172
     173        // access tls as normal
     174        bool enabled = __cfaabi_tls.preemption_state.enabled;
     175
     176        // create a assembler label after
     177        // marked as clobber all to avoid movement
     178        asm volatile("__cfaasm_check_after:":::"memory");
     179        return enabled;
     180}
     181
     182//----------
     183// Get data from the TLS block
     184uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__)); // noinline: inlining would duplicate the asm labels below
     185uintptr_t __cfatls_get( unsigned long int offset ) {
     186        // create a assembler label before
     187        // marked as clobber all to avoid movement
     188        asm volatile("__cfaasm_get_before:":::"memory");
     189
     190        // access tls as normal (except for pointer arithmetic)
     191        uintptr_t val = *(uintptr_t*)((uintptr_t)&__cfaabi_tls + offset);
     192
     193        // create a assembler label after
     194        // marked as clobber all to avoid movement
     195        asm volatile("__cfaasm_get_after:":::"memory");
     196        return val;
     197}
     198
     199// //----------
     200// // Write data to the TLS block
     201// // sadly it loses the type information and can only write 1 word at a time
     202// // use with __builtin_offsetof
     203// void __cfatls_set(uintptr_t offset, void * value) __attribute__((__noinline__));
     204// void __cfatls_set(uintptr_t offset, void * value) {
     205//     // create a assembler label before
     206//     // marked as clobber all to avoid movement
     207//     asm volatile("__cfaasm_set_before:":::"memory");
     208
     209//     // access tls as normal (except for type information)
     210//     *(void**)(offset + (uintptr_t)&my_tls) = value;
     211
     212//     // create a assembler label after
     213//     // marked as clobber all to avoid movement
     214//     asm volatile("__cfaasm_set_after:":::"memory");
     215// }
     216
     217// //----------
     218// #include <stdio.h>
     219// int main() {
     220//     // Get the information
     221//     // Must use inline assembly to get access to the label
     222//     // C is annoying here because this could easily be a static const but "initializer element is not a compile-time constant"
     223//     // The big advantage of this approach is that there is zero overhead for the read and write functions
     224//     void * __cfaasm_addr_get_before = ({ void * value; asm("movq $__cfaasm_get_before, %[v]\n\t" : [v]"=r"(value) ); value; });
     225//     void * __cfaasm_addr_get_after  = ({ void * value; asm("movq $__cfaasm_get_after , %[v]\n\t" : [v]"=r"(value) ); value; });
     226//     void * __cfaasm_addr_set_before = ({ void * value; asm("movq $__cfaasm_set_before, %[v]\n\t" : [v]"=r"(value) ); value; });
     227//     void * __cfaasm_addr_set_after  = ({ void * value; asm("movq $__cfaasm_set_after , %[v]\n\t" : [v]"=r"(value) ); value; });
     228
     229//     printf("%p to %p\n", __cfaasm_addr_get_before, __cfaasm_addr_get_after);
     230//     printf("%p to %p\n", __cfaasm_addr_set_before, __cfaasm_addr_set_after);
     231//     return 0;
     232// }
     233
    166234__cfaabi_dbg_debug_do( static thread_local void * last_interrupt = 0; )
    167235
     
    169237        // Disable interrupts by incrementing the counter
    170238        void disable_interrupts() {
    171                 with( kernelTLS.preemption_state ) {
     239                // create an assembler label before
     240                // marked as clobbering all memory to prevent code motion
     241                asm volatile("__cfaasm_disable_before:":::"memory");
     242
     243                with( __cfaabi_tls.preemption_state ) {
    172244                        #if GCC_VERSION > 50000
    173245                        static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
     
    186258                        verify( new_val < 65_000u );              // If this triggers, someone is disabling interrupts without enabling them
    187259                }
     260
     261                // create an assembler label after
     262                // marked as clobbering all memory to prevent code motion
     263                asm volatile("__cfaasm_disable_after:":::"memory");
    188264        }
    189265
     
    191267        // If counter reaches 0, execute any pending __cfactx_switch
    192268        void enable_interrupts( __cfaabi_dbg_ctx_param ) {
    193                 processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
     269                // create an assembler label before
     270                // marked as clobbering all memory to prevent code motion
     271                asm volatile("__cfaasm_enable_before:":::"memory");
     272
     273                processor   * proc = __cfaabi_tls.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
    194274                /* paranoid */ verify( proc );
    195275
    196                 with( kernelTLS.preemption_state ){
     276                with( __cfaabi_tls.preemption_state ){
    197277                        unsigned short prev = disable_count;
    198278                        disable_count -= 1;
     
    221301                // For debugging purposes : keep track of the last person to enable the interrupts
    222302                __cfaabi_dbg_debug_do( proc->last_enable = caller; )
     303
     304                // create an assembler label after
     305                // marked as clobbering all memory to prevent code motion
     306                asm volatile("__cfaasm_enable_after:":::"memory");
    223307        }
    224308
     
    226310        // Don't execute any pending __cfactx_switch even if counter reaches 0
    227311        void enable_interrupts_noPoll() {
    228                 unsigned short prev = kernelTLS.preemption_state.disable_count;
    229                 kernelTLS.preemption_state.disable_count -= 1;
     312                // create an assembler label before
     313                // marked as clobbering all memory to prevent code motion
     314                asm volatile("__cfaasm_nopoll_before:":::"memory");
     315
     316                unsigned short prev = __cfaabi_tls.preemption_state.disable_count;
     317                __cfaabi_tls.preemption_state.disable_count -= 1;
    230318        verifyf( prev != 0u, "Incremented from %u\n", prev );                     // If this triggers, someone is enabling already-enabled interrupts
    231319                if( prev == 1 ) {
    232320                        #if GCC_VERSION > 50000
    233                         static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free");
     321                        static_assert(__atomic_always_lock_free(sizeof(__cfaabi_tls.preemption_state.enabled), &__cfaabi_tls.preemption_state.enabled), "Must be lock-free");
    234322                        #endif
    235323                        // Set enabled flag to true
    236324                        // should be atomic to avoid preemption in the middle of the operation.
    237325                        // use memory order RELAXED since there are no inter-thread requirements on this variable
    238                         __atomic_store_n(&kernelTLS.preemption_state.enabled, true, __ATOMIC_RELAXED);
     326                        __atomic_store_n(&__cfaabi_tls.preemption_state.enabled, true, __ATOMIC_RELAXED);
    239327
    240328                        // Signal the compiler that a fence is needed but only for signal handlers
    241329                        __atomic_signal_fence(__ATOMIC_RELEASE);
    242330                }
     331
     332                // create an assembler label after
     333                // marked as clobbering all memory to prevent code motion
     334                asm volatile("__cfaasm_nopoll_after:":::"memory");
    243335        }
    244336}
     
    275367static void timeout( $thread * this ) {
    276368        #if !defined( __CFA_NO_STATISTICS__ )
    277                 kernelTLS.this_stats = this->curr_cluster->stats;
     369                kernelTLS().this_stats = this->curr_cluster->stats;
    278370        #endif
    279371        unpark( this );
     
    286378static inline bool preemption_ready() {
    287379        // Check if preemption is safe
    288         bool ready = kernelTLS.preemption_state.enabled && ! kernelTLS.preemption_state.in_progress;
     380        bool ready = __cfaabi_tls.preemption_state.enabled && ! __cfaabi_tls.preemption_state.in_progress;
    289381
    290382        // Adjust the pending flag accordingly
    291         kernelTLS.this_processor->pending_preemption = !ready;
     383        __cfaabi_tls.this_processor->pending_preemption = !ready;
    292384        return ready;
    293385}
     
    303395
    304396        // Start with preemption disabled until ready
    305         kernelTLS.preemption_state.enabled = false;
    306         kernelTLS.preemption_state.disable_count = 1;
     397        __cfaabi_tls.preemption_state.enabled = false;
     398        __cfaabi_tls.preemption_state.disable_count = 1;
    307399
    308400        // Initialize the event kernel
     
    362454// Kernel Signal Handlers
    363455//=============================================================================================
     456struct asm_region {
     457        void * before;
     458        void * after;
     459};
     460
     461//-----------------------------------------------------------------------------
     462// Some assembly required
     463#if defined( __i386 )
     464        #define __cfaasm_label( label ) \
     465                ({ \
     466                        struct asm_region region; \
     467                        asm( \
     468                                "movl $__cfaasm_" #label "_before, %[vb]\n\t" \
     469                                "movl $__cfaasm_" #label "_after , %[va]\n\t" \
     470                                 : [vb]"=r"(region.before), [va]"=r"(region.after) \
     471                        ); \
     472                        region; \
     473                });
     474#elif defined( __x86_64 )
     475        #ifdef __PIC__
     476                #define PLT "@PLT"
     477        #else
     478                #define PLT ""
     479        #endif
     480        #define __cfaasm_label( label ) \
     481                ({ \
     482                        struct asm_region region; \
     483                        asm( \
     484                                "movq $__cfaasm_" #label "_before" PLT ", %[vb]\n\t" \
     485                                "movq $__cfaasm_" #label "_after"  PLT ", %[va]\n\t" \
     486                                 : [vb]"=r"(region.before), [va]"=r"(region.after) \
     487                        ); \
     488                        region; \
     489                });
     490#elif defined( __aarch64__ )
     491        #error __cfaasm_label undefined for arm
     492#else
     493        #error unknown hardware architecture
     494#endif
    364495
    365496// Context switch signal handler
    366497// Receives SIGUSR1 signal and causes the current thread to yield
    367498static void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) {
    368         __cfaabi_dbg_debug_do( last_interrupt = (void *)(cxt->uc_mcontext.CFA_REG_IP); )
     499        void * ip = (void *)(cxt->uc_mcontext.CFA_REG_IP);
     500        __cfaabi_dbg_debug_do( last_interrupt = ip; )
    369501
    370502        // SKULLDUGGERY: if a thread creates a processor and then immediately deletes it,
     
    372504        // before the kernel thread has even started running. When that happens, an interrupt
    373505        // with a null 'this_processor' will be caught, just ignore it.
    374         if(! kernelTLS.this_processor ) return;
     506        if(! __cfaabi_tls.this_processor ) return;
    375507
    376508        choose(sfp->si_value.sival_int) {
    377509                case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
    378                 case PREEMPT_TERMINATE: verify( __atomic_load_n( &kernelTLS.this_processor->do_terminate, __ATOMIC_SEQ_CST ) );
     510                case PREEMPT_TERMINATE: verify( __atomic_load_n( &__cfaabi_tls.this_processor->do_terminate, __ATOMIC_SEQ_CST ) );
    379511                default:
    380512                        abort( "internal error, signal value is %d", sfp->si_value.sival_int );
     
    384516        if( !preemption_ready() ) { return; }
    385517
    386         __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", kernelTLS.this_processor, kernelTLS.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) );
     518        struct asm_region region;
     519        region = __cfaasm_label( get     ); if( ip >= region.before && ip <= region.after ) return;
     520        region = __cfaasm_label( check   ); if( ip >= region.before && ip <= region.after ) return;
     521        region = __cfaasm_label( disable ); if( ip >= region.before && ip <= region.after ) return;
     522        region = __cfaasm_label( enable  ); if( ip >= region.before && ip <= region.after ) return;
     523        region = __cfaasm_label( nopoll  ); if( ip >= region.before && ip <= region.after ) return;
     524
     525        __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", __cfaabi_tls.this_processor, __cfaabi_tls.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) );
    387526
    388527        // Sync flag : prevent recursive calls to the signal handler
    389         kernelTLS.preemption_state.in_progress = true;
     528        __cfaabi_tls.preemption_state.in_progress = true;
    390529
    391530        // Clear sighandler mask before context switching.
     
    397536        }
    398537
    399         // TODO: this should go in finish action
    400538        // Clear the in progress flag
    401         kernelTLS.preemption_state.in_progress = false;
     539        __cfaabi_tls.preemption_state.in_progress = false;
    402540
    403541        // Preemption can occur here
     
    416554        id.full_proc = false;
    417555        id.id = doregister(&id);
    418         kernelTLS.this_proc_id = &id;
     556        __cfaabi_tls.this_proc_id = &id;
    419557
    420558        // Block sigalrms to control when they arrive
     
    484622
    485623void __cfaabi_check_preemption() {
    486         bool ready = kernelTLS.preemption_state.enabled;
     624        bool ready = __preemption_enabled();
    487625        if(!ready) { abort("Preemption should be ready"); }
    488626
     
    507645#ifdef __CFA_WITH_VERIFY__
    508646bool __cfaabi_dbg_in_kernel() {
    509         return !kernelTLS.preemption_state.enabled;
     647        return !__preemption_enabled();
    510648}
    511649#endif