Changeset b10affd


Ignore:
Timestamp:
Mar 30, 2018, 7:21:28 PM (4 years ago)
Author:
Peter A. Buhr <pabuhr@…>
Branches:
aaron-thesis, arm-eh, cleanup-dtors, deferred_resn, demangler, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, with_gc
Children:
273cde6
Parents:
fae90d5
Message:

thread-local storage converted to a structure, with thread-local macros for access

Location:
src/libcfa
Files:
10 edited

Legend:

Unmodified
Added
Removed
  • src/libcfa/bits/locks.h

    rfae90d5 rb10affd  
    1010// Created On       : Tue Oct 31 15:14:38 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Dec  8 16:02:22 2017
    13 // Update Count     : 1
     12// Last Modified On : Fri Mar 30 18:18:13 2018
     13// Update Count     : 9
    1414//
    1515
     
    6464
    6565        extern void yield( unsigned int );
    66         extern thread_local struct thread_desc *    volatile this_thread;
    67         extern thread_local struct processor *      volatile this_processor;
    6866
    6967        static inline void ?{}( __spinlock_t & this ) {
     
    7674                if( result ) {
    7775                        disable_interrupts();
    78                         __cfaabi_dbg_debug_do(
    79                                 this.prev_name = caller;
    80                                 this.prev_thrd = this_thread;
    81                         )
     76                        // __cfaabi_dbg_debug_do(
     77                        //      this.prev_name = caller;
     78                        //      this.prev_thrd = TL_GET( this_thread );
     79                        // )
    8280                }
    8381                return result;
     
    107105                }
    108106                disable_interrupts();
    109                 __cfaabi_dbg_debug_do(
    110                         this.prev_name = caller;
    111                         this.prev_thrd = this_thread;
    112                 )
     107                // __cfaabi_dbg_debug_do(
     108                //      this.prev_name = caller;
     109                //      this.prev_thrd = TL_GET( this_thread );
     110                // )
    113111        }
    114112
  • src/libcfa/concurrency/coroutine

    rfae90d5 rb10affd  
    1010// Created On       : Mon Nov 28 12:27:26 2016
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Wed Aug 30 07:58:29 2017
    13 // Update Count     : 3
     12// Last Modified On : Fri Mar 30 18:23:45 2018
     13// Update Count     : 8
    1414//
    1515
     
    6060}
    6161
    62 // Get current coroutine
    63 extern thread_local coroutine_desc * volatile this_coroutine;
    64 
    6562// Private wrappers for context switch and stack creation
    6663extern void CoroutineCtxSwitch(coroutine_desc * src, coroutine_desc * dst);
     
    6966// Suspend implementation inlined for performance
    7067static inline void suspend() {
    71         coroutine_desc * src = this_coroutine;          // optimization
     68        coroutine_desc * src = TL_GET( this_coroutine );                        // optimization
    7269
    7370        assertf( src->last != 0,
     
    8683forall(dtype T | is_coroutine(T))
    8784static inline void resume(T & cor) {
    88         coroutine_desc * src = this_coroutine;          // optimization
     85        coroutine_desc * src = TL_GET( this_coroutine );                        // optimization
    8986        coroutine_desc * dst = get_coroutine(cor);
    9087
     
    111108
    112109static inline void resume(coroutine_desc * dst) {
    113         coroutine_desc * src = this_coroutine;          // optimization
     110        coroutine_desc * src = TL_GET( this_coroutine );                        // optimization
    114111
    115112        // not resuming self ?
  • src/libcfa/concurrency/coroutine.c

    rfae90d5 rb10affd  
    1010// Created On       : Mon Nov 28 12:27:26 2016
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Thu Feb  8 16:10:31 2018
    13 // Update Count     : 4
     12// Last Modified On : Fri Mar 30 17:20:57 2018
     13// Update Count     : 9
    1414//
    1515
     
    9999// Wrapper for co
    100100void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
    101         verify( preemption_state.enabled || this_processor->do_terminate );
     101        verify( TL_GET( preemption_state ).enabled || TL_GET( this_processor )->do_terminate );
    102102        disable_interrupts();
    103103
     
    106106
    107107        // set new coroutine that task is executing
    108         this_coroutine = dst;
     108        TL_SET( this_coroutine, dst );
    109109
    110110        // context switch to specified coroutine
     
    117117
    118118        enable_interrupts( __cfaabi_dbg_ctx );
    119         verify( preemption_state.enabled || this_processor->do_terminate );
     119        verify( TL_GET( preemption_state ).enabled || TL_GET( this_processor )->do_terminate );
    120120} //ctxSwitchDirect
    121121
     
    172172
    173173        void __leave_coroutine(void) {
    174                 coroutine_desc * src = this_coroutine;          // optimization
     174                coroutine_desc * src = TL_GET( this_coroutine ); // optimization
    175175
    176176                assertf( src->starter != 0,
  • src/libcfa/concurrency/invoke.h

    rfae90d5 rb10affd  
    1010// Created On       : Tue Jan 17 12:27:26 2016
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Feb  9 14:41:55 2018
    13 // Update Count     : 6
     12// Last Modified On : Fri Mar 30 14:28:31 2018
     13// Update Count     : 29
    1414//
    1515
     
    1717#include "bits/defs.h"
    1818#include "bits/locks.h"
     19
     20#define TL_GET( member ) kernelThreadData.member
     21#define TL_SET( member, value ) kernelThreadData.member = value;
    1922
    2023#ifdef __cforall
     
    3033                static inline struct thread_desc             * & get_next( struct thread_desc             & this );
    3134                static inline struct __condition_criterion_t * & get_next( struct __condition_criterion_t & this );
     35
     36                extern thread_local struct KernelThreadData {
     37                        struct coroutine_desc * volatile this_coroutine;
     38                        struct thread_desc    * volatile this_thread;
     39                        struct processor      * volatile this_processor;
     40
     41                        struct {
     42                                volatile unsigned short disable_count;
     43                                volatile bool enabled;
     44                                volatile bool in_progress;
     45                        } preemption_state;
     46                } kernelThreadData;
    3247        }
    3348        #endif
    3449
     50        static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_coroutine ); }
     51        static inline struct thread_desc * volatile active_thread() { return TL_GET( this_thread ); }
     52        static inline struct processor * volatile active_processor() { return TL_GET( this_processor ); }
     53
    3554        struct coStack_t {
    36                 // size of stack
    37                 size_t size;
    38 
    39                 // pointer to stack
    40                 void *storage;
    41 
    42                 // stack grows towards stack limit
    43                 void *limit;
    44 
    45                 // base of stack
    46                 void *base;
    47 
    48                 // address of cfa_context_t
    49                 void *context;
    50 
    51                 // address of top of storage
    52                 void *top;
    53 
    54                 // whether or not the user allocated the stack
    55                 bool userStack;
     55                size_t size;                                                                    // size of stack
     56                void * storage;                                                                 // pointer to stack
     57                void * limit;                                                                   // stack grows towards stack limit
     58                void * base;                                                                    // base of stack
     59                void * context;                                                                 // address of cfa_context_t
     60                void * top;                                                                             // address of top of storage
     61                bool userStack;                                                                 // whether or not the user allocated the stack
    5662        };
    5763
     
    5965
    6066        struct coroutine_desc {
    61                 // stack information of the coroutine
    62                 struct coStack_t stack;
    63 
    64                 // textual name for coroutine/task, initialized by uC++ generated code
    65                 const char *name;
    66 
    67                 // copy of global UNIX variable errno
    68                 int errno_;
    69 
    70                 // current execution status for coroutine
    71                 enum coroutine_state state;
    72 
    73                 // first coroutine to resume this one
    74                 struct coroutine_desc * starter;
    75 
    76                 // last coroutine to resume this one
    77                 struct coroutine_desc * last;
     67                struct coStack_t stack;                                                 // stack information of the coroutine
     68                const char * name;                                                              // textual name for coroutine/task, initialized by uC++ generated code
     69                int errno_;                                                                             // copy of global UNIX variable errno
     70                enum coroutine_state state;                                             // current execution status for coroutine
     71                struct coroutine_desc * starter;                                // first coroutine to resume this one
     72                struct coroutine_desc * last;                                   // last coroutine to resume this one
    7873        };
    7974
  • src/libcfa/concurrency/kernel.c

    rfae90d5 rb10affd  
    1010// Created On       : Tue Jan 17 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Thu Feb  8 23:52:19 2018
    13 // Update Count     : 5
     12// Last Modified On : Fri Mar 30 18:26:11 2018
     13// Update Count     : 23
    1414//
    1515
     
    5252// Global state
    5353
    54 thread_local coroutine_desc * volatile this_coroutine;
    55 thread_local thread_desc *    volatile this_thread;
    56 thread_local processor *      volatile this_processor;
    57 
    5854// volatile thread_local bool preemption_in_progress = 0;
    5955// volatile thread_local bool preemption_enabled = false;
    6056// volatile thread_local unsigned short disable_preempt_count = 1;
    6157
    62 volatile thread_local __cfa_kernel_preemption_state_t preemption_state = { false, false, 1 };
     58thread_local struct KernelThreadData kernelThreadData = {
     59        NULL,
     60        NULL,
     61        NULL,
     62        { 1, false, false }
     63};
    6364
    6465//-----------------------------------------------------------------------------
     
    172173                terminate(&this);
    173174                verify(this.do_terminate);
    174                 verify(this_processor != &this);
     175                verify(TL_GET( this_processor ) != &this);
    175176                P( terminated );
    176                 verify(this_processor != &this);
     177                verify(TL_GET( this_processor ) != &this);
    177178                pthread_join( kernel_thread, NULL );
    178179        }
     
    213214                        if(readyThread)
    214215                        {
    215                                 verify( !preemption_state.enabled );
     216                                verify( ! TL_GET( preemption_state ).enabled );
    216217
    217218                                runThread(this, readyThread);
    218219
    219                                 verify( !preemption_state.enabled );
     220                                verify( ! TL_GET( preemption_state ).enabled );
    220221
    221222                                //Some actions need to be taken from the kernel
     
    249250
    250251        //Update global state
    251         this_thread = dst;
     252        TL_SET( this_thread, dst );
    252253
    253254        // Context Switch to the thread
     
    257258
    258259void returnToKernel() {
    259         coroutine_desc * proc_cor = get_coroutine(this_processor->runner);
    260         coroutine_desc * thrd_cor = this_thread->curr_cor = this_coroutine;
     260        coroutine_desc * proc_cor = get_coroutine(TL_GET( this_processor )->runner);
     261        coroutine_desc * thrd_cor = TL_GET( this_thread )->curr_cor = TL_GET( this_coroutine );
    261262        ThreadCtxSwitch(thrd_cor, proc_cor);
    262263}
     
    266267void finishRunning(processor * this) with( this->finish ) {
    267268        if( action_code == Release ) {
    268                 verify( !preemption_state.enabled );
     269                verify( ! TL_GET( preemption_state ).enabled );
    269270                unlock( *lock );
    270271        }
     
    273274        }
    274275        else if( action_code == Release_Schedule ) {
    275                 verify( !preemption_state.enabled );
     276                verify( ! TL_GET( preemption_state ).enabled );
    276277                unlock( *lock );
    277278                ScheduleThread( thrd );
    278279        }
    279280        else if( action_code == Release_Multi ) {
    280                 verify( !preemption_state.enabled );
     281                verify( ! TL_GET( preemption_state ).enabled );
    281282                for(int i = 0; i < lock_count; i++) {
    282283                        unlock( *locks[i] );
     
    307308void * CtxInvokeProcessor(void * arg) {
    308309        processor * proc = (processor *) arg;
    309         this_processor = proc;
    310         this_coroutine = NULL;
    311         this_thread = NULL;
    312         preemption_state.enabled = false;
    313         preemption_state.disable_count = 1;
     310        TL_SET( this_processor, proc );
     311        TL_SET( this_coroutine, NULL );
     312        TL_SET( this_thread, NULL );
     313        TL_GET( preemption_state ).enabled = false;
     314        TL_GET( preemption_state ).disable_count = 1;
    314315        // SKULLDUGGERY: We want to create a context for the processor coroutine
    315316        // which is needed for the 2-step context switch. However, there is no reason
     
    323324
    324325        //Set global state
    325         this_coroutine = get_coroutine(proc->runner);
    326         this_thread = NULL;
     326        TL_SET( this_coroutine, get_coroutine(proc->runner) );
     327        TL_SET( this_thread, NULL );
    327328
    328329        //We now have a proper context from which to schedule threads
     
    352353
    353354void kernel_first_resume(processor * this) {
    354         coroutine_desc * src = this_coroutine;
     355        coroutine_desc * src = TL_GET( this_coroutine );
    355356        coroutine_desc * dst = get_coroutine(this->runner);
    356357
    357         verify( !preemption_state.enabled );
     358        verify( ! TL_GET( preemption_state ).enabled );
    358359
    359360        create_stack(&dst->stack, dst->stack.size);
    360361        CtxStart(&this->runner, CtxInvokeCoroutine);
    361362
    362         verify( !preemption_state.enabled );
     363        verify( ! TL_GET( preemption_state ).enabled );
    363364
    364365        dst->last = src;
     
    369370
    370371        // set new coroutine that task is executing
    371         this_coroutine = dst;
     372        TL_SET( this_coroutine, dst );
    372373
    373374        // SKULLDUGGERY normally interrupts are enable before leaving a coroutine ctxswitch.
     
    386387        src->state = Active;
    387388
    388         verify( !preemption_state.enabled );
     389        verify( ! TL_GET( preemption_state ).enabled );
    389390}
    390391
     
    392393// Scheduler routines
    393394void ScheduleThread( thread_desc * thrd ) {
    394         // if( !thrd ) return;
     395        // if( ! thrd ) return;
    395396        verify( thrd );
    396397        verify( thrd->self_cor.state != Halted );
    397398
    398         verify( !preemption_state.enabled );
     399        verify( ! TL_GET( preemption_state ).enabled );
    399400
    400401        verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );
    401402
    402         with( *this_processor->cltr ) {
     403        with( *TL_GET( this_processor )->cltr ) {
    403404                lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
    404405                append( ready_queue, thrd );
     
    406407        }
    407408
    408         verify( !preemption_state.enabled );
     409        verify( ! TL_GET( preemption_state ).enabled );
    409410}
    410411
    411412thread_desc * nextThread(cluster * this) with( *this ) {
    412         verify( !preemption_state.enabled );
     413        verify( ! TL_GET( preemption_state ).enabled );
    413414        lock( ready_queue_lock __cfaabi_dbg_ctx2 );
    414415        thread_desc * head = pop_head( ready_queue );
    415416        unlock( ready_queue_lock );
    416         verify( !preemption_state.enabled );
     417        verify( ! TL_GET( preemption_state ).enabled );
    417418        return head;
    418419}
     
    420421void BlockInternal() {
    421422        disable_interrupts();
    422         verify( !preemption_state.enabled );
     423        verify( ! TL_GET( preemption_state ).enabled );
    423424        returnToKernel();
    424         verify( !preemption_state.enabled );
     425        verify( ! TL_GET( preemption_state ).enabled );
    425426        enable_interrupts( __cfaabi_dbg_ctx );
    426427}
     
    428429void BlockInternal( __spinlock_t * lock ) {
    429430        disable_interrupts();
    430         this_processor->finish.action_code = Release;
    431         this_processor->finish.lock        = lock;
    432 
    433         verify( !preemption_state.enabled );
     431        TL_GET( this_processor )->finish.action_code = Release;
     432        TL_GET( this_processor )->finish.lock        = lock;
     433
     434        verify( ! TL_GET( preemption_state ).enabled );
    434435        returnToKernel();
    435         verify( !preemption_state.enabled );
     436        verify( ! TL_GET( preemption_state ).enabled );
    436437
    437438        enable_interrupts( __cfaabi_dbg_ctx );
     
    440441void BlockInternal( thread_desc * thrd ) {
    441442        disable_interrupts();
    442         this_processor->finish.action_code = Schedule;
    443         this_processor->finish.thrd        = thrd;
    444 
    445         verify( !preemption_state.enabled );
     443        TL_GET( this_processor )->finish.action_code = Schedule;
     444        TL_GET( this_processor )->finish.thrd        = thrd;
     445
     446        verify( ! TL_GET( preemption_state ).enabled );
    446447        returnToKernel();
    447         verify( !preemption_state.enabled );
     448        verify( ! TL_GET( preemption_state ).enabled );
    448449
    449450        enable_interrupts( __cfaabi_dbg_ctx );
     
    453454        assert(thrd);
    454455        disable_interrupts();
    455         this_processor->finish.action_code = Release_Schedule;
    456         this_processor->finish.lock        = lock;
    457         this_processor->finish.thrd        = thrd;
    458 
    459         verify( !preemption_state.enabled );
     456        TL_GET( this_processor )->finish.action_code = Release_Schedule;
     457        TL_GET( this_processor )->finish.lock        = lock;
     458        TL_GET( this_processor )->finish.thrd        = thrd;
     459
     460        verify( ! TL_GET( preemption_state ).enabled );
    460461        returnToKernel();
    461         verify( !preemption_state.enabled );
     462        verify( ! TL_GET( preemption_state ).enabled );
    462463
    463464        enable_interrupts( __cfaabi_dbg_ctx );
     
    466467void BlockInternal(__spinlock_t * locks [], unsigned short count) {
    467468        disable_interrupts();
    468         this_processor->finish.action_code = Release_Multi;
    469         this_processor->finish.locks       = locks;
    470         this_processor->finish.lock_count  = count;
    471 
    472         verify( !preemption_state.enabled );
     469        TL_GET( this_processor )->finish.action_code = Release_Multi;
     470        TL_GET( this_processor )->finish.locks       = locks;
     471        TL_GET( this_processor )->finish.lock_count  = count;
     472
     473        verify( ! TL_GET( preemption_state ).enabled );
    473474        returnToKernel();
    474         verify( !preemption_state.enabled );
     475        verify( ! TL_GET( preemption_state ).enabled );
    475476
    476477        enable_interrupts( __cfaabi_dbg_ctx );
     
    479480void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
    480481        disable_interrupts();
    481         this_processor->finish.action_code = Release_Multi_Schedule;
    482         this_processor->finish.locks       = locks;
    483         this_processor->finish.lock_count  = lock_count;
    484         this_processor->finish.thrds       = thrds;
    485         this_processor->finish.thrd_count  = thrd_count;
    486 
    487         verify( !preemption_state.enabled );
     482        TL_GET( this_processor )->finish.action_code = Release_Multi_Schedule;
     483        TL_GET( this_processor )->finish.locks       = locks;
     484        TL_GET( this_processor )->finish.lock_count  = lock_count;
     485        TL_GET( this_processor )->finish.thrds       = thrds;
     486        TL_GET( this_processor )->finish.thrd_count  = thrd_count;
     487
     488        verify( ! TL_GET( preemption_state ).enabled );
    488489        returnToKernel();
    489         verify( !preemption_state.enabled );
     490        verify( ! TL_GET( preemption_state ).enabled );
    490491
    491492        enable_interrupts( __cfaabi_dbg_ctx );
     
    493494
    494495void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
    495         verify( !preemption_state.enabled );
    496         this_processor->finish.action_code = thrd ? Release_Schedule : Release;
    497         this_processor->finish.lock        = lock;
    498         this_processor->finish.thrd        = thrd;
     496        verify( ! TL_GET( preemption_state ).enabled );
     497        TL_GET( this_processor )->finish.action_code = thrd ? Release_Schedule : Release;
     498        TL_GET( this_processor )->finish.lock        = lock;
     499        TL_GET( this_processor )->finish.thrd        = thrd;
    499500
    500501        returnToKernel();
     
    507508// Kernel boot procedures
    508509void kernel_startup(void) {
    509         verify( !preemption_state.enabled );
     510        verify( ! TL_GET( preemption_state ).enabled );
    510511        __cfaabi_dbg_print_safe("Kernel : Starting\n");
    511512
     
    531532
    532533        //initialize the global state variables
    533         this_processor = mainProcessor;
    534         this_thread = mainThread;
    535         this_coroutine = &mainThread->self_cor;
     534        TL_SET( this_processor, mainProcessor );
     535        TL_SET( this_thread, mainThread );
     536        TL_SET( this_coroutine, &mainThread->self_cor );
    536537
    537538        // Enable preemption
     
    545546        // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
    546547        // mainThread is on the ready queue when this call is made.
    547         kernel_first_resume( this_processor );
     548        kernel_first_resume( TL_GET( this_processor ) );
    548549
    549550
     
    552553        __cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n");
    553554
    554         verify( !preemption_state.enabled );
     555        verify( ! TL_GET( preemption_state ).enabled );
    555556        enable_interrupts( __cfaabi_dbg_ctx );
    556         verify( preemption_state.enabled );
     557        verify( TL_GET( preemption_state ).enabled );
    557558}
    558559
     
    560561        __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");
    561562
    562         verify( preemption_state.enabled );
     563        verify( TL_GET( preemption_state ).enabled );
    563564        disable_interrupts();
    564         verify( !preemption_state.enabled );
     565        verify( ! TL_GET( preemption_state ).enabled );
    565566
    566567        // SKULLDUGGERY: Notify the mainProcessor it needs to terminates.
     
    602603
    603604        // first task to abort ?
    604         if ( !kernel_abort_called ) {                   // not first task to abort ?
     605        if ( ! kernel_abort_called ) {                  // not first task to abort ?
    605606                kernel_abort_called = true;
    606607                unlock( kernel_abort_lock );
     
    617618        }
    618619
    619         return this_thread;
     620        return TL_GET( this_thread );
    620621}
    621622
     
    626627        __cfaabi_dbg_bits_write( abort_text, len );
    627628
    628         if ( thrd != this_coroutine ) {
    629                 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine->name, this_coroutine );
     629        if ( thrd != TL_GET( this_coroutine ) ) {
     630                len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", TL_GET( this_coroutine )->name, TL_GET( this_coroutine ) );
    630631                __cfaabi_dbg_bits_write( abort_text, len );
    631632        }
     
    636637
    637638int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
    638         return get_coroutine(this_thread) == get_coroutine(mainThread) ? 4 : 2;
     639        return get_coroutine(TL_GET( this_thread )) == get_coroutine(mainThread) ? 4 : 2;
    639640}
    640641
     
    666667        if ( count < 0 ) {
    667668                // queue current task
    668                 append( waiting, (thread_desc *)this_thread );
     669                append( waiting, (thread_desc *)TL_GET( this_thread ) );
    669670
    670671                // atomically release spin lock and block
  • src/libcfa/concurrency/kernel_private.h

    rfae90d5 rb10affd  
    1010// Created On       : Mon Feb 13 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sat Jul 22 09:58:09 2017
    13 // Update Count     : 2
     12// Last Modified On : Thu Mar 29 14:06:40 2018
     13// Update Count     : 3
    1414//
    1515
     
    6666extern event_kernel_t * event_kernel;
    6767
    68 extern thread_local coroutine_desc * volatile this_coroutine;
    69 extern thread_local thread_desc *    volatile this_thread;
    70 extern thread_local processor *      volatile this_processor;
     68//extern thread_local coroutine_desc * volatile this_coroutine;
     69//extern thread_local thread_desc *    volatile this_thread;
     70//extern thread_local processor *      volatile this_processor;
    7171
    7272// extern volatile thread_local bool preemption_in_progress;
  • src/libcfa/concurrency/monitor.c

    rfae90d5 rb10affd  
    1010// Created On       : Thd Feb 23 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Feb 16 14:49:53 2018
    13 // Update Count     : 5
     12// Last Modified On : Fri Mar 30 14:30:26 2018
     13// Update Count     : 9
    1414//
    1515
     
    8585                // Lock the monitor spinlock
    8686                lock( this->lock __cfaabi_dbg_ctx2 );
    87                 thread_desc * thrd = this_thread;
     87                thread_desc * thrd = TL_GET( this_thread );
    8888
    8989                __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
     
    134134                // Lock the monitor spinlock
    135135                lock( this->lock __cfaabi_dbg_ctx2 );
    136                 thread_desc * thrd = this_thread;
     136                thread_desc * thrd = TL_GET( this_thread );
    137137
    138138                __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
     
    168168
    169169                        // Create the node specific to this wait operation
    170                         wait_ctx_primed( this_thread, 0 )
     170                        wait_ctx_primed( TL_GET( this_thread ), 0 )
    171171
    172172                        // Some one else has the monitor, wait for him to finish and then run
     
    179179                        __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
    180180
    181                         wait_ctx( this_thread, 0 )
     181                        wait_ctx( TL_GET( this_thread ), 0 )
    182182                        this->dtor_node = &waiter;
    183183
     
    199199                lock( this->lock __cfaabi_dbg_ctx2 );
    200200
    201                 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", this_thread, this, this->owner);
    202 
    203                 verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", this_thread, this->owner, this->recursion, this );
     201                __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", TL_GET( this_thread ), this, this->owner);
     202
     203                verifyf( TL_GET( this_thread ) == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", TL_GET( this_thread ), this->owner, this->recursion, this );
    204204
    205205                // Leaving a recursion level, decrement the counter
     
    227227        void __leave_dtor_monitor_desc( monitor_desc * this ) {
    228228                __cfaabi_dbg_debug_do(
    229                         if( this_thread != this->owner ) {
    230                                 abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, this_thread, this->owner);
     229                        if( TL_GET( this_thread ) != this->owner ) {
     230                                abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner);
    231231                        }
    232232                        if( this->recursion != 1 ) {
     
    297297
    298298        // Save previous thread context
    299         this.prev = this_thread->monitors;
     299        this.prev = TL_GET( this_thread )->monitors;
    300300
    301301        // Update thread context (needed for conditions)
    302         (this_thread->monitors){m, count, func};
     302        (TL_GET( this_thread )->monitors){m, count, func};
    303303
    304304        // __cfaabi_dbg_print_safe( "MGUARD : enter %d\n", count);
     
    322322
    323323        // Restore thread context
    324         this_thread->monitors = this.prev;
     324        TL_GET( this_thread )->monitors = this.prev;
    325325}
    326326
     
    332332
    333333        // Save previous thread context
    334         this.prev = this_thread->monitors;
     334        this.prev = TL_GET( this_thread )->monitors;
    335335
    336336        // Update thread context (needed for conditions)
    337         (this_thread->monitors){m, 1, func};
     337        (TL_GET( this_thread )->monitors){m, 1, func};
    338338
    339339        __enter_monitor_dtor( this.m, func );
     
    346346
    347347        // Restore thread context
    348         this_thread->monitors = this.prev;
     348        TL_GET( this_thread )->monitors = this.prev;
    349349}
    350350
     
    386386
    387387        // Create the node specific to this wait operation
    388         wait_ctx( this_thread, user_info );
     388        wait_ctx( TL_GET( this_thread ), user_info );
    389389
    390390        // Append the current wait operation to the ones already queued on the condition
     
    425425        //Some more checking in debug
    426426        __cfaabi_dbg_debug_do(
    427                 thread_desc * this_thrd = this_thread;
     427                thread_desc * this_thrd = TL_GET( this_thread );
    428428                if ( this.monitor_count != this_thrd->monitors.size ) {
    429429                        abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size );
     
    473473
    474474        // Create the node specific to this wait operation
    475         wait_ctx_primed( this_thread, 0 )
     475        wait_ctx_primed( TL_GET( this_thread ), 0 )
    476476
    477477        //save contexts
     
    566566
    567567                                // Create the node specific to this wait operation
    568                                 wait_ctx_primed( this_thread, 0 );
     568                                wait_ctx_primed( TL_GET( this_thread ), 0 );
    569569
    570570                                // Save monitor states
     
    612612
    613613        // Create the node specific to this wait operation
    614         wait_ctx_primed( this_thread, 0 );
     614        wait_ctx_primed( TL_GET( this_thread ), 0 );
    615615
    616616        monitor_save;
     
    618618
    619619        for( __lock_size_t i = 0; i < count; i++) {
    620                 verify( monitors[i]->owner == this_thread );
     620                verify( monitors[i]->owner == TL_GET( this_thread ) );
    621621        }
    622622
     
    812812
    813813static inline void brand_condition( condition & this ) {
    814         thread_desc * thrd = this_thread;
     814        thread_desc * thrd = TL_GET( this_thread );
    815815        if( !this.monitors ) {
    816816                // __cfaabi_dbg_print_safe( "Branding\n" );
  • src/libcfa/concurrency/preemption.c

    rfae90d5 rb10affd  
    1010// Created On       : Mon Jun 5 14:20:42 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue Mar 27 11:28:51 2018
    13 // Update Count     : 24
     12// Last Modified On : Fri Mar 30 17:27:43 2018
     13// Update Count     : 31
    1414//
    1515
     
    150150        // Disable interrupts by incrementing the counter
    151151        void disable_interrupts() {
    152                 preemption_state.enabled = false;
    153                 __attribute__((unused)) unsigned short new_val = preemption_state.disable_count + 1;
    154                 preemption_state.disable_count = new_val;
     152                TL_GET( preemption_state ).enabled = false;
     153                __attribute__((unused)) unsigned short new_val = TL_GET( preemption_state ).disable_count + 1;
     154                TL_GET( preemption_state ).disable_count = new_val;
    155155                verify( new_val < 65_000u );              // If this triggers someone is disabling interrupts without enabling them
    156156        }
     
    159159        // If counter reaches 0, execute any pending CtxSwitch
    160160        void enable_interrupts( __cfaabi_dbg_ctx_param ) {
    161                 processor   * proc = this_processor;      // Cache the processor now since interrupts can start happening after the atomic add
    162                 thread_desc * thrd = this_thread;         // Cache the thread now since interrupts can start happening after the atomic add
    163 
    164                 unsigned short prev = preemption_state.disable_count;
    165                 preemption_state.disable_count -= 1;
     161                processor   * proc = TL_GET( this_processor ); // Cache the processor now since interrupts can start happening after the atomic add
     162                thread_desc * thrd = TL_GET( this_thread );       // Cache the thread now since interrupts can start happening after the atomic add
     163
     164                unsigned short prev = TL_GET( preemption_state ).disable_count;
     165                TL_GET( preemption_state ).disable_count -= 1;
    166166                verify( prev != 0u );                     // If this triggers, someone has already enabled interrupts
    167167
    168168                // Check if we need to prempt the thread because an interrupt was missed
    169169                if( prev == 1 ) {
    170                         preemption_state.enabled = true;
     170                        TL_GET( preemption_state ).enabled = true;
    171171                        if( proc->pending_preemption ) {
    172172                                proc->pending_preemption = false;
     
    182182        // Don't execute any pending CtxSwitch even if counter reaches 0
    183183        void enable_interrupts_noPoll() {
    184                 unsigned short prev = preemption_state.disable_count;
    185                 preemption_state.disable_count -= 1;
     184                unsigned short prev = TL_GET( preemption_state ).disable_count;
     185                TL_GET( preemption_state ).disable_count -= 1;
    186186                verifyf( prev != 0u, "Incremented from %u\n", prev );                     // If this triggers, someone has already enabled interrupts
    187187                if( prev == 1 ) {
    188                         preemption_state.enabled = true;
     188                        TL_GET( preemption_state ).enabled = true;
    189189                }
    190190        }
     
    236236// If false : preemption is unsafe and marked as pending
    237237static inline bool preemption_ready() {
    238         bool ready = preemption_state.enabled && !preemption_state.in_progress; // Check if preemption is safe
    239         this_processor->pending_preemption = !ready;                        // Adjust the pending flag accordingly
     238        bool ready = TL_GET( preemption_state ).enabled && !TL_GET( preemption_state ).in_progress; // Check if preemption is safe
     239        TL_GET( this_processor )->pending_preemption = !ready;                  // Adjust the pending flag accordingly
    240240        return ready;
    241241}
     
    251251
    252252        // Start with preemption disabled until ready
    253         preemption_state.enabled = false;
    254         preemption_state.disable_count = 1;
     253        TL_GET( preemption_state ).enabled = false;
     254        TL_GET( preemption_state ).disable_count = 1;
    255255
    256256        // Initialize the event kernel
     
    317317        // before the kernel thread has even started running. When that happens an interrupt
    318318        // with a null 'this_processor' will be caught; just ignore it.
    319         if(!this_processor) return;
     319        if(!TL_GET( this_processor )) return;
    320320
    321321        choose(sfp->si_value.sival_int) {
    322322                case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
    323                 case PREEMPT_TERMINATE: verify(this_processor->do_terminate);
     323                case PREEMPT_TERMINATE: verify(TL_GET( this_processor )->do_terminate);
    324324                default:
    325325                        abort( "internal error, signal value is %d", sfp->si_value.sival_int );
     
    331331        __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", this_processor, this_thread);
    332332
    333         preemption_state.in_progress = true;                      // Sync flag : prevent recursive calls to the signal handler
     333        TL_GET( preemption_state ).in_progress = true;  // Sync flag : prevent recursive calls to the signal handler
    334334        signal_unblock( SIGUSR1 );                          // We are about to CtxSwitch out of the signal handler, let other handlers in
    335         preemption_state.in_progress = false;                    // Clear the in progress flag
     335        TL_GET( preemption_state ).in_progress = false; // Clear the in progress flag
    336336
    337337        // Preemption can occur here
    338338
    339         BlockInternal( (thread_desc*)this_thread );        // Do the actual CtxSwitch
     339        BlockInternal( (thread_desc*)TL_GET( this_thread ) ); // Do the actual CtxSwitch
    340340}
    341341
  • src/libcfa/concurrency/thread

    rfae90d5 rb10affd  
    1010// Created On       : Tue Jan 17 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sat Jul 22 09:59:40 2017
    13 // Update Count     : 3
     12// Last Modified On : Thu Mar 29 14:07:11 2018
     13// Update Count     : 4
    1414//
    1515
     
    5252}
    5353
    54 extern thread_local thread_desc * volatile this_thread;
     54//extern thread_local thread_desc * volatile this_thread;
    5555
    5656forall( dtype T | is_thread(T) )
  • src/libcfa/concurrency/thread.c

    rfae90d5 rb10affd  
    1010// Created On       : Tue Jan 17 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Jul 21 22:34:46 2017
    13 // Update Count     : 1
     12// Last Modified On : Fri Mar 30 17:19:52 2018
     13// Update Count     : 8
    1414//
    1515
     
    2626}
    2727
    28 extern volatile thread_local processor * this_processor;
     28//extern volatile thread_local processor * this_processor;
    2929
    3030//-----------------------------------------------------------------------------
     
    7575        coroutine_desc* thrd_c = get_coroutine(this);
    7676        thread_desc   * thrd_h = get_thread   (this);
    77         thrd_c->last = this_coroutine;
     77        thrd_c->last = TL_GET( this_coroutine );
    7878
    7979        // __cfaabi_dbg_print_safe("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
     
    8181        disable_interrupts();
    8282        create_stack(&thrd_c->stack, thrd_c->stack.size);
    83         this_coroutine = thrd_c;
     83        TL_SET( this_coroutine, thrd_c );
    8484        CtxStart(&this, CtxInvokeThread);
    8585        assert( thrd_c->last->stack.context );
     
    9292extern "C" {
    9393        void __finish_creation(void) {
    94                 coroutine_desc* thrd_c = this_coroutine;
     94                coroutine_desc* thrd_c = TL_GET( this_coroutine );
    9595                ThreadCtxSwitch( thrd_c, thrd_c->last );
    9696        }
     
    9898
    9999void yield( void ) {
    100         verify( preemption_state.enabled );
    101         BlockInternal( this_thread );
    102         verify( preemption_state.enabled );
     100        verify( TL_GET( preemption_state ).enabled );
     101        BlockInternal( TL_GET( this_thread ) );
     102        verify( TL_GET( preemption_state ).enabled );
    103103}
    104104
     
    116116        // set new coroutine that the processor is executing
    117117        // and context switch to it
    118         this_coroutine = dst;
     118        TL_SET( this_coroutine, dst );
    119119        assert( src->stack.context );
    120120        CtxSwitch( src->stack.context, dst->stack.context );
    121         this_coroutine = src;
     121        TL_SET( this_coroutine, src );
    122122
    123123        // set state of new coroutine to active
Note: See TracChangeset for help on using the changeset viewer.