Changeset b4a835d


Timestamp:
May 8, 2018, 9:27:25 PM
Author:
Peter A. Buhr <pabuhr@…>
Branches:
ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
Children:
df22130
Parents:
3d60c08 (diff), afd550c (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg2:software/cfa/cfa-cc

Location:
src
Files:
13 edited

Legend:

    (no prefix)  unmodified
    -            removed
    +            added
  • src/Common/Heap.cc

    r3d60c08 → rb4a835d

                                abort();
                        }
    -#endif
    +#endif // RTLD_NEXT
                } // if

    …
                fptr = (generic_fptr_t)dlsym( library, symbol );
    #endif // _GNU_SOURCE
    +
    +           error = dlerror();
    +           if ( error ) {
    +                   std::cerr << "interpose_symbol : internal error, " << error << std::endl;
    +                   abort();
    +           }
    +
    +           return fptr;
    +   }

        extern "C" {
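
    The added dlerror() check completes the standard dlsym interposition idiom: dlsym may legitimately return NULL for a symbol, so dlerror() is the authoritative failure test. A minimal, self-contained C++ sketch of the pattern (not the CFA sources; the fallback library name is a placeholder):

        // link with -ldl; on glibc, RTLD_NEXT requires _GNU_SOURCE
        #include <dlfcn.h>
        #include <cstdio>
        #include <cstdlib>

        typedef void (*generic_fptr_t)(void);

        static generic_fptr_t interpose_symbol( const char * symbol ) {
        #if defined( RTLD_NEXT )
                // look past our own definition to the next one in search order
                generic_fptr_t fptr = (generic_fptr_t)dlsym( RTLD_NEXT, symbol );
        #else
                // platforms without RTLD_NEXT: open the real library explicitly
                void * library = dlopen( "libc.so.6", RTLD_LAZY );   // placeholder name
                generic_fptr_t fptr = (generic_fptr_t)dlsym( library, symbol );
        #endif
                const char * error = dlerror();     // dlsym may return NULL legally,
                if ( error ) {                      // so dlerror() is the real check
                        std::fprintf( stderr, "interpose_symbol : internal error, %s\n", error );
                        std::abort();
                }
                return fptr;
        }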
  • src/Common/PassVisitor.proto.h

    r3d60c08 → rb4a835d

        operator bool() { return m_ref ? *m_ref : true; }
    -   bool operator=( bool val ) { return *m_ref = val; }
    +   bool operator=( bool val ) { assert(m_ref); return *m_ref = val; }

    private:
    …
        friend class ChildrenGuard;

    -   bool * set( bool & val ) {
    +   bool * set( bool * val ) {
                bool * prev = m_ref;
    -           m_ref = &val;
    +           m_ref = val;
                return prev;
        }
    …
        ChildrenGuard( bool_ref * ref )
                : m_val ( true )
    -           , m_prev( ref ? ref->set( m_val ) : nullptr )
    +           , m_prev( ref ? ref->set( &m_val ) : nullptr )
                , m_ref ( ref )
        {}
    …
        ~ChildrenGuard() {
                if( m_ref ) {
    -                   m_ref->set( *m_prev );
    +                   m_ref->set( m_prev );
                }
        }
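
    For readers without the surrounding file: the point of this change is that set() now traffics in bool* rather than bool&, so "no previous value" is representable as nullptr, which the old m_ref->set( *m_prev ) would have dereferenced. A hedged reconstruction from this hunk alone (the member declarations are assumptions):

        #include <cassert>

        class bool_ref {
        public:
                operator bool() const { return m_ref ? *m_ref : true; }
                bool operator=( bool val ) { assert( m_ref ); return *m_ref = val; }
        private:
                friend class ChildrenGuard;
                bool * set( bool * val ) {
                        bool * prev = m_ref;
                        m_ref = val;
                        return prev;
                }
                bool * m_ref = nullptr;
        };

        class ChildrenGuard {
        public:
                ChildrenGuard( bool_ref * ref )
                        : m_val ( true )
                        , m_prev( ref ? ref->set( &m_val ) : nullptr )
                        , m_ref ( ref )
                {}
                ~ChildrenGuard() {
                        if ( m_ref ) {
                                m_ref->set( m_prev );   // restoring nullptr is now legal
                        }
                }
        private:
                bool       m_val;
                bool     * m_prev;
                bool_ref * m_ref;
        };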
  • src/libcfa/bits/containers.h

    r3d60c08 → rb4a835d

    #endif

    +
    +//-----------------------------------------------------------------------------
    +// Doubly Linked List
    +//-----------------------------------------------------------------------------
    +#ifdef __cforall
    +   trait is_db_node(dtype T) {
    +           T*& get_next( T& );
    +           T*& get_prev( T& );
    +   };
    +#endif
    +
    +#ifdef __cforall
    +   forall(dtype TYPE | is_db_node(TYPE))
    +   #define T TYPE
    +#else
    +   #define T void
    +#endif
    +struct __dllist {
    +   T * head;
    +};
    +#undef T
    +
    +#ifdef __cforall
    +#define __dllist_t(T) __dllist(T)
    +#else
    +#define __dllist_t(T) struct __dllist
    +#endif
    +
    +#ifdef __cforall
    +
    +   forall(dtype T | is_db_node(T))
    +   static inline void ?{}( __dllist(T) & this ) with( this ) {
    +           head{ NULL };
    +   }
    +
    +   // forall(dtype T | is_db_node(T) | sized(T))
    +   // static inline void push_front( __dllist(T) & this, T & node ) with( this ) {
    +   //      if ( head ) {
    +   //              get_next( node ) = head;
    +   //              get_prev( node ) = get_prev( *head );
    +   //              // inserted node must be consistent before it is seen
    +   //              // prevent code movement across barrier
    +   //              asm( "" : : : "memory" );
    +   //              get_prev( *head ) = node;
    +   //              T & prev = *get_prev( node );
    +   //              get_next( prev ) = node;
    +   //      }
    +   //      else {
    +   //              get_next( node ) = &node;
    +   //              get_prev( node ) = &node;
    +   //      }
    +
    +   //      // prevent code movement across barrier
    +   //      asm( "" : : : "memory" );
    +   //      head = val;
    +   // }
    +
    +   // forall(dtype T | is_db_node(T) | sized(T))
    +   // static inline T * remove( __dllist(T) & this, T & node ) with( this ) {
    +   //      if ( &node == head ) {
    +   //              if ( get_next( *head ) == head ) {
    +   //                      head = NULL;
    +   //              }
    +   //              else {
    +   //                      head = get_next( *head );
    +   //              }
    +   //      }
    +   //      get_prev( *get_next( node ) ) = get_prev( node );
    +   //      get_next( *get_prev( node ) ) = get_next( node );
    +   //      get_next( node ) = NULL;
    +   //      get_prev( node ) = NULL;
    +   // }
    +#endif
    +

    //-----------------------------------------------------------------------------
    // Tools
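
    The commented-out push_front/remove sketch an intrusive circular doubly-linked list: each node stores its own links, and a singleton list is a node pointing at itself. A hedged C++ analogue of the same logic, for illustration only (note the Cforall draft above writes head = val; where head = &node; appears intended, and it omits the memory barriers shown there):

        struct Node {
                Node * next = nullptr;
                Node * prev = nullptr;
        };

        struct DList {
                Node * head = nullptr;

                void push_front( Node & node ) {
                        if ( head ) {
                                node.next = head;
                                node.prev = head->prev;      // old tail
                                head->prev = &node;
                                node.prev->next = &node;     // tail now points at node
                        } else {
                                node.next = &node;           // single element points at itself
                                node.prev = &node;
                        }
                        head = &node;
                }

                void remove( Node & node ) {
                        if ( &node == head ) {
                                head = ( node.next == &node ) ? nullptr : node.next;
                        }
                        node.next->prev = node.prev;
                        node.prev->next = node.next;
                        node.next = node.prev = nullptr;
                }
        };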
  • src/libcfa/concurrency/coroutine

    r3d60c08 → rb4a835d

    // Suspend implementation inlined for performance
    static inline void suspend() {
    -   coroutine_desc * src = TL_GET( this_coroutine );                        // optimization
    +   // optimization : read TLS once and reuse it
    +   // Safety note: this is preemption safe since if
    +   // preemption occurs after this line, the pointer
    +   // will also migrate which means this value will
    +   // stay in syn with the TLS
    +   coroutine_desc * src = TL_GET( this_coroutine );

        assertf( src->last != 0,
    …
    forall(dtype T | is_coroutine(T))
    static inline void resume(T & cor) {
    -   coroutine_desc * src = TL_GET( this_coroutine );                        // optimization
    +   // optimization : read TLS once and reuse it
    +   // Safety note: this is preemption safe since if
    +   // preemption occurs after this line, the pointer
    +   // will also migrate which means this value will
    +   // stay in syn with the TLS
    +   coroutine_desc * src = TL_GET( this_coroutine );
        coroutine_desc * dst = get_coroutine(cor);
    …
                dst->last = src;
                dst->starter = dst->starter ? dst->starter : src;
    -   } // if
    +   }

        // always done for performance testing
    …
    static inline void resume(coroutine_desc * dst) {
    -   coroutine_desc * src = TL_GET( this_coroutine );                        // optimization
    +   // optimization : read TLS once and reuse it
    +   // Safety note: this is preemption safe since if
    +   // preemption occurs after this line, the pointer
    +   // will also migrate which means this value will
    +   // stay in syn with the TLS
    +   coroutine_desc * src = TL_GET( this_coroutine );

        // not resuming self ?
    …
        // set last resumer
        dst->last = src;
    -   } // if
    +   }

        // always done for performance testing
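
    The repeated comment documents one idea: read the thread-local slot once into a local and reuse it, trusting the runtime's claim that a preempted thread's TLS view migrates with it. A tiny illustrative sketch, with hypothetical names standing in for the runtime's kernelTLS.this_coroutine:

        struct coroutine_desc { int id; };

        static thread_local coroutine_desc * this_coroutine = nullptr;  // stand-in TLS slot

        static int example() {
                coroutine_desc * src = this_coroutine;   // single TLS load, reused below
                return src && src->id > 0 ? src->id      // no further TLS reads
                                          : -1;
        }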
  • src/libcfa/concurrency/coroutine.c

    r3d60c08 → rb4a835d

    // Wrapper for co
    void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
    -   verify( TL_GET( preemption_state ).enabled || TL_GET( this_processor )->do_terminate );
    +   // Safety note : This could cause some false positives due to preemption
    +   verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
        disable_interrupts();
    …
        // set new coroutine that task is executing
    -   TL_SET( this_coroutine, dst );
    +   kernelTLS.this_coroutine = dst;

        // context switch to specified coroutine
    …
        enable_interrupts( __cfaabi_dbg_ctx );
    -   verify( TL_GET( preemption_state ).enabled || TL_GET( this_processor )->do_terminate );
    +   // Safety note : This could cause some false positives due to preemption
    +   verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
    } //ctxSwitchDirect
  • src/libcfa/concurrency/invoke.c

    r3d60c08 → rb4a835d

        // Fetch the thread handle from the user defined thread structure
        struct thread_desc* thrd = get_thread( this );
    +   thrd->self_cor.last = NULL;

        // Officially start the thread by enabling preemption
  • src/libcfa/concurrency/invoke.h

    r3d60c08 → rb4a835d

    #include "bits/locks.h"

    -#define TL_GET( member ) kernelThreadData.member
    -#define TL_SET( member, value ) kernelThreadData.member = value;
    +#define TL_GET( member ) kernelTLS.member
    +#define TL_SET( member, value ) kernelTLS.member = value;

    #ifdef __cforall
    …
                                volatile bool in_progress;
                        } preemption_state;
    -           } kernelThreadData;
    +           } kernelTLS;
        }

        static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_coroutine ); }
    -   static inline struct thread_desc * volatile active_thread() { return TL_GET( this_thread ); }
    -   static inline struct processor * volatile active_processor() { return TL_GET( this_processor ); }
    +   static inline struct thread_desc    * volatile active_thread   () { return TL_GET( this_thread    ); }
    +   static inline struct processor      * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
        #endif
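
    A note on the mechanics, since later hunks move the member access inside the macro call (TL_GET( preemption_state ).enabled becomes TL_GET( preemption_state.enabled )): the macro merely prefixes its argument with kernelTLS., so both spellings expand to the same token sequence. A hedged sketch; the struct members are inferred from the hunks in this changeset, not the full definition:

        #define TL_GET( member ) kernelTLS.member
        #define TL_SET( member, value ) kernelTLS.member = value;

        struct KernelThreadData {                       // inferred subset
                struct coroutine_desc * volatile this_coroutine;
                struct thread_desc    * volatile this_thread;
                struct processor      * volatile this_processor;
                struct {
                        bool enabled;
                        unsigned short disable_count;
                        volatile bool in_progress;
                } preemption_state;
        };
        extern thread_local struct KernelThreadData kernelTLS;

        // TL_GET( preemption_state ).enabled  ->  kernelTLS.preemption_state.enabled
        // TL_GET( preemption_state.enabled )  ->  kernelTLS.preemption_state.enabled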
  • src/libcfa/concurrency/kernel

    r3d60c08 → rb4a835d

        // Preemption rate on this cluster
        Duration preemption_rate;
    +
    +   // List of idle processors
    +   // __dllist_t(struct processor) idles;
    };

    …
        bool pending_preemption;

    +   struct {
    +           pthread_mutex_t lock;
    +           pthread_cond_t  cond;
    +   } idle;
    +
    #ifdef __CFA_DEBUG__
        // Last function to enable preemption on this processor
  • src/libcfa/concurrency/kernel.c

    r3d60c08 → rb4a835d

    // volatile thread_local unsigned short disable_preempt_count = 1;

    -thread_local struct KernelThreadData kernelThreadData = {
    +thread_local struct KernelThreadData kernelTLS = {
        NULL,
        NULL,
    …
                terminate(&this);
                verify(this.do_terminate);
    -           verify(TL_GET( this_processor ) != &this);
    +           verify( kernelTLS.this_processor != &this);
                P( terminated );
    -           verify(TL_GET( this_processor ) != &this);
    +           verify( kernelTLS.this_processor != &this);
                pthread_join( kernel_thread, NULL );
        }
    …
                        if(readyThread)
                        {
    -                           verify( ! TL_GET( preemption_state ).enabled );
    +                           verify( ! kernelTLS.preemption_state.enabled );

                                runThread(this, readyThread);

    -                           verify( ! TL_GET( preemption_state ).enabled );
    +                           verify( ! kernelTLS.preemption_state.enabled );

                                //Some actions need to be taken from the kernel
    …
    }

    +// KERNEL ONLY
    // runThread runs a thread by context switching
    // from the processor coroutine to the target thread
    …
        coroutine_desc * thrd_cor = dst->curr_cor;

    -   //Reset the terminating actions here
    +   // Reset the terminating actions here
        this->finish.action_code = No_Action;

    -   //Update global state
    -   TL_SET( this_thread, dst );
    +   // Update global state
    +   kernelTLS.this_thread = dst;

        // Context Switch to the thread
    …
    }

    +// KERNEL_ONLY
    void returnToKernel() {
    -   coroutine_desc * proc_cor = get_coroutine(TL_GET( this_processor )->runner);
    -   coroutine_desc * thrd_cor = TL_GET( this_thread )->curr_cor = TL_GET( this_coroutine );
    +   coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
    +   coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor = kernelTLS.this_coroutine;
        ThreadCtxSwitch(thrd_cor, proc_cor);
    }

    +// KERNEL_ONLY
    // Once a thread has finished running, some of
    // its final actions must be executed from the kernel
    void finishRunning(processor * this) with( this->finish ) {
        if( action_code == Release ) {
    -           verify( ! TL_GET( preemption_state ).enabled );
    +           verify( ! kernelTLS.preemption_state.enabled );
                unlock( *lock );
        }
    …
        }
        else if( action_code == Release_Schedule ) {
    -           verify( ! TL_GET( preemption_state ).enabled );
    +           verify( ! kernelTLS.preemption_state.enabled );
                unlock( *lock );
                ScheduleThread( thrd );
        }
        else if( action_code == Release_Multi ) {
    -           verify( ! TL_GET( preemption_state ).enabled );
    +           verify( ! kernelTLS.preemption_state.enabled );
                for(int i = 0; i < lock_count; i++) {
                        unlock( *locks[i] );
    …
    }

    +// KERNEL_ONLY
    // Context invoker for processors
    // This is the entry point for processors (kernel threads)
    …
    void * CtxInvokeProcessor(void * arg) {
        processor * proc = (processor *) arg;
    -   TL_SET( this_processor, proc );
    -   TL_SET( this_coroutine, NULL );
    -   TL_SET( this_thread, NULL );
    -   TL_GET( preemption_state ).[enabled, disable_count] = [false, 1];
    +   kernelTLS.this_processor = proc;
    +   kernelTLS.this_coroutine = NULL;
    +   kernelTLS.this_thread    = NULL;
    +   kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
        // SKULLDUGGERY: We want to create a context for the processor coroutine
        // which is needed for the 2-step context switch. However, there is no reason
    …

        //Set global state
    -   TL_SET( this_coroutine, get_coroutine(proc->runner) );
    -   TL_SET( this_thread, NULL );
    +   kernelTLS.this_coroutine = get_coroutine(proc->runner);
    +   kernelTLS.this_thread    = NULL;

        //We now have a proper context from which to schedule threads
    …
    }

    +// KERNEL_ONLY
    void kernel_first_resume(processor * this) {
    -   coroutine_desc * src = TL_GET( this_coroutine );
    +   coroutine_desc * src = kernelTLS.this_coroutine;
        coroutine_desc * dst = get_coroutine(this->runner);

    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );

        create_stack(&dst->stack, dst->stack.size);
        CtxStart(&this->runner, CtxInvokeCoroutine);

    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );

        dst->last = src;
    …

        // set new coroutine that task is executing
    -   TL_SET( this_coroutine, dst );
    +   kernelTLS.this_coroutine = dst;

        // SKULLDUGGERY normally interrupts are enable before leaving a coroutine ctxswitch.
    …
        src->state = Active;

    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );
    }

    //-----------------------------------------------------------------------------
    // Scheduler routines
    +
    +// KERNEL ONLY
    void ScheduleThread( thread_desc * thrd ) {
    -   // if( ! thrd ) return;
        verify( thrd );
        verify( thrd->self_cor.state != Halted );

    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );

        verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );
    …
        }

    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );
    }

    +// KERNEL ONLY
    thread_desc * nextThread(cluster * this) with( *this ) {
    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );
        lock( ready_queue_lock __cfaabi_dbg_ctx2 );
        thread_desc * head = pop_head( ready_queue );
        unlock( ready_queue_lock );
    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );
        return head;
    }
    …
    void BlockInternal() {
        disable_interrupts();
    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );
        returnToKernel();
    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );
        enable_interrupts( __cfaabi_dbg_ctx );
    }
    …
    void BlockInternal( __spinlock_t * lock ) {
        disable_interrupts();
    -   with( *TL_GET( this_processor ) ) {
    +   with( *kernelTLS.this_processor ) {
                finish.action_code = Release;
                finish.lock        = lock;
        }

    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );
        returnToKernel();
    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );

        enable_interrupts( __cfaabi_dbg_ctx );
    …
    void BlockInternal( thread_desc * thrd ) {
        disable_interrupts();
    -   with( *TL_GET( this_processor ) ) {
    +   with( * kernelTLS.this_processor ) {
                finish.action_code = Schedule;
                finish.thrd        = thrd;
        }

    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );
        returnToKernel();
    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );

        enable_interrupts( __cfaabi_dbg_ctx );
    …
        assert(thrd);
        disable_interrupts();
    -   with( *TL_GET( this_processor ) ) {
    +   with( * kernelTLS.this_processor ) {
                finish.action_code = Release_Schedule;
                finish.lock        = lock;
    …
        }

    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );
        returnToKernel();
    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );

        enable_interrupts( __cfaabi_dbg_ctx );
    …
    void BlockInternal(__spinlock_t * locks [], unsigned short count) {
        disable_interrupts();
    -   with( *TL_GET( this_processor ) ) {
    +   with( * kernelTLS.this_processor ) {
                finish.action_code = Release_Multi;
                finish.locks       = locks;
    …
        }

    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );
        returnToKernel();
    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );

        enable_interrupts( __cfaabi_dbg_ctx );
    …
    void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
        disable_interrupts();
    -   with( *TL_GET( this_processor ) ) {
    +   with( *kernelTLS.this_processor ) {
                finish.action_code = Release_Multi_Schedule;
                finish.locks       = locks;
    …
        }

    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );
        returnToKernel();
    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );

        enable_interrupts( __cfaabi_dbg_ctx );
    }

    +// KERNEL ONLY
    void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
    -   verify( ! TL_GET( preemption_state ).enabled );
    -   with( *TL_GET( this_processor ) ) {
    +   verify( ! kernelTLS.preemption_state.enabled );
    +   with( * kernelTLS.this_processor ) {
                finish.action_code = thrd ? Release_Schedule : Release;
                finish.lock        = lock;
    …
    // Kernel boot procedures
    void kernel_startup(void) {
    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );
        __cfaabi_dbg_print_safe("Kernel : Starting\n");
    …

        //initialize the global state variables
    -   TL_SET( this_processor, mainProcessor );
    -   TL_SET( this_thread, mainThread );
    -   TL_SET( this_coroutine, &mainThread->self_cor );
    +   kernelTLS.this_processor = mainProcessor;
    +   kernelTLS.this_thread    = mainThread;
    +   kernelTLS.this_coroutine = &mainThread->self_cor;

        // Enable preemption
    …
        // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
        // mainThread is on the ready queue when this call is made.
    -   kernel_first_resume( TL_GET( this_processor ) );
    +   kernel_first_resume( kernelTLS.this_processor );

    …
        __cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n");

    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );
        enable_interrupts( __cfaabi_dbg_ctx );
    -   verify( TL_GET( preemption_state ).enabled );
    +   verify( TL_GET( preemption_state.enabled ) );
    }
    …
        __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");

    -   verify( TL_GET( preemption_state ).enabled );
    +   verify( TL_GET( preemption_state.enabled ) );
        disable_interrupts();
    -   verify( ! TL_GET( preemption_state ).enabled );
    +   verify( ! kernelTLS.preemption_state.enabled );

        // SKULLDUGGERY: Notify the mainProcessor it needs to terminates.
    …

    //=============================================================================================
    +// Kernel Quiescing
    +//=============================================================================================
    +
    +// void halt(processor * this) with( this ) {
    +//      pthread_mutex_lock( &idle.lock );
    +
    +//      // SKULLDUGGERY: Even if spurious wake-up is a thing
    +//      // spuriously waking up a kernel thread is not a big deal
    +//      // if it is very rare.
    +//      pthread_cond_wait( &idle.cond, &idle.lock);
    +//      pthread_mutex_unlock( &idle.lock );
    +// }
    +
    +// void wake(processor * this) with( this ) {
    +//      pthread_mutex_lock  (&idle.lock);
    +//      pthread_cond_signal (&idle.cond);
    +//      pthread_mutex_unlock(&idle.lock);
    +// }
    +
    +//=============================================================================================
    // Unexpected Terminating logic
    //=============================================================================================
    …
    static bool kernel_abort_called = false;

    -void * kernel_abort    (void) __attribute__ ((__nothrow__)) {
    +void * kernel_abort(void) __attribute__ ((__nothrow__)) {
        // abort cannot be recursively entered by the same or different processors because all signal handlers return when
        // the globalAbort flag is true.
    …
        }

    -   return TL_GET( this_thread );
    +   return kernelTLS.this_thread;
    }
    …
        __cfaabi_dbg_bits_write( abort_text, len );

    -   if ( get_coroutine(thrd) != TL_GET( this_coroutine ) ) {
    -           len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", TL_GET( this_coroutine )->name, TL_GET( this_coroutine ) );
    +   if ( get_coroutine(thrd) != kernelTLS.this_coroutine ) {
    +           len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", kernelTLS.this_coroutine->name, kernelTLS.this_coroutine );
                __cfaabi_dbg_bits_write( abort_text, len );
        }
    …
    int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
    -   return get_coroutine(TL_GET( this_thread )) == get_coroutine(mainThread) ? 4 : 2;
    +   return get_coroutine(kernelTLS.this_thread) == get_coroutine(mainThread) ? 4 : 2;
    }
    …
        if ( count < 0 ) {
                // queue current task
    -           append( waiting, (thread_desc *)TL_GET( this_thread ) );
    +           append( waiting, kernelTLS.this_thread );

                // atomically release spin lock and block
    …
        void __cfaabi_dbg_record(__spinlock_t & this, const char * prev_name) {
                this.prev_name = prev_name;
    -           this.prev_thrd = TL_GET( this_thread );
    +           this.prev_thrd = kernelTLS.this_thread;
        }
    )
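
    The commented-out halt()/wake() pair, together with the new per-processor idle field added in the kernel header hunk above, sketch the classic pthread idle-processor pattern. A hedged, self-contained C++ version; the woken predicate is an addition here, guarding against the spurious wake-ups the comment mentions:

        #include <pthread.h>

        struct idle_state {
                pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER;
                pthread_cond_t  cond  = PTHREAD_COND_INITIALIZER;
                bool            woken = false;   // guard against spurious wake-ups
        };

        static void halt( idle_state & idle ) {
                pthread_mutex_lock( &idle.lock );
                while ( ! idle.woken ) {                          // re-check after every wake
                        pthread_cond_wait( &idle.cond, &idle.lock );
                }
                idle.woken = false;
                pthread_mutex_unlock( &idle.lock );
        }

        static void wake( idle_state & idle ) {
                pthread_mutex_lock( &idle.lock );
                idle.woken = true;
                pthread_cond_signal( &idle.cond );
                pthread_mutex_unlock( &idle.lock );
        }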
  • src/libcfa/concurrency/monitor.c

    r3d60c08 → rb4a835d

                // Lock the monitor spinlock
                lock( this->lock __cfaabi_dbg_ctx2 );
    -           thread_desc * thrd = TL_GET( this_thread );
    +           // Interrupts disable inside critical section
    +           thread_desc * thrd = kernelTLS.this_thread;

                __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
    …
                // Lock the monitor spinlock
                lock( this->lock __cfaabi_dbg_ctx2 );
    -           thread_desc * thrd = TL_GET( this_thread );
    +           // Interrupts disable inside critical section
    +           thread_desc * thrd = kernelTLS.this_thread;

                __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
    …

                        // Create the node specific to this wait operation
    -                   wait_ctx_primed( TL_GET( this_thread ), 0 )
    +                   wait_ctx_primed( thrd, 0 )

                        // Some one else has the monitor, wait for him to finish and then run
    …
                        __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );

    -                   wait_ctx( TL_GET( this_thread ), 0 )
    +                   wait_ctx( thrd, 0 )
                        this->dtor_node = &waiter;

    …
                lock( this->lock __cfaabi_dbg_ctx2 );

    -           __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", TL_GET( this_thread ), this, this->owner);
    -
    -           verifyf( TL_GET( this_thread ) == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", TL_GET( this_thread ), this->owner, this->recursion, this );
    +           __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
    +
    +           verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );

                // Leaving a recursion level, decrement the counter
    …
    // Sorts monitors before entering
    void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) {
    +   thread_desc * thrd = TL_GET( this_thread );
    +
        // Store current array
        this.m = m;
    …

        // Save previous thread context
    -   this.prev = TL_GET( this_thread )->monitors;
    +   this.prev = thrd->monitors;

        // Update thread context (needed for conditions)
    -   (TL_GET( this_thread )->monitors){m, count, func};
    +   (thrd->monitors){m, count, func};

        // __cfaabi_dbg_print_safe( "MGUARD : enter %d\n", count);
    …
    // Sorts monitors before entering
    void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) {
    +   // optimization
    +   thread_desc * thrd = TL_GET( this_thread );
    +
        // Store current array
        this.m = *m;

        // Save previous thread context
    -   this.prev = TL_GET( this_thread )->monitors;
    +   this.prev = thrd->monitors;

        // Update thread context (needed for conditions)
    -   (TL_GET( this_thread )->monitors){m, 1, func};
    +   (thrd->monitors){m, 1, func};

        __enter_monitor_dtor( this.m, func );
    …

        // Create the node specific to this wait operation
    -   wait_ctx_primed( TL_GET( this_thread ), 0 )
    +   wait_ctx_primed( kernelTLS.this_thread, 0 )

        //save contexts
    …

                                // Create the node specific to this wait operation
    -                           wait_ctx_primed( TL_GET( this_thread ), 0 );
    +                           wait_ctx_primed( kernelTLS.this_thread, 0 );

                                // Save monitor states
    …

        // Create the node specific to this wait operation
    -   wait_ctx_primed( TL_GET( this_thread ), 0 );
    +   wait_ctx_primed( kernelTLS.this_thread, 0 );

        monitor_save;
    …

        for( __lock_size_t i = 0; i < count; i++) {
    -           verify( monitors[i]->owner == TL_GET( this_thread ) );
    +           verify( monitors[i]->owner == kernelTLS.this_thread );
        }
  • src/libcfa/concurrency/preemption.c

    r3d60c08 → rb4a835d

        // Disable interrupts by incrementing the counter
        void disable_interrupts() {
    -           with( TL_GET( preemption_state ) ) {
    +           with( kernelTLS.preemption_state ) {
                        enabled = false;
                        __attribute__((unused)) unsigned short new_val = disable_count + 1;
    …
        // If counter reaches 0, execute any pending CtxSwitch
        void enable_interrupts( __cfaabi_dbg_ctx_param ) {
    -           processor   * proc = TL_GET( this_processor ); // Cache the processor now since interrupts can start happening after the atomic add
    -           thread_desc * thrd = TL_GET( this_thread );       // Cache the thread now since interrupts can start happening after the atomic add
    -
    -           with( TL_GET( preemption_state ) ){
    +           processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic add
    +           thread_desc * thrd = kernelTLS.this_thread;       // Cache the thread now since interrupts can start happening after the atomic add
    +
    +           with( kernelTLS.preemption_state ){
                        unsigned short prev = disable_count;
                        disable_count -= 1;
    …
        // Don't execute any pending CtxSwitch even if counter reaches 0
        void enable_interrupts_noPoll() {
    -           unsigned short prev = TL_GET( preemption_state ).disable_count;
    -           TL_GET( preemption_state ).disable_count -= 1;
    +           unsigned short prev = kernelTLS.preemption_state.disable_count;
    +           kernelTLS.preemption_state.disable_count -= 1;
                verifyf( prev != 0u, "Incremented from %u\n", prev );                     // If this triggers someone is enabled already enabled interrupts
                if( prev == 1 ) {
    -                   TL_GET( preemption_state ).enabled = true;
    +                   kernelTLS.preemption_state.enabled = true;
                }
        }
    …
    }

    +// KERNEL ONLY
    // Check if a CtxSwitch signal handler shoud defer
    // If true  : preemption is safe
    // If false : preemption is unsafe and marked as pending
    static inline bool preemption_ready() {
    -   bool ready = TL_GET( preemption_state ).enabled && !TL_GET( preemption_state ).in_progress; // Check if preemption is safe
    -   TL_GET( this_processor )->pending_preemption = !ready;                  // Adjust the pending flag accordingly
    +   // Check if preemption is safe
    +   bool ready = kernelTLS.preemption_state.enabled && ! kernelTLS.preemption_state.in_progress;
    +
    +   // Adjust the pending flag accordingly
    +   kernelTLS.this_processor->pending_preemption = !ready;
        return ready;
    }
    …

        // Start with preemption disabled until ready
    -   TL_GET( preemption_state ).enabled = false;
    -   TL_GET( preemption_state ).disable_count = 1;
    +   kernelTLS.preemption_state.enabled = false;
    +   kernelTLS.preemption_state.disable_count = 1;

        // Initialize the event kernel
    …
        // before the kernel thread has even started running. When that happens an iterrupt
        // we a null 'this_processor' will be caught, just ignore it.
    -   if(!TL_GET( this_processor )) return;
    +   if(! kernelTLS.this_processor ) return;

        choose(sfp->si_value.sival_int) {
                case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
    -           case PREEMPT_TERMINATE: verify(TL_GET( this_processor )->do_terminate);
    +           case PREEMPT_TERMINATE: verify( kernelTLS.this_processor->do_terminate);
                default:
                        abort( "internal error, signal value is %d", sfp->si_value.sival_int );
    …
        if( !preemption_ready() ) { return; }

    -   __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", TL_GET( this_processor ), TL_GET( this_thread ) );
    -
    -   TL_GET( preemption_state ).in_progress = true;  // Sync flag : prevent recursive calls to the signal handler
    -   signal_unblock( SIGUSR1 );                          // We are about to CtxSwitch out of the signal handler, let other handlers in
    -   TL_GET( preemption_state ).in_progress = false; // Clear the in progress flag
    +   __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", kernelTLS.this_processor, kernelTLS.this_thread );
    +
    +   // Sync flag : prevent recursive calls to the signal handler
    +   kernelTLS.preemption_state.in_progress = true;
    +
    +   // We are about to CtxSwitch out of the signal handler, let other handlers in
    +   signal_unblock( SIGUSR1 );
    +
    +   // TODO: this should go in finish action
    +   // Clear the in progress flag
    +   kernelTLS.preemption_state.in_progress = false;

        // Preemption can occur here

    -   BlockInternal( (thread_desc*)TL_GET( this_thread ) ); // Do the actual CtxSwitch
    +   BlockInternal( kernelTLS.this_thread ); // Do the actual CtxSwitch
    }
    …

    void __cfaabi_check_preemption() {
    -   bool ready = TL_GET( preemption_state ).enabled;
    +   bool ready = kernelTLS.preemption_state.enabled;
        if(!ready) { abort("Preemption should be ready"); }
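
    Taken together, these hunks manipulate one small state machine: a per-thread nesting count where preemption is enabled only when the count returns to zero. A hedged standalone sketch of that scheme (simplified; the real enable_interrupts also polls for pending context switches):

        struct preemption_state_t {
                bool enabled;
                unsigned short disable_count;
                volatile bool in_progress;
        };
        static thread_local preemption_state_t preemption_state = { false, 1, false };

        static void disable_interrupts() {
                preemption_state.enabled = false;
                preemption_state.disable_count += 1;     // nest one level deeper
        }

        static void enable_interrupts_noPoll() {
                unsigned short prev = preemption_state.disable_count;
                preemption_state.disable_count -= 1;
                // prev == 0 here would mean an unbalanced enable (underflow)
                if ( prev == 1 ) {
                        preemption_state.enabled = true; // outermost level released
                }
        }

        static bool preemption_ready() {
                // safe only when enabled and not already inside the signal handler
                return preemption_state.enabled && ! preemption_state.in_progress;
        }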
  • src/libcfa/concurrency/thread.c

    r3d60c08 → rb4a835d

        disable_interrupts();
        create_stack(&thrd_c->stack, thrd_c->stack.size);
    -   TL_SET( this_coroutine, thrd_c );
    +   kernelTLS.this_coroutine = thrd_c;
        CtxStart(&this, CtxInvokeThread);
        assert( thrd_c->last->stack.context );
    …

    extern "C" {
    +   // KERNEL ONLY
        void __finish_creation(void) {
    -           coroutine_desc* thrd_c = TL_GET( this_coroutine );
    +           coroutine_desc* thrd_c = kernelTLS.this_coroutine;
                ThreadCtxSwitch( thrd_c, thrd_c->last );
        }
    …

    void yield( void ) {
    -   verify( TL_GET( preemption_state ).enabled );
    +   // Safety note : This could cause some false positives due to preemption
    +   verify( TL_GET( preemption_state.enabled ) );
        BlockInternal( TL_GET( this_thread ) );
    -   verify( TL_GET( preemption_state ).enabled );
    +   // Safety note : This could cause some false positives due to preemption
    +   verify( TL_GET( preemption_state.enabled ) );
    }
    …
    }

    +// KERNEL ONLY
    void ThreadCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
        // set state of current coroutine to inactive
    …
        // set new coroutine that the processor is executing
        // and context switch to it
    -   TL_SET( this_coroutine, dst );
    +   kernelTLS.this_coroutine = dst;
        assert( src->stack.context );
        CtxSwitch( src->stack.context, dst->stack.context );
    -   TL_SET( this_coroutine, src );
    +   kernelTLS.this_coroutine = src;

        // set state of new coroutine to active
  • src/main.cc

    r3d60c08 → rb4a835d

                } // if
                return 1;
    -   } // try
    +   } catch(...) {
    +           std::exception_ptr eptr = std::current_exception();
    +           try {
    +                   if (eptr) {
    +                           std::rethrow_exception(eptr);
    +                   }
    +                   else {
    +                           std::cerr << "Exception Uncaught and Unkown" << std::endl;
    +                   }
    +           } catch(const std::exception& e) {
    +                   std::cerr << "Unaught Exception \"" << e.what() << "\"\n";
    +           }
    +           return 1;
    +   }// try

        deleteAll( translationUnit );
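
    The new catch-all uses the standard rethrow idiom to classify an otherwise-unknown in-flight exception (the misspellings "Unkown" and "Unaught" in the committed strings are left as the changeset records them). A cleaned-up, standalone sketch; note it must be called from inside a catch block for std::current_exception() to return a non-null pointer:

        #include <exception>
        #include <iostream>

        int handle_uncaught() {
                std::exception_ptr eptr = std::current_exception();
                try {
                        if ( eptr ) {
                                std::rethrow_exception( eptr );  // re-raise to inspect the type
                        } else {
                                std::cerr << "Exception Uncaught and Unknown" << std::endl;
                        }
                } catch ( const std::exception & e ) {
                        std::cerr << "Uncaught Exception \"" << e.what() << "\"\n";
                }
                return 1;                                        // propagate failure status
        }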