Timestamp:
Feb 25, 2020, 1:17:33 PM (6 years ago)
Author:
Peter A. Buhr <pabuhr@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum, stuck-waitfor-destruct
Children:
7dc2e015
Parents:
9fb8f01 (diff), dd9e1ca (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

resolve conflict

Location:
libcfa/src/concurrency
Files:
19 edited

  • libcfa/src/concurrency/CtxSwitch-arm.S

    r9fb8f01 r3d5701e  
    1313        .text
    1414        .align  2
    15         .global CtxSwitch
    16         .type   CtxSwitch, %function
     15        .global __cfactx_switch
     16        .type   __cfactx_switch, %function
    1717
    18 CtxSwitch:
     18__cfactx_switch:
    1919        @ save callee-saved registers: r4-r8, r10, r11, r13(sp) (plus r9 depending on platform specification)
    2020        @ I've seen reference to 31 registers on 64-bit, if this is the case, more need to be saved
     
    5252        mov r15, r14
    5353        #endif // R9_SPECIAL
    54        
     54
    5555        .text
    5656        .align  2
    57         .global CtxInvokeStub
    58         .type   CtxInvokeStub, %function
     57        .global __cfactx_invoke_stub
     58        .type   __cfactx_invoke_stub, %function
    5959
    60 CtxInvokeStub:
     60__cfactx_invoke_stub:
    6161        ldmfd r13!, {r0-r1}
    6262        mov r15, r1
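
    Note: the assembly entry points move from CtxSwitch/CtxInvokeStub to the reserved
    __cfactx_* prefix, keeping the runtime's symbols out of the user namespace. The C
    side binds to the renamed symbol through an asm label; a minimal sketch of that
    declaration, as it also appears in coroutine.hfa and invoke.h later in this changeset:

        extern void __cfactx_switch( struct __stack_context_t * from,
                                     struct __stack_context_t * to ) asm ("__cfactx_switch");

        // a caller saves its registers into `from` and resumes `to`; control returns
        // here only when some other context switches back into `from`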
  • libcfa/src/concurrency/CtxSwitch-i386.S

    r9fb8f01 r3d5701e  
    4343        .text
    4444        .align 2
    45         .globl CtxSwitch
    46         .type  CtxSwitch, @function
    47 CtxSwitch:
     45        .globl __cfactx_switch
     46        .type  __cfactx_switch, @function
     47__cfactx_switch:
    4848
    4949        // Copy the "from" context argument from the stack to register eax
     
    8383
    8484        ret
    85         .size  CtxSwitch, .-CtxSwitch
     85        .size  __cfactx_switch, .-__cfactx_switch
    8686
    8787// Local Variables: //
  • libcfa/src/concurrency/CtxSwitch-x86_64.S

    r9fb8f01 r3d5701e  
    4444        .text
    4545        .align 2
    46         .globl CtxSwitch
    47         .type  CtxSwitch, @function
    48 CtxSwitch:
     46        .globl __cfactx_switch
     47        .type  __cfactx_switch, @function
     48__cfactx_switch:
    4949
    5050        // Save volatile registers on the stack.
     
    7777
    7878        ret
    79         .size  CtxSwitch, .-CtxSwitch
     79        .size  __cfactx_switch, .-__cfactx_switch
    8080
    8181//-----------------------------------------------------------------------------
     
    8383        .text
    8484        .align 2
    85         .globl CtxInvokeStub
    86         .type    CtxInvokeStub, @function
    87 CtxInvokeStub:
     85        .globl __cfactx_invoke_stub
     86        .type    __cfactx_invoke_stub, @function
     87__cfactx_invoke_stub:
    8888        movq %rbx, %rdi
    89         jmp *%r12
    90         .size  CtxInvokeStub, .-CtxInvokeStub
     89        movq %r12, %rsi
     90        jmp *%r13
     91        .size  __cfactx_invoke_stub, .-__cfactx_invoke_stub
    9192
    9293// Local Variables: //
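
    Note: beyond the rename, the x86-64 stub now forwards two values instead of one:
    saved %rbx becomes the first argument (%rdi), %r12 the second (%rsi), and the jump
    target moves to %r13. In C terms the stub performs roughly the tail-call sketched
    below; the register mapping is inferred from the stub together with the three
    fixedRegisters slots __cfactx_start fills in invoke.c later in this changeset:

        // hypothetical C rendering of __cfactx_invoke_stub on x86-64:
        //   %rbx -> %rdi   first argument:  main
        //   %r12 -> %rsi   second argument: this
        //   jmp *%r13      tail-call:       invoke
        // i.e. invoke( main, this ), where invoke is __cfactx_invoke_coroutine or
        // __cfactx_invoke_thread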
  • libcfa/src/concurrency/alarm.cfa

    r9fb8f01 r3d5701e  
    1010// Created On       : Fri Jun 2 11:31:25 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri May 25 06:25:47 2018
    13 // Update Count     : 67
     12// Last Modified On : Sun Jan  5 08:41:36 2020
     13// Update Count     : 69
    1414//
    1515
     
    3939
    4040void __kernel_set_timer( Duration alarm ) {
    41         verifyf(alarm >= 1`us || alarm == 0, "Setting timer to < 1us (%jins)", alarm.tv);
    42         setitimer( ITIMER_REAL, &(itimerval){ alarm }, NULL );
     41        verifyf(alarm >= 1`us || alarm == 0, "Setting timer to < 1us (%jins)", alarm`ns);
     42        setitimer( ITIMER_REAL, &(itimerval){ alarm }, 0p );
    4343}
    4444
     
    4747//=============================================================================================
    4848
    49 void ?{}( alarm_node_t & this, thread_desc * thrd, Time alarm, Duration period ) with( this ) {
     49void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period ) with( this ) {
    5050        this.thrd = thrd;
    5151        this.alarm = alarm;
     
    113113                        this->tail = &this->head;
    114114                }
    115                 head->next = NULL;
     115                head->next = 0p;
    116116        }
    117117        verify( validate( this ) );
     
    127127                this->tail = it;
    128128        }
    129         n->next = NULL;
     129        n->next = 0p;
    130130
    131131        verify( validate( this ) );
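
    Note: two idioms recur throughout the changeset: NULL is replaced by 0p, Cforall's
    typed null-pointer constant, and the verify now reads the duration through the
    backquoted `ns call (Cforall postfix-function syntax) rather than the raw tv field.
    A minimal illustration of 0p (not taken from the diff):

        int * ip = 0p;                  // typed null pointer, preferred over NULL in libcfa
        if ( ip == 0p ) { /* ... */ }   // null test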
  • libcfa/src/concurrency/alarm.hfa

    r9fb8f01 r3d5701e  
    2323#include "time.hfa"
    2424
    25 struct thread_desc;
     25struct $thread;
    2626struct processor;
    2727
     
    4343
    4444        union {
    45                 thread_desc * thrd;     // thrd who created event
     45                $thread * thrd; // thrd who created event
    4646                processor * proc;               // proc who created event
    4747        };
     
    5353typedef alarm_node_t ** __alarm_it_t;
    5454
    55 void ?{}( alarm_node_t & this, thread_desc * thrd, Time alarm, Duration period );
     55void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period );
    5656void ?{}( alarm_node_t & this, processor   * proc, Time alarm, Duration period );
    5757void ^?{}( alarm_node_t & this );
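
    Note: the descriptor types thread_desc, coroutine_desc, and monitor_desc become
    $thread, $coroutine, and $monitor; the leading $ appears to mark runtime-internal
    types that user code should not name directly. The coroutine trait, for example,
    reads as follows after the rename (verbatim from coroutine.hfa below):

        trait is_coroutine(dtype T) {
              void main(T & this);
              $coroutine * get_coroutine(T & this);
        };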
  • libcfa/src/concurrency/coroutine.cfa

    r9fb8f01 r3d5701e  
    1010// Created On       : Mon Nov 28 12:27:26 2016
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Mar 30 17:20:57 2018
    13 // Update Count     : 9
     12// Last Modified On : Tue Feb  4 12:29:25 2020
     13// Update Count     : 16
    1414//
    1515
     
    3737
    3838extern "C" {
    39         void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
     39        void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct $coroutine *) __attribute__ ((__noreturn__));
    4040        static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
    4141        static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
     
    8989}
    9090
    91 void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize ) with( this ) {
    92         (this.context){NULL, NULL};
     91void ?{}( $coroutine & this, const char name[], void * storage, size_t storageSize ) with( this ) {
     92        (this.context){0p, 0p};
    9393        (this.stack){storage, storageSize};
    9494        this.name = name;
    9595        state = Start;
    96         starter = NULL;
    97         last = NULL;
    98         cancellation = NULL;
    99 }
    100 
    101 void ^?{}(coroutine_desc& this) {
     96        starter = 0p;
     97        last = 0p;
     98        cancellation = 0p;
     99}
     100
     101void ^?{}($coroutine& this) {
    102102        if(this.state != Halted && this.state != Start && this.state != Primed) {
    103                 coroutine_desc * src = TL_GET( this_thread )->curr_cor;
    104                 coroutine_desc * dst = &this;
     103                $coroutine * src = TL_GET( this_thread )->curr_cor;
     104                $coroutine * dst = &this;
    105105
    106106                struct _Unwind_Exception storage;
     
    115115                }
    116116
    117                 CoroutineCtxSwitch( src, dst );
     117                $ctx_switch( src, dst );
    118118        }
    119119}
     
    123123forall(dtype T | is_coroutine(T))
    124124void prime(T& cor) {
    125         coroutine_desc* this = get_coroutine(cor);
     125        $coroutine* this = get_coroutine(cor);
    126126        assert(this->state == Start);
    127127
     
    131131
    132132[void *, size_t] __stack_alloc( size_t storageSize ) {
    133         static const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
     133        const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
    134134        assert(__page_size != 0l);
    135135        size_t size = libCeiling( storageSize, 16 ) + stack_data_size;
     
    157157
    158158void __stack_prepare( __stack_info_t * this, size_t create_size ) {
    159         static const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
     159        const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
    160160        bool userStack;
    161161        void * storage;
     
    187187// is not inline (We can't inline Cforall in C)
    188188extern "C" {
    189         void __suspend_internal(void) {
    190                 suspend();
    191         }
    192 
    193         void __leave_coroutine( coroutine_desc * src ) {
    194                 coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter;
     189        void __cfactx_cor_leave( struct $coroutine * src ) {
     190                $coroutine * starter = src->cancellation != 0 ? src->last : src->starter;
    195191
    196192                src->state = Halted;
     
    205201                        src->name, src, starter->name, starter );
    206202
    207                 CoroutineCtxSwitch( src, starter );
     203                $ctx_switch( src, starter );
     204        }
     205
     206        struct $coroutine * __cfactx_cor_finish(void) {
     207                struct $coroutine * cor = kernelTLS.this_thread->curr_cor;
     208
     209                if(cor->state == Primed) {
     210                        suspend();
     211                }
     212
     213                cor->state = Active;
     214
     215                return cor;
    208216        }
    209217}
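
    Note: the start-up bookkeeping formerly done in invoke.c (the Primed check and the
    transition to Active) is hoisted into the new Cforall function __cfactx_cor_finish,
    leaving the C shim a plain three-step sequence. A sketch of the resulting control
    flow, assembled from this diff and the invoke.c diff below:

        void __cfactx_invoke_coroutine( void (*main)(void *), void * this ) {
            struct $coroutine * cor = __cfactx_cor_finish(); // suspend if Primed, then mark Active
            main( this );                                    // run the coroutine body
            __cfactx_cor_leave( cor );                       // final switch to starter; never returns
        }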
  • libcfa/src/concurrency/coroutine.hfa

    r9fb8f01 r3d5701e  
    1010// Created On       : Mon Nov 28 12:27:26 2016
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Jun 21 17:49:39 2019
    13 // Update Count     : 9
     12// Last Modified On : Tue Feb  4 12:29:26 2020
     13// Update Count     : 11
    1414//
    1515
     
    2525trait is_coroutine(dtype T) {
    2626      void main(T & this);
    27       coroutine_desc * get_coroutine(T & this);
     27      $coroutine * get_coroutine(T & this);
    2828};
    2929
    30 #define DECL_COROUTINE(X) static inline coroutine_desc* get_coroutine(X& this) { return &this.__cor; } void main(X& this)
     30#define DECL_COROUTINE(X) static inline $coroutine* get_coroutine(X& this) { return &this.__cor; } void main(X& this)
    3131
    3232//-----------------------------------------------------------------------------
     
    3535// void ^?{}( coStack_t & this );
    3636
    37 void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize );
    38 void ^?{}( coroutine_desc & this );
     37void  ?{}( $coroutine & this, const char name[], void * storage, size_t storageSize );
     38void ^?{}( $coroutine & this );
    3939
    40 static inline void ?{}( coroutine_desc & this)                                       { this{ "Anonymous Coroutine", NULL, 0 }; }
    41 static inline void ?{}( coroutine_desc & this, size_t stackSize)                     { this{ "Anonymous Coroutine", NULL, stackSize }; }
    42 static inline void ?{}( coroutine_desc & this, void * storage, size_t storageSize )  { this{ "Anonymous Coroutine", storage, storageSize }; }
    43 static inline void ?{}( coroutine_desc & this, const char * name)                    { this{ name, NULL, 0 }; }
    44 static inline void ?{}( coroutine_desc & this, const char * name, size_t stackSize ) { this{ name, NULL, stackSize }; }
     40static inline void ?{}( $coroutine & this)                                       { this{ "Anonymous Coroutine", 0p, 0 }; }
     41static inline void ?{}( $coroutine & this, size_t stackSize)                     { this{ "Anonymous Coroutine", 0p, stackSize }; }
     42static inline void ?{}( $coroutine & this, void * storage, size_t storageSize )  { this{ "Anonymous Coroutine", storage, storageSize }; }
     43static inline void ?{}( $coroutine & this, const char name[])                    { this{ name, 0p, 0 }; }
     44static inline void ?{}( $coroutine & this, const char name[], size_t stackSize ) { this{ name, 0p, stackSize }; }
    4545
    4646//-----------------------------------------------------------------------------
     
    5454void prime(T & cor);
    5555
    56 static inline struct coroutine_desc * active_coroutine() { return TL_GET( this_thread )->curr_cor; }
     56static inline struct $coroutine * active_coroutine() { return TL_GET( this_thread )->curr_cor; }
    5757
    5858//-----------------------------------------------------------------------------
     
    6161// Start coroutine routines
    6262extern "C" {
    63       forall(dtype T | is_coroutine(T))
    64       void CtxInvokeCoroutine(T * this);
     63        void __cfactx_invoke_coroutine(void (*main)(void *), void * this);
    6564
    66       forall(dtype T | is_coroutine(T))
    67       void CtxStart(T * this, void ( *invoke)(T *));
     65        forall(dtype T)
     66        void __cfactx_start(void (*main)(T &), struct $coroutine * cor, T & this, void (*invoke)(void (*main)(void *), void *));
    6867
    69         extern void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
     68        extern void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine *) __attribute__ ((__noreturn__));
    7069
    71         extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
     70        extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch");
    7271}
    7372
    7473// Private wrappers for context switch and stack creation
    7574// Wrapper for co
    76 static inline void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
     75static inline void $ctx_switch( $coroutine * src, $coroutine * dst ) __attribute__((nonnull (1, 2))) {
    7776        // set state of current coroutine to inactive
    7877        src->state = src->state == Halted ? Halted : Inactive;
     
    8382        // context switch to specified coroutine
    8483        verify( dst->context.SP );
    85         CtxSwitch( &src->context, &dst->context );
    86         // when CtxSwitch returns we are back in the src coroutine
     84        __cfactx_switch( &src->context, &dst->context );
     85        // when __cfactx_switch returns we are back in the src coroutine
    8786
    8887        // set state of new coroutine to active
    8988        src->state = Active;
    9089
    91         if( unlikely(src->cancellation != NULL) ) {
    92                 _CtxCoroutine_Unwind(src->cancellation, src);
     90        if( unlikely(src->cancellation != 0p) ) {
     91                __cfactx_coroutine_unwind(src->cancellation, src);
    9392        }
    9493}
     
    103102        // will also migrate which means this value will
    104103        // stay in sync with the TLS
    105         coroutine_desc * src = TL_GET( this_thread )->curr_cor;
     104        $coroutine * src = TL_GET( this_thread )->curr_cor;
    106105
    107106        assertf( src->last != 0,
     
    114113                src->name, src, src->last->name, src->last );
    115114
    116         CoroutineCtxSwitch( src, src->last );
     115        $ctx_switch( src, src->last );
    117116}
    118117
     
    125124        // will also migrate which means this value will
    126125        // stay in sync with the TLS
    127         coroutine_desc * src = TL_GET( this_thread )->curr_cor;
    128         coroutine_desc * dst = get_coroutine(cor);
     126        $coroutine * src = TL_GET( this_thread )->curr_cor;
     127        $coroutine * dst = get_coroutine(cor);
    129128
    130         if( unlikely(dst->context.SP == NULL) ) {
     129        if( unlikely(dst->context.SP == 0p) ) {
     130                TL_GET( this_thread )->curr_cor = dst;
    131131                __stack_prepare(&dst->stack, 65000);
    132                 CtxStart(&cor, CtxInvokeCoroutine);
     132                __cfactx_start(main, dst, cor, __cfactx_invoke_coroutine);
     133                TL_GET( this_thread )->curr_cor = src;
    133134        }
    134135
     
    146147
    147148        // always done for performance testing
    148         CoroutineCtxSwitch( src, dst );
     149        $ctx_switch( src, dst );
    149150
    150151        return cor;
    151152}
    152153
    153 static inline void resume(coroutine_desc * dst) {
     154static inline void resume( $coroutine * dst ) __attribute__((nonnull (1))) {
    154155        // optimization : read TLS once and reuse it
    155156        // Safety note: this is preemption safe since if
     
    157158        // will also migrate which means this value will
    158159        // stay in sync with the TLS
    159         coroutine_desc * src = TL_GET( this_thread )->curr_cor;
     160        $coroutine * src = TL_GET( this_thread )->curr_cor;
    160161
    161162        // not resuming self ?
     
    171172
    172173        // always done for performance testing
    173         CoroutineCtxSwitch( src, dst );
     174        $ctx_switch( src, dst );
    174175}
    175176
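
    Note: $ctx_switch and the two-argument __cfactx_start are internal plumbing; the
    user-facing coroutine API is unchanged. For orientation, a sketch of the idiom
    these internals support, using the standard Cforall coroutine pattern (not part
    of this diff):

        coroutine Fibonacci { int fn; };                // fn is the communication variable

        void main( Fibonacci & fib ) with( fib ) {      // coroutine body
            int f1 = 0, f2 = 1;
            for () {                                    // infinite loop
                fn = f1; f1 = f2; f2 = fn + f1;
                suspend();                              // park here until the next resume
            }
        }
        // each resume( fib ) runs main to the next suspend(), via $ctx_switch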
  • libcfa/src/concurrency/invoke.c

    r9fb8f01 r3d5701e  
    2929// Called from the kernel when starting a coroutine or task so must switch back to user mode.
    3030
    31 extern void __suspend_internal(void);
    32 extern void __leave_coroutine( struct coroutine_desc * );
    33 extern void __finish_creation( struct thread_desc * );
    34 extern void __leave_thread_monitor( struct thread_desc * this );
     31extern struct $coroutine * __cfactx_cor_finish(void);
     32extern void __cfactx_cor_leave ( struct $coroutine * );
     33extern void __cfactx_thrd_leave();
     34
    3535extern void disable_interrupts() OPTIONAL_THREAD;
    3636extern void enable_interrupts( __cfaabi_dbg_ctx_param );
    3737
    38 void CtxInvokeCoroutine(
     38void __cfactx_invoke_coroutine(
    3939        void (*main)(void *),
    40         struct coroutine_desc *(*get_coroutine)(void *),
    4140        void *this
    4241) {
    43         struct coroutine_desc* cor = get_coroutine( this );
     42        // Finish setting up the coroutine by setting its state
     43        struct $coroutine * cor = __cfactx_cor_finish();
    4444
    45         if(cor->state == Primed) {
    46                 __suspend_internal();
    47         }
    48 
    49         cor->state = Active;
    50 
     45        // Call the main of the coroutine
    5146        main( this );
    5247
    5348        //Final suspend, should never return
    54         __leave_coroutine( cor );
     49        __cfactx_cor_leave( cor );
    5550        __cabi_abort( "Resumed dead coroutine" );
    5651}
    5752
    58 static _Unwind_Reason_Code _CtxCoroutine_UnwindStop(
     53static _Unwind_Reason_Code __cfactx_coroutine_unwindstop(
    5954        __attribute((__unused__)) int version,
    6055        _Unwind_Action actions,
     
    6762                // We finished unwinding the coroutine,
    6863                // leave it
    69                 __leave_coroutine( param );
     64                __cfactx_cor_leave( param );
    7065                __cabi_abort( "Resumed dead coroutine" );
    7166        }
     
    7570}
    7671
    77 void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) __attribute__ ((__noreturn__));
    78 void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) {
    79         _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, _CtxCoroutine_UnwindStop, cor );
     72void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine * cor) __attribute__ ((__noreturn__));
     73void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine * cor) {
     74        _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor );
    8075        printf("UNWIND ERROR %d after force unwind\n", ret);
    8176        abort();
    8277}
    8378
    84 void CtxInvokeThread(
    85         void (*dtor)(void *),
     79void __cfactx_invoke_thread(
    8680        void (*main)(void *),
    87         struct thread_desc *(*get_thread)(void *),
    8881        void *this
    8982) {
    90         // Fetch the thread handle from the user defined thread structure
    91         struct thread_desc* thrd = get_thread( this );
    92 
    93         // First suspend, once the thread arrives here,
    94         // the function pointer to main can be invalidated without risk
    95         __finish_creation( thrd );
    96 
    9783        // Officially start the thread by enabling preemption
    9884        enable_interrupts( __cfaabi_dbg_ctx );
     
    10894        // The order of these 4 operations is very important
    10995        //Final suspend, should never return
    110         __leave_thread_monitor( thrd );
     96        __cfactx_thrd_leave();
    11197        __cabi_abort( "Resumed dead thread" );
    11298}
    11399
    114 
    115 void CtxStart(
     100void __cfactx_start(
    116101        void (*main)(void *),
    117         struct coroutine_desc *(*get_coroutine)(void *),
     102        struct $coroutine * cor,
    118103        void *this,
    119104        void (*invoke)(void *)
    120105) {
    121         struct coroutine_desc * cor = get_coroutine( this );
    122106        struct __stack_t * stack = cor->stack.storage;
    123107
     
    138122
    139123        fs->dummyReturn = NULL;
    140         fs->argument[0] = this;     // argument to invoke
     124        fs->argument[0] = main;     // argument to invoke
     125        fs->argument[1] = this;     // argument to invoke
    141126        fs->rturn = invoke;
    142127
     
    155140
    156141        fs->dummyReturn = NULL;
    157         fs->rturn = CtxInvokeStub;
    158         fs->fixedRegisters[0] = this;
    159         fs->fixedRegisters[1] = invoke;
     142        fs->rturn = __cfactx_invoke_stub;
     143        fs->fixedRegisters[0] = main;
     144        fs->fixedRegisters[1] = this;
     145        fs->fixedRegisters[2] = invoke;
    160146
    161147#elif defined( __ARM_ARCH )
    162 
     148#error ARM needs to be upgraded to use two parameters like X86/X64 (A.K.A.: I broke this and do not know how to fix it)
    163149        struct FakeStack {
    164150                float fpRegs[16];                       // floating point registers
     
    172158        struct FakeStack *fs = (struct FakeStack *)cor->context.SP;
    173159
    174         fs->intRegs[8] = CtxInvokeStub;
     160        fs->intRegs[8] = __cfactx_invoke_stub;
    175161        fs->arg[0] = this;
    176162        fs->arg[1] = invoke;
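
    Note: __cfactx_start now receives main and this directly (plus the $coroutine
    descriptor) instead of a get_coroutine function pointer, and seeds the fake stack
    with both. On x86-64 that uses three callee-saved slots; the register mapping below
    is an inference from the stub shown earlier, not stated in the diff:

        fs->rturn             = __cfactx_invoke_stub;  // first "return" lands in the stub
        fs->fixedRegisters[0] = main;                  // restored into %rbx
        fs->fixedRegisters[1] = this;                  // restored into %r12
        fs->fixedRegisters[2] = invoke;                // restored into %r13; the stub jumps here

    The #error on the ARM path records that this two-argument scheme has not been
    ported there yet.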
  • libcfa/src/concurrency/invoke.h

    r9fb8f01 r3d5701e  
    1010// Created On       : Tue Jan 17 12:27:26 2016
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sat Jun 22 18:19:13 2019
    13 // Update Count     : 40
     12// Last Modified On : Thu Dec  5 16:26:03 2019
     13// Update Count     : 44
    1414//
    1515
     
    4646        #ifdef __cforall
    4747        extern "Cforall" {
    48                 extern thread_local struct KernelThreadData {
    49                         struct thread_desc    * volatile this_thread;
     48                extern __attribute__((aligned(128))) thread_local struct KernelThreadData {
     49                        struct $thread    * volatile this_thread;
    5050                        struct processor      * volatile this_processor;
    5151
     
    5555                                volatile bool in_progress;
    5656                        } preemption_state;
     57
     58                        uint32_t rand_seed;
    5759                } kernelTLS __attribute__ ((tls_model ( "initial-exec" )));
    5860        }
     
    9092        };
    9193
    92         enum coroutine_state { Halted, Start, Inactive, Active, Primed };
    93 
    94         struct coroutine_desc {
    95                 // context that is switch during a CtxSwitch
     94        enum coroutine_state { Halted, Start, Primed, Inactive, Active, Rerun };
     95        enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION, __MANUAL_PREEMPTION };
     96
     97        struct $coroutine {
      98                // context that is switched during a __cfactx_switch
    9699                struct __stack_context_t context;
    97100
     
    106109
    107110                // first coroutine to resume this one
    108                 struct coroutine_desc * starter;
     111                struct $coroutine * starter;
    109112
    110113                // last coroutine to resume this one
    111                 struct coroutine_desc * last;
     114                struct $coroutine * last;
    112115
    113116                // If non-null stack must be unwound with this exception
     
    125128        };
    126129
    127         struct monitor_desc {
     130        struct $monitor {
    128131                // spinlock to protect internal data
    129132                struct __spinlock_t lock;
    130133
    131134                // current owner of the monitor
    132                 struct thread_desc * owner;
     135                struct $thread * owner;
    133136
    134137                // queue of threads that are blocked waiting for the monitor
    135                 __queue_t(struct thread_desc) entry_queue;
     138                __queue_t(struct $thread) entry_queue;
    136139
    137140                // stack of conditions to run next once we exit the monitor
     
    150153        struct __monitor_group_t {
    151154                // currently held monitors
    152                 __cfa_anonymous_object( __small_array_t(monitor_desc*) );
     155                __cfa_anonymous_object( __small_array_t($monitor*) );
    153156
    154157                // last function that acquired monitors
     
    156159        };
    157160
    158         struct thread_desc {
     161        struct $thread {
    159162                // Core threading fields
    160                 // context that is switch during a CtxSwitch
      163                // context that is switched during a __cfactx_switch
    161164                struct __stack_context_t context;
    162165
    163166                // current execution status for coroutine
    164                 enum coroutine_state state;
     167                volatile int state;
     168                enum __Preemption_Reason preempted;
    165169
    166170                //SKULLDUGGERY: errno is not saved in the thread data structure because returnToKernel appears to be the only function that requires saving and restoring it
    167171
    168172                // coroutine body used to store context
    169                 struct coroutine_desc  self_cor;
     173                struct $coroutine  self_cor;
    170174
    171175                // current active context
    172                 struct coroutine_desc * curr_cor;
     176                struct $coroutine * curr_cor;
    173177
    174178                // monitor body used for mutual exclusion
    175                 struct monitor_desc    self_mon;
     179                struct $monitor    self_mon;
    176180
    177181                // pointer to monitor with sufficient lifetime for current monitors
    178                 struct monitor_desc *  self_mon_p;
     182                struct $monitor *  self_mon_p;
    179183
    180184                // pointer to the cluster on which the thread is running
     
    186190                // Link lists fields
    187191                // instrusive link field for threads
    188                 struct thread_desc * next;
     192                struct $thread * next;
    189193
    190194                struct {
    191                         struct thread_desc * next;
    192                         struct thread_desc * prev;
     195                        struct $thread * next;
     196                        struct $thread * prev;
    193197                } node;
    194198        };
     
    196200        #ifdef __cforall
    197201        extern "Cforall" {
    198                 static inline thread_desc *& get_next( thread_desc & this ) {
     202                static inline $thread *& get_next( $thread & this ) __attribute__((const)) {
    199203                        return this.next;
    200204                }
    201205
    202                 static inline [thread_desc *&, thread_desc *& ] __get( thread_desc & this ) {
     206                static inline [$thread *&, $thread *& ] __get( $thread & this ) __attribute__((const)) {
    203207                        return this.node.[next, prev];
    204208                }
    205209
    206210                static inline void ?{}(__monitor_group_t & this) {
    207                         (this.data){NULL};
     211                        (this.data){0p};
    208212                        (this.size){0};
    209213                        (this.func){NULL};
    210214                }
    211215
    212                 static inline void ?{}(__monitor_group_t & this, struct monitor_desc ** data, __lock_size_t size, fptr_t func) {
     216                static inline void ?{}(__monitor_group_t & this, struct $monitor ** data, __lock_size_t size, fptr_t func) {
    213217                        (this.data){data};
    214218                        (this.size){size};
     
    216220                }
    217221
    218                 static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {
     222                static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) __attribute__((const)) {
    219223                        if( (lhs.data != 0) != (rhs.data != 0) ) return false;
    220224                        if( lhs.size != rhs.size ) return false;
     
    250254
    251255        // assembler routines that performs the context switch
    252         extern void CtxInvokeStub( void );
    253         extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
     256        extern void __cfactx_invoke_stub( void );
     257        extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch");
    254258        // void CtxStore ( void * this ) asm ("CtxStore");
    255259        // void CtxRet   ( void * dst  ) asm ("CtxRet");
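
    Note: the substantive change in this header is the thread state machine:
    coroutine_state gains Rerun, $thread.state becomes a volatile int, and preempted
    records a __Preemption_Reason. Together these let the kernel (kernel.cfa below)
    resolve the block/wake race with a single atomic exchange rather than holding a
    lock across the context switch. A sketch of the handshake, condensed from the
    kernel.cfa diff:

        // blocking side, after the thread's context switch returns to the kernel:
        //   old = __atomic_exchange_n( &thrd->state, Inactive, __ATOMIC_SEQ_CST );
        //   old == Active : regular case, the thread is now parked
        //   old == Rerun  : an unpark raced ahead, run the thread again
        //
        // waking side (unpark):
        //   old = __atomic_exchange_n( &thrd->state, Rerun, __ATOMIC_SEQ_CST );
        //   old == Active   : the thread has not finished blocking; it reruns itself
        //   old == Inactive : the thread is parked; reset the state and __schedule_thread( thrd )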
  • libcfa/src/concurrency/kernel.cfa

    r9fb8f01 r3d5701e  
    1010// Created On       : Tue Jan 17 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Thu Jun 20 17:21:23 2019
    13 // Update Count     : 25
     12// Last Modified On : Tue Feb  4 13:03:15 2020
     13// Update Count     : 58
    1414//
    1515
     
    2626#include <signal.h>
    2727#include <unistd.h>
     28#include <limits.h>                                                                             // PTHREAD_STACK_MIN
     29#include <sys/mman.h>                                                                   // mprotect
    2830}
    2931
     
    4042//-----------------------------------------------------------------------------
    4143// Some assembly required
    42 #if   defined( __i386 )
     44#if defined( __i386 )
    4345        #define CtxGet( ctx )        \
    4446                __asm__ volatile (     \
     
    108110//-----------------------------------------------------------------------------
    109111//Start and stop routine for the kernel, declared first to make sure they run first
    110 static void kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
    111 static void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
     112static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
     113static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
    112114
    113115//-----------------------------------------------------------------------------
     
    115117KERNEL_STORAGE(cluster,         mainCluster);
    116118KERNEL_STORAGE(processor,       mainProcessor);
    117 KERNEL_STORAGE(thread_desc,     mainThread);
     119KERNEL_STORAGE($thread, mainThread);
    118120KERNEL_STORAGE(__stack_t,       mainThreadCtx);
    119121
    120122cluster     * mainCluster;
    121123processor   * mainProcessor;
    122 thread_desc * mainThread;
     124$thread * mainThread;
    123125
    124126extern "C" {
    125 struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
     127        struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
    126128}
    127129
     
    131133// Global state
    132134thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) = {
     135        NULL,                                                                                           // cannot use 0p
    133136        NULL,
    134         NULL,
    135         { 1, false, false }
     137        { 1, false, false },
     138        6u // this should be seeded better, but due to a bug, calling rdtsc doesn't work
    136139};
    137140
     
    139142// Struct to steal stack
    140143struct current_stack_info_t {
    141         __stack_t * storage;            // pointer to stack object
    142         void *base;                             // base of stack
    143         void *limit;                    // stack grows towards stack limit
    144         void *context;                  // address of cfa_context_t
     144        __stack_t * storage;                                                            // pointer to stack object
     145        void * base;                                                                            // base of stack
     146        void * limit;                                                                           // stack grows towards stack limit
     147        void * context;                                                                         // address of cfa_context_t
    145148};
    146149
     
    161164// Main thread construction
    162165
    163 void ?{}( coroutine_desc & this, current_stack_info_t * info) with( this ) {
     166void ?{}( $coroutine & this, current_stack_info_t * info) with( this ) {
    164167        stack.storage = info->storage;
    165168        with(*stack.storage) {
     
    171174        name = "Main Thread";
    172175        state = Start;
    173         starter = NULL;
    174         last = NULL;
    175         cancellation = NULL;
    176 }
    177 
    178 void ?{}( thread_desc & this, current_stack_info_t * info) with( this ) {
     176        starter = 0p;
     177        last = 0p;
     178        cancellation = 0p;
     179}
     180
     181void ?{}( $thread & this, current_stack_info_t * info) with( this ) {
    179182        state = Start;
    180183        self_cor{ info };
     
    184187        self_mon.recursion = 1;
    185188        self_mon_p = &self_mon;
    186         next = NULL;
    187 
    188         node.next = NULL;
    189         node.prev = NULL;
     189        next = 0p;
     190
     191        node.next = 0p;
     192        node.prev = 0p;
    190193        doregister(curr_cluster, this);
    191194
     
    205208}
    206209
    207 static void start(processor * this);
    208 void ?{}(processor & this, const char * name, cluster & cltr) with( this ) {
     210static void * __invoke_processor(void * arg);
     211
     212void ?{}(processor & this, const char name[], cluster & cltr) with( this ) {
    209213        this.name = name;
    210214        this.cltr = &cltr;
    211215        terminated{ 0 };
     216        destroyer = 0p;
    212217        do_terminate = false;
    213         preemption_alarm = NULL;
     218        preemption_alarm = 0p;
    214219        pending_preemption = false;
    215220        runner.proc = &this;
     
    217222        idleLock{};
    218223
    219         start( &this );
     224        __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this);
     225
     226        this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );
     227
     228        __cfaabi_dbg_print_safe("Kernel : core %p started\n", &this);
    220229}
    221230
     
    231240        }
    232241
    233         pthread_join( kernel_thread, NULL );
    234 }
    235 
    236 void ?{}(cluster & this, const char * name, Duration preemption_rate) with( this ) {
     242        pthread_join( kernel_thread, 0p );
     243        free( this.stack );
     244}
     245
     246void ?{}(cluster & this, const char name[], Duration preemption_rate) with( this ) {
    237247        this.name = name;
    238248        this.preemption_rate = preemption_rate;
     
    254264// Kernel Scheduling logic
    255265//=============================================================================================
    256 static void runThread(processor * this, thread_desc * dst);
    257 static void finishRunning(processor * this);
    258 static void halt(processor * this);
     266static $thread * __next_thread(cluster * this);
     267static void __run_thread(processor * this, $thread * dst);
     268static void __halt(processor * this);
    259269
    260270//Main of the processor contexts
    261271void main(processorCtx_t & runner) {
     272        // Because of a bug, we couldn't initialize the seed on construction
     273        // Do it here
     274        kernelTLS.rand_seed ^= rdtscl();
     275
    262276        processor * this = runner.proc;
    263277        verify(this);
     
    273287                __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);
    274288
    275                 thread_desc * readyThread = NULL;
    276                 for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ )
    277                 {
    278                         readyThread = nextThread( this->cltr );
    279 
    280                         if(readyThread)
    281                         {
    282                                 verify( ! kernelTLS.preemption_state.enabled );
    283 
    284                                 runThread(this, readyThread);
    285 
    286                                 verify( ! kernelTLS.preemption_state.enabled );
    287 
    288                                 //Some actions need to be taken from the kernel
    289                                 finishRunning(this);
     289                $thread * readyThread = 0p;
     290                for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) {
     291                        readyThread = __next_thread( this->cltr );
     292
     293                        if(readyThread) {
     294                                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     295                                /* paranoid */ verifyf( readyThread->state == Inactive || readyThread->state == Start || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted);
     296                                /* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );
     297
     298                                __run_thread(this, readyThread);
     299
     300                                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    290301
    291302                                spin_count = 0;
    292                         }
    293                         else
    294                         {
     303                        } else {
    295304                                // spin(this, &spin_count);
    296                                 halt(this);
     305                                __halt(this);
    297306                        }
    298307                }
     
    314323// runThread runs a thread by context switching
    315324// from the processor coroutine to the target thread
    316 static void runThread(processor * this, thread_desc * thrd_dst) {
    317         coroutine_desc * proc_cor = get_coroutine(this->runner);
    318 
    319         // Reset the terminating actions here
    320         this->finish.action_code = No_Action;
     325static void __run_thread(processor * this, $thread * thrd_dst) {
     326        $coroutine * proc_cor = get_coroutine(this->runner);
    321327
    322328        // Update global state
    323329        kernelTLS.this_thread = thrd_dst;
    324330
    325         // set state of processor coroutine to inactive and the thread to active
    326         proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
    327         thrd_dst->state = Active;
    328 
    329         // set context switch to the thread that the processor is executing
    330         verify( thrd_dst->context.SP );
    331         CtxSwitch( &proc_cor->context, &thrd_dst->context );
    332         // when CtxSwitch returns we are back in the processor coroutine
    333 
    334         // set state of processor coroutine to active and the thread to inactive
    335         thrd_dst->state = thrd_dst->state == Halted ? Halted : Inactive;
     331        // set state of processor coroutine to inactive
     332        verify(proc_cor->state == Active);
     333        proc_cor->state = Inactive;
     334
     335        // Actually run the thread
     336        RUNNING:  while(true) {
     337                if(unlikely(thrd_dst->preempted)) {
     338                        thrd_dst->preempted = __NO_PREEMPTION;
     339                        verify(thrd_dst->state == Active || thrd_dst->state == Rerun);
     340                } else {
     341                        verify(thrd_dst->state == Start || thrd_dst->state == Primed || thrd_dst->state == Inactive);
     342                        thrd_dst->state = Active;
     343                }
     344
     345                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     346
     347                // set context switch to the thread that the processor is executing
     348                verify( thrd_dst->context.SP );
     349                __cfactx_switch( &proc_cor->context, &thrd_dst->context );
     350                // when __cfactx_switch returns we are back in the processor coroutine
     351
     352                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     353
     354
      355                // We just finished running a thread; there are a few things that could have happened.
      356                // 1 - Regular case : the thread has blocked and no one has scheduled it yet.
      357                // 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
      358                // 3 - Polite Racy case : the thread has blocked, someone has already tried to schedule it, but the thread is nice and wants to go through the ready-queue anyway
     359                // 4 - Preempted
     360                // In case 1, we may have won a race so we can't write to the state again.
     361                // In case 2, we lost the race so we now own the thread.
     362                // In case 3, we lost the race but can just reschedule the thread.
     363
     364                if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
     365                        // The thread was preempted, reschedule it and reset the flag
     366                        __schedule_thread( thrd_dst );
     367                        break RUNNING;
     368                }
     369
     370                // set state of processor coroutine to active and the thread to inactive
     371                static_assert(sizeof(thrd_dst->state) == sizeof(int));
     372                enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Inactive, __ATOMIC_SEQ_CST);
     373                switch(old_state) {
     374                        case Halted:
      375                                // The thread has halted, it should never be scheduled/run again; set it back to Halted and move on
     376                                thrd_dst->state = Halted;
     377
     378                                // We may need to wake someone up here since
     379                                unpark( this->destroyer );
     380                                this->destroyer = 0p;
     381                                break RUNNING;
     382                        case Active:
     383                                // This is case 1, the regular case, nothing more is needed
     384                                break RUNNING;
     385                        case Rerun:
     386                                // This is case 2, the racy case, someone tried to run this thread before it finished blocking
     387                                // In this case, just run it again.
     388                                continue RUNNING;
     389                        default:
      390                        // This makes no sense, something is wrong, abort
     391                                abort("Finished running a thread that was Inactive/Start/Primed %d\n", old_state);
     392                }
     393        }
     394
     395        // Just before returning to the processor, set the processor coroutine to active
    336396        proc_cor->state = Active;
    337397}
    338398
    339399// KERNEL_ONLY
    340 static void returnToKernel() {
    341         coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
    342         thread_desc * thrd_src = kernelTLS.this_thread;
    343 
    344         // set state of current coroutine to inactive
    345         thrd_src->state = thrd_src->state == Halted ? Halted : Inactive;
    346         proc_cor->state = Active;
    347         int local_errno = *__volatile_errno();
    348         #if defined( __i386 ) || defined( __x86_64 )
    349                 __x87_store;
    350         #endif
    351 
    352         // set new coroutine that the processor is executing
    353         // and context switch to it
    354         verify( proc_cor->context.SP );
    355         CtxSwitch( &thrd_src->context, &proc_cor->context );
    356 
    357         // set state of new coroutine to active
    358         proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
    359         thrd_src->state = Active;
    360 
    361         #if defined( __i386 ) || defined( __x86_64 )
    362                 __x87_load;
    363         #endif
    364         *__volatile_errno() = local_errno;
    365 }
    366 
    367 // KERNEL_ONLY
    368 // Once a thread has finished running, some of
    369 // its final actions must be executed from the kernel
    370 static void finishRunning(processor * this) with( this->finish ) {
    371         verify( ! kernelTLS.preemption_state.enabled );
    372         choose( action_code ) {
    373         case No_Action:
    374                 break;
    375         case Release:
    376                 unlock( *lock );
    377         case Schedule:
    378                 ScheduleThread( thrd );
    379         case Release_Schedule:
    380                 unlock( *lock );
    381                 ScheduleThread( thrd );
    382         case Release_Multi:
    383                 for(int i = 0; i < lock_count; i++) {
    384                         unlock( *locks[i] );
    385                 }
    386         case Release_Multi_Schedule:
    387                 for(int i = 0; i < lock_count; i++) {
    388                         unlock( *locks[i] );
    389                 }
    390                 for(int i = 0; i < thrd_count; i++) {
    391                         ScheduleThread( thrds[i] );
    392                 }
    393         case Callback:
    394                 callback();
    395         default:
    396                 abort("KERNEL ERROR: Unexpected action to run after thread");
    397         }
     400void returnToKernel() {
     401        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     402        $coroutine * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
     403        $thread * thrd_src = kernelTLS.this_thread;
     404
     405        // Run the thread on this processor
     406        {
     407                int local_errno = *__volatile_errno();
     408                #if defined( __i386 ) || defined( __x86_64 )
     409                        __x87_store;
     410                #endif
     411                verify( proc_cor->context.SP );
     412                __cfactx_switch( &thrd_src->context, &proc_cor->context );
     413                #if defined( __i386 ) || defined( __x86_64 )
     414                        __x87_load;
     415                #endif
     416                *__volatile_errno() = local_errno;
     417        }
     418
     419        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    398420}
    399421
     
    402424// This is the entry point for processors (kernel threads)
    403425// It effectively constructs a coroutine by stealing the pthread stack
    404 static void * CtxInvokeProcessor(void * arg) {
     426static void * __invoke_processor(void * arg) {
    405427        processor * proc = (processor *) arg;
    406428        kernelTLS.this_processor = proc;
    407         kernelTLS.this_thread    = NULL;
     429        kernelTLS.this_thread    = 0p;
    408430        kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
    409431        // SKULLDUGGERY: We want to create a context for the processor coroutine
     
    418440
    419441        //Set global state
    420         kernelTLS.this_thread    = NULL;
     442        kernelTLS.this_thread = 0p;
    421443
    422444        //We now have a proper context from which to schedule threads
     
    434456        __cfaabi_dbg_print_safe("Kernel : core %p main ended (%p)\n", proc, &proc->runner);
    435457
    436         return NULL;
    437 }
    438 
    439 static void start(processor * this) {
    440         __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this);
    441 
    442         pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this );
    443 
    444         __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);
     458        return 0p;
     459}
     460
     461static void Abort( int ret, const char func[] ) {
     462        if ( ret ) {                                                                            // pthread routines return errno values
     463                abort( "%s : internal error, error(%d) %s.", func, ret, strerror( ret ) );
     464        } // if
     465} // Abort
     466
     467void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
     468        pthread_attr_t attr;
     469
     470        Abort( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute
     471
     472        size_t stacksize;
     473        // default stack size, normally defined by shell limit
     474        Abort( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" );
     475        assert( stacksize >= PTHREAD_STACK_MIN );
     476
     477        void * stack;
     478        __cfaabi_dbg_debug_do(
     479                stack = memalign( __page_size, stacksize + __page_size );
     480                // pthread has no mechanism to create the guard page in user supplied stack.
     481                if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
     482                        abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
     483                } // if
     484        );
     485        __cfaabi_dbg_no_debug_do(
     486                stack = malloc( stacksize );
     487        );
     488
     489        Abort( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" );
     490
     491        Abort( pthread_create( pthread, &attr, start, arg ), "pthread_create" );
     492        return stack;
    445493}
    446494
    447495// KERNEL_ONLY
    448 void kernel_first_resume( processor * this ) {
    449         thread_desc * src = mainThread;
    450         coroutine_desc * dst = get_coroutine(this->runner);
     496static void __kernel_first_resume( processor * this ) {
     497        $thread * src = mainThread;
     498        $coroutine * dst = get_coroutine(this->runner);
    451499
    452500        verify( ! kernelTLS.preemption_state.enabled );
    453501
     502        kernelTLS.this_thread->curr_cor = dst;
    454503        __stack_prepare( &dst->stack, 65000 );
    455         CtxStart(&this->runner, CtxInvokeCoroutine);
     504        __cfactx_start(main, dst, this->runner, __cfactx_invoke_coroutine);
    456505
    457506        verify( ! kernelTLS.preemption_state.enabled );
     
    465514        // context switch to specified coroutine
    466515        verify( dst->context.SP );
    467         CtxSwitch( &src->context, &dst->context );
    468         // when CtxSwitch returns we are back in the src coroutine
     516        __cfactx_switch( &src->context, &dst->context );
     517        // when __cfactx_switch returns we are back in the src coroutine
     518
     519        mainThread->curr_cor = &mainThread->self_cor;
    469520
    470521        // set state of new coroutine to active
     
    475526
    476527// KERNEL_ONLY
    477 void kernel_last_resume( processor * this ) {
    478         coroutine_desc * src = &mainThread->self_cor;
    479         coroutine_desc * dst = get_coroutine(this->runner);
     528static void __kernel_last_resume( processor * this ) {
     529        $coroutine * src = &mainThread->self_cor;
     530        $coroutine * dst = get_coroutine(this->runner);
    480531
    481532        verify( ! kernelTLS.preemption_state.enabled );
     
    484535
    485536        // context switch to the processor
    486         CtxSwitch( &src->context, &dst->context );
     537        __cfactx_switch( &src->context, &dst->context );
    487538}
    488539
    489540//-----------------------------------------------------------------------------
    490541// Scheduler routines
    491 
    492542// KERNEL ONLY
    493 void ScheduleThread( thread_desc * thrd ) {
    494         verify( thrd );
    495         verify( thrd->state != Halted );
    496 
    497         verify( ! kernelTLS.preemption_state.enabled );
    498 
    499         verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );
    500 
    501         with( *thrd->curr_cluster ) {
    502                 lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
    503                 bool was_empty = !(ready_queue != 0);
    504                 append( ready_queue, thrd );
    505                 unlock( ready_queue_lock );
    506 
    507                 if(was_empty) {
    508                         lock      (proc_list_lock __cfaabi_dbg_ctx2);
    509                         if(idles) {
    510                                 wake_fast(idles.head);
    511                         }
    512                         unlock    (proc_list_lock);
     543void __schedule_thread( $thread * thrd ) with( *thrd->curr_cluster ) {
     544        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     545        /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
     546        /* paranoid */ if( thrd->state == Inactive || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
     547                          "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
     548        /* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun,
     549                          "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
     550        /* paranoid */ #endif
     551        /* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );
     552
     553        lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
     554        bool was_empty = !(ready_queue != 0);
     555        append( ready_queue, thrd );
     556        unlock( ready_queue_lock );
     557
     558        if(was_empty) {
     559                lock      (proc_list_lock __cfaabi_dbg_ctx2);
     560                if(idles) {
     561                        wake_fast(idles.head);
    513562                }
    514                 else if( struct processor * idle = idles.head ) {
    515                         wake_fast(idle);
    516                 }
    517 
    518         }
    519 
    520         verify( ! kernelTLS.preemption_state.enabled );
     563                unlock    (proc_list_lock);
     564        }
     565        else if( struct processor * idle = idles.head ) {
     566                wake_fast(idle);
     567        }
     568
     569        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    521570}
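
[Note] The enqueue/wake-up shape of __schedule_thread above can be shown in isolation. The following stand-alone C sketch uses illustrative names (it is not libcfa code) and collapses the two wake-up paths onto a plain condition variable: work is appended under the ready-queue lock, and the idle-list lock is taken only on the empty-to-non-empty transition.

        #include <pthread.h>
        #include <stdbool.h>

        static pthread_mutex_t ready_lock = PTHREAD_MUTEX_INITIALIZER;
        static pthread_mutex_t idle_lock  = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t  idle_cv    = PTHREAD_COND_INITIALIZER;
        static int ready_count = 0;                        // stands in for the ready queue

        static void toy_schedule( void ) {
                pthread_mutex_lock( &ready_lock );
                bool was_empty = (ready_count == 0);
                ready_count += 1;                          // cf. append( ready_queue, thrd )
                pthread_mutex_unlock( &ready_lock );

                if ( was_empty ) {                         // slow path: take the idle-list lock
                        pthread_mutex_lock( &idle_lock );
                        pthread_cond_signal( &idle_cv );   // wake one idle processor, if any
                        pthread_mutex_unlock( &idle_lock );
                } else {                                   // fast path: opportunistic wake
                        pthread_cond_signal( &idle_cv );
                }
        }
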
    522571
    523572// KERNEL ONLY
    524 thread_desc * nextThread(cluster * this) with( *this ) {
    525         verify( ! kernelTLS.preemption_state.enabled );
     573static $thread * __next_thread(cluster * this) with( *this ) {
     574        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     575
    526576        lock( ready_queue_lock __cfaabi_dbg_ctx2 );
    527         thread_desc * head = pop_head( ready_queue );
     577        $thread * head = pop_head( ready_queue );
    528578        unlock( ready_queue_lock );
    529         verify( ! kernelTLS.preemption_state.enabled );
     579
     580        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    530581        return head;
    531582}
    532583
    533 void BlockInternal() {
     584void unpark( $thread * thrd ) {
     585        if( !thrd ) return;
     586
    534587        disable_interrupts();
    535         verify( ! kernelTLS.preemption_state.enabled );
     588        static_assert(sizeof(thrd->state) == sizeof(int));
     589        enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, Rerun, __ATOMIC_SEQ_CST);
     590        switch(old_state) {
     591                case Active:
     592                        // Wake won the race, the thread will reschedule/rerun itself
     593                        break;
     594                case Inactive:
     595                        /* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
     596
      597                        // Wake lost the race, the thread has already parked; restore its state and schedule it
     598                        thrd->state = Inactive;
     599                        __schedule_thread( thrd );
     600                        break;
     601                case Rerun:
     602                        abort("More than one thread attempted to schedule thread %p\n", thrd);
     603                        break;
     604                case Halted:
     605                case Start:
     606                case Primed:
     607                default:
      608                        // This makes no sense, something is wrong; abort
     609                        abort();
     610        }
     611        enable_interrupts( __cfaabi_dbg_ctx );
     612}
     613
     614void park( void ) {
     615        /* paranoid */ verify( kernelTLS.preemption_state.enabled );
     616        disable_interrupts();
     617        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     618        /* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION );
     619
    536620        returnToKernel();
    537         verify( ! kernelTLS.preemption_state.enabled );
     621
     622        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    538623        enable_interrupts( __cfaabi_dbg_ctx );
    539 }
    540 
    541 void BlockInternal( __spinlock_t * lock ) {
     624        /* paranoid */ verify( kernelTLS.preemption_state.enabled );
     625
     626}
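
[Note] The park()/unpark() pair above resolves the block/wake race with a single atomic exchange on the thread state. Below is a minimal stand-alone analogue in C11, with toy names throughout; the real kernel "parks" by context switching to the processor, not by sleeping on a semaphore, so this is only a sketch of the state-machine idea.

        #include <stdatomic.h>
        #include <semaphore.h>
        #include <stdlib.h>

        enum { ACTIVE, INACTIVE, RERUN };                  // mirrors Active/Inactive/Rerun

        struct toy_thread {
                _Atomic int state;
                sem_t sleep;                               // blocks the thread while parked
        };

        static void toy_init( struct toy_thread * t ) {
                atomic_init( &t->state, ACTIVE );
                sem_init( &t->sleep, 0, 0 );
        }

        static void toy_park( struct toy_thread * this ) {
                // Announce the intent to block; if an unpark already happened,
                // the exchange observes RERUN and the sleep is skipped.
                if ( atomic_exchange( &this->state, INACTIVE ) != RERUN ) {
                        sem_wait( &this->sleep );          // really block
                }
                atomic_store( &this->state, ACTIVE );
        }

        static void toy_unpark( struct toy_thread * thrd ) {
                if ( ! thrd ) return;
                switch ( atomic_exchange( &thrd->state, RERUN ) ) {
                  case ACTIVE:   break;                    // wake won the race: thread reruns itself
                  case INACTIVE: sem_post( &thrd->sleep ); break; // wake lost: schedule (post) it
                  default:       abort();                  // double unpark, as in the abort above
                }
        }
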
     627
     628// KERNEL ONLY
     629void __leave_thread() {
     630        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     631        returnToKernel();
     632        abort();
     633}
     634
     635// KERNEL ONLY
     636bool force_yield( __Preemption_Reason reason ) {
     637        /* paranoid */ verify( kernelTLS.preemption_state.enabled );
    542638        disable_interrupts();
    543         with( *kernelTLS.this_processor ) {
    544                 finish.action_code = Release;
    545                 finish.lock        = lock;
    546         }
    547 
    548         verify( ! kernelTLS.preemption_state.enabled );
    549         returnToKernel();
    550         verify( ! kernelTLS.preemption_state.enabled );
    551 
    552         enable_interrupts( __cfaabi_dbg_ctx );
    553 }
    554 
    555 void BlockInternal( thread_desc * thrd ) {
    556         disable_interrupts();
    557         with( * kernelTLS.this_processor ) {
    558                 finish.action_code = Schedule;
    559                 finish.thrd        = thrd;
    560         }
    561 
    562         verify( ! kernelTLS.preemption_state.enabled );
    563         returnToKernel();
    564         verify( ! kernelTLS.preemption_state.enabled );
    565 
    566         enable_interrupts( __cfaabi_dbg_ctx );
    567 }
    568 
    569 void BlockInternal( __spinlock_t * lock, thread_desc * thrd ) {
    570         assert(thrd);
    571         disable_interrupts();
    572         with( * kernelTLS.this_processor ) {
    573                 finish.action_code = Release_Schedule;
    574                 finish.lock        = lock;
    575                 finish.thrd        = thrd;
    576         }
    577 
    578         verify( ! kernelTLS.preemption_state.enabled );
    579         returnToKernel();
    580         verify( ! kernelTLS.preemption_state.enabled );
    581 
    582         enable_interrupts( __cfaabi_dbg_ctx );
    583 }
    584 
    585 void BlockInternal(__spinlock_t * locks [], unsigned short count) {
    586         disable_interrupts();
    587         with( * kernelTLS.this_processor ) {
    588                 finish.action_code = Release_Multi;
    589                 finish.locks       = locks;
    590                 finish.lock_count  = count;
    591         }
    592 
    593         verify( ! kernelTLS.preemption_state.enabled );
    594         returnToKernel();
    595         verify( ! kernelTLS.preemption_state.enabled );
    596 
    597         enable_interrupts( __cfaabi_dbg_ctx );
    598 }
    599 
    600 void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
    601         disable_interrupts();
    602         with( *kernelTLS.this_processor ) {
    603                 finish.action_code = Release_Multi_Schedule;
    604                 finish.locks       = locks;
    605                 finish.lock_count  = lock_count;
    606                 finish.thrds       = thrds;
    607                 finish.thrd_count  = thrd_count;
    608         }
    609 
    610         verify( ! kernelTLS.preemption_state.enabled );
    611         returnToKernel();
    612         verify( ! kernelTLS.preemption_state.enabled );
    613 
    614         enable_interrupts( __cfaabi_dbg_ctx );
    615 }
    616 
    617 void BlockInternal(__finish_callback_fptr_t callback) {
    618         disable_interrupts();
    619         with( *kernelTLS.this_processor ) {
    620                 finish.action_code = Callback;
    621                 finish.callback    = callback;
    622         }
    623 
    624         verify( ! kernelTLS.preemption_state.enabled );
    625         returnToKernel();
    626         verify( ! kernelTLS.preemption_state.enabled );
    627 
    628         enable_interrupts( __cfaabi_dbg_ctx );
    629 }
    630 
    631 // KERNEL ONLY
    632 void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
    633         verify( ! kernelTLS.preemption_state.enabled );
    634         with( * kernelTLS.this_processor ) {
    635                 finish.action_code = thrd ? Release_Schedule : Release;
    636                 finish.lock        = lock;
    637                 finish.thrd        = thrd;
    638         }
    639 
    640         returnToKernel();
     639        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     640
     641        $thread * thrd = kernelTLS.this_thread;
     642        /* paranoid */ verify(thrd->state == Active || thrd->state == Rerun);
     643
     644        // SKULLDUGGERY: It is possible that we are preempting this thread just before
     645        // it was going to park itself. If that is the case and it is already using the
      646        // intrusive fields then we can't use them to preempt the thread;
      647        // if that is the case, abandon the preemption.
     648        bool preempted = false;
     649        if(thrd->next == 0p) {
     650                preempted = true;
     651                thrd->preempted = reason;
     652                returnToKernel();
     653        }
     654
     655        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     656        enable_interrupts_noPoll();
     657        /* paranoid */ verify( kernelTLS.preemption_state.enabled );
     658
     659        return preempted;
    641660}
    642661
     
    646665//-----------------------------------------------------------------------------
    647666// Kernel boot procedures
    648 static void kernel_startup(void) {
     667static void __kernel_startup(void) {
    649668        verify( ! kernelTLS.preemption_state.enabled );
    650669        __cfaabi_dbg_print_safe("Kernel : Starting\n");
     
    664683        // SKULLDUGGERY: the mainThread steals the process main thread
    665684        // which will then be scheduled by the mainProcessor normally
    666         mainThread = (thread_desc *)&storage_mainThread;
     685        mainThread = ($thread *)&storage_mainThread;
    667686        current_stack_info_t info;
    668687        info.storage = (__stack_t*)&storage_mainThreadCtx;
     
    676695        void ?{}(processorCtx_t & this, processor * proc) {
    677696                (this.__cor){ "Processor" };
    678                 this.__cor.starter = NULL;
     697                this.__cor.starter = 0p;
    679698                this.proc = proc;
    680699        }
     
    685704                terminated{ 0 };
    686705                do_terminate = false;
    687                 preemption_alarm = NULL;
     706                preemption_alarm = 0p;
    688707                pending_preemption = false;
    689708                kernel_thread = pthread_self();
     
    707726        // Add the main thread to the ready queue
    708727        // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
    709         ScheduleThread(mainThread);
     728        __schedule_thread(mainThread);
    710729
    711730        // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
    712         // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
     731        // context. Hence, the main thread does not begin through __cfactx_invoke_thread, like all other threads. The trick here is that
    713732        // mainThread is on the ready queue when this call is made.
    714         kernel_first_resume( kernelTLS.this_processor );
     733        __kernel_first_resume( kernelTLS.this_processor );
    715734
    716735
     
    724743}
    725744
    726 static void kernel_shutdown(void) {
     745static void __kernel_shutdown(void) {
    727746        __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");
    728747
     
    735754        // which is currently here
    736755        __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
    737         kernel_last_resume( kernelTLS.this_processor );
     756        __kernel_last_resume( kernelTLS.this_processor );
    738757        mainThread->self_cor.state = Halted;
    739758
     
    761780// Kernel Quiescing
    762781//=============================================================================================
    763 static void halt(processor * this) with( *this ) {
     782static void __halt(processor * this) with( *this ) {
    764783        // verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );
    765784
     
    803822                sigemptyset( &mask );
    804823                sigaddset( &mask, SIGALRM );            // block SIGALRM signals
    805                 sigsuspend( &mask );                    // block the processor to prevent further damage during abort
    806                 _exit( EXIT_FAILURE );                  // if processor unblocks before it is killed, terminate it
      824                sigaddset( &mask, SIGUSR1 );            // block SIGUSR1 signals
     825                sigsuspend( &mask );                            // block the processor to prevent further damage during abort
     826                _exit( EXIT_FAILURE );                          // if processor unblocks before it is killed, terminate it
    807827        }
    808828        else {
     
    815835
    816836void kernel_abort_msg( void * kernel_data, char * abort_text, int abort_text_size ) {
    817         thread_desc * thrd = kernel_data;
     837        $thread * thrd = kernel_data;
    818838
    819839        if(thrd) {
    820840                int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
    821                 __cfaabi_dbg_bits_write( abort_text, len );
     841                __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
    822842
    823843                if ( &thrd->self_cor != thrd->curr_cor ) {
    824844                        len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
    825                         __cfaabi_dbg_bits_write( abort_text, len );
     845                        __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
    826846                }
    827847                else {
    828                         __cfaabi_dbg_bits_write( ".\n", 2 );
     848                        __cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
    829849                }
    830850        }
    831851        else {
    832852                int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
    833                 __cfaabi_dbg_bits_write( abort_text, len );
     853                __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
    834854        }
    835855}
     
    842862
    843863extern "C" {
    844         void __cfaabi_dbg_bits_acquire() {
     864        void __cfaabi_bits_acquire() {
    845865                lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
    846866        }
    847867
    848         void __cfaabi_dbg_bits_release() {
     868        void __cfaabi_bits_release() {
    849869                unlock( kernel_debug_lock );
    850870        }
     
    871891
    872892                // atomically release spin lock and block
    873                 BlockInternal( &lock );
     893                unlock( lock );
     894                park();
    874895        }
    875896        else {
     
    879900
    880901void V(semaphore & this) with( this ) {
    881         thread_desc * thrd = NULL;
     902        $thread * thrd = 0p;
    882903        lock( lock __cfaabi_dbg_ctx2 );
    883904        count += 1;
     
    890911
    891912        // make new owner
    892         WakeThread( thrd );
     913        unpark( thrd );
    893914}
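
[Note] P()/V() above now follow the "unlock, then park" idiom, which is safe only because unpark() tolerates arriving before the matching park(). A stand-alone C approximation follows (toy names, not libcfa's semaphore); a POSIX semaphore per waiter plays the park/unpark role, since it likewise remembers an early post.

        #include <pthread.h>
        #include <semaphore.h>
        #include <stddef.h>

        struct waiter { sem_t parked; struct waiter * next; };

        struct toy_sema {
                pthread_mutex_t lock;                      // stands in for the __spinlock_t
                int count;
                struct waiter * head;                      // waiting list (LIFO for brevity)
        };

        static void toy_P( struct toy_sema * this ) {
                pthread_mutex_lock( &this->lock );
                this->count -= 1;
                if ( this->count < 0 ) {
                        struct waiter self;
                        sem_init( &self.parked, 0, 0 );
                        self.next = this->head;            // enqueue ourselves
                        this->head = &self;
                        pthread_mutex_unlock( &this->lock ); // release the lock first ...
                        sem_wait( &self.parked );          // ... then block, cf. park()
                        sem_destroy( &self.parked );
                } else {
                        pthread_mutex_unlock( &this->lock );
                }
        }

        static void toy_V( struct toy_sema * this ) {
                struct waiter * thrd = NULL;
                pthread_mutex_lock( &this->lock );
                this->count += 1;
                if ( this->count <= 0 ) {                  // someone is (or will be) parked
                        thrd = this->head;
                        this->head = thrd->next;
                }
                pthread_mutex_unlock( &this->lock );
                if ( thrd ) sem_post( &thrd->parked );     // cf. unpark( thrd )
        }
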
    894915
     
    907928}
    908929
    909 void doregister( cluster * cltr, thread_desc & thrd ) {
     930void doregister( cluster * cltr, $thread & thrd ) {
    910931        lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
    911932        cltr->nthreads += 1;
     
    914935}
    915936
    916 void unregister( cluster * cltr, thread_desc & thrd ) {
     937void unregister( cluster * cltr, $thread & thrd ) {
    917938        lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
    918939        remove(cltr->threads, thrd );
     
    939960__cfaabi_dbg_debug_do(
    940961        extern "C" {
    941                 void __cfaabi_dbg_record(__spinlock_t & this, const char * prev_name) {
     962                void __cfaabi_dbg_record(__spinlock_t & this, const char prev_name[]) {
    942963                        this.prev_name = prev_name;
    943964                        this.prev_thrd = kernelTLS.this_thread;
     
    948969//-----------------------------------------------------------------------------
    949970// Debug
    950 bool threading_enabled(void) {
     971bool threading_enabled(void) __attribute__((const)) {
    951972        return true;
    952973}
  • libcfa/src/concurrency/kernel.hfa

    r9fb8f01 r3d5701e  
    1010// Created On       : Tue Jan 17 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sat Jun 22 11:39:17 2019
    13 // Update Count     : 16
     12// Last Modified On : Tue Feb  4 12:29:26 2020
     13// Update Count     : 22
    1414//
    1515
     
    2020#include "invoke.h"
    2121#include "time_t.hfa"
     22#include "coroutine.hfa"
    2223
    2324extern "C" {
     
    3132        __spinlock_t lock;
    3233        int count;
    33         __queue_t(thread_desc) waiting;
     34        __queue_t($thread) waiting;
    3435};
    3536
     
    4344// Processor
    4445extern struct cluster * mainCluster;
    45 
    46 enum FinishOpCode { No_Action, Release, Schedule, Release_Schedule, Release_Multi, Release_Multi_Schedule, Callback };
    47 
    48 typedef void (*__finish_callback_fptr_t)(void);
    49 
    50 //TODO use union, many of these fields are mutually exclusive (i.e. MULTI vs NOMULTI)
    51 struct FinishAction {
    52         FinishOpCode action_code;
    53         /*
    54         // Union of possible actions
    55         union {
    56                 // Option 1 : locks and threads
    57                 struct {
    58                         // 1 thread or N thread
    59                         union {
    60                                 thread_desc * thrd;
    61                                 struct {
    62                                         thread_desc ** thrds;
    63                                         unsigned short thrd_count;
    64                                 };
    65                         };
    66                         // 1 lock or N lock
    67                         union {
    68                                 __spinlock_t * lock;
    69                                 struct {
    70                                         __spinlock_t ** locks;
    71                                         unsigned short lock_count;
    72                                 };
    73                         };
    74                 };
    75                 // Option 2 : action pointer
    76                 __finish_callback_fptr_t callback;
    77         };
    78         /*/
    79         thread_desc * thrd;
    80         thread_desc ** thrds;
    81         unsigned short thrd_count;
    82         __spinlock_t * lock;
    83         __spinlock_t ** locks;
    84         unsigned short lock_count;
    85         __finish_callback_fptr_t callback;
    86         //*/
    87 };
    88 static inline void ?{}(FinishAction & this) {
    89         this.action_code = No_Action;
    90         this.thrd = NULL;
    91         this.lock = NULL;
    92 }
    93 static inline void ^?{}(FinishAction &) {}
    9446
    9547// Processor
     
    11567        // RunThread data
     11668        // Action to do after a thread is run
    117         struct FinishAction finish;
     69        $thread * destroyer;
    11870
    11971        // Preemption data
     
    13486        semaphore terminated;
    13587
     88        // pthread Stack
     89        void * stack;
     90
    13691        // Link lists fields
    13792        struct __dbg_node_proc {
     
    146101};
    147102
    148 void  ?{}(processor & this, const char * name, struct cluster & cltr);
     103void  ?{}(processor & this, const char name[], struct cluster & cltr);
    149104void ^?{}(processor & this);
    150105
    151106static inline void  ?{}(processor & this)                    { this{ "Anonymous Processor", *mainCluster}; }
    152107static inline void  ?{}(processor & this, struct cluster & cltr)    { this{ "Anonymous Processor", cltr}; }
    153 static inline void  ?{}(processor & this, const char * name) { this{name, *mainCluster }; }
     108static inline void  ?{}(processor & this, const char name[]) { this{name, *mainCluster }; }
    154109
    155 static inline [processor *&, processor *& ] __get( processor & this ) {
    156         return this.node.[next, prev];
    157 }
     110static inline [processor *&, processor *& ] __get( processor & this ) __attribute__((const)) { return this.node.[next, prev]; }
    158111
    159112//-----------------------------------------------------------------------------
     
    164117
    165118        // Ready queue for threads
    166         __queue_t(thread_desc) ready_queue;
     119        __queue_t($thread) ready_queue;
    167120
    168121        // Name of the cluster
     
    180133        // List of threads
    181134        __spinlock_t thread_list_lock;
    182         __dllist_t(struct thread_desc) threads;
     135        __dllist_t(struct $thread) threads;
    183136        unsigned int nthreads;
    184137
     
    191144extern Duration default_preemption();
    192145
    193 void ?{} (cluster & this, const char * name, Duration preemption_rate);
     146void ?{} (cluster & this, const char name[], Duration preemption_rate);
    194147void ^?{}(cluster & this);
    195148
    196149static inline void ?{} (cluster & this)                           { this{"Anonymous Cluster", default_preemption()}; }
    197150static inline void ?{} (cluster & this, Duration preemption_rate) { this{"Anonymous Cluster", preemption_rate}; }
    198 static inline void ?{} (cluster & this, const char * name)        { this{name, default_preemption()}; }
     151static inline void ?{} (cluster & this, const char name[])        { this{name, default_preemption()}; }
    199152
    200 static inline [cluster *&, cluster *& ] __get( cluster & this ) {
    201         return this.node.[next, prev];
    202 }
     153static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }
    203154
    204155static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE
  • libcfa/src/concurrency/kernel_private.hfa

    r9fb8f01 r3d5701e  
    1010// Created On       : Mon Feb 13 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Thu Mar 29 14:06:40 2018
    13 // Update Count     : 3
     12// Last Modified On : Sat Nov 30 19:25:02 2019
     13// Update Count     : 8
    1414//
    1515
     
    3131}
    3232
    33 void ScheduleThread( thread_desc * );
    34 static inline void WakeThread( thread_desc * thrd ) {
    35         if( !thrd ) return;
    36 
    37         disable_interrupts();
    38         ScheduleThread( thrd );
    39         enable_interrupts( __cfaabi_dbg_ctx );
    40 }
    41 thread_desc * nextThread(cluster * this);
     33void __schedule_thread( $thread * ) __attribute__((nonnull (1)));
    4234
    4335//Block current thread and release/wake-up the following resources
    44 void BlockInternal(void);
    45 void BlockInternal(__spinlock_t * lock);
    46 void BlockInternal(thread_desc * thrd);
    47 void BlockInternal(__spinlock_t * lock, thread_desc * thrd);
    48 void BlockInternal(__spinlock_t * locks [], unsigned short count);
    49 void BlockInternal(__spinlock_t * locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count);
    50 void BlockInternal(__finish_callback_fptr_t callback);
    51 void LeaveThread(__spinlock_t * lock, thread_desc * thrd);
     36void __leave_thread() __attribute__((noreturn));
    5237
    5338//-----------------------------------------------------------------------------
    5439// Processor
    5540void main(processorCtx_t *);
     41
     42void * __create_pthread( pthread_t *, void * (*)(void *), void * );
    5643
    5744static inline void wake_fast(processor * this) {
     
    8471// Threads
    8572extern "C" {
    86       forall(dtype T | is_thread(T))
    87       void CtxInvokeThread(T * this);
     73      void __cfactx_invoke_thread(void (*main)(void *), void * this);
    8874}
    8975
    90 extern void ThreadCtxSwitch(coroutine_desc * src, coroutine_desc * dst);
    91 
    9276__cfaabi_dbg_debug_do(
    93         extern void __cfaabi_dbg_thread_register  ( thread_desc * thrd );
    94         extern void __cfaabi_dbg_thread_unregister( thread_desc * thrd );
     77        extern void __cfaabi_dbg_thread_register  ( $thread * thrd );
     78        extern void __cfaabi_dbg_thread_unregister( $thread * thrd );
    9579)
    9680
     
    9983#define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]
    10084
     85static inline uint32_t __tls_rand() {
     86        kernelTLS.rand_seed ^= kernelTLS.rand_seed << 6;
     87        kernelTLS.rand_seed ^= kernelTLS.rand_seed >> 21;
     88        kernelTLS.rand_seed ^= kernelTLS.rand_seed << 7;
     89        return kernelTLS.rand_seed;
     90}
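
[Note] __tls_rand above is a Marsaglia-style xorshift generator over the per-processor seed. A stand-alone C check of the same shift triple follows; the seed value here is arbitrary (it only needs to be non-zero) and is not what the runtime uses.

        #include <stdint.h>
        #include <stdio.h>

        static uint32_t seed = 2463534242u;                // any non-zero seed

        static uint32_t xorshift( void ) {
                seed ^= seed << 6;                         // same triple as __tls_rand
                seed ^= seed >> 21;
                seed ^= seed << 7;
                return seed;
        }

        int main( void ) {
                for ( int i = 0; i < 4; i += 1 ) printf( "%u\n", xorshift() );
                return 0;
        }
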
     91
    10192
    10293void doregister( struct cluster & cltr );
    10394void unregister( struct cluster & cltr );
    10495
    105 void doregister( struct cluster * cltr, struct thread_desc & thrd );
    106 void unregister( struct cluster * cltr, struct thread_desc & thrd );
     96void doregister( struct cluster * cltr, struct $thread & thrd );
     97void unregister( struct cluster * cltr, struct $thread & thrd );
    10798
    10899void doregister( struct cluster * cltr, struct processor * proc );
  • libcfa/src/concurrency/monitor.cfa

    r9fb8f01 r3d5701e  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // monitor_desc.c --
     7// $monitor.c --
    88//
    99// Author           : Thierry Delisle
    1010// Created On       : Thd Feb 23 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Mar 30 14:30:26 2018
    13 // Update Count     : 9
     12// Last Modified On : Wed Dec  4 07:55:14 2019
     13// Update Count     : 10
    1414//
    1515
     
    2727//-----------------------------------------------------------------------------
    2828// Forward declarations
    29 static inline void set_owner ( monitor_desc * this, thread_desc * owner );
    30 static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
    31 static inline void set_mask  ( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
    32 static inline void reset_mask( monitor_desc * this );
    33 
    34 static inline thread_desc * next_thread( monitor_desc * this );
    35 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );
     29static inline void __set_owner ( $monitor * this, $thread * owner );
     30static inline void __set_owner ( $monitor * storage [], __lock_size_t count, $thread * owner );
     31static inline void set_mask  ( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
     32static inline void reset_mask( $monitor * this );
     33
     34static inline $thread * next_thread( $monitor * this );
     35static inline bool is_accepted( $monitor * this, const __monitor_group_t & monitors );
    3636
    3737static inline void lock_all  ( __spinlock_t * locks [], __lock_size_t count );
    38 static inline void lock_all  ( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
     38static inline void lock_all  ( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
    3939static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count );
    40 static inline void unlock_all( monitor_desc * locks [], __lock_size_t count );
    41 
    42 static inline void save   ( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
    43 static inline void restore( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
    44 
    45 static inline void init     ( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    46 static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    47 
    48 static inline thread_desc *        check_condition   ( __condition_criterion_t * );
     40static inline void unlock_all( $monitor * locks [], __lock_size_t count );
     41
     42static inline void save   ( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
     43static inline void restore( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
     44
     45static inline void init     ( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     46static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     47
     48static inline $thread *        check_condition   ( __condition_criterion_t * );
    4949static inline void                 brand_condition   ( condition & );
    50 static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc * monitors [], __lock_size_t count );
     50static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t &, $monitor * monitors [], __lock_size_t count );
    5151
    5252forall(dtype T | sized( T ))
    5353static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
    5454static inline __lock_size_t count_max    ( const __waitfor_mask_t & mask );
    55 static inline __lock_size_t aggregate    ( monitor_desc * storage [], const __waitfor_mask_t & mask );
     55static inline __lock_size_t aggregate    ( $monitor * storage [], const __waitfor_mask_t & mask );
    5656
    5757//-----------------------------------------------------------------------------
     
    6868
    6969#define monitor_ctx( mons, cnt )                                /* Define that create the necessary struct for internal/external scheduling operations */ \
    70         monitor_desc ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
     70        $monitor ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
    7171        __lock_size_t count = cnt;                                /* Save the count to a local variable                                                  */ \
    7272        unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
     
    8080//-----------------------------------------------------------------------------
    8181// Enter/Leave routines
    82 
    83 
    84 extern "C" {
    85         // Enter single monitor
    86         static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
    87                 // Lock the monitor spinlock
    88                 lock( this->lock __cfaabi_dbg_ctx2 );
    89                 // Interrupts disable inside critical section
    90                 thread_desc * thrd = kernelTLS.this_thread;
    91 
    92                 __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
    93 
    94                 if( !this->owner ) {
    95                         // No one has the monitor, just take it
    96                         set_owner( this, thrd );
    97 
    98                         __cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
    99                 }
    100                 else if( this->owner == thrd) {
    101                         // We already have the monitor, just note how many times we took it
    102                         this->recursion += 1;
    103 
    104                         __cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
    105                 }
    106                 else if( is_accepted( this, group) ) {
    107                         // Some one was waiting for us, enter
    108                         set_owner( this, thrd );
    109 
    110                         // Reset mask
    111                         reset_mask( this );
    112 
    113                         __cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
    114                 }
    115                 else {
    116                         __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
    117 
    118                         // Some one else has the monitor, wait in line for it
    119                         append( this->entry_queue, thrd );
    120 
    121                         BlockInternal( &this->lock );
    122 
    123                         __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
    124 
    125                         // BlockInternal will unlock spinlock, no need to unlock ourselves
    126                         return;
    127                 }
     82// Enter single monitor
     83static void __enter( $monitor * this, const __monitor_group_t & group ) {
     84        // Lock the monitor spinlock
     85        lock( this->lock __cfaabi_dbg_ctx2 );
      86        // Interrupts are disabled inside the critical section
     87        $thread * thrd = kernelTLS.this_thread;
     88
     89        __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
     90
     91        if( !this->owner ) {
     92                // No one has the monitor, just take it
     93                __set_owner( this, thrd );
     94
     95                __cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
     96        }
     97        else if( this->owner == thrd) {
     98                // We already have the monitor, just note how many times we took it
     99                this->recursion += 1;
     100
     101                __cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
     102        }
     103        else if( is_accepted( this, group) ) {
      104                // Someone was waiting for us, enter
     105                __set_owner( this, thrd );
     106
     107                // Reset mask
     108                reset_mask( this );
     109
     110                __cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
     111        }
     112        else {
     113                __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
     114
      115                // Someone else has the monitor, wait in line for it
     116                /* paranoid */ verify( thrd->next == 0p );
     117                append( this->entry_queue, thrd );
     118                /* paranoid */ verify( thrd->next == 1p );
     119
     120                unlock( this->lock );
     121                park();
    128122
    129123                __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
    130124
    131                 // Release the lock and leave
     125                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     126                return;
     127        }
     128
     129        __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
     130
     131        /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     132        /* paranoid */ verify( this->lock.lock );
     133
     134        // Release the lock and leave
     135        unlock( this->lock );
     136        return;
     137}
     138
     139static void __dtor_enter( $monitor * this, fptr_t func ) {
     140        // Lock the monitor spinlock
     141        lock( this->lock __cfaabi_dbg_ctx2 );
      142        // Interrupts are disabled inside the critical section
     143        $thread * thrd = kernelTLS.this_thread;
     144
     145        __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
     146
     147
     148        if( !this->owner ) {
     149                __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
     150
     151                // No one has the monitor, just take it
     152                __set_owner( this, thrd );
     153
     154                verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     155
    132156                unlock( this->lock );
    133157                return;
    134158        }
    135 
    136         static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) {
    137                 // Lock the monitor spinlock
    138                 lock( this->lock __cfaabi_dbg_ctx2 );
    139                 // Interrupts disable inside critical section
    140                 thread_desc * thrd = kernelTLS.this_thread;
    141 
    142                 __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
    143 
    144 
    145                 if( !this->owner ) {
    146                         __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
    147 
    148                         // No one has the monitor, just take it
    149                         set_owner( this, thrd );
    150 
    151                         unlock( this->lock );
    152                         return;
     159        else if( this->owner == thrd) {
      160                // We already have the monitor... but we're about to destroy it, so the nesting will fail
     161                // Abort!
     162                abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd );
     163        }
     164
     165        __lock_size_t count = 1;
     166        $monitor ** monitors = &this;
     167        __monitor_group_t group = { &this, 1, func };
     168        if( is_accepted( this, group) ) {
     169                __cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
     170
     171                // Wake the thread that is waiting for this
     172                __condition_criterion_t * urgent = pop( this->signal_stack );
     173                /* paranoid */ verify( urgent );
     174
     175                // Reset mask
     176                reset_mask( this );
     177
     178                // Create the node specific to this wait operation
     179                wait_ctx_primed( thrd, 0 )
     180
      181                // Someone else has the monitor, wait for it to finish and then run
     182                unlock( this->lock );
     183
     184                // Release the next thread
     185                /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     186                unpark( urgent->owner->waiting_thread );
     187
     188                // Park current thread waiting
     189                park();
     190
      191                // Someone was waiting for us, enter
     192                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     193        }
     194        else {
     195                __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
     196
     197                wait_ctx( thrd, 0 )
     198                this->dtor_node = &waiter;
     199
      200                // Someone else has the monitor, wait in line for it
     201                /* paranoid */ verify( thrd->next == 0p );
     202                append( this->entry_queue, thrd );
     203                /* paranoid */ verify( thrd->next == 1p );
     204                unlock( this->lock );
     205
     206                // Park current thread waiting
     207                park();
     208
     209                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     210                return;
     211        }
     212
     213        __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);
     214
     215}
     216
     217// Leave single monitor
     218void __leave( $monitor * this ) {
     219        // Lock the monitor spinlock
     220        lock( this->lock __cfaabi_dbg_ctx2 );
     221
     222        __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
     223
     224        /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     225
     226        // Leaving a recursion level, decrement the counter
     227        this->recursion -= 1;
     228
     229        // If we haven't left the last level of recursion
     230        // it means we don't need to do anything
     231        if( this->recursion != 0) {
     232                __cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
     233                unlock( this->lock );
     234                return;
     235        }
     236
     237        // Get the next thread, will be null on low contention monitor
     238        $thread * new_owner = next_thread( this );
     239
     240        // Check the new owner is consistent with who we wake-up
      241        // new_owner might be null even if someone owns the monitor, e.g. when that owner is still waiting for another monitor
     242        /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     243
     244        // We can now let other threads in safely
     245        unlock( this->lock );
     246
      247        // We need to wake up the thread
     248        /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     249        unpark( new_owner );
     250}
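
[Note] Taken together, __enter/__leave above implement a recursive monitor with a waiting queue and owner hand-off. A plain-C toy of just that core follows; it is illustrative only: it drops the external-scheduling/is_accepted path, and a condition variable replaces the entry queue plus park/unpark.

        #include <pthread.h>

        struct toy_monitor {
                pthread_mutex_t lock;                      // cf. the monitor spinlock
                pthread_cond_t  entry;                     // cf. the entry queue
                pthread_t owner;
                int owned;                                 // 0 => monitor is free
                int recursion;
        };

        // remaining fields are zero-initialized; owner is only read while owned
        #define TOY_MONITOR_INIT { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER }

        static void toy_enter( struct toy_monitor * this ) {
                pthread_mutex_lock( &this->lock );
                if ( ! this->owned ) {                     // no one has the monitor, take it
                        this->owned = 1;
                        this->owner = pthread_self();
                        this->recursion = 1;
                } else if ( pthread_equal( this->owner, pthread_self() ) ) {
                        this->recursion += 1;              // already owned, note the nesting
                } else {                                   // someone else has it, wait in line
                        while ( this->owned )
                                pthread_cond_wait( &this->entry, &this->lock );
                        this->owned = 1;
                        this->owner = pthread_self();
                        this->recursion = 1;
                }
                pthread_mutex_unlock( &this->lock );
        }

        static void toy_leave( struct toy_monitor * this ) {
                pthread_mutex_lock( &this->lock );         // assumes the caller owns the monitor
                this->recursion -= 1;
                if ( this->recursion == 0 ) {              // last level: hand off to a waiter
                        this->owned = 0;
                        pthread_cond_signal( &this->entry );
                }
                pthread_mutex_unlock( &this->lock );
        }
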
     251
     252// Leave single monitor for the last time
     253void __dtor_leave( $monitor * this ) {
     254        __cfaabi_dbg_debug_do(
     255                if( TL_GET( this_thread ) != this->owner ) {
     256                        abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner);
    153257                }
    154                 else if( this->owner == thrd) {
    155                         // We already have the monitor... but where about to destroy it so the nesting will fail
    156                         // Abort!
    157                         abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd );
     258                if( this->recursion != 1 ) {
     259                        abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
    158260                }
    159 
    160                 __lock_size_t count = 1;
    161                 monitor_desc ** monitors = &this;
    162                 __monitor_group_t group = { &this, 1, func };
    163                 if( is_accepted( this, group) ) {
    164                         __cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
    165 
    166                         // Wake the thread that is waiting for this
    167                         __condition_criterion_t * urgent = pop( this->signal_stack );
    168                         verify( urgent );
    169 
    170                         // Reset mask
    171                         reset_mask( this );
    172 
    173                         // Create the node specific to this wait operation
    174                         wait_ctx_primed( thrd, 0 )
    175 
    176                         // Some one else has the monitor, wait for him to finish and then run
    177                         BlockInternal( &this->lock, urgent->owner->waiting_thread );
    178 
    179                         // Some one was waiting for us, enter
    180                         set_owner( this, thrd );
    181                 }
    182                 else {
    183                         __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
    184 
    185                         wait_ctx( thrd, 0 )
    186                         this->dtor_node = &waiter;
    187 
    188                         // Some one else has the monitor, wait in line for it
    189                         append( this->entry_queue, thrd );
    190                         BlockInternal( &this->lock );
    191 
    192                         // BlockInternal will unlock spinlock, no need to unlock ourselves
    193                         return;
    194                 }
    195 
    196                 __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);
    197 
    198         }
    199 
    200         // Leave single monitor
    201         void __leave_monitor_desc( monitor_desc * this ) {
    202                 // Lock the monitor spinlock
    203                 lock( this->lock __cfaabi_dbg_ctx2 );
    204 
    205                 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
    206 
    207                 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    208 
    209                 // Leaving a recursion level, decrement the counter
    210                 this->recursion -= 1;
    211 
    212                 // If we haven't left the last level of recursion
    213                 // it means we don't need to do anything
    214                 if( this->recursion != 0) {
    215                         __cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
    216                         unlock( this->lock );
    217                         return;
    218                 }
    219 
    220                 // Get the next thread, will be null on low contention monitor
    221                 thread_desc * new_owner = next_thread( this );
    222 
    223                 // We can now let other threads in safely
    224                 unlock( this->lock );
    225 
    226                 //We need to wake-up the thread
    227                 WakeThread( new_owner );
    228         }
    229 
    230         // Leave single monitor for the last time
    231         void __leave_dtor_monitor_desc( monitor_desc * this ) {
    232                 __cfaabi_dbg_debug_do(
    233                         if( TL_GET( this_thread ) != this->owner ) {
    234                                 abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner);
    235                         }
    236                         if( this->recursion != 1 ) {
    237                                 abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
    238                         }
    239                 )
    240         }
    241 
     261        )
     262}
     263
     264extern "C" {
    242265        // Leave the thread monitor
    243266        // last routine called by a thread.
    244267        // Should never return
    245         void __leave_thread_monitor( thread_desc * thrd ) {
    246                 monitor_desc * this = &thrd->self_mon;
     268        void __cfactx_thrd_leave() {
     269                $thread * thrd = TL_GET( this_thread );
     270                $monitor * this = &thrd->self_mon;
    247271
    248272                // Lock the monitor now
     
    251275                disable_interrupts();
    252276
    253                 thrd->self_cor.state = Halted;
    254 
    255                 verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
     277                thrd->state = Halted;
     278
     279                /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
    256280
    257281                // Leaving a recursion level, decrement the counter
     
    263287
    264288                // Fetch the next thread, can be null
    265                 thread_desc * new_owner = next_thread( this );
    266 
    267                 // Leave the thread, this will unlock the spinlock
    268                 // Use leave thread instead of BlockInternal which is
    269                 // specialized for this case and supports null new_owner
    270                 LeaveThread( &this->lock, new_owner );
     289                $thread * new_owner = next_thread( this );
     290
     291                // Release the monitor lock
     292                unlock( this->lock );
     293
     294                // Unpark the next owner if needed
     295                /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     296                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     297                /* paranoid */ verify( ! kernelTLS.this_processor->destroyer );
     298                /* paranoid */ verify( thrd->state == Halted );
     299
     300                kernelTLS.this_processor->destroyer = new_owner;
     301
     302                // Leave the thread
     303                __leave_thread();
    271304
    272305                // Control flow should never reach here!
     
    278311static inline void enter( __monitor_group_t monitors ) {
    279312        for( __lock_size_t i = 0; i < monitors.size; i++) {
    280                 __enter_monitor_desc( monitors[i], monitors );
     313                __enter( monitors[i], monitors );
    281314        }
    282315}
     
    284317// Leave multiple monitor
    285318// relies on the monitor array being sorted
    286 static inline void leave(monitor_desc * monitors [], __lock_size_t count) {
     319static inline void leave($monitor * monitors [], __lock_size_t count) {
    287320        for( __lock_size_t i = count - 1; i >= 0; i--) {
    288                 __leave_monitor_desc( monitors[i] );
     321                __leave( monitors[i] );
    289322        }
    290323}
     
    292325// Ctor for monitor guard
    293326// Sorts monitors before entering
    294 void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) {
    295         thread_desc * thrd = TL_GET( this_thread );
     327void ?{}( monitor_guard_t & this, $monitor * m [], __lock_size_t count, fptr_t func ) {
     328        $thread * thrd = TL_GET( this_thread );
    296329
    297330        // Store current array
     
    333366// Ctor for monitor guard
    334367// Sorts monitors before entering
    335 void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) {
     368void ?{}( monitor_dtor_guard_t & this, $monitor * m [], fptr_t func ) {
    336369        // optimization
    337         thread_desc * thrd = TL_GET( this_thread );
     370        $thread * thrd = TL_GET( this_thread );
    338371
    339372        // Store current array
     
    346379        (thrd->monitors){m, 1, func};
    347380
    348         __enter_monitor_dtor( this.m, func );
     381        __dtor_enter( this.m, func );
    349382}
    350383
     
    352385void ^?{}( monitor_dtor_guard_t & this ) {
    353386        // Leave the monitors in order
    354         __leave_dtor_monitor_desc( this.m );
     387        __dtor_leave( this.m );
    355388
    356389        // Restore thread context
     
    360393//-----------------------------------------------------------------------------
    361394// Internal scheduling types
    362 void ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
     395void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
    363396        this.waiting_thread = waiting_thread;
    364397        this.count = count;
    365         this.next = NULL;
     398        this.next = 0p;
    366399        this.user_info = user_info;
    367400}
     
    369402void ?{}(__condition_criterion_t & this ) with( this ) {
    370403        ready  = false;
    371         target = NULL;
    372         owner  = NULL;
    373         next   = NULL;
    374 }
    375 
    376 void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t & owner ) {
     404        target = 0p;
     405        owner  = 0p;
     406        next   = 0p;
     407}
     408
     409void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t & owner ) {
    377410        this.ready  = false;
    378411        this.target = target;
    379412        this.owner  = &owner;
    380         this.next   = NULL;
     413        this.next   = 0p;
    381414}
    382415
     
    387420
    388421        // Check that everything is as expected
    389         assertf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );
     422        assertf( this.monitors != 0p, "Waiting with no monitors (%p)", this.monitors );
    390423        verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count );
    391424        verifyf( this.monitor_count < 32u, "Excessive monitor count (%"PRIiFAST16")", this.monitor_count );
     
    399432        // Append the current wait operation to the ones already queued on the condition
    400433        // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
     434        /* paranoid */ verify( waiter.next == 0p );
    401435        append( this.blocked, &waiter );
     436        /* paranoid */ verify( waiter.next == 1p );
    402437
    403438        // Lock all monitors (aggregates the locks as well)
     
    406441        // Find the next thread(s) to run
    407442        __lock_size_t thread_count = 0;
    408         thread_desc * threads[ count ];
     443        $thread * threads[ count ];
    409444        __builtin_memset( threads, 0, sizeof( threads ) );
    410445
     
    414449        // Remove any duplicate threads
    415450        for( __lock_size_t i = 0; i < count; i++) {
    416                 thread_desc * new_owner = next_thread( monitors[i] );
     451                $thread * new_owner = next_thread( monitors[i] );
    417452                insert_unique( threads, thread_count, new_owner );
    418453        }
    419454
      455        // Unlock the locks; we don't need them anymore
     456        for(int i = 0; i < count; i++) {
     457                unlock( *locks[i] );
     458        }
     459
     460        // Wake the threads
     461        for(int i = 0; i < thread_count; i++) {
     462                unpark( threads[i] );
     463        }
     464
    420465        // Everything is ready to go to sleep
    421         BlockInternal( locks, count, threads, thread_count );
     466        park();
    422467
    423468        // We are back, restore the owners and recursions
     
    434479        //Some more checking in debug
    435480        __cfaabi_dbg_debug_do(
    436                 thread_desc * this_thrd = TL_GET( this_thread );
     481                $thread * this_thrd = TL_GET( this_thread );
    437482                if ( this.monitor_count != this_thrd->monitors.size ) {
    438483                        abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size );
     
    449494
    450495        // Lock all monitors
    451         lock_all( this.monitors, NULL, count );
     496        lock_all( this.monitors, 0p, count );
    452497
    453498        //Pop the head of the waiting queue
     
    471516
    472517        //Check that everything is as expected
    473         verifyf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );
     518        verifyf( this.monitors != 0p, "Waiting with no monitors (%p)", this.monitors );
    474519        verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count );
    475520
     
    488533
    489534        //Find the thread to run
    490         thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
    491         set_owner( monitors, count, signallee );
     535        $thread * signallee = pop_head( this.blocked )->waiting_thread;
     536        /* paranoid */ verify( signallee->next == 0p );
     537        __set_owner( monitors, count, signallee );
    492538
    493539        __cfaabi_dbg_print_buffer_decl( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee );
    494540
     541        // unlock all the monitors
     542        unlock_all( locks, count );
     543
     544        // unpark the thread we signalled
     545        unpark( signallee );
     546
    495547        //Everything is ready to go to sleep
    496         BlockInternal( locks, count, &signallee, 1 );
     548        park();
    497549
    498550
     
    535587        // Create one!
    536588        __lock_size_t max = count_max( mask );
    537         monitor_desc * mon_storage[max];
     589        $monitor * mon_storage[max];
    538590        __builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
    539591        __lock_size_t actual_count = aggregate( mon_storage, mask );
     
    553605        {
     554606                // Check if the entry queue contains a thread that can be accepted
    555                 thread_desc * next; int index;
     607                $thread * next; int index;
    556608                [next, index] = search_entry_queue( mask, monitors, count );
    557609
     
    563615                                verifyf( accepted.size == 1,  "ERROR: Accepted dtor has more than 1 mutex parameter." );
    564616
    565                                 monitor_desc * mon2dtor = accepted[0];
     617                                $monitor * mon2dtor = accepted[0];
    566618                                verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." );
    567619
     
    589641
    590642                                // Set the owners to be the next thread
    591                                 set_owner( monitors, count, next );
    592 
    593                                 // Everything is ready to go to sleep
    594                                 BlockInternal( locks, count, &next, 1 );
     643                                __set_owner( monitors, count, next );
     644
     645                                // unlock all the monitors
     646                                unlock_all( locks, count );
     647
     648                                // unpark the thread we signalled
     649                                unpark( next );
     650
     651                                //Everything is ready to go to sleep
     652                                park();
    595653
    596654                                // We are back, restore the owners and recursions
     
    630688        }
    631689
     690        // unlock all the monitors
     691        unlock_all( locks, count );
     692
    632693        //Everything is ready to go to sleep
    633         BlockInternal( locks, count );
     694        park();
    634695
    635696
     
    648709// Utilities
    649710
    650 static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
    651         // __cfaabi_dbg_print_safe( "Kernal :   Setting owner of %p to %p ( was %p)\n", this, owner, this->owner );
     711static inline void __set_owner( $monitor * this, $thread * owner ) {
     712        /* paranoid */ verify( this->lock.lock );
    652713
    653714        //Pass the monitor appropriately
     
    658719}
    659720
    660 static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
    661         monitors[0]->owner     = owner;
    662         monitors[0]->recursion = 1;
     721static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread * owner ) {
     722        /* paranoid */ verify ( monitors[0]->lock.lock );
     723        /* paranoid */ verifyf( monitors[0]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[0]->owner, monitors[0]->recursion, monitors[0] );
     724        monitors[0]->owner        = owner;
     725        monitors[0]->recursion    = 1;
    663726        for( __lock_size_t i = 1; i < count; i++ ) {
    664                 monitors[i]->owner     = owner;
    665                 monitors[i]->recursion = 0;
    666         }
    667 }
    668 
    669 static inline void set_mask( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
     727                /* paranoid */ verify ( monitors[i]->lock.lock );
     728                /* paranoid */ verifyf( monitors[i]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[i]->owner, monitors[i]->recursion, monitors[i] );
     729                monitors[i]->owner        = owner;
     730                monitors[i]->recursion    = 0;
     731        }
     732}
     733
     734static inline void set_mask( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
    670735        for( __lock_size_t i = 0; i < count; i++) {
    671736                storage[i]->mask = mask;
     
    673738}
    674739
    675 static inline void reset_mask( monitor_desc * this ) {
    676         this->mask.accepted = NULL;
    677         this->mask.data = NULL;
     740static inline void reset_mask( $monitor * this ) {
     741        this->mask.accepted = 0p;
     742        this->mask.data = 0p;
    678743        this->mask.size = 0;
    679744}
    680745
    681 static inline thread_desc * next_thread( monitor_desc * this ) {
     746static inline $thread * next_thread( $monitor * this ) {
    682747        //Check the signaller stack
    683748        __cfaabi_dbg_print_safe( "Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
     
     687752                // regardless of whether we are ready to baton-pass,
     688753                // we need to mark the monitor as in use
    689                 set_owner( this,  urgent->owner->waiting_thread );
     754                /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     755                __set_owner( this,  urgent->owner->waiting_thread );
    690756
    691757                return check_condition( urgent );
     
    694760        // No signaller thread
    695761        // Get the next thread in the entry_queue
    696         thread_desc * new_owner = pop_head( this->entry_queue );
    697         set_owner( this, new_owner );
     762        $thread * new_owner = pop_head( this->entry_queue );
     763        /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     764        /* paranoid */ verify( !new_owner || new_owner->next == 0p );
     765        __set_owner( this, new_owner );
    698766
    699767        return new_owner;
    700768}
    701769
    702 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
     770static inline bool is_accepted( $monitor * this, const __monitor_group_t & group ) {
    703771        __acceptable_t * it = this->mask.data; // Optim
    704772        __lock_size_t count = this->mask.size;
     
    722790}
    723791
    724 static inline void init( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
     792static inline void init( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
    725793        for( __lock_size_t i = 0; i < count; i++) {
    726794                (criteria[i]){ monitors[i], waiter };
     
    730798}
    731799
    732 static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
     800static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
    733801        for( __lock_size_t i = 0; i < count; i++) {
    734802                (criteria[i]){ monitors[i], waiter };
     
    746814}
    747815
    748 static inline void lock_all( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
     816static inline void lock_all( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
    749817        for( __lock_size_t i = 0; i < count; i++ ) {
    750818                __spinlock_t * l = &source[i]->lock;
     
    760828}
    761829
    762 static inline void unlock_all( monitor_desc * locks [], __lock_size_t count ) {
     830static inline void unlock_all( $monitor * locks [], __lock_size_t count ) {
    763831        for( __lock_size_t i = 0; i < count; i++ ) {
    764832                unlock( locks[i]->lock );
     
    767835
    768836static inline void save(
    769         monitor_desc * ctx [],
     837        $monitor * ctx [],
    770838        __lock_size_t count,
    771839        __attribute((unused)) __spinlock_t * locks [],
     
    780848
    781849static inline void restore(
    782         monitor_desc * ctx [],
     850        $monitor * ctx [],
    783851        __lock_size_t count,
    784852        __spinlock_t * locks [],
     
    798866// 2 - Checks if all the monitors are ready to run
    799867//     if so return the thread to run
    800 static inline thread_desc * check_condition( __condition_criterion_t * target ) {
     868static inline $thread * check_condition( __condition_criterion_t * target ) {
    801869        __condition_node_t * node = target->owner;
    802870        unsigned short count = node->count;
     
    816884        }
    817885
    818         __cfaabi_dbg_print_safe( "Kernel :  Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : NULL );
    819         return ready2run ? node->waiting_thread : NULL;
     886        __cfaabi_dbg_print_safe( "Kernel :  Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : 0p );
     887        return ready2run ? node->waiting_thread : 0p;
    820888}
    821889
    822890static inline void brand_condition( condition & this ) {
    823         thread_desc * thrd = TL_GET( this_thread );
     891        $thread * thrd = TL_GET( this_thread );
    824892        if( !this.monitors ) {
    825893                // __cfaabi_dbg_print_safe( "Branding\n" );
    826                 assertf( thrd->monitors.data != NULL, "No current monitor to brand condition %p", thrd->monitors.data );
     894                assertf( thrd->monitors.data != 0p, "No current monitor to brand condition %p", thrd->monitors.data );
    827895                this.monitor_count = thrd->monitors.size;
    828896
    829                 this.monitors = (monitor_desc **)malloc( this.monitor_count * sizeof( *this.monitors ) );
     897                this.monitors = ($monitor **)malloc( this.monitor_count * sizeof( *this.monitors ) );
    830898                for( int i = 0; i < this.monitor_count; i++ ) {
    831899                        this.monitors[i] = thrd->monitors[i];
     
    834902}
    835903
    836 static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc * monitors [], __lock_size_t count ) {
    837 
    838         __queue_t(thread_desc) & entry_queue = monitors[0]->entry_queue;
     904static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t & mask, $monitor * monitors [], __lock_size_t count ) {
     905
     906        __queue_t($thread) & entry_queue = monitors[0]->entry_queue;
    839907
    840908        // For each thread in the entry-queue
    841         for(    thread_desc ** thrd_it = &entry_queue.head;
    842                 *thrd_it;
     909        for(    $thread ** thrd_it = &entry_queue.head;
     910                *thrd_it != 1p;
    843911                thrd_it = &(*thrd_it)->next
    844912        ) {
     
    883951}
    884952
    885 static inline __lock_size_t aggregate( monitor_desc * storage [], const __waitfor_mask_t & mask ) {
     953static inline __lock_size_t aggregate( $monitor * storage [], const __waitfor_mask_t & mask ) {
    886954        __lock_size_t size = 0;
    887955        for( __lock_size_t i = 0; i < mask.size; i++ ) {
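The rewritten internal-scheduling path above splits the old combined BlockInternal( locks, count, threads, thread_count ) into explicit steps: unlock every monitor, unpark the signalled threads, then park the current thread. A minimal sketch of the user-level semantics these primitives implement (the monitor M, its ready field, and the helpers below are hypothetical illustrations, not code from this changeset):

	monitor M { int ready; };
	condition c;

	void consume( M & mutex m ) {
		if ( ! m.ready ) wait( c );       // release m and park; m is re-acquired before wait returns
		m.ready -= 1;
	}

	void produce( M & mutex m ) {
		m.ready += 1;
		signal( c );                      // the waiter is unpark'ed once the signaller releases m
	}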
  • libcfa/src/concurrency/monitor.hfa

    r9fb8f01 r3d5701e  
    1010// Created On       : Thd Feb 23 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sat Oct  7 18:06:45 2017
    13 // Update Count     : 10
     12// Last Modified On : Wed Dec  4 07:55:32 2019
     13// Update Count     : 11
    1414//
    1515
     
    2323
    2424trait is_monitor(dtype T) {
    25         monitor_desc * get_monitor( T & );
     25        $monitor * get_monitor( T & );
    2626        void ^?{}( T & mutex );
    2727};
    2828
    29 static inline void ?{}(monitor_desc & this) with( this ) {
     29static inline void ?{}($monitor & this) with( this ) {
    3030        lock{};
    3131        entry_queue{};
    3232        signal_stack{};
    33         owner         = NULL;
     33        owner         = 0p;
    3434        recursion     = 0;
    35         mask.accepted = NULL;
    36         mask.data     = NULL;
     35        mask.accepted = 0p;
     36        mask.data     = 0p;
    3737        mask.size     = 0;
    38         dtor_node     = NULL;
     38        dtor_node     = 0p;
    3939}
    4040
     41static inline void ^?{}($monitor & ) {}
     42
    4143struct monitor_guard_t {
    42         monitor_desc **         m;
     44        $monitor **     m;
    4345        __lock_size_t           count;
    4446        __monitor_group_t prev;
    4547};
    4648
    47 void ?{}( monitor_guard_t & this, monitor_desc ** m, __lock_size_t count, void (*func)() );
     49void ?{}( monitor_guard_t & this, $monitor ** m, __lock_size_t count, void (*func)() );
    4850void ^?{}( monitor_guard_t & this );
    4951
    5052struct monitor_dtor_guard_t {
    51         monitor_desc *    m;
     53        $monitor *    m;
    5254        __monitor_group_t prev;
    5355};
    5456
    55 void ?{}( monitor_dtor_guard_t & this, monitor_desc ** m, void (*func)() );
     57void ?{}( monitor_dtor_guard_t & this, $monitor ** m, void (*func)() );
    5658void ^?{}( monitor_dtor_guard_t & this );
    5759
     
    7072
    7173        // The monitor this criterion concerns
    72         monitor_desc * target;
     74        $monitor * target;
    7375
    7476        // The parent node to which this criterion belongs
     
    8587struct __condition_node_t {
    8688        // Thread that needs to be woken when all criteria are met
    87         thread_desc * waiting_thread;
     89        $thread * waiting_thread;
    8890
    8991        // Array of criteria (Criterions are contiguous in memory)
     
    104106}
    105107
    106 void ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info );
     108void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info );
    107109void ?{}(__condition_criterion_t & this );
    108 void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t * owner );
     110void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t * owner );
    109111
    110112struct condition {
     
    113115
    114116        // Array of monitor pointers (Monitors are NOT contiguous in memory)
    115         monitor_desc ** monitors;
     117        $monitor ** monitors;
    116118
    117119        // Number of monitors in the array
     
    120122
    121123static inline void ?{}( condition & this ) {
    122         this.monitors = NULL;
     124        this.monitors = 0p;
    123125        this.monitor_count = 0;
    124126}
     
    131133              bool signal      ( condition & this );
    132134              bool signal_block( condition & this );
    133 static inline bool is_empty    ( condition & this ) { return !this.blocked.head; }
     135static inline bool is_empty    ( condition & this ) { return this.blocked.head == 1p; }
    134136         uintptr_t front       ( condition & this );
    135137
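With monitor_desc renamed to $monitor, a type satisfies is_monitor by exposing get_monitor and a mutex destructor. A minimal hand-written sketch (the Counter type is hypothetical; the thread and monitor keywords normally generate this boilerplate):

	struct Counter {
		$monitor m;                               // embedded monitor state
		int value;
	};
	$monitor * get_monitor( Counter & this ) { return &this.m; }
	void ^?{}( Counter & mutex this ) {}              // mutex dtor required by the trait

	void inc( Counter & mutex this ) {                // mutex parameter acquires this.m on entry
		this.value += 1;
	}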
  • libcfa/src/concurrency/mutex.cfa

    r9fb8f01 r3d5701e  
    1111// Author           : Thierry Delisle
    1212// Created On       : Fri May 25 01:37:11 2018
    13 // Last Modified By : Thierry Delisle
    14 // Last Modified On : Fri May 25 01:37:51 2018
    15 // Update Count     : 0
     13// Last Modified By : Peter A. Buhr
     14// Last Modified On : Wed Dec  4 09:16:39 2019
     15// Update Count     : 1
    1616//
    1717
     
    4040        if( is_locked ) {
    4141                append( blocked_threads, kernelTLS.this_thread );
    42                 BlockInternal( &lock );
     42                unlock( lock );
     43                park();
    4344        }
    4445        else {
     
    6263        lock( this.lock __cfaabi_dbg_ctx2 );
    6364        this.is_locked = (this.blocked_threads != 0);
    64         WakeThread(
     65        unpark(
    6566                pop_head( this.blocked_threads )
    6667        );
     
    7374        this.lock{};
    7475        this.blocked_threads{};
    75         this.owner = NULL;
     76        this.owner = 0p;
    7677        this.recursion_count = 0;
    7778}
     
    8384void lock(recursive_mutex_lock & this) with(this) {
    8485        lock( lock __cfaabi_dbg_ctx2 );
    85         if( owner == NULL ) {
     86        if( owner == 0p ) {
    8687                owner = kernelTLS.this_thread;
    8788                recursion_count = 1;
     
    9495        else {
    9596                append( blocked_threads, kernelTLS.this_thread );
    96                 BlockInternal( &lock );
     97                unlock( lock );
     98                park();
    9799        }
    98100}
     
    101103        bool ret = false;
    102104        lock( lock __cfaabi_dbg_ctx2 );
    103         if( owner == NULL ) {
     105        if( owner == 0p ) {
    104106                owner = kernelTLS.this_thread;
    105107                recursion_count = 1;
     
    118120        recursion_count--;
    119121        if( recursion_count == 0 ) {
    120                 thread_desc * thrd = pop_head( blocked_threads );
     122                $thread * thrd = pop_head( blocked_threads );
    121123                owner = thrd;
    122124                recursion_count = (thrd ? 1 : 0);
    123                 WakeThread( thrd );
     125                unpark( thrd );
    124126        }
    125127        unlock( lock );
     
    138140void notify_one(condition_variable & this) with(this) {
    139141        lock( lock __cfaabi_dbg_ctx2 );
    140         WakeThread(
     142        unpark(
    141143                pop_head( this.blocked_threads )
    142144        );
     
    147149        lock( lock __cfaabi_dbg_ctx2 );
    148150        while(this.blocked_threads) {
    149                 WakeThread(
     151                unpark(
    150152                        pop_head( this.blocked_threads )
    151153                );
     
    157159        lock( this.lock __cfaabi_dbg_ctx2 );
    158160        append( this.blocked_threads, kernelTLS.this_thread );
    159         BlockInternal( &this.lock );
     161        unlock( this.lock );
     162        park();
    160163}
    161164
     
    164167        lock( this.lock __cfaabi_dbg_ctx2 );
    165168        append( this.blocked_threads, kernelTLS.this_thread );
    166         void __unlock(void) {
    167                 unlock(l);
    168                 unlock(this.lock);
    169         }
    170         BlockInternal( __unlock );
     169        unlock(l);
     170        unlock(this.lock);
     171        park();
    171172        lock(l);
    172173}
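Since BlockInternal is gone, wait on a condition_variable now releases the user's lock and the internal lock, parks, and re-acquires the user's lock on wakeup, while notify_one simply unparks the queue head. A hedged usage sketch (assuming the non-recursive lock type is named mutex_lock and the two-argument wait shown above):

	mutex_lock m;
	condition_variable cv;
	bool ready = false;

	void consumer() {
		lock( m );
		while ( ! ready ) wait( cv, m );  // unlock(m) + park(); lock(m) again after unpark
		unlock( m );
	}

	void producer() {
		lock( m );
		ready = true;
		notify_one( cv );                 // unpark one waiter, if any
		unlock( m );
	}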
  • libcfa/src/concurrency/mutex.hfa

    r9fb8f01 r3d5701e  
    1111// Author           : Thierry Delisle
    1212// Created On       : Fri May 25 01:24:09 2018
    13 // Last Modified By : Thierry Delisle
    14 // Last Modified On : Fri May 25 01:24:12 2018
    15 // Update Count     : 0
     13// Last Modified By : Peter A. Buhr
     14// Last Modified On : Wed Dec  4 09:16:53 2019
     15// Update Count     : 1
    1616//
    1717
     
    3636
    3737        // List of blocked threads
    38         __queue_t(struct thread_desc) blocked_threads;
     38        __queue_t(struct $thread) blocked_threads;
    3939
    4040        // Locked flag
     
    5555
    5656        // List of blocked threads
    57         __queue_t(struct thread_desc) blocked_threads;
     57        __queue_t(struct $thread) blocked_threads;
    5858
    5959        // Current thread owning the lock
    60         struct thread_desc * owner;
     60        struct $thread * owner;
    6161
    6262        // Number of recursion level
     
    8383
    8484        // List of blocked threads
    85         __queue_t(struct thread_desc) blocked_threads;
     85        __queue_t(struct $thread) blocked_threads;
    8686};
    8787
     
    110110
    111111        static inline void ?{}(lock_scope(L) & this) {
    112                 this.locks = NULL;
     112                this.locks = 0p;
    113113                this.count = 0;
    114114        }
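The recursive lock above pairs owner with recursion_count, so re-acquisition by the owning thread nests instead of blocking. A small sketch (the owner-recursion branch of lock is elided from the hunks above, so this reflects the documented intent):

	recursive_mutex_lock r;

	void inner() {
		lock( r );                        // owner is already this thread: count 1 -> 2
		unlock( r );                      // count 2 -> 1, lock still held
	}

	void outer() {
		lock( r );                        // acquire: owner set, count 0 -> 1
		inner();
		unlock( r );                      // count 1 -> 0, next blocked thread (if any) unpark'ed
	}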
  • libcfa/src/concurrency/preemption.cfa

    r9fb8f01 r3d5701e  
    1010// Created On       : Mon Jun 5 14:20:42 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue Jun  5 17:35:49 2018
    13 // Update Count     : 37
     12// Last Modified On : Thu Dec  5 16:34:05 2019
     13// Update Count     : 43
    1414//
    1515
     
    2424#include <string.h>
    2525#include <unistd.h>
     26#include <limits.h>                                                                             // PTHREAD_STACK_MIN
    2627}
    2728
     
    3839// FwdDeclarations : timeout handlers
    3940static void preempt( processor   * this );
    40 static void timeout( thread_desc * this );
     41static void timeout( $thread * this );
    4142
    4243// FwdDeclarations : Signal handlers
     
    6465event_kernel_t * event_kernel;                        // kernel public handle to even kernel
    6566static pthread_t alarm_thread;                        // pthread handle to alarm thread
     67static void * alarm_stack;                                                        // pthread stack for alarm thread
    6668
    6769static void ?{}(event_kernel_t & this) with( this ) {
     
    8183// Get next expired node
    8284static inline alarm_node_t * get_expired( alarm_list_t * alarms, Time currtime ) {
    83         if( !alarms->head ) return NULL;                          // If no alarms return null
    84         if( alarms->head->alarm >= currtime ) return NULL;        // If alarms head not expired return null
    85         return pop(alarms);                                       // Otherwise just pop head
     85        if( !alarms->head ) return 0p;                                          // If no alarms return null
     86        if( alarms->head->alarm >= currtime ) return 0p;        // If alarms head not expired return null
     87        return pop(alarms);                                                                     // Otherwise just pop head
    8688}
    8789
    8890// Tick one frame of the Discrete Event Simulation for alarms
    8991static void tick_preemption() {
    90         alarm_node_t * node = NULL;                     // Used in the while loop but cannot be declared in the while condition
    91         alarm_list_t * alarms = &event_kernel->alarms;  // Local copy for ease of reading
    92         Time currtime = __kernel_get_time();                    // Check current time once so we everything "happens at once"
     92        alarm_node_t * node = 0p;                                                       // Used in the while loop but cannot be declared in the while condition
     93        alarm_list_t * alarms = &event_kernel->alarms;          // Local copy for ease of reading
     94        Time currtime = __kernel_get_time();                            // Check current time once so everything "happens at once"
    9395
     9496        // Loop through everything expired
     
    182184
    183185        // Enable interrupts by decrementing the counter
    184         // If counter reaches 0, execute any pending CtxSwitch
     186        // If counter reaches 0, execute any pending __cfactx_switch
    185187        void enable_interrupts( __cfaabi_dbg_ctx_param ) {
    186188                processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
    187                 thread_desc * thrd = kernelTLS.this_thread;       // Cache the thread now since interrupts can start happening after the atomic store
    188189
    189190                with( kernelTLS.preemption_state ){
     
    207208                                if( proc->pending_preemption ) {
    208209                                        proc->pending_preemption = false;
    209                                         BlockInternal( thrd );
     210                                        force_yield( __POLL_PREEMPTION );
    210211                                }
    211212                        }
     
    217218
     218219        // Disable interrupts by incrementing the counter
    219         // Don't execute any pending CtxSwitch even if counter reaches 0
     220        // Don't execute any pending __cfactx_switch even if counter reaches 0
    220221        void enable_interrupts_noPoll() {
    221222                unsigned short prev = kernelTLS.preemption_state.disable_count;
     
    243244        sigaddset( &mask, sig );
    244245
    245         if ( pthread_sigmask( SIG_UNBLOCK, &mask, NULL ) == -1 ) {
     246        if ( pthread_sigmask( SIG_UNBLOCK, &mask, 0p ) == -1 ) {
    246247            abort( "internal error, pthread_sigmask" );
    247248        }
     
    254255        sigaddset( &mask, sig );
    255256
    256         if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
     257        if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
    257258            abort( "internal error, pthread_sigmask" );
    258259        }
     
    266267
    267268// reserved for future use
    268 static void timeout( thread_desc * this ) {
     269static void timeout( $thread * this ) {
    269270        //TODO : implement waking threads
    270271}
    271272
    272273// KERNEL ONLY
    273 // Check if a CtxSwitch signal handler shoud defer
      274// Check if a __cfactx_switch signal handler should defer
    274275// If true  : preemption is safe
    275276// If false : preemption is unsafe and marked as pending
     
    301302
    302303        // Setup proper signal handlers
    303         __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART );         // CtxSwitch handler
     304        __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // __cfactx_switch handler
    304305
    305306        signal_block( SIGALRM );
    306307
    307         pthread_create( &alarm_thread, NULL, alarm_loop, NULL );
     308        alarm_stack = __create_pthread( &alarm_thread, alarm_loop, 0p );
    308309}
    309310
     
    316317        sigset_t mask;
    317318        sigfillset( &mask );
    318         sigprocmask( SIG_BLOCK, &mask, NULL );
     319        sigprocmask( SIG_BLOCK, &mask, 0p );
    319320
    320321        // Notify the alarm thread of the shutdown
     
    323324
    324325        // Wait for the preemption thread to finish
    325         pthread_join( alarm_thread, NULL );
     326
     327        pthread_join( alarm_thread, 0p );
     328        free( alarm_stack );
    326329
    327330        // Preemption is now fully stopped
     
    380383        static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" );
    381384        #endif
    382         if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), NULL ) == -1 ) {
     385        if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), 0p ) == -1 ) {
     383386                abort( "internal error, pthread_sigmask" );
    384387        }
     
    390393        // Preemption can occur here
    391394
    392         BlockInternal( kernelTLS.this_thread ); // Do the actual CtxSwitch
     395        force_yield( __ALARM_PREEMPTION ); // Do the actual __cfactx_switch
    393396}
    394397
     
    399402        sigset_t mask;
    400403        sigfillset(&mask);
    401         if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
     404        if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
    402405            abort( "internal error, pthread_sigmask" );
    403406        }
     
    420423                                        {__cfaabi_dbg_print_buffer_decl( " KERNEL: Spurious wakeup %d.\n", err );}
    421424                                        continue;
    422                         case EINVAL :
     425                                case EINVAL :
    423426                                        abort( "Timeout was invalid." );
    424427                                default:
     
    453456EXIT:
    454457        __cfaabi_dbg_print_safe( "Kernel : Preemption thread stopping\n" );
    455         return NULL;
     458        return 0p;
    456459}
    457460
     
    466469        sigset_t oldset;
    467470        int ret;
    468         ret = pthread_sigmask(0, NULL, &oldset);
     471        ret = pthread_sigmask(0, 0p, &oldset);
     469472        if(ret != 0) { abort("ERROR pthread_sigmask returned %d", ret); }
    470473
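The disable/enable pair above brackets kernel code that must not be preempted; when the disable count returns to zero, any preemption that arrived in between is polled with force_yield( __POLL_PREEMPTION ) instead of the old BlockInternal. A minimal sketch of the bracket (the surrounding function is illustrative):

	static void kernel_critical_section() {
		disable_interrupts();                     // bump disable_count; SIGUSR1 handling is deferred
		// ... mutate scheduler state safely here ...
		enable_interrupts( __cfaabi_dbg_ctx );    // drop the count; a deferred preemption is
		                                          // executed here via force_yield( __POLL_PREEMPTION )
	}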
  • libcfa/src/concurrency/thread.cfa

    r9fb8f01 r3d5701e  
    1010// Created On       : Tue Jan 17 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Mar 30 17:19:52 2018
    13 // Update Count     : 8
     12// Last Modified On : Wed Dec  4 09:17:49 2019
     13// Update Count     : 9
    1414//
    1515
     
    2323#include "invoke.h"
    2424
    25 extern "C" {
    26         #include <fenv.h>
    27         #include <stddef.h>
    28 }
    29 
    30 //extern volatile thread_local processor * this_processor;
    31 
    3225//-----------------------------------------------------------------------------
    3326// Thread ctors and dtors
    34 void ?{}(thread_desc & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
    35         context{ NULL, NULL };
     27void ?{}($thread & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
     28        context{ 0p, 0p };
    3629        self_cor{ name, storage, storageSize };
    3730        state = Start;
     31        preempted = __NO_PREEMPTION;
    3832        curr_cor = &self_cor;
    3933        self_mon.owner = &this;
     
    4135        self_mon_p = &self_mon;
    4236        curr_cluster = &cl;
    43         next = NULL;
     37        next = 0p;
    4438
    45         node.next = NULL;
    46         node.prev = NULL;
     39        node.next = 0p;
     40        node.prev = 0p;
    4741        doregister(curr_cluster, this);
    4842
     
    5044}
    5145
    52 void ^?{}(thread_desc& this) with( this ) {
     46void ^?{}($thread& this) with( this ) {
    5347        unregister(curr_cluster, this);
    5448        ^self_cor{};
    5549}
    5650
     51//-----------------------------------------------------------------------------
     52// Starting and stopping threads
     53forall( dtype T | is_thread(T) )
     54void __thrd_start( T & this, void (*main_p)(T &) ) {
     55        $thread * this_thrd = get_thread(this);
     56
     57        disable_interrupts();
     58        __cfactx_start(main_p, get_coroutine(this), this, __cfactx_invoke_thread);
     59
     60        this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
     61        verify( this_thrd->context.SP );
     62
     63        __schedule_thread(this_thrd);
     64        enable_interrupts( __cfaabi_dbg_ctx );
     65}
     66
     67//-----------------------------------------------------------------------------
      68// Support for threads that don't use the thread keyword
    5769forall( dtype T | sized(T) | is_thread(T) | { void ?{}(T&); } )
    5870void ?{}( scoped(T)& this ) with( this ) {
    5971        handle{};
    60         __thrd_start(handle);
     72        __thrd_start(handle, main);
    6173}
    6274
     
    6476void ?{}( scoped(T)& this, P params ) with( this ) {
    6577        handle{ params };
    66         __thrd_start(handle);
     78        __thrd_start(handle, main);
    6779}
    6880
     
    7284}
    7385
    74 //-----------------------------------------------------------------------------
    75 // Starting and stopping threads
    76 forall( dtype T | is_thread(T) )
    77 void __thrd_start( T& this ) {
    78         thread_desc * this_thrd = get_thread(this);
    79         thread_desc * curr_thrd = TL_GET( this_thread );
    80 
    81         disable_interrupts();
    82         CtxStart(&this, CtxInvokeThread);
    83         this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
    84         verify( this_thrd->context.SP );
    85         CtxSwitch( &curr_thrd->context, &this_thrd->context );
    86 
    87         ScheduleThread(this_thrd);
    88         enable_interrupts( __cfaabi_dbg_ctx );
    89 }
    90 
    91 extern "C" {
    92         // KERNEL ONLY
    93         void __finish_creation(thread_desc * this) {
    94                 // set new coroutine that the processor is executing
    95                 // and context switch to it
    96                 verify( kernelTLS.this_thread != this );
    97                 verify( kernelTLS.this_thread->context.SP );
    98                 CtxSwitch( &this->context, &kernelTLS.this_thread->context );
    99         }
    100 }
    101 
    102 void yield( void ) {
    103         // Safety note : This could cause some false positives due to preemption
    104       verify( TL_GET( preemption_state.enabled ) );
    105         BlockInternal( TL_GET( this_thread ) );
    106         // Safety note : This could cause some false positives due to preemption
    107       verify( TL_GET( preemption_state.enabled ) );
    108 }
    109 
    110 void yield( unsigned times ) {
    111         for( unsigned i = 0; i < times; i++ ) {
    112                 yield();
    113         }
    114 }
    115 
    11686// Local Variables: //
    11787// mode: c //
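__thrd_start now receives the thread's main explicitly, hands it to __cfactx_start along with __cfactx_invoke_thread, and schedules the new thread with __schedule_thread rather than context-switching into it eagerly. User code is unchanged; a minimal sketch of a thread declared with the thread keyword (the Worker type is hypothetical):

	#include <thread.hfa>

	thread Worker {};                         // generates the is_thread boilerplate

	void main( Worker & ) {
		// body runs on its own stack once __schedule_thread dispatches it
	}

	int main() {
		Worker w;                         // ctor ends in __thrd_start( w, main )
	}                                         // implicit join when w goes out of scope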
  • libcfa/src/concurrency/thread.hfa

    r9fb8f01 r3d5701e  
    1010// Created On       : Tue Jan 17 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Jun 21 17:51:33 2019
    13 // Update Count     : 5
     12// Last Modified On : Wed Dec  4 09:18:14 2019
     13// Update Count     : 6
    1414//
    1515
     
    2828      void ^?{}(T& mutex this);
    2929      void main(T& this);
    30       thread_desc* get_thread(T& this);
     30      $thread* get_thread(T& this);
    3131};
    3232
    33 #define DECL_THREAD(X) thread_desc* get_thread(X& this) { return &this.__thrd; } void main(X& this)
      33// macro that satisfies the is_thread trait without using the thread keyword
     34#define DECL_THREAD(X) $thread* get_thread(X& this) __attribute__((const)) { return &this.__thrd; } void main(X& this)
     35
     36// Inline getters for threads/coroutines/monitors
     37forall( dtype T | is_thread(T) )
     38static inline $coroutine* get_coroutine(T & this) __attribute__((const)) { return &get_thread(this)->self_cor; }
    3439
    3540forall( dtype T | is_thread(T) )
    36 static inline coroutine_desc* get_coroutine(T & this) {
    37         return &get_thread(this)->self_cor;
    38 }
     41static inline $monitor  * get_monitor  (T & this) __attribute__((const)) { return &get_thread(this)->self_mon; }
    3942
    40 forall( dtype T | is_thread(T) )
    41 static inline monitor_desc* get_monitor(T & this) {
    42         return &get_thread(this)->self_mon;
    43 }
     43static inline $coroutine* get_coroutine($thread * this) __attribute__((const)) { return &this->self_cor; }
     44static inline $monitor  * get_monitor  ($thread * this) __attribute__((const)) { return &this->self_mon; }
    4445
    45 static inline coroutine_desc* get_coroutine(thread_desc * this) {
    46         return &this->self_cor;
    47 }
    48 
    49 static inline monitor_desc* get_monitor(thread_desc * this) {
    50         return &this->self_mon;
    51 }
    52 
     46//-----------------------------------------------------------------------------
     47// forward declarations needed for threads
    5348extern struct cluster * mainCluster;
    5449
    5550forall( dtype T | is_thread(T) )
    56 void __thrd_start( T & this );
     51void __thrd_start( T & this, void (*)(T &) );
    5752
    5853//-----------------------------------------------------------------------------
    5954// Ctors and dtors
    60 void ?{}(thread_desc & this, const char * const name, struct cluster & cl, void * storage, size_t storageSize );
    61 void ^?{}(thread_desc & this);
     55void ?{}($thread & this, const char * const name, struct cluster & cl, void * storage, size_t storageSize );
     56void ^?{}($thread & this);
    6257
    63 static inline void ?{}(thread_desc & this)                                                                  { this{ "Anonymous Thread", *mainCluster, NULL, 65000 }; }
    64 static inline void ?{}(thread_desc & this, size_t stackSize )                                               { this{ "Anonymous Thread", *mainCluster, NULL, stackSize }; }
    65 static inline void ?{}(thread_desc & this, void * storage, size_t storageSize )                             { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
    66 static inline void ?{}(thread_desc & this, struct cluster & cl )                                            { this{ "Anonymous Thread", cl, NULL, 65000 }; }
    67 static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize )                          { this{ "Anonymous Thread", cl, NULL, stackSize }; }
    68 static inline void ?{}(thread_desc & this, struct cluster & cl, void * storage, size_t storageSize )        { this{ "Anonymous Thread", cl, storage, storageSize }; }
    69 static inline void ?{}(thread_desc & this, const char * const name)                                         { this{ name, *mainCluster, NULL, 65000 }; }
    70 static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl )                   { this{ name, cl, NULL, 65000 }; }
    71 static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, NULL, stackSize }; }
     58static inline void ?{}($thread & this)                                                                  { this{ "Anonymous Thread", *mainCluster, 0p, 65000 }; }
     59static inline void ?{}($thread & this, size_t stackSize )                                               { this{ "Anonymous Thread", *mainCluster, 0p, stackSize }; }
     60static inline void ?{}($thread & this, void * storage, size_t storageSize )                             { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
     61static inline void ?{}($thread & this, struct cluster & cl )                                            { this{ "Anonymous Thread", cl, 0p, 65000 }; }
     62static inline void ?{}($thread & this, struct cluster & cl, size_t stackSize )                          { this{ "Anonymous Thread", cl, 0p, stackSize }; }
     63static inline void ?{}($thread & this, struct cluster & cl, void * storage, size_t storageSize )        { this{ "Anonymous Thread", cl, storage, storageSize }; }
     64static inline void ?{}($thread & this, const char * const name)                                         { this{ name, *mainCluster, 0p, 65000 }; }
     65static inline void ?{}($thread & this, const char * const name, struct cluster & cl )                   { this{ name, cl, 0p, 65000 }; }
     66static inline void ?{}($thread & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; }
    7267
    7368//-----------------------------------------------------------------------------
     
    8883void ^?{}( scoped(T)& this );
    8984
    90 void yield();
    91 void yield( unsigned times );
     85//-----------------------------------------------------------------------------
     86// Thread getters
     87static inline struct $thread * active_thread () { return TL_GET( this_thread ); }
    9288
    93 static inline struct thread_desc * active_thread () { return TL_GET( this_thread ); }
     89//-----------------------------------------------------------------------------
     90// Scheduler API
     91
     92//----------
      93// Park thread: block until the corresponding call to unpark; does not block if unpark was already called
     94void park( void );
     95
     96//----------
      97// Unpark a thread: if the thread is already blocked, schedule it;
      98//                  if the thread is not yet blocked, signal that it should rerun immediately
     99void unpark( $thread * this );
     100
     101forall( dtype T | is_thread(T) )
     102static inline void unpark( T & this ) { if(!&this) return; unpark( get_thread( this ) );}
     103
     104//----------
     105// Yield: force thread to block and be rescheduled
     106bool force_yield( enum __Preemption_Reason );
     107
     108static inline void yield() {
     109        force_yield(__MANUAL_PREEMPTION);
     110}
     111
     112// Yield: yield N times
     113static inline void yield( unsigned times ) {
     114        for( times ) {
     115                yield();
     116        }
     117}
    94118
    95119// Local Variables: //
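park and unpark act as a one-shot permit per thread: an unpark delivered before the matching park prevents that park from blocking, as the comments above state. A small sketch of the pre-arming property (callable from any thread context):

	void prearm_demo() {
		unpark( active_thread() );        // arm this thread's own permit before blocking
		park();                           // consumes the permit and returns immediately
	}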