Changeset 04e6f93 for libcfa


Timestamp:
Feb 27, 2020, 4:04:25 PM
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
a037f85
Parents:
41efd33 (diff), 930b504 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Location:
libcfa/src
Files:
26 edited

Legend:

Unchanged lines are shown without a prefix; removed lines are prefixed with '-', added lines with '+'; '…' marks elided unchanged code.
  • libcfa/src/bits/containers.hfa (r41efd33 → r04e6f93)

     static inline forall( dtype T | is_node(T) ) {
             void ?{}( __queue(T) & this ) with( this ) {
    -                head{ 0p };
    +                head{ 1p };
                     tail{ &head };
    +                verify(*tail == 1p);
             }

             void append( __queue(T) & this, T * val ) with( this ) {
                     verify(tail != 0p);
    +                verify(*tail == 1p);
                     *tail = val;
                     tail = &get_next( *val );
    +                *tail = 1p;
             }

             T * pop_head( __queue(T) & this ) {
    +                verify(*this.tail == 1p);
                     T * head = this.head;
    -                if( head ) {
    +                if( head != 1p ) {
                             this.head = get_next( *head );
    -                        if( !get_next( *head ) ) {
    +                        if( get_next( *head ) == 1p ) {
                                     this.tail = &this.head;
                             }
                             get_next( *head ) = 0p;
    -                }
    -                return head;
    +                        verify(*this.tail == 1p);
    +                        return head;
    +                }
    +                verify(*this.tail == 1p);
    +                return 0p;
             }

    …
                     get_next( *val ) = 0p;

    -                verify( (head == 0p) == (&head == tail) );
    -                verify( *tail == 0p );
    +                verify( (head == 1p) == (&head == tail) );
    +                verify( *tail == 1p );
                     return val;
             }
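
    This intrusive queue previously used 0p (null) both as the end-of-list marker and as the "not in any queue" marker, so the two states were indistinguishable for the tail node. The change introduces 1p as a distinct end-of-queue sentinel, so a null link now always means "not queued" (the new force_yield in kernel.cfa relies on exactly that test). A minimal C sketch of the idea; the names are illustrative, not the CFA API:

        #include <assert.h>
        #include <stddef.h>

        struct node { struct node * next; };

        /* plays the role of CFA's 1p: "last element of a queue" */
        #define END ((struct node *)1)

        struct queue { struct node * head; struct node ** tail; };

        static void queue_init( struct queue * q ) {
                q->head = END;          /* empty queue: head is the sentinel */
                q->tail = &q->head;
        }

        static void append( struct queue * q, struct node * n ) {
                assert( *q->tail == END );
                *q->tail = n;
                q->tail  = &n->next;
                *q->tail = END;         /* tail node carries the sentinel */
        }

        static struct node * pop_head( struct queue * q ) {
                struct node * h = q->head;
                if( h == END ) return NULL;                  /* empty */
                q->head = h->next;
                if( h->next == END ) q->tail = &q->head;     /* became empty */
                h->next = NULL;                              /* "not queued" */
                return h;
        }

    With this scheme, any code can test n->next != NULL to ask "is this node in some queue?", which a null-terminated list cannot answer for its tail element.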
  • libcfa/src/bits/locks.hfa (r41efd33 → r04e6f93)

     }

    -        extern void yield( unsigned int );
    -
             static inline void ?{}( __spinlock_t & this ) {
                     this.lock = 0;
    …
             // Lock the spinlock, return false if already acquired
             static inline bool try_lock  ( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) {
    +                disable_interrupts();
                     bool result = (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0);
                     if( result ) {
    -                        disable_interrupts();
                             __cfaabi_dbg_record( this, caller );
    +                } else {
    +                        enable_interrupts_noPoll();
                     }
                     return result;
    …
                     #endif

    +                disable_interrupts();
                     for ( unsigned int i = 1;; i += 1 ) {
                             if ( (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0) ) break;
    …
                             #endif
                     }
    -                disable_interrupts();
                     __cfaabi_dbg_record( this, caller );
             }

             static inline void unlock( __spinlock_t & this ) {
    +                __atomic_clear( &this.lock, __ATOMIC_RELEASE );
                     enable_interrupts_noPoll();
    -                __atomic_clear( &this.lock, __ATOMIC_RELEASE );
             }
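
    The reordering here is deliberate: interrupts are now disabled before attempting to acquire the lock (a preemption while spinning on, or holding, a lock that the preemption path itself needs could deadlock), re-enabled on a failed try_lock, and re-enabled only after the lock word is cleared in unlock. A C11 sketch of that discipline, with stubbed-out toggles standing in for the runtime's interrupt control:

        #include <stdatomic.h>
        #include <stdbool.h>

        /* stubs standing in for the runtime's preemption toggles */
        static void disable_interrupts(void) {}
        static void enable_interrupts_noPoll(void) {}

        typedef struct { atomic_flag lock; } spinlock;

        static bool try_lock( spinlock * this ) {
                disable_interrupts();                     /* before touching the lock */
                bool result = !atomic_flag_test_and_set_explicit( &this->lock,
                                                                  memory_order_acquire );
                if( !result ) enable_interrupts_noPoll(); /* failed: allow preemption again */
                return result;                            /* success: stay non-preemptible */
        }

        static void lock( spinlock * this ) {
                disable_interrupts();
                while( atomic_flag_test_and_set_explicit( &this->lock,
                                                          memory_order_acquire ) ) {}
        }

        static void unlock( spinlock * this ) {
                atomic_flag_clear_explicit( &this->lock, memory_order_release );
                enable_interrupts_noPoll();               /* release first, THEN re-enable */
        }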
  • libcfa/src/concurrency/CtxSwitch-arm.S (r41efd33 → r04e6f93)

             .text
             .align  2
    -        .global CtxSwitch
    -        .type   CtxSwitch, %function
    +        .global __cfactx_switch
    +        .type   __cfactx_switch, %function

    -CtxSwitch:
    +__cfactx_switch:
             @ save callee-saved registers: r4-r8, r10, r11, r13(sp) (plus r9 depending on platform specification)
             @ I've seen reference to 31 registers on 64-bit, if this is the case, more need to be saved
    …
             mov r15, r14
             #endif // R9_SPECIAL

             .text
             .align  2
    -        .global CtxInvokeStub
    -        .type   CtxInvokeStub, %function
    +        .global __cfactx_invoke_stub
    +        .type   __cfactx_invoke_stub, %function

    -CtxInvokeStub:
    +__cfactx_invoke_stub:
             ldmfd r13!, {r0-r1}
             mov r15, r1
  • libcfa/src/concurrency/CtxSwitch-i386.S (r41efd33 → r04e6f93)

             .text
             .align 2
    -        .globl CtxSwitch
    -        .type  CtxSwitch, @function
    -CtxSwitch:
    +        .globl __cfactx_switch
    +        .type  __cfactx_switch, @function
    +__cfactx_switch:

             // Copy the "from" context argument from the stack to register eax
    …

             ret
    -        .size  CtxSwitch, .-CtxSwitch
    +        .size  __cfactx_switch, .-__cfactx_switch

     // Local Variables: //
  • libcfa/src/concurrency/CtxSwitch-x86_64.S (r41efd33 → r04e6f93)

             .text
             .align 2
    -        .globl CtxSwitch
    -        .type  CtxSwitch, @function
    -CtxSwitch:
    +        .globl __cfactx_switch
    +        .type  __cfactx_switch, @function
    +__cfactx_switch:

             // Save volatile registers on the stack.
    …

             ret
    -        .size  CtxSwitch, .-CtxSwitch
    +        .size  __cfactx_switch, .-__cfactx_switch

     //-----------------------------------------------------------------------------
    …
             .text
             .align 2
    -        .globl CtxInvokeStub
    -        .type    CtxInvokeStub, @function
    -CtxInvokeStub:
    +        .globl __cfactx_invoke_stub
    +        .type    __cfactx_invoke_stub, @function
    +__cfactx_invoke_stub:
             movq %rbx, %rdi
             movq %r12, %rsi
             jmp *%r13
    -        .size  CtxInvokeStub, .-CtxInvokeStub
    +        .size  __cfactx_invoke_stub, .-__cfactx_invoke_stub

     // Local Variables: //
  • libcfa/src/concurrency/alarm.cfa (r41efd33 → r04e6f93)

     //=============================================================================================

    -void ?{}( alarm_node_t & this, thread_desc * thrd, Time alarm, Duration period ) with( this ) {
    +void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period ) with( this ) {
             this.thrd = thrd;
             this.alarm = alarm;
  • libcfa/src/concurrency/alarm.hfa (r41efd33 → r04e6f93)

     #include "time.hfa"

    -struct thread_desc;
    +struct $thread;
     struct processor;

    …
             union {
    -                thread_desc * thrd;     // thrd who created event
    +                $thread * thrd; // thrd who created event
                     processor * proc;               // proc who created event
             };
    …
     typedef alarm_node_t ** __alarm_it_t;

    -void ?{}( alarm_node_t & this, thread_desc * thrd, Time alarm, Duration period );
    +void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period );
     void ?{}( alarm_node_t & this, processor   * proc, Time alarm, Duration period );
     void ^?{}( alarm_node_t & this );
  • libcfa/src/concurrency/coroutine.cfa (r41efd33 → r04e6f93)

     extern "C" {
    -        void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
    +        void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct $coroutine *) __attribute__ ((__noreturn__));
             static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
             static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
    …
     }

    -void ?{}( coroutine_desc & this, const char name[], void * storage, size_t storageSize ) with( this ) {
    +void ?{}( $coroutine & this, const char name[], void * storage, size_t storageSize ) with( this ) {
             (this.context){0p, 0p};
             (this.stack){storage, storageSize};
    …
     }

    -void ^?{}(coroutine_desc& this) {
    +void ^?{}($coroutine& this) {
             if(this.state != Halted && this.state != Start && this.state != Primed) {
    -                coroutine_desc * src = TL_GET( this_thread )->curr_cor;
    -                coroutine_desc * dst = &this;
    +                $coroutine * src = TL_GET( this_thread )->curr_cor;
    +                $coroutine * dst = &this;

                     struct _Unwind_Exception storage;
    …
                     }

    -                CoroutineCtxSwitch( src, dst );
    +                $ctx_switch( src, dst );
             }
     }
    …
     forall(dtype T | is_coroutine(T))
     void prime(T& cor) {
    -        coroutine_desc* this = get_coroutine(cor);
    +        $coroutine* this = get_coroutine(cor);
             assert(this->state == Start);

    …
     // is not inline (We can't inline Cforall in C)
     extern "C" {
    -        void __leave_coroutine( struct coroutine_desc * src ) {
    -                coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter;
    +        void __cfactx_cor_leave( struct $coroutine * src ) {
    +                $coroutine * starter = src->cancellation != 0 ? src->last : src->starter;

                     src->state = Halted;
    …
                             src->name, src, starter->name, starter );

    -                CoroutineCtxSwitch( src, starter );
    -        }
    -
    -        struct coroutine_desc * __finish_coroutine(void) {
    -                struct coroutine_desc * cor = kernelTLS.this_thread->curr_cor;
    +                $ctx_switch( src, starter );
    +        }
    +
    +        struct $coroutine * __cfactx_cor_finish(void) {
    +                struct $coroutine * cor = kernelTLS.this_thread->curr_cor;

                     if(cor->state == Primed) {
  • libcfa/src/concurrency/coroutine.hfa (r41efd33 → r04e6f93)

     trait is_coroutine(dtype T) {
           void main(T & this);
    -      coroutine_desc * get_coroutine(T & this);
    +      $coroutine * get_coroutine(T & this);
     };

    -#define DECL_COROUTINE(X) static inline coroutine_desc* get_coroutine(X& this) { return &this.__cor; } void main(X& this)
    +#define DECL_COROUTINE(X) static inline $coroutine* get_coroutine(X& this) { return &this.__cor; } void main(X& this)

     //-----------------------------------------------------------------------------
    …
     // void ^?{}( coStack_t & this );

    -void ?{}( coroutine_desc & this, const char name[], void * storage, size_t storageSize );
    -void ^?{}( coroutine_desc & this );
    +void  ?{}( $coroutine & this, const char name[], void * storage, size_t storageSize );
    +void ^?{}( $coroutine & this );

    -static inline void ?{}( coroutine_desc & this)                                       { this{ "Anonymous Coroutine", 0p, 0 }; }
    -static inline void ?{}( coroutine_desc & this, size_t stackSize)                     { this{ "Anonymous Coroutine", 0p, stackSize }; }
    -static inline void ?{}( coroutine_desc & this, void * storage, size_t storageSize )  { this{ "Anonymous Coroutine", storage, storageSize }; }
    -static inline void ?{}( coroutine_desc & this, const char name[])                    { this{ name, 0p, 0 }; }
    -static inline void ?{}( coroutine_desc & this, const char name[], size_t stackSize ) { this{ name, 0p, stackSize }; }
    +static inline void ?{}( $coroutine & this)                                       { this{ "Anonymous Coroutine", 0p, 0 }; }
    +static inline void ?{}( $coroutine & this, size_t stackSize)                     { this{ "Anonymous Coroutine", 0p, stackSize }; }
    +static inline void ?{}( $coroutine & this, void * storage, size_t storageSize )  { this{ "Anonymous Coroutine", storage, storageSize }; }
    +static inline void ?{}( $coroutine & this, const char name[])                    { this{ name, 0p, 0 }; }
    +static inline void ?{}( $coroutine & this, const char name[], size_t stackSize ) { this{ name, 0p, stackSize }; }

     //-----------------------------------------------------------------------------
    …
     void prime(T & cor);

    -static inline struct coroutine_desc * active_coroutine() { return TL_GET( this_thread )->curr_cor; }
    +static inline struct $coroutine * active_coroutine() { return TL_GET( this_thread )->curr_cor; }

     //-----------------------------------------------------------------------------
    …
     // Start coroutine routines
     extern "C" {
    -        void CtxInvokeCoroutine(void (*main)(void *), void * this);
    +        void __cfactx_invoke_coroutine(void (*main)(void *), void * this);

             forall(dtype T)
    -        void CtxStart(void (*main)(T &), struct coroutine_desc * cor, T & this, void (*invoke)(void (*main)(void *), void *));
    +        void __cfactx_start(void (*main)(T &), struct $coroutine * cor, T & this, void (*invoke)(void (*main)(void *), void *));

    -        extern void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
    +        extern void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine *) __attribute__ ((__noreturn__));

    -        extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
    +        extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch");
     }

     // Private wrappers for context switch and stack creation
     // Wrapper for co
    -static inline void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
    +static inline void $ctx_switch( $coroutine * src, $coroutine * dst ) __attribute__((nonnull (1, 2))) {
             // set state of current coroutine to inactive
             src->state = src->state == Halted ? Halted : Inactive;
    …
             // context switch to specified coroutine
             verify( dst->context.SP );
    -        CtxSwitch( &src->context, &dst->context );
    -        // when CtxSwitch returns we are back in the src coroutine
    +        __cfactx_switch( &src->context, &dst->context );
    +        // when __cfactx_switch returns we are back in the src coroutine

             // set state of new coroutine to active
    …

             if( unlikely(src->cancellation != 0p) ) {
    -                _CtxCoroutine_Unwind(src->cancellation, src);
    +                __cfactx_coroutine_unwind(src->cancellation, src);
             }
     }
    …
             // will also migrate which means this value will
             // stay in sync with the TLS
    -        coroutine_desc * src = TL_GET( this_thread )->curr_cor;
    +        $coroutine * src = TL_GET( this_thread )->curr_cor;

             assertf( src->last != 0,
    …
                     src->name, src, src->last->name, src->last );

    -        CoroutineCtxSwitch( src, src->last );
    +        $ctx_switch( src, src->last );
     }

    …
             // will also migrate which means this value will
             // stay in sync with the TLS
    -        coroutine_desc * src = TL_GET( this_thread )->curr_cor;
    -        coroutine_desc * dst = get_coroutine(cor);
    +        $coroutine * src = TL_GET( this_thread )->curr_cor;
    +        $coroutine * dst = get_coroutine(cor);

             if( unlikely(dst->context.SP == 0p) ) {
                     TL_GET( this_thread )->curr_cor = dst;
                     __stack_prepare(&dst->stack, 65000);
    -                CtxStart(main, dst, cor, CtxInvokeCoroutine);
    +                __cfactx_start(main, dst, cor, __cfactx_invoke_coroutine);
                     TL_GET( this_thread )->curr_cor = src;
             }
    …

             // always done for performance testing
    -        CoroutineCtxSwitch( src, dst );
    +        $ctx_switch( src, dst );

             return cor;
     }

    -static inline void resume(coroutine_desc * dst) {
    +static inline void resume( $coroutine * dst ) __attribute__((nonnull (1))) {
             // optimization : read TLS once and reuse it
             // Safety note: this is preemption safe since if
    …
             // will also migrate which means this value will
             // stay in sync with the TLS
    -        coroutine_desc * src = TL_GET( this_thread )->curr_cor;
    +        $coroutine * src = TL_GET( this_thread )->curr_cor;

             // not resuming self ?
    …

             // always done for performance testing
    -        CoroutineCtxSwitch( src, dst );
    +        $ctx_switch( src, dst );
     }

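    $ctx_switch (formerly CoroutineCtxSwitch) is a thin bookkeeping wrapper around the raw assembly switch: mark the source coroutine Inactive (unless it is Halted), switch, and on the way back mark it Active and check whether a cancellation was posted while it was suspended. A rough C analogue of that structure using ucontext, purely illustrative and not the CFA implementation:

        #include <ucontext.h>

        enum state { Inactive, Active, Halted };

        struct coro {
                ucontext_t ctx;
                enum state state;
                void * cancellation;   /* non-null: an unwind was posted while away */
        };

        static void ctx_switch( struct coro * src, struct coro * dst ) {
                src->state = (src->state == Halted) ? Halted : Inactive;
                swapcontext( &src->ctx, &dst->ctx );   /* raw context switch */
                /* execution resumes here when control comes back to src */
                src->state = Active;
                if( src->cancellation ) {
                        /* the real runtime calls __cfactx_coroutine_unwind here */
                }
        }
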
  • libcfa/src/concurrency/invoke.c (r41efd33 → r04e6f93)

     // Called from the kernel when starting a coroutine or task so must switch back to user mode.

    -extern void __leave_coroutine ( struct coroutine_desc * );
    -extern struct coroutine_desc * __finish_coroutine(void);
    -extern void __leave_thread_monitor();
    +extern struct $coroutine * __cfactx_cor_finish(void);
    +extern void __cfactx_cor_leave ( struct $coroutine * );
    +extern void __cfactx_thrd_leave();
    +
     extern void disable_interrupts() OPTIONAL_THREAD;
     extern void enable_interrupts( __cfaabi_dbg_ctx_param );

    -void CtxInvokeCoroutine(
    +void __cfactx_invoke_coroutine(
             void (*main)(void *),
             void *this
     ) {
             // Finish setting up the coroutine by setting its state
    -        struct coroutine_desc * cor = __finish_coroutine();
    +        struct $coroutine * cor = __cfactx_cor_finish();

             // Call the main of the coroutine
    …

             //Final suspend, should never return
    -        __leave_coroutine( cor );
    +        __cfactx_cor_leave( cor );
             __cabi_abort( "Resumed dead coroutine" );
     }

    -static _Unwind_Reason_Code _CtxCoroutine_UnwindStop(
    +static _Unwind_Reason_Code __cfactx_coroutine_unwindstop(
             __attribute((__unused__)) int version,
             _Unwind_Action actions,
    …
                     // We finished unwinding the coroutine,
                     // leave it
    -                __leave_coroutine( param );
    +                __cfactx_cor_leave( param );
                     __cabi_abort( "Resumed dead coroutine" );
             }
    …
     }

    -void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) __attribute__ ((__noreturn__));
    -void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) {
    -        _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, _CtxCoroutine_UnwindStop, cor );
    +void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine * cor) __attribute__ ((__noreturn__));
    +void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine * cor) {
    +        _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor );
             printf("UNWIND ERROR %d after force unwind\n", ret);
             abort();
     }

    -void CtxInvokeThread(
    +void __cfactx_invoke_thread(
             void (*main)(void *),
             void *this
    …
             // The order of these 4 operations is very important
             //Final suspend, should never return
    -        __leave_thread_monitor();
    +        __cfactx_thrd_leave();
             __cabi_abort( "Resumed dead thread" );
     }

    -void CtxStart(
    +void __cfactx_start(
             void (*main)(void *),
    -        struct coroutine_desc * cor,
    +        struct $coroutine * cor,
             void *this,
             void (*invoke)(void *)
    …

             fs->dummyReturn = NULL;
    -        fs->rturn = CtxInvokeStub;
    +        fs->rturn = __cfactx_invoke_stub;
             fs->fixedRegisters[0] = main;
             fs->fixedRegisters[1] = this;
    …
             struct FakeStack *fs = (struct FakeStack *)cor->context.SP;

    -        fs->intRegs[8] = CtxInvokeStub;
    +        fs->intRegs[8] = __cfactx_invoke_stub;
             fs->arg[0] = this;
             fs->arg[1] = invoke;
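
    __cfactx_start and __cfactx_invoke_stub together implement a classic trampoline: a fresh stack is seeded by hand (the FakeStack fields above) so that the first switch onto it "returns" into the invoke function, which then calls the coroutine's main. A portable C analogue of the same idea, using makecontext instead of a hand-built frame; the globals stand in for the seeded registers and all names are illustrative:

        #include <stdio.h>
        #include <ucontext.h>

        static ucontext_t kernel_ctx, coro_ctx;
        static char coro_stack[64 * 1024];

        /* stand-ins for what fixedRegisters[]/arg[] carry in the real code */
        static void (*g_main)(void *);
        static void * g_arg;

        static void invoke_coroutine(void) {
                g_main( g_arg );                          /* run the coroutine body */
                swapcontext( &coro_ctx, &kernel_ctx );    /* final suspend */
        }

        static void body( void * arg ) { printf( "coroutine says: %s\n", (char *)arg ); }

        int main(void) {
                getcontext( &coro_ctx );
                coro_ctx.uc_stack.ss_sp   = coro_stack;
                coro_ctx.uc_stack.ss_size = sizeof coro_stack;
                coro_ctx.uc_link          = &kernel_ctx;
                g_main = body;
                g_arg  = "hello";
                makecontext( &coro_ctx, invoke_coroutine, 0 );  /* seed the fresh stack */
                swapcontext( &kernel_ctx, &coro_ctx );          /* first resume */
                return 0;
        }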
  • libcfa/src/concurrency/invoke.h (r41efd33 → r04e6f93)

     extern "Cforall" {
             extern __attribute__((aligned(128))) thread_local struct KernelThreadData {
    -                struct thread_desc    * volatile this_thread;
    +                struct $thread    * volatile this_thread;
                     struct processor      * volatile this_processor;

    …
     };

    -enum coroutine_state { Halted, Start, Inactive, Active, Primed };
    -
    -struct coroutine_desc {
    -        // context that is switched during a CtxSwitch
    +enum coroutine_state { Halted, Start, Primed, Inactive, Active, Rerun };
    +enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION, __MANUAL_PREEMPTION };
    +
    +struct $coroutine {
    +        // context that is switched during a __cfactx_switch
             struct __stack_context_t context;

    …

             // first coroutine to resume this one
    -        struct coroutine_desc * starter;
    +        struct $coroutine * starter;

             // last coroutine to resume this one
    -        struct coroutine_desc * last;
    +        struct $coroutine * last;

             // If non-null stack must be unwound with this exception
    …
     };

    -struct monitor_desc {
    +struct $monitor {
             // spinlock to protect internal data
             struct __spinlock_t lock;

             // current owner of the monitor
    -        struct thread_desc * owner;
    +        struct $thread * owner;

             // queue of threads that are blocked waiting for the monitor
    -        __queue_t(struct thread_desc) entry_queue;
    +        __queue_t(struct $thread) entry_queue;

             // stack of conditions to run next once we exit the monitor
    …
     struct __monitor_group_t {
             // currently held monitors
    -        __cfa_anonymous_object( __small_array_t(monitor_desc*) );
    +        __cfa_anonymous_object( __small_array_t($monitor*) );

             // last function that acquired monitors
    …
     };

    -struct thread_desc {
    +struct $thread {
             // Core threading fields
    -        // context that is switched during a CtxSwitch
    +        // context that is switched during a __cfactx_switch
             struct __stack_context_t context;

             // current execution status for coroutine
    -        enum coroutine_state state;
    +        volatile int state;
    +        enum __Preemption_Reason preempted;

             //SKULLDUGGERY errno is not saved in the thread data structure because returnToKernel appears to be the only function to require saving and restoring it

             // coroutine body used to store context
    -        struct coroutine_desc  self_cor;
    +        struct $coroutine  self_cor;

             // current active context
    -        struct coroutine_desc * curr_cor;
    +        struct $coroutine * curr_cor;

             // monitor body used for mutual exclusion
    -        struct monitor_desc    self_mon;
    +        struct $monitor    self_mon;

             // pointer to monitor with sufficient lifetime for current monitors
    -        struct monitor_desc *  self_mon_p;
    +        struct $monitor *  self_mon_p;

             // pointer to the cluster on which the thread is running
    …
             // Link lists fields
             // intrusive link field for threads
    -        struct thread_desc * next;
    +        struct $thread * next;

             struct {
    -                struct thread_desc * next;
    -                struct thread_desc * prev;
    +                struct $thread * next;
    +                struct $thread * prev;
             } node;
     };
    …
     #ifdef __cforall
     extern "Cforall" {
    -        static inline thread_desc *& get_next( thread_desc & this ) {
    +        static inline $thread *& get_next( $thread & this ) __attribute__((const)) {
                     return this.next;
             }

    -        static inline [thread_desc *&, thread_desc *& ] __get( thread_desc & this ) {
    +        static inline [$thread *&, $thread *& ] __get( $thread & this ) __attribute__((const)) {
                     return this.node.[next, prev];
             }
    …
             }

    -        static inline void ?{}(__monitor_group_t & this, struct monitor_desc ** data, __lock_size_t size, fptr_t func) {
    +        static inline void ?{}(__monitor_group_t & this, struct $monitor ** data, __lock_size_t size, fptr_t func) {
                     (this.data){data};
                     (this.size){size};
    …
             }

    -        static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {
    +        static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) __attribute__((const)) {
                     if( (lhs.data != 0) != (rhs.data != 0) ) return false;
                     if( lhs.size != rhs.size ) return false;
    …

     // assembler routines that perform the context switch
    -extern void CtxInvokeStub( void );
    -extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
    +extern void __cfactx_invoke_stub( void );
    +extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch");
     // void CtxStore ( void * this ) asm ("CtxStore");
     // void CtxRet   ( void * dst  ) asm ("CtxRet");
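
    Two representation changes here matter beyond the renames: the state enum gains Rerun plus a separate preempted reason, and $thread.state becomes a volatile int rather than the enum, because kernel.cfa now updates it with GCC's __atomic builtins, which operate on integer objects of a known width (hence the static_assert in __run_thread). A small C sketch of the assumption being made; thread_stub is an illustrative stand-in, not the real $thread:

        #include <assert.h>

        enum coroutine_state { Halted, Start, Primed, Inactive, Active, Rerun };

        struct thread_stub { volatile int state; };

        int swap_state( struct thread_stub * t, enum coroutine_state next ) {
                /* the exchange is only well-defined if the field is int-sized */
                static_assert( sizeof(t->state) == sizeof(int), "atomic exchange width" );
                return __atomic_exchange_n( &t->state, next, __ATOMIC_SEQ_CST );
        }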
  • libcfa/src/concurrency/kernel.cfa

    r41efd33 r04e6f93  
    110110//-----------------------------------------------------------------------------
    111111//Start and stop routine for the kernel, declared first to make sure they run first
    112 static void kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
    113 static void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
     112static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
     113static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
    114114
    115115//-----------------------------------------------------------------------------
     
    117117KERNEL_STORAGE(cluster,         mainCluster);
    118118KERNEL_STORAGE(processor,       mainProcessor);
    119 KERNEL_STORAGE(thread_desc,     mainThread);
     119KERNEL_STORAGE($thread, mainThread);
    120120KERNEL_STORAGE(__stack_t,       mainThreadCtx);
    121121
    122122cluster     * mainCluster;
    123123processor   * mainProcessor;
    124 thread_desc * mainThread;
     124$thread * mainThread;
    125125
    126126extern "C" {
     
    164164// Main thread construction
    165165
    166 void ?{}( coroutine_desc & this, current_stack_info_t * info) with( this ) {
     166void ?{}( $coroutine & this, current_stack_info_t * info) with( this ) {
    167167        stack.storage = info->storage;
    168168        with(*stack.storage) {
     
    179179}
    180180
    181 void ?{}( thread_desc & this, current_stack_info_t * info) with( this ) {
     181void ?{}( $thread & this, current_stack_info_t * info) with( this ) {
    182182        state = Start;
    183183        self_cor{ info };
     
    208208}
    209209
    210 static void start(processor * this);
     210static void * __invoke_processor(void * arg);
     211
    211212void ?{}(processor & this, const char name[], cluster & cltr) with( this ) {
    212213        this.name = name;
    213214        this.cltr = &cltr;
    214215        terminated{ 0 };
     216        destroyer = 0p;
    215217        do_terminate = false;
    216218        preemption_alarm = 0p;
     
    220222        idleLock{};
    221223
    222         start( &this );
     224        __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this);
     225
     226        this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );
     227
     228        __cfaabi_dbg_print_safe("Kernel : core %p started\n", &this);
    223229}
    224230
     
    258264// Kernel Scheduling logic
    259265//=============================================================================================
    260 static void runThread(processor * this, thread_desc * dst);
    261 static void finishRunning(processor * this);
    262 static void halt(processor * this);
     266static $thread * __next_thread(cluster * this);
     267static void __run_thread(processor * this, $thread * dst);
     268static void __halt(processor * this);
    263269
    264270//Main of the processor contexts
     
    281287                __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);
    282288
    283                 thread_desc * readyThread = 0p;
     289                $thread * readyThread = 0p;
    284290                for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) {
    285                         readyThread = nextThread( this->cltr );
     291                        readyThread = __next_thread( this->cltr );
    286292
    287293                        if(readyThread) {
    288                                 verify( ! kernelTLS.preemption_state.enabled );
    289 
    290                                 runThread(this, readyThread);
    291 
    292                                 verify( ! kernelTLS.preemption_state.enabled );
    293 
    294                                 //Some actions need to be taken from the kernel
    295                                 finishRunning(this);
     294                                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     295                                /* paranoid */ verifyf( readyThread->state == Inactive || readyThread->state == Start || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted);
     296                                /* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );
     297
     298                                __run_thread(this, readyThread);
     299
     300                                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    296301
    297302                                spin_count = 0;
    298303                        } else {
    299304                                // spin(this, &spin_count);
    300                                 halt(this);
     305                                __halt(this);
    301306                        }
    302307                }
     
    318323// runThread runs a thread by context switching
    319324// from the processor coroutine to the target thread
    320 static void runThread(processor * this, thread_desc * thrd_dst) {
    321         coroutine_desc * proc_cor = get_coroutine(this->runner);
    322 
    323         // Reset the terminating actions here
    324         this->finish.action_code = No_Action;
     325static void __run_thread(processor * this, $thread * thrd_dst) {
     326        $coroutine * proc_cor = get_coroutine(this->runner);
    325327
    326328        // Update global state
    327329        kernelTLS.this_thread = thrd_dst;
    328330
    329         // set state of processor coroutine to inactive and the thread to active
    330         proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
    331         thrd_dst->state = Active;
    332 
    333         // set context switch to the thread that the processor is executing
    334         verify( thrd_dst->context.SP );
    335         CtxSwitch( &proc_cor->context, &thrd_dst->context );
    336         // when CtxSwitch returns we are back in the processor coroutine
    337 
    338         // set state of processor coroutine to active and the thread to inactive
    339         thrd_dst->state = thrd_dst->state == Halted ? Halted : Inactive;
     331        // set state of processor coroutine to inactive
     332        verify(proc_cor->state == Active);
     333        proc_cor->state = Inactive;
     334
     335        // Actually run the thread
     336        RUNNING:  while(true) {
     337                if(unlikely(thrd_dst->preempted)) {
     338                        thrd_dst->preempted = __NO_PREEMPTION;
     339                        verify(thrd_dst->state == Active || thrd_dst->state == Rerun);
     340                } else {
     341                        verify(thrd_dst->state == Start || thrd_dst->state == Primed || thrd_dst->state == Inactive);
     342                        thrd_dst->state = Active;
     343                }
     344
     345                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     346
     347                // set context switch to the thread that the processor is executing
     348                verify( thrd_dst->context.SP );
     349                __cfactx_switch( &proc_cor->context, &thrd_dst->context );
     350                // when __cfactx_switch returns we are back in the processor coroutine
     351
     352                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     353
     354
     355                // We just finished running a thread, there are a few things that could have happened.
     356                // 1 - Regular case : the thread has blocked and now one has scheduled it yet.
     357                // 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
     358                // 3 - Polite Racy case : the thread has blocked, someone has already tried to schedule it, but the thread is nice and wants to go through the ready-queue any way
     359                // 4 - Preempted
     360                // In case 1, we may have won a race so we can't write to the state again.
     361                // In case 2, we lost the race so we now own the thread.
     362                // In case 3, we lost the race but can just reschedule the thread.
     363
     364                if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
     365                        // The thread was preempted, reschedule it and reset the flag
     366                        __schedule_thread( thrd_dst );
     367                        break RUNNING;
     368                }
     369
     370                // set state of processor coroutine to active and the thread to inactive
     371                static_assert(sizeof(thrd_dst->state) == sizeof(int));
     372                enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Inactive, __ATOMIC_SEQ_CST);
     373                switch(old_state) {
     374                        case Halted:
     375                                // The thread has halted, it should never be scheduled/run again, leave it back to Halted and move on
     376                                thrd_dst->state = Halted;
     377
     378                                // We may need to wake someone up here since
     379                                unpark( this->destroyer );
     380                                this->destroyer = 0p;
     381                                break RUNNING;
     382                        case Active:
     383                                // This is case 1, the regular case, nothing more is needed
     384                                break RUNNING;
     385                        case Rerun:
     386                                // This is case 2, the racy case, someone tried to run this thread before it finished blocking
     387                                // In this case, just run it again.
     388                                continue RUNNING;
     389                        default:
     390                                // This makes no sense, something is wrong abort
     391                                abort("Finished running a thread that was Inactive/Start/Primed %d\n", old_state);
     392                }
     393        }
     394
     395        // Just before returning to the processor, set the processor coroutine to active
    340396        proc_cor->state = Active;
    341397}
    342398
    343399// KERNEL_ONLY
    344 static void returnToKernel() {
    345         coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
    346         thread_desc * thrd_src = kernelTLS.this_thread;
    347 
    348         // set state of current coroutine to inactive
    349         thrd_src->state = thrd_src->state == Halted ? Halted : Inactive;
    350         proc_cor->state = Active;
    351         int local_errno = *__volatile_errno();
    352         #if defined( __i386 ) || defined( __x86_64 )
    353                 __x87_store;
    354         #endif
    355 
    356         // set new coroutine that the processor is executing
    357         // and context switch to it
    358         verify( proc_cor->context.SP );
    359         CtxSwitch( &thrd_src->context, &proc_cor->context );
    360 
    361         // set state of new coroutine to active
    362         proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
    363         thrd_src->state = Active;
    364 
    365         #if defined( __i386 ) || defined( __x86_64 )
    366                 __x87_load;
    367         #endif
    368         *__volatile_errno() = local_errno;
    369 }
    370 
    371 // KERNEL_ONLY
    372 // Once a thread has finished running, some of
    373 // its final actions must be executed from the kernel
    374 static void finishRunning(processor * this) with( this->finish ) {
    375         verify( ! kernelTLS.preemption_state.enabled );
    376         choose( action_code ) {
    377         case No_Action:
    378                 break;
    379         case Release:
    380                 unlock( *lock );
    381         case Schedule:
    382                 ScheduleThread( thrd );
    383         case Release_Schedule:
    384                 unlock( *lock );
    385                 ScheduleThread( thrd );
    386         case Release_Multi:
    387                 for(int i = 0; i < lock_count; i++) {
    388                         unlock( *locks[i] );
    389                 }
    390         case Release_Multi_Schedule:
    391                 for(int i = 0; i < lock_count; i++) {
    392                         unlock( *locks[i] );
    393                 }
    394                 for(int i = 0; i < thrd_count; i++) {
    395                         ScheduleThread( thrds[i] );
    396                 }
    397         case Callback:
    398                 callback();
    399         default:
    400                 abort("KERNEL ERROR: Unexpected action to run after thread");
    401         }
     400void returnToKernel() {
     401        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     402        $coroutine * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
     403        $thread * thrd_src = kernelTLS.this_thread;
     404
     405        // Run the thread on this processor
     406        {
     407                int local_errno = *__volatile_errno();
     408                #if defined( __i386 ) || defined( __x86_64 )
     409                        __x87_store;
     410                #endif
     411                verify( proc_cor->context.SP );
     412                __cfactx_switch( &thrd_src->context, &proc_cor->context );
     413                #if defined( __i386 ) || defined( __x86_64 )
     414                        __x87_load;
     415                #endif
     416                *__volatile_errno() = local_errno;
     417        }
     418
     419        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    402420}
    403421
     
    406424// This is the entry point for processors (kernel threads)
    407425// It effectively constructs a coroutine by stealing the pthread stack
    408 static void * CtxInvokeProcessor(void * arg) {
     426static void * __invoke_processor(void * arg) {
    409427        processor * proc = (processor *) arg;
    410428        kernelTLS.this_processor = proc;
     
    447465} // Abort
    448466
    449 void * create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
     467void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
    450468        pthread_attr_t attr;
    451469
     
    475493}
    476494
    477 static void start(processor * this) {
    478         __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this);
    479 
    480         this->stack = create_pthread( &this->kernel_thread, CtxInvokeProcessor, (void *)this );
    481 
    482         __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);
    483 }
    484 
    485495// KERNEL_ONLY
    486 void kernel_first_resume( processor * this ) {
    487         thread_desc * src = mainThread;
    488         coroutine_desc * dst = get_coroutine(this->runner);
     496static void __kernel_first_resume( processor * this ) {
     497        $thread * src = mainThread;
     498        $coroutine * dst = get_coroutine(this->runner);
    489499
    490500        verify( ! kernelTLS.preemption_state.enabled );
     
    492502        kernelTLS.this_thread->curr_cor = dst;
    493503        __stack_prepare( &dst->stack, 65000 );
    494         CtxStart(main, dst, this->runner, CtxInvokeCoroutine);
     504        __cfactx_start(main, dst, this->runner, __cfactx_invoke_coroutine);
    495505
    496506        verify( ! kernelTLS.preemption_state.enabled );
     
    504514        // context switch to specified coroutine
    505515        verify( dst->context.SP );
    506         CtxSwitch( &src->context, &dst->context );
    507         // when CtxSwitch returns we are back in the src coroutine
     516        __cfactx_switch( &src->context, &dst->context );
     517        // when __cfactx_switch returns we are back in the src coroutine
    508518
    509519        mainThread->curr_cor = &mainThread->self_cor;
     
    516526
    517527// KERNEL_ONLY
    518 void kernel_last_resume( processor * this ) {
    519         coroutine_desc * src = &mainThread->self_cor;
    520         coroutine_desc * dst = get_coroutine(this->runner);
     528static void __kernel_last_resume( processor * this ) {
     529        $coroutine * src = &mainThread->self_cor;
     530        $coroutine * dst = get_coroutine(this->runner);
    521531
    522532        verify( ! kernelTLS.preemption_state.enabled );
     
    525535
    526536        // context switch to the processor
    527         CtxSwitch( &src->context, &dst->context );
     537        __cfactx_switch( &src->context, &dst->context );
    528538}
    529539
    530540//-----------------------------------------------------------------------------
    531541// Scheduler routines
    532 
    533542// KERNEL ONLY
    534 void ScheduleThread( thread_desc * thrd ) {
    535         verify( thrd );
    536         verify( thrd->state != Halted );
    537 
    538         verify( ! kernelTLS.preemption_state.enabled );
    539 
    540         verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );
    541 
    542         with( *thrd->curr_cluster ) {
    543                 lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
    544                 bool was_empty = !(ready_queue != 0);
    545                 append( ready_queue, thrd );
    546                 unlock( ready_queue_lock );
    547 
    548                 if(was_empty) {
    549                         lock      (proc_list_lock __cfaabi_dbg_ctx2);
    550                         if(idles) {
    551                                 wake_fast(idles.head);
    552                         }
    553                         unlock    (proc_list_lock);
     543void __schedule_thread( $thread * thrd ) with( *thrd->curr_cluster ) {
     544        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     545        /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
     546        /* paranoid */ if( thrd->state == Inactive || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
     547                          "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
     548        /* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun,
     549                          "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
     550        /* paranoid */ #endif
     551        /* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );
     552
     553        lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
     554        bool was_empty = !(ready_queue != 0);
     555        append( ready_queue, thrd );
     556        unlock( ready_queue_lock );
     557
     558        if(was_empty) {
     559                lock      (proc_list_lock __cfaabi_dbg_ctx2);
     560                if(idles) {
     561                        wake_fast(idles.head);
    554562                }
    555                 else if( struct processor * idle = idles.head ) {
    556                         wake_fast(idle);
    557                 }
    558 
    559         }
    560 
    561         verify( ! kernelTLS.preemption_state.enabled );
     563                unlock    (proc_list_lock);
     564        }
     565        else if( struct processor * idle = idles.head ) {
     566                wake_fast(idle);
     567        }
     568
     569        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    562570}
    563571
    564572// KERNEL ONLY
    565 thread_desc * nextThread(cluster * this) with( *this ) {
    566         verify( ! kernelTLS.preemption_state.enabled );
     573static $thread * __next_thread(cluster * this) with( *this ) {
     574        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     575
    567576        lock( ready_queue_lock __cfaabi_dbg_ctx2 );
    568         thread_desc * head = pop_head( ready_queue );
     577        $thread * head = pop_head( ready_queue );
    569578        unlock( ready_queue_lock );
    570         verify( ! kernelTLS.preemption_state.enabled );
     579
     580        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    571581        return head;
    572582}
    573583
    574 void BlockInternal() {
     584void unpark( $thread * thrd ) {
     585        if( !thrd ) return;
     586
    575587        disable_interrupts();
    576         verify( ! kernelTLS.preemption_state.enabled );
     588        static_assert(sizeof(thrd->state) == sizeof(int));
     589        enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, Rerun, __ATOMIC_SEQ_CST);
     590        switch(old_state) {
     591                case Active:
     592                        // Wake won the race, the thread will reschedule/rerun itself
     593                        break;
     594                case Inactive:
     595                        /* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
     596
     597                        // Wake lost the race,
     598                        thrd->state = Inactive;
     599                        __schedule_thread( thrd );
     600                        break;
     601                case Rerun:
     602                        abort("More than one thread attempted to schedule thread %p\n", thrd);
     603                        break;
     604                case Halted:
     605                case Start:
     606                case Primed:
     607                default:
     608                        // This makes no sense, something is wrong abort
     609                        abort();
     610        }
     611        enable_interrupts( __cfaabi_dbg_ctx );
     612}
     613
     614void park( void ) {
     615        /* paranoid */ verify( kernelTLS.preemption_state.enabled );
     616        disable_interrupts();
     617        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     618        /* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION );
     619
    577620        returnToKernel();
    578         verify( ! kernelTLS.preemption_state.enabled );
     621
     622        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    579623        enable_interrupts( __cfaabi_dbg_ctx );
    580 }
    581 
    582 void BlockInternal( __spinlock_t * lock ) {
     624        /* paranoid */ verify( kernelTLS.preemption_state.enabled );
     625
     626}
     627
     628// KERNEL ONLY
     629void __leave_thread() {
     630        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     631        returnToKernel();
     632        abort();
     633}
     634
     635// KERNEL ONLY
     636bool force_yield( __Preemption_Reason reason ) {
     637        /* paranoid */ verify( kernelTLS.preemption_state.enabled );
    583638        disable_interrupts();
    584         with( *kernelTLS.this_processor ) {
    585                 finish.action_code = Release;
    586                 finish.lock        = lock;
    587         }
    588 
    589         verify( ! kernelTLS.preemption_state.enabled );
    590         returnToKernel();
    591         verify( ! kernelTLS.preemption_state.enabled );
    592 
    593         enable_interrupts( __cfaabi_dbg_ctx );
    594 }
    595 
    596 void BlockInternal( thread_desc * thrd ) {
    597         disable_interrupts();
    598         with( * kernelTLS.this_processor ) {
    599                 finish.action_code = Schedule;
    600                 finish.thrd        = thrd;
    601         }
    602 
    603         verify( ! kernelTLS.preemption_state.enabled );
    604         returnToKernel();
    605         verify( ! kernelTLS.preemption_state.enabled );
    606 
    607         enable_interrupts( __cfaabi_dbg_ctx );
    608 }
    609 
    610 void BlockInternal( __spinlock_t * lock, thread_desc * thrd ) {
    611         assert(thrd);
    612         disable_interrupts();
    613         with( * kernelTLS.this_processor ) {
    614                 finish.action_code = Release_Schedule;
    615                 finish.lock        = lock;
    616                 finish.thrd        = thrd;
    617         }
    618 
    619         verify( ! kernelTLS.preemption_state.enabled );
    620         returnToKernel();
    621         verify( ! kernelTLS.preemption_state.enabled );
    622 
    623         enable_interrupts( __cfaabi_dbg_ctx );
    624 }
    625 
    626 void BlockInternal(__spinlock_t * locks [], unsigned short count) {
    627         disable_interrupts();
    628         with( * kernelTLS.this_processor ) {
    629                 finish.action_code = Release_Multi;
    630                 finish.locks       = locks;
    631                 finish.lock_count  = count;
    632         }
    633 
    634         verify( ! kernelTLS.preemption_state.enabled );
    635         returnToKernel();
    636         verify( ! kernelTLS.preemption_state.enabled );
    637 
    638         enable_interrupts( __cfaabi_dbg_ctx );
    639 }
    640 
    641 void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
    642         disable_interrupts();
    643         with( *kernelTLS.this_processor ) {
    644                 finish.action_code = Release_Multi_Schedule;
    645                 finish.locks       = locks;
    646                 finish.lock_count  = lock_count;
    647                 finish.thrds       = thrds;
    648                 finish.thrd_count  = thrd_count;
    649         }
    650 
    651         verify( ! kernelTLS.preemption_state.enabled );
    652         returnToKernel();
    653         verify( ! kernelTLS.preemption_state.enabled );
    654 
    655         enable_interrupts( __cfaabi_dbg_ctx );
    656 }
    657 
    658 void BlockInternal(__finish_callback_fptr_t callback) {
    659         disable_interrupts();
    660         with( *kernelTLS.this_processor ) {
    661                 finish.action_code = Callback;
    662                 finish.callback    = callback;
    663         }
    664 
    665         verify( ! kernelTLS.preemption_state.enabled );
    666         returnToKernel();
    667         verify( ! kernelTLS.preemption_state.enabled );
    668 
    669         enable_interrupts( __cfaabi_dbg_ctx );
    670 }
    671 
    672 // KERNEL ONLY
    673 void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
    674         verify( ! kernelTLS.preemption_state.enabled );
    675         with( * kernelTLS.this_processor ) {
    676                 finish.action_code = thrd ? Release_Schedule : Release;
    677                 finish.lock        = lock;
    678                 finish.thrd        = thrd;
    679         }
    680 
    681         returnToKernel();
     639        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     640
     641        $thread * thrd = kernelTLS.this_thread;
     642        /* paranoid */ verify(thrd->state == Active || thrd->state == Rerun);
     643
      644        // SKULLDUGGERY: It is possible that we are preempting this thread just before
      645        // it was going to park itself. In that case it is already using the intrusive
      646        // fields, so we cannot use them to preempt the thread.
      647        // If so, abandon the preemption.
     648        bool preempted = false;
     649        if(thrd->next == 0p) {
     650                preempted = true;
     651                thrd->preempted = reason;
     652                returnToKernel();
     653        }
     654
     655        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     656        enable_interrupts_noPoll();
     657        /* paranoid */ verify( kernelTLS.preemption_state.enabled );
     658
     659        return preempted;
    682660}
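
The `thrd->next == 0p` test above is sound because the intrusive queues in this changeset now terminate in the 1p sentinel instead of 0p: any queued thread has `next == 1p` (tail) or a real pointer. A null link therefore means the field is free and the preemption can borrow it; otherwise the preemption is abandoned. As a predicate (the name is illustrative, not part of the API):

        // illustrative: a thread's intrusive link is usable only when it is on no queue
        static inline bool link_free( $thread * thrd ) {
                return thrd->next == 0p;        // 1p = tail of some queue, other = mid-queue
        }
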
    683661
     
    687665//-----------------------------------------------------------------------------
    688666// Kernel boot procedures
    689 static void kernel_startup(void) {
     667static void __kernel_startup(void) {
    690668        verify( ! kernelTLS.preemption_state.enabled );
    691669        __cfaabi_dbg_print_safe("Kernel : Starting\n");
     
    705683        // SKULLDUGGERY: the mainThread steals the process main thread
    706684        // which will then be scheduled by the mainProcessor normally
    707         mainThread = (thread_desc *)&storage_mainThread;
     685        mainThread = ($thread *)&storage_mainThread;
    708686        current_stack_info_t info;
    709687        info.storage = (__stack_t*)&storage_mainThreadCtx;
     
    748726        // Add the main thread to the ready queue
    749727        // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
    750         ScheduleThread(mainThread);
     728        __schedule_thread(mainThread);
    751729
    752730        // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
    753         // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
     731        // context. Hence, the main thread does not begin through __cfactx_invoke_thread, like all other threads. The trick here is that
    754732        // mainThread is on the ready queue when this call is made.
    755         kernel_first_resume( kernelTLS.this_processor );
     733        __kernel_first_resume( kernelTLS.this_processor );
    756734
    757735
     
    765743}
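
mainThread is built in place over a static byte buffer reserved by the KERNEL_STORAGE macro (kernel_private.hfa below), so the kernel's first thread exists before any allocator does. The idiom, sketched with the constructor arguments elided:

        // KERNEL_STORAGE(T, X) reserves raw bytes: static char storage_X[sizeof(T)]
        KERNEL_STORAGE( $thread, mainThread );          // no constructor runs here
        ...
        mainThread = ($thread *)&storage_mainThread;    // adopt the buffer
        (*mainThread){ /* ctor arguments elided */ };   // construct in place (CFA syntax)
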
    766744
    767 static void kernel_shutdown(void) {
     745static void __kernel_shutdown(void) {
    768746        __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");
    769747
     
    776754        // which is currently here
    777755        __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
    778         kernel_last_resume( kernelTLS.this_processor );
     756        __kernel_last_resume( kernelTLS.this_processor );
    779757        mainThread->self_cor.state = Halted;
    780758
     
    802780// Kernel Quiescing
    803781//=============================================================================================
    804 static void halt(processor * this) with( *this ) {
     782static void __halt(processor * this) with( *this ) {
    805783        // verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );
    806784
     
    857835
    858836void kernel_abort_msg( void * kernel_data, char * abort_text, int abort_text_size ) {
    859         thread_desc * thrd = kernel_data;
     837        $thread * thrd = kernel_data;
    860838
    861839        if(thrd) {
     
    913891
    914892                // atomically release spin lock and block
    915                 BlockInternal( &lock );
     893                unlock( lock );
     894                park();
    916895        }
    917896        else {
     
    921900
    922901void V(semaphore & this) with( this ) {
    923         thread_desc * thrd = 0p;
     902        $thread * thrd = 0p;
    924903        lock( lock __cfaabi_dbg_ctx2 );
    925904        count += 1;
     
    932911
    933912        // make new owner
    934         WakeThread( thrd );
     913        unpark( thrd );
    935914}
    936915
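
P's blocking path (visible above: release the spin lock, then park) pairs with V's unpark; the diff context hides the counting bookkeeping. A full sketch of P, assuming the standard counting-semaphore logic around the shown lines:

        void P(semaphore & this) with( this ) {
                lock( lock __cfaabi_dbg_ctx2 );
                count -= 1;
                if ( count < 0 ) {
                        // queue ourselves as a waiter (assumed bookkeeping)
                        append( waiting, kernelTLS.this_thread );

                        // atomically release spin lock and block
                        unlock( lock );
                        park();
                }
                else {
                        unlock( lock );
                }
        }
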
     
    949928}
    950929
    951 void doregister( cluster * cltr, thread_desc & thrd ) {
     930void doregister( cluster * cltr, $thread & thrd ) {
    952931        lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
    953932        cltr->nthreads += 1;
     
    956935}
    957936
    958 void unregister( cluster * cltr, thread_desc & thrd ) {
     937void unregister( cluster * cltr, $thread & thrd ) {
    959938        lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
    960939        remove(cltr->threads, thrd );
     
    990969//-----------------------------------------------------------------------------
    991970// Debug
    992 bool threading_enabled(void) {
     971bool threading_enabled(void) __attribute__((const)) {
    993972        return true;
    994973}
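
Every wake-up path in this file now leans on two properties of unpark(): it is a no-op on 0p (V above passes a possibly-null thrd), and an unpark() racing ahead of the matching park() is remembered rather than lost. That makes the waker side safe to run outside the lock; a sketch of the resulting shape:

        // waker: dequeue under the lock, wake outside it
        lock( lock __cfaabi_dbg_ctx2 );
        $thread * thrd = pop_head( blocked_threads );   // 0p when nobody is waiting
        unlock( lock );
        unpark( thrd );                                 // tolerates 0p and early arrival
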
  • libcfa/src/concurrency/kernel.hfa

    r41efd33 r04e6f93  
    3232        __spinlock_t lock;
    3333        int count;
    34         __queue_t(thread_desc) waiting;
     34        __queue_t($thread) waiting;
    3535};
    3636
     
    4444// Processor
    4545extern struct cluster * mainCluster;
    46 
    47 enum FinishOpCode { No_Action, Release, Schedule, Release_Schedule, Release_Multi, Release_Multi_Schedule, Callback };
    48 
    49 typedef void (*__finish_callback_fptr_t)(void);
    50 
    51 //TODO use union, many of these fields are mutually exclusive (i.e. MULTI vs NOMULTI)
    52 struct FinishAction {
    53         FinishOpCode action_code;
    54         /*
    55         // Union of possible actions
    56         union {
    57                 // Option 1 : locks and threads
    58                 struct {
    59                         // 1 thread or N thread
    60                         union {
    61                                 thread_desc * thrd;
    62                                 struct {
    63                                         thread_desc ** thrds;
    64                                         unsigned short thrd_count;
    65                                 };
    66                         };
    67                         // 1 lock or N lock
    68                         union {
    69                                 __spinlock_t * lock;
    70                                 struct {
    71                                         __spinlock_t ** locks;
    72                                         unsigned short lock_count;
    73                                 };
    74                         };
    75                 };
    76                 // Option 2 : action pointer
    77                 __finish_callback_fptr_t callback;
    78         };
    79         /*/
    80         thread_desc * thrd;
    81         thread_desc ** thrds;
    82         unsigned short thrd_count;
    83         __spinlock_t * lock;
    84         __spinlock_t ** locks;
    85         unsigned short lock_count;
    86         __finish_callback_fptr_t callback;
    87         //*/
    88 };
    89 static inline void ?{}(FinishAction & this) {
    90         this.action_code = No_Action;
    91         this.thrd = 0p;
    92         this.lock = 0p;
    93 }
    94 static inline void ^?{}(FinishAction &) {}
    9546
    9647// Processor
     
    11667        // RunThread data
    11768        // Action to do after a thread is ran
    118         struct FinishAction finish;
     69        $thread * destroyer;
    11970
    12071        // Preemption data
     
    157108static inline void  ?{}(processor & this, const char name[]) { this{name, *mainCluster }; }
    158109
    159 static inline [processor *&, processor *& ] __get( processor & this ) {
    160         return this.node.[next, prev];
    161 }
     110static inline [processor *&, processor *& ] __get( processor & this ) __attribute__((const)) { return this.node.[next, prev]; }
    162111
    163112//-----------------------------------------------------------------------------
     
    168117
    169118        // Ready queue for threads
    170         __queue_t(thread_desc) ready_queue;
     119        __queue_t($thread) ready_queue;
    171120
    172121        // Name of the cluster
     
    184133        // List of threads
    185134        __spinlock_t thread_list_lock;
    186         __dllist_t(struct thread_desc) threads;
     135        __dllist_t(struct $thread) threads;
    187136        unsigned int nthreads;
    188137
     
    202151static inline void ?{} (cluster & this, const char name[])        { this{name, default_preemption()}; }
    203152
    204 static inline [cluster *&, cluster *& ] __get( cluster & this ) {
    205         return this.node.[next, prev];
    206 }
     153static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }
    207154
    208155static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE
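
The FinishAction machinery collapses into the single `destroyer` field above: the one action that must still happen from the processor stack is waking whoever will destroy a terminated thread, since that wake-up cannot occur while the dead thread's stack is live. Both halves, sketched; the consumer half is an assumption, as this changeset only shows the producer (in __cfactx_thrd_leave, monitor.cfa below):

        // producer: dying thread's last act
        kernelTLS.this_processor->destroyer = new_owner;        // who may reclaim this stack
        __leave_thread();                                       // noreturn

        // assumed consumer: kernel side, once the dead stack is out of use
        $thread * d = kernelTLS.this_processor->destroyer;
        kernelTLS.this_processor->destroyer = 0p;
        unpark( d );                                            // no-op when 0p
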
  • libcfa/src/concurrency/kernel_private.hfa

    r41efd33 r04e6f93  
    3131}
    3232
    33 void ScheduleThread( thread_desc * );
    34 static inline void WakeThread( thread_desc * thrd ) {
    35         if( !thrd ) return;
    36 
    37         verify(thrd->state == Inactive);
    38 
    39         disable_interrupts();
    40         ScheduleThread( thrd );
    41         enable_interrupts( __cfaabi_dbg_ctx );
    42 }
    43 thread_desc * nextThread(cluster * this);
     33void __schedule_thread( $thread * ) __attribute__((nonnull (1)));
    4434
    4535//Block current thread and release/wake-up the following resources
    46 void BlockInternal(void);
    47 void BlockInternal(__spinlock_t * lock);
    48 void BlockInternal(thread_desc * thrd);
    49 void BlockInternal(__spinlock_t * lock, thread_desc * thrd);
    50 void BlockInternal(__spinlock_t * locks [], unsigned short count);
    51 void BlockInternal(__spinlock_t * locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count);
    52 void BlockInternal(__finish_callback_fptr_t callback);
    53 void LeaveThread(__spinlock_t * lock, thread_desc * thrd);
     36void __leave_thread() __attribute__((noreturn));
    5437
    5538//-----------------------------------------------------------------------------
     
    5740void main(processorCtx_t *);
    5841
    59 void * create_pthread( pthread_t *, void * (*)(void *), void * );
     42void * __create_pthread( pthread_t *, void * (*)(void *), void * );
    6043
    6144static inline void wake_fast(processor * this) {
     
    8871// Threads
    8972extern "C" {
    90       void CtxInvokeThread(void (*main)(void *), void * this);
     73      void __cfactx_invoke_thread(void (*main)(void *), void * this);
    9174}
    9275
    93 extern void ThreadCtxSwitch(coroutine_desc * src, coroutine_desc * dst);
    94 
    9576__cfaabi_dbg_debug_do(
    96         extern void __cfaabi_dbg_thread_register  ( thread_desc * thrd );
    97         extern void __cfaabi_dbg_thread_unregister( thread_desc * thrd );
     77        extern void __cfaabi_dbg_thread_register  ( $thread * thrd );
     78        extern void __cfaabi_dbg_thread_unregister( $thread * thrd );
    9879)
    9980
     
    10283#define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]
    10384
    104 static inline uint32_t tls_rand() {
     85static inline uint32_t __tls_rand() {
    10586        kernelTLS.rand_seed ^= kernelTLS.rand_seed << 6;
    10687        kernelTLS.rand_seed ^= kernelTLS.rand_seed >> 21;
     
    11394void unregister( struct cluster & cltr );
    11495
    115 void doregister( struct cluster * cltr, struct thread_desc & thrd );
    116 void unregister( struct cluster * cltr, struct thread_desc & thrd );
     96void doregister( struct cluster * cltr, struct $thread & thrd );
     97void unregister( struct cluster * cltr, struct $thread & thrd );
    11798
    11899void doregister( struct cluster * cltr, struct processor * proc );
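
The blocking API shrinks from eight entry points to two kernel hooks, __schedule_thread and __leave_thread; everything else composes park()/unpark(). The deleted WakeThread, for instance, reduces to the shape unpark() presumably wraps (a sketch of the deleted code, minus its state assertion; the name `wake` is illustrative):

        static inline void wake( $thread * thrd ) {
                if( !thrd ) return;                     // __schedule_thread is nonnull; filter here
                disable_interrupts();                   // scheduling requires the kernel critical section
                __schedule_thread( thrd );              // push onto the ready queue
                enable_interrupts( __cfaabi_dbg_ctx );
        }
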
  • libcfa/src/concurrency/monitor.cfa

    r41efd33 r04e6f93  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // monitor_desc.c --
     7// $monitor.c --
    88//
    99// Author           : Thierry Delisle
     
    2727//-----------------------------------------------------------------------------
    2828// Forward declarations
    29 static inline void set_owner ( monitor_desc * this, thread_desc * owner );
    30 static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
    31 static inline void set_mask  ( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
    32 static inline void reset_mask( monitor_desc * this );
    33 
    34 static inline thread_desc * next_thread( monitor_desc * this );
    35 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );
     29static inline void __set_owner ( $monitor * this, $thread * owner );
     30static inline void __set_owner ( $monitor * storage [], __lock_size_t count, $thread * owner );
     31static inline void set_mask  ( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
     32static inline void reset_mask( $monitor * this );
     33
     34static inline $thread * next_thread( $monitor * this );
     35static inline bool is_accepted( $monitor * this, const __monitor_group_t & monitors );
    3636
    3737static inline void lock_all  ( __spinlock_t * locks [], __lock_size_t count );
    38 static inline void lock_all  ( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
     38static inline void lock_all  ( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
    3939static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count );
    40 static inline void unlock_all( monitor_desc * locks [], __lock_size_t count );
    41 
    42 static inline void save   ( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
    43 static inline void restore( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
    44 
    45 static inline void init     ( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    46 static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    47 
    48 static inline thread_desc *        check_condition   ( __condition_criterion_t * );
     40static inline void unlock_all( $monitor * locks [], __lock_size_t count );
     41
     42static inline void save   ( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
     43static inline void restore( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
     44
     45static inline void init     ( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     46static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     47
     48static inline $thread *        check_condition   ( __condition_criterion_t * );
    4949static inline void                 brand_condition   ( condition & );
    50 static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc * monitors [], __lock_size_t count );
     50static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t &, $monitor * monitors [], __lock_size_t count );
    5151
    5252forall(dtype T | sized( T ))
    5353static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
    5454static inline __lock_size_t count_max    ( const __waitfor_mask_t & mask );
    55 static inline __lock_size_t aggregate    ( monitor_desc * storage [], const __waitfor_mask_t & mask );
     55static inline __lock_size_t aggregate    ( $monitor * storage [], const __waitfor_mask_t & mask );
    5656
    5757//-----------------------------------------------------------------------------
     
    6868
    6969#define monitor_ctx( mons, cnt )                                /* Define that create the necessary struct for internal/external scheduling operations */ \
    70         monitor_desc ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
     70        $monitor ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
    7171        __lock_size_t count = cnt;                                /* Save the count to a local variable                                                  */ \
    7272        unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
     
    8080//-----------------------------------------------------------------------------
    8181// Enter/Leave routines
    82 
    83 
    84 extern "C" {
    85         // Enter single monitor
    86         static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
    87                 // Lock the monitor spinlock
    88                 lock( this->lock __cfaabi_dbg_ctx2 );
    89                 // Interrupts disable inside critical section
    90                 thread_desc * thrd = kernelTLS.this_thread;
    91 
    92                 __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
    93 
    94                 if( !this->owner ) {
    95                         // No one has the monitor, just take it
    96                         set_owner( this, thrd );
    97 
    98                         __cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
    99                 }
    100                 else if( this->owner == thrd) {
    101                         // We already have the monitor, just note how many times we took it
    102                         this->recursion += 1;
    103 
    104                         __cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
    105                 }
    106                 else if( is_accepted( this, group) ) {
    107                         // Some one was waiting for us, enter
    108                         set_owner( this, thrd );
    109 
    110                         // Reset mask
    111                         reset_mask( this );
    112 
    113                         __cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
    114                 }
    115                 else {
    116                         __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
    117 
    118                         // Some one else has the monitor, wait in line for it
    119                         append( this->entry_queue, thrd );
    120 
    121                         BlockInternal( &this->lock );
    122 
    123                         __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
    124 
    125                         // BlockInternal will unlock spinlock, no need to unlock ourselves
    126                         return;
    127                 }
     82// Enter single monitor
     83static void __enter( $monitor * this, const __monitor_group_t & group ) {
     84        // Lock the monitor spinlock
     85        lock( this->lock __cfaabi_dbg_ctx2 );
     86        // Interrupts disable inside critical section
     87        $thread * thrd = kernelTLS.this_thread;
     88
     89        __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
     90
     91        if( !this->owner ) {
     92                // No one has the monitor, just take it
     93                __set_owner( this, thrd );
     94
     95                __cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
     96        }
     97        else if( this->owner == thrd) {
     98                // We already have the monitor, just note how many times we took it
     99                this->recursion += 1;
     100
     101                __cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
     102        }
     103        else if( is_accepted( this, group) ) {
     104                // Some one was waiting for us, enter
     105                __set_owner( this, thrd );
     106
     107                // Reset mask
     108                reset_mask( this );
     109
     110                __cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
     111        }
     112        else {
     113                __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
     114
     115                // Some one else has the monitor, wait in line for it
     116                /* paranoid */ verify( thrd->next == 0p );
     117                append( this->entry_queue, thrd );
     118                /* paranoid */ verify( thrd->next == 1p );
     119
     120                unlock( this->lock );
     121                park();
    128122
    129123                __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
    130124
    131                 // Release the lock and leave
     125                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     126                return;
     127        }
     128
     129        __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
     130
     131        /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     132        /* paranoid */ verify( this->lock.lock );
     133
     134        // Release the lock and leave
     135        unlock( this->lock );
     136        return;
     137}
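
Blocking in __enter needs no retry loop: next_thread()/__set_owner() transfer ownership before the wake-up, so park() returning is itself the acquisition. The two sides, condensed from __enter above and __leave below:

        // entrant: queue, drop the monitor spinlock, sleep
        append( this->entry_queue, thrd );
        unlock( this->lock );
        park();                                         // on return, the monitor is already ours
        /* paranoid */ verify( kernelTLS.this_thread == this->owner );

        // releasing owner: pick, transfer, then wake
        $thread * new_owner = next_thread( this );      // dequeues and __set_owner()s under the lock
        unlock( this->lock );
        unpark( new_owner );                            // 0p on an uncontended monitor
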
     138
     139static void __dtor_enter( $monitor * this, fptr_t func ) {
     140        // Lock the monitor spinlock
     141        lock( this->lock __cfaabi_dbg_ctx2 );
     142        // Interrupts disable inside critical section
     143        $thread * thrd = kernelTLS.this_thread;
     144
     145        __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
     146
     147
     148        if( !this->owner ) {
     149                __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
     150
     151                // No one has the monitor, just take it
     152                __set_owner( this, thrd );
     153
     154                verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     155
    132156                unlock( this->lock );
    133157                return;
    134158        }
    135 
    136         static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) {
    137                 // Lock the monitor spinlock
    138                 lock( this->lock __cfaabi_dbg_ctx2 );
    139                 // Interrupts disable inside critical section
    140                 thread_desc * thrd = kernelTLS.this_thread;
    141 
    142                 __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
    143 
    144 
    145                 if( !this->owner ) {
    146                         __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
    147 
    148                         // No one has the monitor, just take it
    149                         set_owner( this, thrd );
    150 
    151                         unlock( this->lock );
    152                         return;
     159        else if( this->owner == thrd) {
      160                // We already have the monitor... but we're about to destroy it, so the nesting will fail
     161                // Abort!
     162                abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd );
     163        }
     164
     165        __lock_size_t count = 1;
     166        $monitor ** monitors = &this;
     167        __monitor_group_t group = { &this, 1, func };
     168        if( is_accepted( this, group) ) {
     169                __cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
     170
     171                // Wake the thread that is waiting for this
     172                __condition_criterion_t * urgent = pop( this->signal_stack );
     173                /* paranoid */ verify( urgent );
     174
     175                // Reset mask
     176                reset_mask( this );
     177
     178                // Create the node specific to this wait operation
     179                wait_ctx_primed( thrd, 0 )
     180
     181                // Some one else has the monitor, wait for him to finish and then run
     182                unlock( this->lock );
     183
     184                // Release the next thread
     185                /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     186                unpark( urgent->owner->waiting_thread );
     187
     188                // Park current thread waiting
     189                park();
     190
     191                // Some one was waiting for us, enter
     192                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     193        }
     194        else {
     195                __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
     196
     197                wait_ctx( thrd, 0 )
     198                this->dtor_node = &waiter;
     199
     200                // Some one else has the monitor, wait in line for it
     201                /* paranoid */ verify( thrd->next == 0p );
     202                append( this->entry_queue, thrd );
     203                /* paranoid */ verify( thrd->next == 1p );
     204                unlock( this->lock );
     205
     206                // Park current thread waiting
     207                park();
     208
     209                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     210                return;
     211        }
     212
     213        __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);
     214
     215}
     216
     217// Leave single monitor
     218void __leave( $monitor * this ) {
     219        // Lock the monitor spinlock
     220        lock( this->lock __cfaabi_dbg_ctx2 );
     221
     222        __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
     223
     224        /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     225
     226        // Leaving a recursion level, decrement the counter
     227        this->recursion -= 1;
     228
     229        // If we haven't left the last level of recursion
     230        // it means we don't need to do anything
     231        if( this->recursion != 0) {
     232                __cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
     233                unlock( this->lock );
     234                return;
     235        }
     236
     237        // Get the next thread, will be null on low contention monitor
     238        $thread * new_owner = next_thread( this );
     239
      240        // Check that the new owner is consistent with who we wake up.
      241        // new_owner might be null even if someone owns the monitor, when that owner is still waiting on another monitor.
     242        /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     243
     244        // We can now let other threads in safely
     245        unlock( this->lock );
     246
     247        //We need to wake-up the thread
     248        /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     249        unpark( new_owner );
     250}
     251
     252// Leave single monitor for the last time
     253void __dtor_leave( $monitor * this ) {
     254        __cfaabi_dbg_debug_do(
     255                if( TL_GET( this_thread ) != this->owner ) {
     256                        abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner);
    153257                }
    154                 else if( this->owner == thrd) {
    155                         // We already have the monitor... but where about to destroy it so the nesting will fail
    156                         // Abort!
    157                         abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd );
     258                if( this->recursion != 1 ) {
     259                        abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
    158260                }
    159 
    160                 __lock_size_t count = 1;
    161                 monitor_desc ** monitors = &this;
    162                 __monitor_group_t group = { &this, 1, func };
    163                 if( is_accepted( this, group) ) {
    164                         __cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
    165 
    166                         // Wake the thread that is waiting for this
    167                         __condition_criterion_t * urgent = pop( this->signal_stack );
    168                         verify( urgent );
    169 
    170                         // Reset mask
    171                         reset_mask( this );
    172 
    173                         // Create the node specific to this wait operation
    174                         wait_ctx_primed( thrd, 0 )
    175 
    176                         // Some one else has the monitor, wait for him to finish and then run
    177                         BlockInternal( &this->lock, urgent->owner->waiting_thread );
    178 
    179                         // Some one was waiting for us, enter
    180                         set_owner( this, thrd );
    181                 }
    182                 else {
    183                         __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
    184 
    185                         wait_ctx( thrd, 0 )
    186                         this->dtor_node = &waiter;
    187 
    188                         // Some one else has the monitor, wait in line for it
    189                         append( this->entry_queue, thrd );
    190                         BlockInternal( &this->lock );
    191 
    192                         // BlockInternal will unlock spinlock, no need to unlock ourselves
    193                         return;
    194                 }
    195 
    196                 __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);
    197 
    198         }
    199 
    200         // Leave single monitor
    201         void __leave_monitor_desc( monitor_desc * this ) {
    202                 // Lock the monitor spinlock
    203                 lock( this->lock __cfaabi_dbg_ctx2 );
    204 
    205                 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
    206 
    207                 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    208 
    209                 // Leaving a recursion level, decrement the counter
    210                 this->recursion -= 1;
    211 
    212                 // If we haven't left the last level of recursion
    213                 // it means we don't need to do anything
    214                 if( this->recursion != 0) {
    215                         __cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
    216                         unlock( this->lock );
    217                         return;
    218                 }
    219 
    220                 // Get the next thread, will be null on low contention monitor
    221                 thread_desc * new_owner = next_thread( this );
    222 
    223                 // We can now let other threads in safely
    224                 unlock( this->lock );
    225 
    226                 //We need to wake-up the thread
    227                 WakeThread( new_owner );
    228         }
    229 
    230         // Leave single monitor for the last time
    231         void __leave_dtor_monitor_desc( monitor_desc * this ) {
    232                 __cfaabi_dbg_debug_do(
    233                         if( TL_GET( this_thread ) != this->owner ) {
    234                                 abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner);
    235                         }
    236                         if( this->recursion != 1 ) {
    237                                 abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
    238                         }
    239                 )
    240         }
    241 
     261        )
     262}
     263
     264extern "C" {
    242265        // Leave the thread monitor
    243266        // last routine called by a thread.
    244267        // Should never return
    245         void __leave_thread_monitor() {
    246                 thread_desc * thrd = TL_GET( this_thread );
    247                 monitor_desc * this = &thrd->self_mon;
     268        void __cfactx_thrd_leave() {
     269                $thread * thrd = TL_GET( this_thread );
     270                $monitor * this = &thrd->self_mon;
    248271
    249272                // Lock the monitor now
     
    252275                disable_interrupts();
    253276
    254                 thrd->self_cor.state = Halted;
    255 
    256                 verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
     277                thrd->state = Halted;
     278
     279                /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
    257280
    258281                // Leaving a recursion level, decrement the counter
     
    264287
    265288                // Fetch the next thread, can be null
    266                 thread_desc * new_owner = next_thread( this );
    267 
    268                 // Leave the thread, this will unlock the spinlock
    269                 // Use leave thread instead of BlockInternal which is
    270                 // specialized for this case and supports null new_owner
    271                 LeaveThread( &this->lock, new_owner );
     289                $thread * new_owner = next_thread( this );
     290
     291                // Release the monitor lock
     292                unlock( this->lock );
     293
     294                // Unpark the next owner if needed
     295                /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     296                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     297                /* paranoid */ verify( ! kernelTLS.this_processor->destroyer );
     298                /* paranoid */ verify( thrd->state == Halted );
     299
     300                kernelTLS.this_processor->destroyer = new_owner;
     301
     302                // Leave the thread
     303                __leave_thread();
    272304
    273305                // Control flow should never reach here!
     
    279311static inline void enter( __monitor_group_t monitors ) {
    280312        for( __lock_size_t i = 0; i < monitors.size; i++) {
    281                 __enter_monitor_desc( monitors[i], monitors );
     313                __enter( monitors[i], monitors );
    282314        }
    283315}
     
    285317// Leave multiple monitor
    286318// relies on the monitor array being sorted
    287 static inline void leave(monitor_desc * monitors [], __lock_size_t count) {
     319static inline void leave($monitor * monitors [], __lock_size_t count) {
    288320        for( __lock_size_t i = count - 1; i >= 0; i--) {
    289                 __leave_monitor_desc( monitors[i] );
     321                __leave( monitors[i] );
    290322        }
    291323}
     
    293325// Ctor for monitor guard
    294326// Sorts monitors before entering
    295 void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) {
    296         thread_desc * thrd = TL_GET( this_thread );
     327void ?{}( monitor_guard_t & this, $monitor * m [], __lock_size_t count, fptr_t func ) {
     328        $thread * thrd = TL_GET( this_thread );
    297329
    298330        // Store current array
     
    334366// Ctor for monitor guard
    335367// Sorts monitors before entering
    336 void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) {
     368void ?{}( monitor_dtor_guard_t & this, $monitor * m [], fptr_t func ) {
    337369        // optimization
    338         thread_desc * thrd = TL_GET( this_thread );
     370        $thread * thrd = TL_GET( this_thread );
    339371
    340372        // Store current array
     
    347379        (thrd->monitors){m, 1, func};
    348380
    349         __enter_monitor_dtor( this.m, func );
     381        __dtor_enter( this.m, func );
    350382}
    351383
     
    353385void ^?{}( monitor_dtor_guard_t & this ) {
    354386        // Leave the monitors in order
    355         __leave_dtor_monitor_desc( this.m );
     387        __dtor_leave( this.m );
    356388
    357389        // Restore thread context
     
    361393//-----------------------------------------------------------------------------
    362394// Internal scheduling types
    363 void ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
     395void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
    364396        this.waiting_thread = waiting_thread;
    365397        this.count = count;
     
    375407}
    376408
    377 void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t & owner ) {
     409void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t & owner ) {
    378410        this.ready  = false;
    379411        this.target = target;
     
    400432        // Append the current wait operation to the ones already queued on the condition
    401433        // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
     434        /* paranoid */ verify( waiter.next == 0p );
    402435        append( this.blocked, &waiter );
     436        /* paranoid */ verify( waiter.next == 1p );
    403437
    404438        // Lock all monitors (aggregates the locks as well)
     
    407441        // Find the next thread(s) to run
    408442        __lock_size_t thread_count = 0;
    409         thread_desc * threads[ count ];
     443        $thread * threads[ count ];
    410444        __builtin_memset( threads, 0, sizeof( threads ) );
    411445
     
    415449        // Remove any duplicate threads
    416450        for( __lock_size_t i = 0; i < count; i++) {
    417                 thread_desc * new_owner = next_thread( monitors[i] );
     451                $thread * new_owner = next_thread( monitors[i] );
    418452                insert_unique( threads, thread_count, new_owner );
    419453        }
    420454
     455        // Unlock the locks, we don't need them anymore
     456        for(int i = 0; i < count; i++) {
     457                unlock( *locks[i] );
     458        }
     459
     460        // Wake the threads
     461        for(int i = 0; i < thread_count; i++) {
     462                unpark( threads[i] );
     463        }
     464
    421465        // Everything is ready to go to sleep
    422         BlockInternal( locks, count, threads, thread_count );
     466        park();
    423467
    424468        // We are back, restore the owners and recursions
     
    435479        //Some more checking in debug
    436480        __cfaabi_dbg_debug_do(
    437                 thread_desc * this_thrd = TL_GET( this_thread );
     481                $thread * this_thrd = TL_GET( this_thread );
    438482                if ( this.monitor_count != this_thrd->monitors.size ) {
    439483                        abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size );
     
    489533
    490534        //Find the thread to run
    491         thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
    492         set_owner( monitors, count, signallee );
     535        $thread * signallee = pop_head( this.blocked )->waiting_thread;
     536        /* paranoid */ verify( signallee->next == 0p );
     537        __set_owner( monitors, count, signallee );
    493538
    494539        __cfaabi_dbg_print_buffer_decl( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee );
    495540
     541        // unlock all the monitors
     542        unlock_all( locks, count );
     543
     544        // unpark the thread we signalled
     545        unpark( signallee );
     546
    496547        //Everything is ready to go to sleep
    497         BlockInternal( locks, count, &signallee, 1 );
     548        park();
    498549
    499550
     
    536587        // Create one!
    537588        __lock_size_t max = count_max( mask );
    538         monitor_desc * mon_storage[max];
     589        $monitor * mon_storage[max];
    539590        __builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
    540591        __lock_size_t actual_count = aggregate( mon_storage, mask );
     
    554605        {
    555606                // Check if the entry queue
    556                 thread_desc * next; int index;
     607                $thread * next; int index;
    557608                [next, index] = search_entry_queue( mask, monitors, count );
    558609
     
    564615                                verifyf( accepted.size == 1,  "ERROR: Accepted dtor has more than 1 mutex parameter." );
    565616
    566                                 monitor_desc * mon2dtor = accepted[0];
     617                                $monitor * mon2dtor = accepted[0];
    567618                                verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." );
    568619
     
    590641
    591642                                // Set the owners to be the next thread
    592                                 set_owner( monitors, count, next );
    593 
    594                                 // Everything is ready to go to sleep
    595                                 BlockInternal( locks, count, &next, 1 );
     643                                __set_owner( monitors, count, next );
     644
     645                                // unlock all the monitors
     646                                unlock_all( locks, count );
     647
     648                                // unpark the thread we signalled
     649                                unpark( next );
     650
     651                                //Everything is ready to go to sleep
     652                                park();
    596653
    597654                                // We are back, restore the owners and recursions
     
    631688        }
    632689
     690        // unlock all the monitors
     691        unlock_all( locks, count );
     692
    633693        //Everything is ready to go to sleep
    634         BlockInternal( locks, count );
     694        park();
    635695
    636696
     
    649709// Utilities
    650710
    651 static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
    652         // __cfaabi_dbg_print_safe( "Kernal :   Setting owner of %p to %p ( was %p)\n", this, owner, this->owner );
     711static inline void __set_owner( $monitor * this, $thread * owner ) {
     712        /* paranoid */ verify( this->lock.lock );
    653713
    654714        //Pass the monitor appropriately
     
    659719}
    660720
    661 static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
    662         monitors[0]->owner     = owner;
    663         monitors[0]->recursion = 1;
     721static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread * owner ) {
     722        /* paranoid */ verify ( monitors[0]->lock.lock );
     723        /* paranoid */ verifyf( monitors[0]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[0]->owner, monitors[0]->recursion, monitors[0] );
     724        monitors[0]->owner        = owner;
     725        monitors[0]->recursion    = 1;
    664726        for( __lock_size_t i = 1; i < count; i++ ) {
    665                 monitors[i]->owner     = owner;
    666                 monitors[i]->recursion = 0;
    667         }
    668 }
    669 
    670 static inline void set_mask( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
     727                /* paranoid */ verify ( monitors[i]->lock.lock );
     728                /* paranoid */ verifyf( monitors[i]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[i]->owner, monitors[i]->recursion, monitors[i] );
     729                monitors[i]->owner        = owner;
     730                monitors[i]->recursion    = 0;
     731        }
     732}
     733
     734static inline void set_mask( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
    671735        for( __lock_size_t i = 0; i < count; i++) {
    672736                storage[i]->mask = mask;
     
    674738}
    675739
    676 static inline void reset_mask( monitor_desc * this ) {
     740static inline void reset_mask( $monitor * this ) {
    677741        this->mask.accepted = 0p;
    678742        this->mask.data = 0p;
     
    680744}
    681745
    682 static inline thread_desc * next_thread( monitor_desc * this ) {
     746static inline $thread * next_thread( $monitor * this ) {
    683747        //Check the signaller stack
    684748        __cfaabi_dbg_print_safe( "Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
     
    688752                //regardless of if we are ready to baton pass,
    689753                //we need to set the monitor as in use
    690                 set_owner( this,  urgent->owner->waiting_thread );
     754                /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     755                __set_owner( this,  urgent->owner->waiting_thread );
    691756
    692757                return check_condition( urgent );
     
    695760        // No signaller thread
    696761        // Get the next thread in the entry_queue
    697         thread_desc * new_owner = pop_head( this->entry_queue );
    698         set_owner( this, new_owner );
     762        $thread * new_owner = pop_head( this->entry_queue );
     763        /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     764        /* paranoid */ verify( !new_owner || new_owner->next == 0p );
     765        __set_owner( this, new_owner );
    699766
    700767        return new_owner;
    701768}
    702769
    703 static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
     770static inline bool is_accepted( $monitor * this, const __monitor_group_t & group ) {
    704771        __acceptable_t * it = this->mask.data; // Optim
    705772        __lock_size_t count = this->mask.size;
     
    723790}
    724791
    725 static inline void init( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
     792static inline void init( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
    726793        for( __lock_size_t i = 0; i < count; i++) {
    727794                (criteria[i]){ monitors[i], waiter };
     
    731798}
    732799
    733 static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
     800static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
    734801        for( __lock_size_t i = 0; i < count; i++) {
    735802                (criteria[i]){ monitors[i], waiter };
     
    747814}
    748815
    749 static inline void lock_all( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
     816static inline void lock_all( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
    750817        for( __lock_size_t i = 0; i < count; i++ ) {
    751818                __spinlock_t * l = &source[i]->lock;
     
    761828}
    762829
    763 static inline void unlock_all( monitor_desc * locks [], __lock_size_t count ) {
     830static inline void unlock_all( $monitor * locks [], __lock_size_t count ) {
    764831        for( __lock_size_t i = 0; i < count; i++ ) {
    765832                unlock( locks[i]->lock );
     
    768835
    769836static inline void save(
    770         monitor_desc * ctx [],
     837        $monitor * ctx [],
    771838        __lock_size_t count,
    772839        __attribute((unused)) __spinlock_t * locks [],
     
    781848
    782849static inline void restore(
    783         monitor_desc * ctx [],
     850        $monitor * ctx [],
    784851        __lock_size_t count,
    785852        __spinlock_t * locks [],
     
    799866// 2 - Checks if all the monitors are ready to run
    800867//     if so return the thread to run
    801 static inline thread_desc * check_condition( __condition_criterion_t * target ) {
     868static inline $thread * check_condition( __condition_criterion_t * target ) {
    802869        __condition_node_t * node = target->owner;
    803870        unsigned short count = node->count;
     
    822889
    823890static inline void brand_condition( condition & this ) {
    824         thread_desc * thrd = TL_GET( this_thread );
     891        $thread * thrd = TL_GET( this_thread );
    825892        if( !this.monitors ) {
    826893                // __cfaabi_dbg_print_safe( "Branding\n" );
     
    828895                this.monitor_count = thrd->monitors.size;
    829896
    830                 this.monitors = (monitor_desc **)malloc( this.monitor_count * sizeof( *this.monitors ) );
     897                this.monitors = ($monitor **)malloc( this.monitor_count * sizeof( *this.monitors ) );
    831898                for( int i = 0; i < this.monitor_count; i++ ) {
    832899                        this.monitors[i] = thrd->monitors[i];
     
    835902}
    836903
    837 static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc * monitors [], __lock_size_t count ) {
    838 
    839         __queue_t(thread_desc) & entry_queue = monitors[0]->entry_queue;
     904static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t & mask, $monitor * monitors [], __lock_size_t count ) {
     905
     906        __queue_t($thread) & entry_queue = monitors[0]->entry_queue;
    840907
    841908        // For each thread in the entry-queue
    842         for(    thread_desc ** thrd_it = &entry_queue.head;
    843                 *thrd_it;
     909        for(    $thread ** thrd_it = &entry_queue.head;
     910                *thrd_it != 1p;
    844911                thrd_it = &(*thrd_it)->next
    845912        ) {
     
    884951}
    885952
    886 static inline __lock_size_t aggregate( monitor_desc * storage [], const __waitfor_mask_t & mask ) {
     953static inline __lock_size_t aggregate( $monitor * storage [], const __waitfor_mask_t & mask ) {
    887954        __lock_size_t size = 0;
    888955        for( __lock_size_t i = 0; i < mask.size; i++ ) {
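
search_entry_queue above walks the entry queue with a pointer-to-pointer cursor bounded by the 1p sentinel, the same convention the rest of this changeset adopts. The traversal idiom, reduced to its core (`matches` is a hypothetical stand-in for the waitfor-mask test):

        // sketch: scan a 1p-terminated intrusive queue, keeping a removal handle
        for( $thread ** thrd_it = &entry_queue.head; *thrd_it != 1p; thrd_it = &(*thrd_it)->next ) {
                $thread * thrd = *thrd_it;      // current candidate
                if( matches( thrd ) ) break;    // *thrd_it now doubles as the unlink point
        }
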
  • libcfa/src/concurrency/monitor.hfa

    r41efd33 r04e6f93  
    2323
    2424trait is_monitor(dtype T) {
    25         monitor_desc * get_monitor( T & );
     25        $monitor * get_monitor( T & );
    2626        void ^?{}( T & mutex );
    2727};
    2828
    29 static inline void ?{}(monitor_desc & this) with( this ) {
     29static inline void ?{}($monitor & this) with( this ) {
    3030        lock{};
    3131        entry_queue{};
     
    3939}
    4040
    41 static inline void ^?{}(monitor_desc & ) {}
     41static inline void ^?{}($monitor & ) {}
    4242
    4343struct monitor_guard_t {
    44         monitor_desc **         m;
     44        $monitor **     m;
    4545        __lock_size_t           count;
    4646        __monitor_group_t prev;
    4747};
    4848
    49 void ?{}( monitor_guard_t & this, monitor_desc ** m, __lock_size_t count, void (*func)() );
     49void ?{}( monitor_guard_t & this, $monitor ** m, __lock_size_t count, void (*func)() );
    5050void ^?{}( monitor_guard_t & this );
    5151
    5252struct monitor_dtor_guard_t {
    53         monitor_desc *    m;
     53        $monitor *    m;
    5454        __monitor_group_t prev;
    5555};
    5656
    57 void ?{}( monitor_dtor_guard_t & this, monitor_desc ** m, void (*func)() );
     57void ?{}( monitor_dtor_guard_t & this, $monitor ** m, void (*func)() );
    5858void ^?{}( monitor_dtor_guard_t & this );
    5959
     
    7272
    7373        // The monitor this criterion concerns
    74         monitor_desc * target;
     74        $monitor * target;
    7575
    7676        // The parent node to which this criterion belongs
     
    8787struct __condition_node_t {
    8888        // Thread that needs to be woken when all criteria are met
    89         thread_desc * waiting_thread;
     89        $thread * waiting_thread;
    9090
    9191        // Array of criteria (Criterions are contiguous in memory)
     
    106106}
    107107
    108 void ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info );
     108void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info );
    109109void ?{}(__condition_criterion_t & this );
    110 void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t * owner );
     110void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t * owner );
    111111
    112112struct condition {
     
    115115
    116116        // Array of monitor pointers (Monitors are NOT contiguous in memory)
    117         monitor_desc ** monitors;
     117        $monitor ** monitors;
    118118
    119119        // Number of monitors in the array
     
    133133              bool signal      ( condition & this );
    134134              bool signal_block( condition & this );
    135 static inline bool is_empty    ( condition & this ) { return !this.blocked.head; }
     135static inline bool is_empty    ( condition & this ) { return this.blocked.head == 1p; }
    136136         uintptr_t front       ( condition & this );
    137137
  • libcfa/src/concurrency/mutex.cfa

    r41efd33 r04e6f93  
    4040        if( is_locked ) {
    4141                append( blocked_threads, kernelTLS.this_thread );
    42                 BlockInternal( &lock );
     42                unlock( lock );
     43                park();
    4344        }
    4445        else {
     
    6263        lock( this.lock __cfaabi_dbg_ctx2 );
    6364        this.is_locked = (this.blocked_threads != 0);
    64         WakeThread(
     65        unpark(
    6566                pop_head( this.blocked_threads )
    6667        );
     
    9495        else {
    9596                append( blocked_threads, kernelTLS.this_thread );
    96                 BlockInternal( &lock );
     97                unlock( lock );
     98                park();
    9799        }
    98100}
     
    118120        recursion_count--;
    119121        if( recursion_count == 0 ) {
    120                 thread_desc * thrd = pop_head( blocked_threads );
     122                $thread * thrd = pop_head( blocked_threads );
    121123                owner = thrd;
    122124                recursion_count = (thrd ? 1 : 0);
    123                 WakeThread( thrd );
     125                unpark( thrd );
    124126        }
    125127        unlock( lock );
     
    138140void notify_one(condition_variable & this) with(this) {
    139141        lock( lock __cfaabi_dbg_ctx2 );
    140         WakeThread(
     142        unpark(
    141143                pop_head( this.blocked_threads )
    142144        );
     
    147149        lock( lock __cfaabi_dbg_ctx2 );
    148150        while(this.blocked_threads) {
    149                 WakeThread(
     151                unpark(
    150152                        pop_head( this.blocked_threads )
    151153                );
     
    157159        lock( this.lock __cfaabi_dbg_ctx2 );
    158160        append( this.blocked_threads, kernelTLS.this_thread );
    159         BlockInternal( &this.lock );
     161        unlock( this.lock );
     162        park();
    160163}
    161164
     
    164167        lock( this.lock __cfaabi_dbg_ctx2 );
    165168        append( this.blocked_threads, kernelTLS.this_thread );
    166         void __unlock(void) {
    167                 unlock(l);
    168                 unlock(this.lock);
    169         }
    170         BlockInternal( __unlock );
     169        unlock(l);
     170        unlock(this.lock);
     171        park();
    171172        lock(l);
    172173}
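
Editor's note: replacing BlockInternal with an explicit unlock-then-park sequence is safe only because park/unpark are permit based (see the declarations in thread.hfa further down): an unpark that lands in the window between unlock and park is remembered, so the subsequent park returns immediately. A C11 sketch of that one-permit semantics, with a spin standing in for real blocking (illustrative names; the CFA runtime blocks on the scheduler instead of spinning, and the permit is per thread):

	#include <stdatomic.h>
	#include <sched.h>

	static _Atomic int permit = 0;          /* one deposit per unpark */

	void unpark_sketch( void ) {
		atomic_store( &permit, 1 );         /* sticky: kept if nobody has parked yet */
	}

	void park_sketch( void ) {
		int expected = 1;
		/* consume the permit; if none deposited yet, wait for one */
		while ( ! atomic_compare_exchange_weak( &permit, &expected, 0 ) ) {
			expected = 1;
			sched_yield();
		}
	}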
  • libcfa/src/concurrency/mutex.hfa

    r41efd33 r04e6f93  
    3636
    3737        // List of blocked threads
    38         __queue_t(struct thread_desc) blocked_threads;
     38        __queue_t(struct $thread) blocked_threads;
    3939
    4040        // Locked flag
     
    5555
    5656        // List of blocked threads
    57         __queue_t(struct thread_desc) blocked_threads;
     57        __queue_t(struct $thread) blocked_threads;
    5858
    5959        // Current thread owning the lock
    60         struct thread_desc * owner;
     60        struct $thread * owner;
    6161
    6262        // Number of recursion level
     
    8383
    8484        // List of blocked threads
    85         __queue_t(struct thread_desc) blocked_threads;
     85        __queue_t(struct $thread) blocked_threads;
    8686};
    8787
  • libcfa/src/concurrency/preemption.cfa

    r41efd33 r04e6f93  
    3939// FwdDeclarations : timeout handlers
    4040static void preempt( processor   * this );
    41 static void timeout( thread_desc * this );
     41static void timeout( $thread * this );
    4242
    4343// FwdDeclarations : Signal handlers
     
    184184
    185185        // Enable interrupts by decrementing the counter
    186         // If counter reaches 0, execute any pending CtxSwitch
     186        // If counter reaches 0, execute any pending __cfactx_switch
    187187        void enable_interrupts( __cfaabi_dbg_ctx_param ) {
    188188                processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
    189                 thread_desc * thrd = kernelTLS.this_thread;       // Cache the thread now since interrupts can start happening after the atomic store
    190189
    191190                with( kernelTLS.preemption_state ){
     
    209208                                if( proc->pending_preemption ) {
    210209                                        proc->pending_preemption = false;
    211                                         BlockInternal( thrd );
     210                                        force_yield( __POLL_PREEMPTION );
    212211                                }
    213212                        }
     
    219218
    220219        // Disable interrupts by incrementing the counter
    221         // Don't execute any pending CtxSwitch even if counter reaches 0
     220        // Don't execute any pending __cfactx_switch even if counter reaches 0
    222221        void enable_interrupts_noPoll() {
    223222                unsigned short prev = kernelTLS.preemption_state.disable_count;
     
    268267
    269268// reserved for future use
    270 static void timeout( thread_desc * this ) {
     269static void timeout( $thread * this ) {
    271270        //TODO : implement waking threads
    272271}
    273272
    274273// KERNEL ONLY
    275 // Check if a CtxSwitch signal handler should defer
     274// Check if a __cfactx_switch signal handler should defer
    276275// If true  : preemption is safe
    277276// If false : preemption is unsafe and marked as pending
     
    303302
    304303        // Setup proper signal handlers
    305         __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // CtxSwitch handler
     304        __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // __cfactx_switch handler
    306305
    307306        signal_block( SIGALRM );
    308307
    309         alarm_stack = create_pthread( &alarm_thread, alarm_loop, 0p );
     308        alarm_stack = __create_pthread( &alarm_thread, alarm_loop, 0p );
    310309}
    311310
     
    394393        // Preemption can occur here
    395394
    396         BlockInternal( kernelTLS.this_thread ); // Do the actual CtxSwitch
     395        force_yield( __ALARM_PREEMPTION ); // Do the actual __cfactx_switch
    397396}
    398397
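
Editor's note: the counter discipline in this file (disable increments, enable decrements, and only the transition to zero may act on a deferred preemption) condenses to a few lines of C (hypothetical names; the real code also manages signal masks and the processor's pending flag):

	static _Thread_local unsigned disable_count      = 0;
	static _Thread_local _Bool    pending_preemption = 0;

	void disable_interrupts_sketch( void ) {
		disable_count++;                    /* nesting is allowed */
	}

	void enable_interrupts_sketch( void ) {
		if ( --disable_count == 0 && pending_preemption ) {
			pending_preemption = 0;
			/* force_yield(): take the context switch deferred while count > 0 */
		}
	}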
  • libcfa/src/concurrency/thread.cfa

    r41efd33 r04e6f93  
    2323#include "invoke.h"
    2424
    25 extern "C" {
    26         #include <fenv.h>
    27         #include <stddef.h>
    28 }
    29 
    30 //extern volatile thread_local processor * this_processor;
    31 
    3225//-----------------------------------------------------------------------------
    3326// Thread ctors and dtors
    34 void ?{}(thread_desc & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
     27void ?{}($thread & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
    3528        context{ 0p, 0p };
    3629        self_cor{ name, storage, storageSize };
    3730        state = Start;
     31        preempted = __NO_PREEMPTION;
    3832        curr_cor = &self_cor;
    3933        self_mon.owner = &this;
     
    5044}
    5145
    52 void ^?{}(thread_desc& this) with( this ) {
     46void ^?{}($thread& this) with( this ) {
    5347        unregister(curr_cluster, this);
    5448        ^self_cor{};
    5549}
    5650
     51//-----------------------------------------------------------------------------
     52// Starting and stopping threads
     53forall( dtype T | is_thread(T) )
     54void __thrd_start( T & this, void (*main_p)(T &) ) {
     55        $thread * this_thrd = get_thread(this);
     56
     57        disable_interrupts();
     58        __cfactx_start(main_p, get_coroutine(this), this, __cfactx_invoke_thread);
     59
     60        this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
     61        verify( this_thrd->context.SP );
     62
     63        __schedule_thread(this_thrd);
     64        enable_interrupts( __cfaabi_dbg_ctx );
     65}
     66
     67//-----------------------------------------------------------------------------
     68// Support for threads that don't use the thread keyword
    5769forall( dtype T | sized(T) | is_thread(T) | { void ?{}(T&); } )
    5870void ?{}( scoped(T)& this ) with( this ) {
     
    7284}
    7385
    74 //-----------------------------------------------------------------------------
    75 // Starting and stopping threads
    76 forall( dtype T | is_thread(T) )
    77 void __thrd_start( T & this, void (*main_p)(T &) ) {
    78         thread_desc * this_thrd = get_thread(this);
    79         thread_desc * curr_thrd = TL_GET( this_thread );
    80 
    81         disable_interrupts();
    82         CtxStart(main_p, get_coroutine(this), this, CtxInvokeThread);
    83 
    84         this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
    85         verify( this_thrd->context.SP );
    86         // CtxSwitch( &curr_thrd->context, &this_thrd->context );
    87 
    88         ScheduleThread(this_thrd);
    89         enable_interrupts( __cfaabi_dbg_ctx );
    90 }
    91 
    92 void yield( void ) {
    93         // Safety note : This could cause some false positives due to preemption
    94       verify( TL_GET( preemption_state.enabled ) );
    95         BlockInternal( TL_GET( this_thread ) );
    96         // Safety note : This could cause some false positives due to preemption
    97       verify( TL_GET( preemption_state.enabled ) );
    98 }
    99 
    100 void yield( unsigned times ) {
    101         for( unsigned i = 0; i < times; i++ ) {
    102                 yield();
    103         }
    104 }
    105 
    10686// Local Variables: //
    10787// mode: c //
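
Editor's note: the relocated __thrd_start shows the start protocol: build the new thread's context first (__cfactx_start), copy the coroutine's SP/FP into the thread's context, and only then hand it to the scheduler. A rough plain-C analogue using POSIX ucontext (illustrative only; CFA uses its own context layout and a ready queue rather than a direct switch):

	#include <stdio.h>
	#include <ucontext.h>

	static ucontext_t main_ctx, thrd_ctx;
	static char stack[64 * 1024];

	static void thread_main( void ) {
		printf( "child runs once it is switched to\n" );
		swapcontext( &thrd_ctx, &main_ctx );    /* yield back */
	}

	int main( void ) {
		getcontext( &thrd_ctx );                /* set up the child context first... */
		thrd_ctx.uc_stack.ss_sp   = stack;
		thrd_ctx.uc_stack.ss_size = sizeof(stack);
		thrd_ctx.uc_link = &main_ctx;
		makecontext( &thrd_ctx, thread_main, 0 );
		/* ...and only then run it; a scheduler would enqueue instead */
		swapcontext( &main_ctx, &thrd_ctx );
	}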
  • libcfa/src/concurrency/thread.hfa

    r41efd33 r04e6f93  
    2828      void ^?{}(T& mutex this);
    2929      void main(T& this);
    30       thread_desc* get_thread(T& this);
     30      $thread* get_thread(T& this);
    3131};
    3232
    33 #define DECL_THREAD(X) thread_desc* get_thread(X& this) { return &this.__thrd; } void main(X& this)
     33// macro that satisfies the trait without using the thread keyword
     34#define DECL_THREAD(X) $thread* get_thread(X& this) __attribute__((const)) { return &this.__thrd; } void main(X& this)
     35
     36// Inline getters for threads/coroutines/monitors
     37forall( dtype T | is_thread(T) )
     38static inline $coroutine* get_coroutine(T & this) __attribute__((const)) { return &get_thread(this)->self_cor; }
    3439
    3540forall( dtype T | is_thread(T) )
    36 static inline coroutine_desc* get_coroutine(T & this) {
    37         return &get_thread(this)->self_cor;
    38 }
     41static inline $monitor  * get_monitor  (T & this) __attribute__((const)) { return &get_thread(this)->self_mon; }
    3942
    40 forall( dtype T | is_thread(T) )
    41 static inline monitor_desc* get_monitor(T & this) {
    42         return &get_thread(this)->self_mon;
    43 }
     43static inline $coroutine* get_coroutine($thread * this) __attribute__((const)) { return &this->self_cor; }
     44static inline $monitor  * get_monitor  ($thread * this) __attribute__((const)) { return &this->self_mon; }
    4445
    45 static inline coroutine_desc* get_coroutine(thread_desc * this) {
    46         return &this->self_cor;
    47 }
    48 
    49 static inline monitor_desc* get_monitor(thread_desc * this) {
    50         return &this->self_mon;
    51 }
    52 
     46//-----------------------------------------------------------------------------
     47// forward declarations needed for threads
    5348extern struct cluster * mainCluster;
    5449
     
    5853//-----------------------------------------------------------------------------
    5954// Ctors and dtors
    60 void ?{}(thread_desc & this, const char * const name, struct cluster & cl, void * storage, size_t storageSize );
    61 void ^?{}(thread_desc & this);
     55void ?{}($thread & this, const char * const name, struct cluster & cl, void * storage, size_t storageSize );
     56void ^?{}($thread & this);
    6257
    63 static inline void ?{}(thread_desc & this)                                                                  { this{ "Anonymous Thread", *mainCluster, 0p, 65000 }; }
    64 static inline void ?{}(thread_desc & this, size_t stackSize )                                               { this{ "Anonymous Thread", *mainCluster, 0p, stackSize }; }
    65 static inline void ?{}(thread_desc & this, void * storage, size_t storageSize )                             { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
    66 static inline void ?{}(thread_desc & this, struct cluster & cl )                                            { this{ "Anonymous Thread", cl, 0p, 65000 }; }
    67 static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize )                          { this{ "Anonymous Thread", cl, 0p, stackSize }; }
    68 static inline void ?{}(thread_desc & this, struct cluster & cl, void * storage, size_t storageSize )        { this{ "Anonymous Thread", cl, storage, storageSize }; }
    69 static inline void ?{}(thread_desc & this, const char * const name)                                         { this{ name, *mainCluster, 0p, 65000 }; }
    70 static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl )                   { this{ name, cl, 0p, 65000 }; }
    71 static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; }
     58static inline void ?{}($thread & this)                                                                  { this{ "Anonymous Thread", *mainCluster, 0p, 65000 }; }
     59static inline void ?{}($thread & this, size_t stackSize )                                               { this{ "Anonymous Thread", *mainCluster, 0p, stackSize }; }
     60static inline void ?{}($thread & this, void * storage, size_t storageSize )                             { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
     61static inline void ?{}($thread & this, struct cluster & cl )                                            { this{ "Anonymous Thread", cl, 0p, 65000 }; }
     62static inline void ?{}($thread & this, struct cluster & cl, size_t stackSize )                          { this{ "Anonymous Thread", cl, 0p, stackSize }; }
     63static inline void ?{}($thread & this, struct cluster & cl, void * storage, size_t storageSize )        { this{ "Anonymous Thread", cl, storage, storageSize }; }
     64static inline void ?{}($thread & this, const char * const name)                                         { this{ name, *mainCluster, 0p, 65000 }; }
     65static inline void ?{}($thread & this, const char * const name, struct cluster & cl )                   { this{ name, cl, 0p, 65000 }; }
     66static inline void ?{}($thread & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; }
    7267
    7368//-----------------------------------------------------------------------------
     
    8883void ^?{}( scoped(T)& this );
    8984
    90 void yield();
    91 void yield( unsigned times );
     85//-----------------------------------------------------------------------------
     86// Thread getters
     87static inline struct $thread * active_thread () { return TL_GET( this_thread ); }
    9288
    93 static inline struct thread_desc * active_thread () { return TL_GET( this_thread ); }
     89//-----------------------------------------------------------------------------
     90// Scheduler API
     91
     92//----------
     93// Park thread: block until corresponding call to unpark, won't block if unpark is already called
     94void park( void );
     95
     96//----------
     97// Unpark a thread, if the thread is already blocked, schedule it
     98//                  if the thread is not yet blocked, signal that it should rerun immediately
     99void unpark( $thread * this );
     100
     101forall( dtype T | is_thread(T) )
     102static inline void unpark( T & this ) { if(!&this) return; unpark( get_thread( this ) );}
     103
     104//----------
     105// Yield: force thread to block and be rescheduled
     106bool force_yield( enum __Preemption_Reason );
     107
     108static inline void yield() {
     109        force_yield(__MANUAL_PREEMPTION);
     110}
     111
     112// Yield: yield N times
     113static inline void yield( unsigned times ) {
     114        for( times ) {
     115                yield();
     116        }
     117}
    94118
    95119// Local Variables: //
  • libcfa/src/exception.c

    r41efd33 r04e6f93  
    248248}
    249249
    250 #if defined(PIC)
    251 #warning Exceptions not yet supported when using Position-Independent Code
    252 __attribute__((noinline))
    253 void __cfaabi_ehm__try_terminate(void (*try_block)(),
    254                 void (*catch_block)(int index, exception_t * except),
    255                 __attribute__((unused)) int (*match_block)(exception_t * except)) {
    256         abort();
    257 }
    258 #else // PIC
     250#pragma GCC push_options
     251#pragma GCC optimize("O0")
     252
    259253// This is our personality routine. For every stack frame annotated with
    260254// ".cfi_personality 0x3,__gcfa_personality_v0" this function will be called twice when unwinding.
     
    431425
    432426        // Setup the personality routine and exception table.
     427#ifdef __PIC__
     428        asm volatile (".cfi_personality 0x9b,CFA.ref.__gcfa_personality_v0");
     429        asm volatile (".cfi_lsda 0x1b, .LLSDACFA2");
     430#else
    433431        asm volatile (".cfi_personality 0x3,__gcfa_personality_v0");
    434432        asm volatile (".cfi_lsda 0x3, .LLSDACFA2");
     433#endif
    435434
    436435        // Label which defines the start of the area for which the handler is setup.
     
    464463// have a single call to the try routine.
    465464
     465#ifdef __PIC__
     466#if defined( __i386 ) || defined( __x86_64 )
     467asm (
     468        // HEADER
     469        ".LFECFA1:\n"
     470        "       .globl  __gcfa_personality_v0\n"
     471        "       .section        .gcc_except_table,\"a\",@progbits\n"
     472        // TABLE HEADER (important field is the BODY length at the end)
     473        ".LLSDACFA2:\n"
     474        "       .byte   0xff\n"
     475        "       .byte   0xff\n"
     476        "       .byte   0x1\n"
     477        "       .uleb128 .LLSDACSECFA2-.LLSDACSBCFA2\n"
     478        // BODY (language specific data)
     479        // This uses language specific data and can be modified arbitrarily
     480        // We use handled area offset, handled area length,
     481        // handler landing pad offset and 1 (action code, gcc seems to use 0).
     482        ".LLSDACSBCFA2:\n"
     483        "       .uleb128 .TRYSTART-__cfaabi_ehm__try_terminate\n"
     484        "       .uleb128 .TRYEND-.TRYSTART\n"
     485        "       .uleb128 .CATCH-__cfaabi_ehm__try_terminate\n"
     486        "       .uleb128 1\n"
     487        ".LLSDACSECFA2:\n"
     488        // TABLE FOOTER
     489        "       .text\n"
     490        "       .size   __cfaabi_ehm__try_terminate, .-__cfaabi_ehm__try_terminate\n"
     491);
     492
     493// Somehow this piece of code helps with the resolution of debug symbols.
     494__attribute__((unused)) static const int dummy = 0;
     495
     496asm (
     497        // Add a hidden symbol which points at the function.
     498        "       .hidden CFA.ref.__gcfa_personality_v0\n"
     499        "       .weak   CFA.ref.__gcfa_personality_v0\n"
     500        // No clue what this does specifically
     501        "       .section        .data.rel.local.CFA.ref.__gcfa_personality_v0,\"awG\",@progbits,CFA.ref.__gcfa_personality_v0,comdat\n"
     502        "       .align 8\n"
     503        "       .type CFA.ref.__gcfa_personality_v0, @object\n"
     504        "       .size CFA.ref.__gcfa_personality_v0, 8\n"
     505        "CFA.ref.__gcfa_personality_v0:\n"
     506#if defined( __x86_64 )
     507        "       .quad __gcfa_personality_v0\n"
     508#else // then __i386
     509        "   .long __gcfa_personality_v0\n"
     510#endif
     511);
     512#else
     513#error Exception Handling: unknown architecture for position independent code.
     514#endif // __i386 || __x86_64
     515#else // __PIC__
    466516#if defined( __i386 ) || defined( __x86_64 )
    467517asm (
     
    491541        "       .size   __cfaabi_ehm__try_terminate, .-__cfaabi_ehm__try_terminate\n"
    492542        "       .ident  \"GCC: (Ubuntu 6.2.0-3ubuntu11~16.04) 6.2.0 20160901\"\n"
    493 //      "       .section        .note.GNU-stack,\"x\",@progbits\n"
     543        "       .section        .note.GNU-stack,\"x\",@progbits\n"
    494544);
     545#else
     546#error Exception Handling: unknown architecture for position dependent code.
    495547#endif // __i386 || __x86_64
    496 #endif // PIC
     548#endif // __PIC__
     549
     550#pragma GCC pop_options
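
Editor's note: the two .cfi_personality/.cfi_lsda variants differ only in their DWARF pointer encodings, which is also why the PIC build needs the CFA.ref.__gcfa_personality_v0 indirection slot. For reference, the encoding bits (values as in GCC's unwind-pe.h):

	#define DW_EH_PE_udata4   0x03  /* absolute 4-byte value: the non-PIC case */
	#define DW_EH_PE_sdata4   0x0b  /* signed 4-byte offset */
	#define DW_EH_PE_pcrel    0x10  /* PC-relative */
	#define DW_EH_PE_indirect 0x80  /* read the pointer through a data slot */

	/* PIC personality: 0x9b == pcrel | sdata4 | indirect, hence the CFA.ref slot
	   PIC LSDA:        0x1b == pcrel | sdata4
	   non-PIC:         0x03 == udata4, a plain absolute address */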
  • libcfa/src/fstream.hfa

    r41efd33 r04e6f93  
    1010// Created On       : Wed May 27 17:56:53 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Feb  7 19:00:51 2020
    13 // Update Count     : 174
     12// Last Modified On : Mon Feb 17 08:29:23 2020
     13// Update Count     : 175
    1414//
    1515
     
    6767void close( ofstream & );
    6868ofstream & write( ofstream &, const char data[], size_t size );
    69 int fmt( ofstream &, const char format[], ... );
     69int fmt( ofstream &, const char format[], ... ) __attribute__(( format(printf, 2, 3) ));
    7070
    7171void ?{}( ofstream & os );
     
    9797ifstream & read( ifstream & is, char * data, size_t size );
    9898ifstream & ungetc( ifstream & is, char c );
    99 int fmt( ifstream &, const char format[], ... );
     99int fmt( ifstream &, const char format[], ... ) __attribute__(( format(scanf, 2, 3) ));
    100100
    101101void ?{}( ifstream & is );
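
Editor's note: tagging fmt with __attribute__(( format(printf, 2, 3) )) (and the scanf analogue for input) makes gcc type-check the variadic arguments against the format string: argument 2 is the format, checking starts at argument 3. A minimal C illustration with a hypothetical logger:

	#include <stdio.h>
	#include <stdarg.h>

	__attribute__(( format(printf, 1, 2) )) /* format is arg 1, varargs start at 2 */
	static void logf( const char fmt[], ... ) {
		va_list ap;
		va_start( ap, fmt );
		vfprintf( stderr, fmt, ap );
		va_end( ap );
	}

	int main( void ) {
		logf( "%d threads\n", 42 );          /* ok */
		/* logf( "%d threads\n", "x" ); now draws -Wformat at compile time */
	}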
  • libcfa/src/interpose.cfa

    r41efd33 r04e6f93  
    1010// Created On       : Wed Mar 29 16:10:31 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sat Feb  8 08:40:34 2020
    13 // Update Count     : 163
     12// Last Modified On : Mon Feb 17 10:18:53 2020
     13// Update Count     : 166
    1414//
    1515
     
    237237        if ( fmt[strlen( fmt ) - 1] != '\n' ) {                         // add optional newline if missing at the end of the format text
    238238                __cfaabi_dbg_write( "\n", 1 );
    239         }
    240 
     239        } // if
    241240        kernel_abort_msg( kernel_data, abort_text, abort_text_size );
    242         __cfaabi_backtrace( signalAbort ? 4 : 3 );
     241
     242        __cfaabi_backtrace( signalAbort ? 4 : 2 );
    243243
    244244        __cabi_libc.abort();                                                            // print stack trace in handler
  • libcfa/src/iostream.cfa

    r41efd33 r04e6f93  
    1010// Created On       : Wed May 27 17:56:53 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Feb  7 18:48:38 2020
    13 // Update Count     : 825
     12// Last Modified On : Thu Feb 20 15:53:23 2020
     13// Update Count     : 829
    1414//
    1515
     
    1919#include <stdio.h>
    2020#include <stdbool.h>                                                                    // true/false
     21#include <stdint.h>                                                                             // UINT64_MAX
    2122//#include <string.h>                                                                   // strlen, strcmp
    2223extern size_t strlen (const char *__s) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1)));
     
    159160                (ostype &)(os | ulli); ends( os );
    160161        } // ?|?
     162
     163#if defined( __SIZEOF_INT128__ )
     164        //      UINT64_MAX 18_446_744_073_709_551_615_ULL
     165        #define P10_UINT64 10_000_000_000_000_000_000_ULL       // 19 zeroes
     166
     167        static void base10_128( ostype & os, unsigned int128 val ) {
     168                if ( val > UINT64_MAX ) {
     169                        base10_128( os, val / P10_UINT64 );                     // recursive
     170                        fmt( os, "%.19lu", (uint64_t)(val % P10_UINT64) );
     171                } else {
     172                        fmt( os, "%lu", (uint64_t)val );
     173                } // if
     174        } // base10_128
     175
     176        static void base10_128( ostype & os, int128 val ) {
     177                if ( val < 0 ) {
     178                        fmt( os, "-" );                                                         // leading negative sign
     179                        val = -val;
     180                } // if
     181                base10_128( os, (unsigned int128)val );                 // print zero/positive value
     182        } // base10_128
     183
     184        ostype & ?|?( ostype & os, int128 llli ) {
     185                if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) );
     186                base10_128( os, llli );
     187                return os;
     188        } // ?|?
     189        void ?|?( ostype & os, int128 llli ) {
     190                (ostype &)(os | llli); ends( os );
     191        } // ?|?
     192
     193        ostype & ?|?( ostype & os, unsigned int128 ullli ) {
     194                if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) );
     195                base10_128( os, ullli );
     196                return os;
     197        } // ?|?
     198        void ?|?( ostype & os, unsigned int128 ullli ) {
     199                (ostype &)(os | ullli); ends( os );
     200        } // ?|?
     201#endif // __SIZEOF_INT128__
    161202
    162203        #define PrintWithDP( os, format, val, ... ) \
     
    464505\
    465506                if ( ! f.flags.pc ) {                                                   /* no precision */ \
    466                         /* printf( "%s\n", &fmtstr[star] ); */ \
    467507                        fmtstr[sizeof(IFMTNP)-2] = f.base;                      /* sizeof includes '\0' */ \
     508                        /* printf( "%s %c %c\n", &fmtstr[star], f.base, CODE ); */ \
    468509                        fmt( os, &fmtstr[star], f.wd, f.val ); \
    469510                } else {                                                                                /* precision */ \
    470511                        fmtstr[sizeof(IFMTP)-2] = f.base;                       /* sizeof includes '\0' */ \
    471                         /* printf( "%s\n", &fmtstr[star] ); */ \
     512                        /* printf( "%s %c %c\n", &fmtstr[star], f.base, CODE ); */ \
    472513                        fmt( os, &fmtstr[star], f.wd, f.pc, f.val ); \
    473514                } /* if */ \
     
    487528IntegralFMTImpl( signed long long int, 'd', "%    *ll ", "%    *.*ll " )
    488529IntegralFMTImpl( unsigned long long int, 'u', "%    *ll ", "%    *.*ll " )
     530
     531
     532#if defined( __SIZEOF_INT128__ )
     533// Default prefix for non-decimal prints is 0b, 0, 0x.
     534#define IntegralFMTImpl128( T, SIGNED, CODE, IFMTNP, IFMTP ) \
     535forall( dtype ostype | ostream( ostype ) ) \
     536static void base10_128( ostype & os, _Ostream_Manip(T) fmt ) { \
     537        if ( fmt.val > UINT64_MAX ) { \
     538                fmt.val /= P10_UINT64; \
     539                base10_128( os, fmt ); /* recursive */ \
     540                _Ostream_Manip(unsigned long long int) fmt2 @= { (uint64_t)(fmt.val % P10_UINT64), 0, 19, 'u', { .all : 0 } }; \
     541                fmt2.flags.nobsdp = true; \
     542                printf( "fmt2 %c %lld %d\n", fmt2.base, fmt2.val, fmt2.all );   \
     543                sepOff( os ); \
     544                (ostype &)(os | fmt2); \
     545        } else { \
     546                printf( "fmt %c %lld %d\n", fmt.base, fmt.val, fmt.all ); \
     547                (ostype &)(os | fmt); \
     548        } /* if */ \
     549} /* base10_128 */                                                 \
     550forall( dtype ostype | ostream( ostype ) ) { \
     551        ostype & ?|?( ostype & os, _Ostream_Manip(T) f ) { \
     552                if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) ); \
     553\
     554                if ( f.base == 'b' | f.base == 'o' | f.base == 'x' | f.base == 'X' ) { \
     555                        unsigned long long int msig = (unsigned long long int)(f.val >> 64); \
     556                        unsigned long long int lsig = (unsigned long long int)(f.val); \
     557                        _Ostream_Manip(SIGNED long long int) fmt @= { msig, f.wd, f.pc, f.base, { .all : f.all } }; \
     558                        _Ostream_Manip(unsigned long long int) fmt2 @= { lsig, 0, 0, f.base, { .all : 0 } }; \
     559                        if ( msig == 0 ) { \
     560                                fmt.val = lsig; \
     561                                (ostype &)(os | fmt); \
     562                        } else { \
     563                                fmt2.flags.pad0 = fmt2.flags.nobsdp = true;     \
     564                                if ( f.base == 'b' ) { \
     565                                        if ( f.wd > 64 ) fmt.wd = f.wd - 64; \
     566                                        fmt2.wd = 64; \
     567                                        (ostype &)(os | fmt | "" | fmt2); \
     568                                } else if ( f.base == 'o' ) { \
     569                                        fmt.val = (unsigned long long int)fmt.val >> 2; \
     570                                        if ( f.wd > 21 ) fmt.wd = f.wd - 21; \
     571                                        fmt2.wd = 1; \
     572                                        fmt2.val = ((msig & 0x3) << 1) + 1; \
     573                                        (ostype &)(os | fmt | "" | fmt2); \
     574                                        sepOff( os ); \
     575                                        fmt2.wd = 21; \
     576                                        fmt2.val = lsig & 0x7fffffffffffffff; \
     577                                        (ostype &)(os | fmt2); \
     578                                } else { \
     579                                        if ( f.flags.left ) { \
     580                                                if ( f.wd > 16 ) fmt2.wd = f.wd - 16;   \
     581                                                fmt.wd = 16;                                                    \
     582                                        } else { \
     583                                                if ( f.wd > 16 ) fmt.wd = f.wd - 16;    \
     584                                                fmt2.wd = 16;                                                   \
     585                                        } /* if */ \
     586                                        (ostype &)(os | fmt | "" | fmt2); \
     587                                } /* if */ \
     588                        } /* if */ \
     589                } else { \
     590                        base10_128( os, f ); \
     591                } /* if */ \
     592                return os; \
     593        } /* ?|? */ \
     594        void ?|?( ostype & os, _Ostream_Manip(T) f ) { (ostype &)(os | f); ends( os ); } \
     595} // distribution
     596
     597IntegralFMTImpl128( int128, signed, 'd', "%    *ll ", "%    *.*ll " )
     598IntegralFMTImpl128( unsigned int128, unsigned, 'u', "%    *ll ", "%    *.*ll " )
     599#endif // __SIZEOF_INT128__
    489600
    490601//*********************************** floating point ***********************************
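
Editor's note: the base10_128 helpers print a 128-bit value as 64-bit chunks: divide by 10^19 (the largest power of ten that fits in 64 bits), print the high-order part recursively, then print the remainder zero-padded to exactly 19 digits so interior chunks keep their leading zeros. The same logic in plain C (assumes a compiler with __int128):

	#include <stdio.h>
	#include <stdint.h>
	#include <inttypes.h>

	#define P10_UINT64 10000000000000000000ULL      /* 10^19 */

	static void print_u128( unsigned __int128 val ) {
		if ( val > UINT64_MAX ) {
			print_u128( val / P10_UINT64 );         /* high-order digits first */
			printf( "%.19" PRIu64, (uint64_t)(val % P10_UINT64) ); /* pad to 19 */
		} else {
			printf( "%" PRIu64, (uint64_t)val );    /* leading chunk, no padding */
		}
	}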
  • libcfa/src/iostream.hfa

    r41efd33 r04e6f93  
    1010// Created On       : Wed May 27 17:56:53 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Feb  7 17:53:52 2020
    13 // Update Count     : 336
     12// Last Modified On : Thu Feb 20 15:30:56 2020
     13// Update Count     : 337
    1414//
    1515
     
    9898        ostype & ?|?( ostype &, unsigned long long int );
    9999        void ?|?( ostype &, unsigned long long int );
     100#if defined( __SIZEOF_INT128__ )
     101        ostype & ?|?( ostype &, int128 );
     102        void ?|?( ostype &, int128 );
     103        ostype & ?|?( ostype &, unsigned int128 );
     104        void ?|?( ostype &, unsigned int128 );
     105#endif // __SIZEOF_INT128__
    100106
    101107        ostype & ?|?( ostype &, float );
     
    206212IntegralFMTDecl( signed long long int, 'd' )
    207213IntegralFMTDecl( unsigned long long int, 'u' )
     214#if defined( __SIZEOF_INT128__ )
     215IntegralFMTDecl( int128, 'd' )
     216IntegralFMTDecl( unsigned int128, 'u' )
     217#endif
    208218
    209219//*********************************** floating point ***********************************