Changeset c7a900a


Timestamp: Feb 21, 2020, 5:31:19 PM
Author: Thierry Delisle <tdelisle@…>
Branches: arm-eh, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr
Children: a8078ee
Parents: a505021
Message: More renames and clean-ups
Location: libcfa/src/concurrency
Files: 13 edited

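The substance of the changeset is a systematic move of the context-switch entry points into the reserved __cfactx_* namespace, with matching renames of the coroutine and thread teardown hooks. For orientation before the per-file diffs, here is the before/after mapping of the C declarations; the signatures are copied from the coroutine.hfa, invoke.c, and invoke.h hunks below:

    // Old names (revision a505021)
    extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
    extern void CtxInvokeStub( void );
    void CtxInvokeCoroutine( void (*main)(void *), void * this );
    void CtxInvokeThread( void (*main)(void *), void * this );
    extern void __leave_coroutine( struct coroutine_desc * );
    extern struct coroutine_desc * __finish_coroutine( void );
    extern void __leave_thread_monitor();

    // New names (revision c7a900a)
    extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch");
    extern void __cfactx_invoke_stub( void );
    void __cfactx_invoke_coroutine( void (*main)(void *), void * this );
    void __cfactx_invoke_thread( void (*main)(void *), void * this );
    extern void __cfactx_cor_leave( struct coroutine_desc * );
    extern struct coroutine_desc * __cfactx_cor_finish( void );
    extern void __cfactx_thrd_leave();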
  • libcfa/src/concurrency/CtxSwitch-arm.S

    a505021 → c7a900a

             .text
             .align  2
    -        .global CtxSwitch
    -        .type   CtxSwitch, %function
    +        .global __cfactx_switch
    +        .type   __cfactx_switch, %function

    -CtxSwitch:
    +__cfactx_switch:
             @ save callee-saved registers: r4-r8, r10, r11, r13(sp) (plus r9 depending on platform specification)
             @ I've seen reference to 31 registers on 64-bit, if this is the case, more need to be saved
    …
             mov r15, r14
             #endif // R9_SPECIAL
    -        
    +

             .text
             .align  2
    -        .global CtxInvokeStub
    -        .type   CtxInvokeStub, %function
    +        .global __cfactx_invoke_stub
    +        .type   __cfactx_invoke_stub, %function

    -CtxInvokeStub:
    +__cfactx_invoke_stub:
             ldmfd r13!, {r0-r1}
             mov r15, r1
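The assembly routine is referenced from C through an explicit asm label, so the .global name in the .S files and the C-side declaration must be renamed in lockstep. A minimal sketch of the pairing; the declaration is taken verbatim from the coroutine.hfa hunk below:

    // C side: the asm label pins the exact linker symbol, so it must match
    // the .global name declared in the assembly file.
    extern void __cfactx_switch( struct __stack_context_t * from,
                                 struct __stack_context_t * to ) asm ("__cfactx_switch");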
  • libcfa/src/concurrency/CtxSwitch-i386.S

    a505021 → c7a900a

             .text
             .align 2
    -        .globl CtxSwitch
    -        .type  CtxSwitch, @function
    -CtxSwitch:
    +        .globl __cfactx_switch
    +        .type  __cfactx_switch, @function
    +__cfactx_switch:

             // Copy the "from" context argument from the stack to register eax
    …

             ret
    -        .size  CtxSwitch, .-CtxSwitch
    +        .size  __cfactx_switch, .-__cfactx_switch

     // Local Variables: //
  • libcfa/src/concurrency/CtxSwitch-x86_64.S

    a505021 → c7a900a

             .text
             .align 2
    -        .globl CtxSwitch
    -        .type  CtxSwitch, @function
    -CtxSwitch:
    +        .globl __cfactx_switch
    +        .type  __cfactx_switch, @function
    +__cfactx_switch:

             // Save volatile registers on the stack.
    …

             ret
    -        .size  CtxSwitch, .-CtxSwitch
    +        .size  __cfactx_switch, .-__cfactx_switch

     //-----------------------------------------------------------------------------
    …
             .text
             .align 2
    -        .globl CtxInvokeStub
    -        .type    CtxInvokeStub, @function
    -CtxInvokeStub:
    +        .globl __cfactx_invoke_stub
    +        .type    __cfactx_invoke_stub, @function
    +__cfactx_invoke_stub:
             movq %rbx, %rdi
             movq %r12, %rsi
             jmp *%r13
    -        .size  CtxInvokeStub, .-CtxInvokeStub
    +        .size  __cfactx_invoke_stub, .-__cfactx_invoke_stub

     // Local Variables: //
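For reference, the three-instruction stub bridges from the saved-register state that __cfactx_start builds on the fake stack to a normal C call. Below is a sketch of the apparent hand-off on x86-64; note that the mapping of FakeStack fields onto %rbx/%r12/%r13 is inferred from this excerpt and the invoke.c hunk below, not stated explicitly in the diff:

    // Hypothetical annotation of the first switch into a new context:
    //
    //   fs->rturn             = __cfactx_invoke_stub; // the switch "returns" into the stub
    //   fs->fixedRegisters[0] = main;                 // assumed to be restored into %rbx
    //   fs->fixedRegisters[1] = this;                 // assumed to be restored into %r12
    //
    // so the stub effectively performs invoke(main, this):
    //   movq %rbx, %rdi   // 1st C argument <- main
    //   movq %r12, %rsi   // 2nd C argument <- this
    //   jmp  *%r13        // tail-jump to the invoke routine (assumed loaded elsewhere)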
  • libcfa/src/concurrency/coroutine.cfa

    a505021 → c7a900a

     // is not inline (We can't inline Cforall in C)
     extern "C" {
    -        void __leave_coroutine( struct coroutine_desc * src ) {
    +        void __cfactx_cor_leave( struct coroutine_desc * src ) {
                     coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter;

    …
             }

    -        struct coroutine_desc * __finish_coroutine(void) {
    +        struct coroutine_desc * __cfactx_cor_finish(void) {
                     struct coroutine_desc * cor = kernelTLS.this_thread->curr_cor;

  • libcfa/src/concurrency/coroutine.hfa

    a505021 → c7a900a

     // Start coroutine routines
     extern "C" {
    -        void CtxInvokeCoroutine(void (*main)(void *), void * this);
    +        void __cfactx_invoke_coroutine(void (*main)(void *), void * this);

             forall(dtype T)
    -        void CtxStart(void (*main)(T &), struct coroutine_desc * cor, T & this, void (*invoke)(void (*main)(void *), void *));
    +        void __cfactx_start(void (*main)(T &), struct coroutine_desc * cor, T & this, void (*invoke)(void (*main)(void *), void *));

    -        extern void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
    +        extern void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));

    -        extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
    +        extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch");
     }

    …
             // context switch to specified coroutine
             verify( dst->context.SP );
    -        CtxSwitch( &src->context, &dst->context );
    -        // when CtxSwitch returns we are back in the src coroutine
    +        __cfactx_switch( &src->context, &dst->context );
    +        // when __cfactx_switch returns we are back in the src coroutine

             // set state of new coroutine to active
    …

             if( unlikely(src->cancellation != 0p) ) {
    -                _CtxCoroutine_Unwind(src->cancellation, src);
    +                __cfactx_coroutine_unwind(src->cancellation, src);
             }
     }
    …
                     TL_GET( this_thread )->curr_cor = dst;
                     __stack_prepare(&dst->stack, 65000);
    -                CtxStart(main, dst, cor, CtxInvokeCoroutine);
    +                __cfactx_start(main, dst, cor, __cfactx_invoke_coroutine);
                     TL_GET( this_thread )->curr_cor = src;
             }
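The resume path in this header is the canonical call site of the renamed routine; condensing the hunk above, the contract at the call site is:

    verify( dst->context.SP );                        // destination stack must already be prepared
    __cfactx_switch( &src->context, &dst->context );  // save src's context, restore dst's, jump
    // control returns here only when another coroutine switches back to src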
  • libcfa/src/concurrency/invoke.c

    a505021 → c7a900a

     // Called from the kernel when starting a coroutine or task so must switch back to user mode.

    -extern void __leave_coroutine ( struct coroutine_desc * );
    -extern struct coroutine_desc * __finish_coroutine(void);
    -extern void __leave_thread_monitor();
    +extern struct coroutine_desc * __cfactx_cor_finish(void);
    +extern void __cfactx_cor_leave ( struct coroutine_desc * );
    +extern void __cfactx_thrd_leave();
    +
     extern void disable_interrupts() OPTIONAL_THREAD;
     extern void enable_interrupts( __cfaabi_dbg_ctx_param );

    -void CtxInvokeCoroutine(
    +void __cfactx_invoke_coroutine(
             void (*main)(void *),
             void *this
     ) {
             // Finish setting up the coroutine by setting its state
    -        struct coroutine_desc * cor = __finish_coroutine();
    +        struct coroutine_desc * cor = __cfactx_cor_finish();

             // Call the main of the coroutine
    …

             //Final suspend, should never return
    -        __leave_coroutine( cor );
    +        __cfactx_cor_leave( cor );
             __cabi_abort( "Resumed dead coroutine" );
     }

    -static _Unwind_Reason_Code _CtxCoroutine_UnwindStop(
    +static _Unwind_Reason_Code __cfactx_coroutine_unwindstop(
             __attribute((__unused__)) int version,
             _Unwind_Action actions,
    …
                     // We finished unwinding the coroutine,
                     // leave it
    -                __leave_coroutine( param );
    +                __cfactx_cor_leave( param );
                     __cabi_abort( "Resumed dead coroutine" );
             }
    …
     }

    -void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) __attribute__ ((__noreturn__));
    -void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) {
    -        _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, _CtxCoroutine_UnwindStop, cor );
    +void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) __attribute__ ((__noreturn__));
    +void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine_desc * cor) {
    +        _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor );
             printf("UNWIND ERROR %d after force unwind\n", ret);
             abort();
     }

    -void CtxInvokeThread(
    +void __cfactx_invoke_thread(
             void (*main)(void *),
             void *this
    …
             // The order of these 4 operations is very important
             //Final suspend, should never return
    -        __leave_thread_monitor();
    +        __cfactx_thrd_leave();
             __cabi_abort( "Resumed dead thread" );
     }

    -void CtxStart(
    +void __cfactx_start(
             void (*main)(void *),
             struct coroutine_desc * cor,
    …

             fs->dummyReturn = NULL;
    -        fs->rturn = CtxInvokeStub;
    +        fs->rturn = __cfactx_invoke_stub;
             fs->fixedRegisters[0] = main;
             fs->fixedRegisters[1] = this;
    …
             struct FakeStack *fs = (struct FakeStack *)cor->context.SP;

    -        fs->intRegs[8] = CtxInvokeStub;
    +        fs->intRegs[8] = __cfactx_invoke_stub;
             fs->arg[0] = this;
             fs->arg[1] = invoke;
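The cancellation path uses libunwind-style forced unwinding. Below is a stand-alone sketch of the same pattern, with hypothetical stop_fn and force_unwind names; the real stop function additionally inspects the coroutine descriptor passed through the last parameter:

    #include <unwind.h>
    #include <stdio.h>
    #include <stdlib.h>

    // Called by the unwinder once per frame during a forced unwind.
    static _Unwind_Reason_Code stop_fn(
                    int version, _Unwind_Action actions,
                    _Unwind_Exception_Class exc_class,
                    struct _Unwind_Exception * exc,
                    struct _Unwind_Context * context,
                    void * param ) {
            if( actions & _UA_END_OF_STACK ) {
                    // No frames left: hand control back to the runtime instead of returning.
                    exit( 0 );
            }
            return _URC_NO_REASON; // keep unwinding, one frame at a time
    }

    void force_unwind( struct _Unwind_Exception * storage, void * param ) {
            _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, stop_fn, param );
            // _Unwind_ForcedUnwind only returns on failure.
            printf( "UNWIND ERROR %d after force unwind\n", ret );
            abort();
    }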
  • libcfa/src/concurrency/invoke.h

    a505021 → c7a900a


             struct coroutine_desc {
    -                // context that is switch during a CtxSwitch
    +                // context that is switch during a __cfactx_switch
                     struct __stack_context_t context;

    …
             struct thread_desc {
                     // Core threading fields
    -                // context that is switch during a CtxSwitch
    +                // context that is switch during a __cfactx_switch
                     struct __stack_context_t context;

    …
                     }

    -                static inline [thread_desc *&, thread_desc *& ] __get( thread_desc & this ) /*__attribute__((const))*/ {
    +                static inline [thread_desc *&, thread_desc *& ] __get( thread_desc & this ) __attribute__((const)) {
                             return this.node.[next, prev];
                     }
    …

             // assembler routines that performs the context switch
    -        extern void CtxInvokeStub( void );
    -        extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
    +        extern void __cfactx_invoke_stub( void );
    +        extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch");
             // void CtxStore ( void * this ) asm ("CtxStore");
             // void CtxRet   ( void * dst  ) asm ("CtxRet");
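Besides the renames, this hunk enables the previously commented-out __attribute__((const)) on the __get accessor. As a reminder of what the attribute promises, a minimal C illustration with a hypothetical function:

    // __attribute__((const)) tells the compiler the result depends only on the
    // arguments and that no global state is read or written, so repeated calls
    // with the same argument may be folded into a single call.
    __attribute__((const)) static int square( int x ) {
            return x * x;
    }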
  • libcfa/src/concurrency/kernel.cfa

    a505021 → c7a900a

     }

    -static void * CtxInvokeProcessor(void * arg);
    +static void * __invoke_processor(void * arg);

     void ?{}(processor & this, const char name[], cluster & cltr) with( this ) {
    …
             __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this);

    -        this.stack = __create_pthread( &this.kernel_thread, CtxInvokeProcessor, (void *)&this );
    +        this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );

             __cfaabi_dbg_print_safe("Kernel : core %p started\n", &this);
    …
                     // set context switch to the thread that the processor is executing
                     verify( thrd_dst->context.SP );
    -                CtxSwitch( &proc_cor->context, &thrd_dst->context );
    -                // when CtxSwitch returns we are back in the processor coroutine
    +                __cfactx_switch( &proc_cor->context, &thrd_dst->context );
    +                // when __cfactx_switch returns we are back in the processor coroutine

                     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    …
                     #endif
                     verify( proc_cor->context.SP );
    -                CtxSwitch( &thrd_src->context, &proc_cor->context );
    +                __cfactx_switch( &thrd_src->context, &proc_cor->context );
                     #if defined( __i386 ) || defined( __x86_64 )
                             __x87_load;
    …
     // This is the entry point for processors (kernel threads)
     // It effectively constructs a coroutine by stealing the pthread stack
    -static void * CtxInvokeProcessor(void * arg) {
    +static void * __invoke_processor(void * arg) {
             processor * proc = (processor *) arg;
             kernelTLS.this_processor = proc;
    …
             kernelTLS.this_thread->curr_cor = dst;
             __stack_prepare( &dst->stack, 65000 );
    -        CtxStart(main, dst, this->runner, CtxInvokeCoroutine);
    +        __cfactx_start(main, dst, this->runner, __cfactx_invoke_coroutine);

             verify( ! kernelTLS.preemption_state.enabled );
    …
             // context switch to specified coroutine
             verify( dst->context.SP );
    -        CtxSwitch( &src->context, &dst->context );
    -        // when CtxSwitch returns we are back in the src coroutine
    +        __cfactx_switch( &src->context, &dst->context );
    +        // when __cfactx_switch returns we are back in the src coroutine

             mainThread->curr_cor = &mainThread->self_cor;
    …

             // context switch to the processor
    -        CtxSwitch( &src->context, &dst->context );
    +        __cfactx_switch( &src->context, &dst->context );
     }

    …

             // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
    -        // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
    +        // context. Hence, the main thread does not begin through __cfactx_invoke_thread, like all other threads. The trick here is that
             // mainThread is on the ready queue when this call is made.
             __kernel_first_resume( kernelTLS.this_processor );
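Note that CtxInvokeProcessor becomes __invoke_processor, a plain static helper, rather than a __cfactx_* name: it is a pthread entry point, not one of the context-switch hooks. A stand-alone sketch of the trampoline pattern it follows, with hypothetical stand-in types and names; in the real runtime __create_pthread wraps pthread_create:

    #include <pthread.h>

    typedef struct { int id; } processor_t;   // stand-in for the real processor type

    // pthread entry point: recover the typed argument, then run the main loop.
    static void * invoke_processor( void * arg ) {
            processor_t * proc = (processor_t *) arg;
            // ... set up thread-local kernel state, then run the scheduling loop ...
            (void) proc;
            return 0;
    }

    static int start_processor( pthread_t * tid, processor_t * proc ) {
            return pthread_create( tid, 0, invoke_processor, proc );
    }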
  • libcfa/src/concurrency/kernel.hfa

    a505021 → c7a900a

     static inline void  ?{}(processor & this, const char name[]) { this{name, *mainCluster }; }

    -static inline [processor *&, processor *& ] __get( processor & this ) /*__attribute__((const))*/ { return this.node.[next, prev]; }
    +static inline [processor *&, processor *& ] __get( processor & this ) __attribute__((const)) { return this.node.[next, prev]; }

     //-----------------------------------------------------------------------------
    …
     static inline void ?{} (cluster & this, const char name[])        { this{name, default_preemption()}; }

    -static inline [cluster *&, cluster *& ] __get( cluster & this ) /*__attribute__((const))*/ { return this.node.[next, prev]; }
    +static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }

     static inline struct processor * active_processor() __attribute__((const)) { return TL_GET( this_processor ); } // UNSAFE
  • libcfa/src/concurrency/kernel_private.hfa

    a505021 → c7a900a

     // Threads
     extern "C" {
    -      void CtxInvokeThread(void (*main)(void *), void * this);
    +      void __cfactx_invoke_thread(void (*main)(void *), void * this);
     }

  • libcfa/src/concurrency/monitor.cfa

    a505021 → c7a900a

     //-----------------------------------------------------------------------------
     // Forward declarations
    -static inline void set_owner ( monitor_desc * this, thread_desc * owner );
    -static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
    +static inline void __set_owner ( monitor_desc * this, thread_desc * owner );
    +static inline void __set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
     static inline void set_mask  ( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
     static inline void reset_mask( monitor_desc * this );
    …
     //-----------------------------------------------------------------------------
     // Enter/Leave routines
    -
    -
    -extern "C" {
    -        // Enter single monitor
    -        static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
    -                // Lock the monitor spinlock
    -                lock( this->lock __cfaabi_dbg_ctx2 );
    -                // Interrupts disable inside critical section
    -                thread_desc * thrd = kernelTLS.this_thread;
    -
    -                __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
    -
    -                if( !this->owner ) {
    -                        // No one has the monitor, just take it
    -                        set_owner( this, thrd );
    -
    -                        __cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
    -                }
    -                else if( this->owner == thrd) {
    -                        // We already have the monitor, just note how many times we took it
    -                        this->recursion += 1;
    -
    -                        __cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
    -                }
    -                else if( is_accepted( this, group) ) {
    -                        // Some one was waiting for us, enter
    -                        set_owner( this, thrd );
    -
    -                        // Reset mask
    -                        reset_mask( this );
    -
    -                        __cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
    -                }
    -                else {
    -                        __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
    -
    -                        // Some one else has the monitor, wait in line for it
    -                        /* paranoid */ verify( thrd->next == 0p );
    -                        append( this->entry_queue, thrd );
    -                        /* paranoid */ verify( thrd->next == 1p );
    -
    -                        unlock( this->lock );
    -                        park();
    -
    -                        __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
    -
    -                        /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    -                        return;
    -                }
    +// Enter single monitor
    +static void __enter( monitor_desc * this, const __monitor_group_t & group ) {
    +        // Lock the monitor spinlock
    +        lock( this->lock __cfaabi_dbg_ctx2 );
    +        // Interrupts disable inside critical section
    +        thread_desc * thrd = kernelTLS.this_thread;
    +
    +        __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
    +
    +        if( !this->owner ) {
    +                // No one has the monitor, just take it
    +                __set_owner( this, thrd );
    +
    +                __cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
    +        }
    +        else if( this->owner == thrd) {
    +                // We already have the monitor, just note how many times we took it
    +                this->recursion += 1;
    +
    +                __cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
    +        }
    +        else if( is_accepted( this, group) ) {
    +                // Some one was waiting for us, enter
    +                __set_owner( this, thrd );
    +
    +                // Reset mask
    +                reset_mask( this );
    +
    +                __cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
    +        }
    +        else {
    +                __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
    +
    +                // Some one else has the monitor, wait in line for it
    +                /* paranoid */ verify( thrd->next == 0p );
    +                append( this->entry_queue, thrd );
    +                /* paranoid */ verify( thrd->next == 1p );
    +
    +                unlock( this->lock );
    +                park();

                     __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);

                     /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    -                /* paranoid */ verify( this->lock.lock );
    -
    -                // Release the lock and leave
    +                return;
    +        }
    +
    +        __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
    +
    +        /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    +        /* paranoid */ verify( this->lock.lock );
    +
    +        // Release the lock and leave
    +        unlock( this->lock );
    +        return;
    +}
    +
    +static void __dtor_enter( monitor_desc * this, fptr_t func ) {
    +        // Lock the monitor spinlock
    +        lock( this->lock __cfaabi_dbg_ctx2 );
    +        // Interrupts disable inside critical section
    +        thread_desc * thrd = kernelTLS.this_thread;
    +
    +        __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
    +
    +
    +        if( !this->owner ) {
    +                __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
    +
    +                // No one has the monitor, just take it
    +                __set_owner( this, thrd );
    +
    +                verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    +
                     unlock( this->lock );
                     return;
             }
    -
    -        static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) {
    -                // Lock the monitor spinlock
    -                lock( this->lock __cfaabi_dbg_ctx2 );
    -                // Interrupts disable inside critical section
    -                thread_desc * thrd = kernelTLS.this_thread;
    -
    -                __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
    -
    -
    -                if( !this->owner ) {
    -                        __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
    -
    -                        // No one has the monitor, just take it
    -                        set_owner( this, thrd );
    -
    -                        verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    -
    -                        unlock( this->lock );
    -                        return;
    +        else if( this->owner == thrd) {
    +                // We already have the monitor... but where about to destroy it so the nesting will fail
    +                // Abort!
    +                abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd );
    +        }
    +
    +        __lock_size_t count = 1;
    +        monitor_desc ** monitors = &this;
    +        __monitor_group_t group = { &this, 1, func };
    +        if( is_accepted( this, group) ) {
    +                __cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
    +
    +                // Wake the thread that is waiting for this
    +                __condition_criterion_t * urgent = pop( this->signal_stack );
    +                /* paranoid */ verify( urgent );
    +
    +                // Reset mask
    +                reset_mask( this );
    +
    +                // Create the node specific to this wait operation
    +                wait_ctx_primed( thrd, 0 )
    +
    +                // Some one else has the monitor, wait for him to finish and then run
    +                unlock( this->lock );
    +
    +                // Release the next thread
    +                /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    +                unpark( urgent->owner->waiting_thread );
    +
    +                // Park current thread waiting
    +                park();
    +
    +                // Some one was waiting for us, enter
    +                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    +        }
    +        else {
    +                __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
    +
    +                wait_ctx( thrd, 0 )
    +                this->dtor_node = &waiter;
    +
    +                // Some one else has the monitor, wait in line for it
    +                /* paranoid */ verify( thrd->next == 0p );
    +                append( this->entry_queue, thrd );
    +                /* paranoid */ verify( thrd->next == 1p );
    +                unlock( this->lock );
    +
    +                // Park current thread waiting
    +                park();
    +
    +                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    +                return;
    +        }
    +
    +        __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);
    +
    +}
    +
    +// Leave single monitor
    +void __leave( monitor_desc * this ) {
    +        // Lock the monitor spinlock
    +        lock( this->lock __cfaabi_dbg_ctx2 );
    +
    +        __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
    +
    +        /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    +
    +        // Leaving a recursion level, decrement the counter
    +        this->recursion -= 1;
    +
    +        // If we haven't left the last level of recursion
    +        // it means we don't need to do anything
    +        if( this->recursion != 0) {
    +                __cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
    +                unlock( this->lock );
    +                return;
    +        }
    +
    +        // Get the next thread, will be null on low contention monitor
    +        thread_desc * new_owner = next_thread( this );
    +
    +        // Check the new owner is consistent with who we wake-up
    +        // new_owner might be null even if someone owns the monitor when the owner is still waiting for another monitor
    +        /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
    +
    +        // We can now let other threads in safely
    +        unlock( this->lock );
    +
    +        //We need to wake-up the thread
    +        /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
    +        unpark( new_owner );
    +}
    +
    +// Leave single monitor for the last time
    +void __dtor_leave( monitor_desc * this ) {
    +        __cfaabi_dbg_debug_do(
    +                if( TL_GET( this_thread ) != this->owner ) {
    +                        abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner);
                     }
    -                else if( this->owner == thrd) {
    -                        // We already have the monitor... but where about to destroy it so the nesting will fail
    -                        // Abort!
    -                        abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd );
    +                if( this->recursion != 1 ) {
    +                        abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
                     }
    -
    -                __lock_size_t count = 1;
    -                monitor_desc ** monitors = &this;
    -                __monitor_group_t group = { &this, 1, func };
    -                if( is_accepted( this, group) ) {
    -                        __cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
    -
    -                        // Wake the thread that is waiting for this
    -                        __condition_criterion_t * urgent = pop( this->signal_stack );
    -                        /* paranoid */ verify( urgent );
    -
    -                        // Reset mask
    -                        reset_mask( this );
    -
    -                        // Create the node specific to this wait operation
    -                        wait_ctx_primed( thrd, 0 )
    -
    -                        // Some one else has the monitor, wait for him to finish and then run
    -                        unlock( this->lock );
    -
    -                        // Release the next thread
    -                        /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    -                        unpark( urgent->owner->waiting_thread );
    -
    -                        // Park current thread waiting
    -                        park();
    -
    -                        // Some one was waiting for us, enter
    -                        /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    -                }
    -                else {
    -                        __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
    -
    -                        wait_ctx( thrd, 0 )
    -                        this->dtor_node = &waiter;
    -
    -                        // Some one else has the monitor, wait in line for it
    -                        /* paranoid */ verify( thrd->next == 0p );
    -                        append( this->entry_queue, thrd );
    -                        /* paranoid */ verify( thrd->next == 1p );
    -                        unlock( this->lock );
    -
    -                        // Park current thread waiting
    -                        park();
    -
    -                        /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    -                        return;
    -                }
    -
    -                __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);
    -
    -        }
    -
    -        // Leave single monitor
    -        void __leave_monitor_desc( monitor_desc * this ) {
    -                // Lock the monitor spinlock
    -                lock( this->lock __cfaabi_dbg_ctx2 );
    -
    -                __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
    -
    -                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    -
    -                // Leaving a recursion level, decrement the counter
    -                this->recursion -= 1;
    -
    -                // If we haven't left the last level of recursion
    -                // it means we don't need to do anything
    -                if( this->recursion != 0) {
    -                        __cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
    -                        unlock( this->lock );
    -                        return;
    -                }
    -
    -                // Get the next thread, will be null on low contention monitor
    -                thread_desc * new_owner = next_thread( this );
    -
    -                // Check the new owner is consistent with who we wake-up
    -                // new_owner might be null even if someone owns the monitor when the owner is still waiting for another monitor
    -                /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
    -
    -                // We can now let other threads in safely
    -                unlock( this->lock );
    -
    -                //We need to wake-up the thread
    -                /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
    -                unpark( new_owner );
    -        }
    -
    -        // Leave single monitor for the last time
    -        void __leave_dtor_monitor_desc( monitor_desc * this ) {
    -                __cfaabi_dbg_debug_do(
    -                        if( TL_GET( this_thread ) != this->owner ) {
    -                                abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner);
    -                        }
    -                        if( this->recursion != 1 ) {
    -                                abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
    -                        }
    -                )
    -        }
    -
    +        )
    +}
    +
    +extern "C" {
             // Leave the thread monitor
             // last routine called by a thread.
             // Should never return
    -        void __leave_thread_monitor() {
    +        void __cfactx_thrd_leave() {
                     thread_desc * thrd = TL_GET( this_thread );
                     monitor_desc * this = &thrd->self_mon;
    …
     static inline void enter( __monitor_group_t monitors ) {
             for( __lock_size_t i = 0; i < monitors.size; i++) {
    -                __enter_monitor_desc( monitors[i], monitors );
    +                __enter( monitors[i], monitors );
             }
     }
    …
     static inline void leave(monitor_desc * monitors [], __lock_size_t count) {
             for( __lock_size_t i = count - 1; i >= 0; i--) {
    -                __leave_monitor_desc( monitors[i] );
    +                __leave( monitors[i] );
             }
     }
    …
             (thrd->monitors){m, 1, func};

    -        __enter_monitor_dtor( this.m, func );
    +        __dtor_enter( this.m, func );
     }

    …
     void ^?{}( monitor_dtor_guard_t & this ) {
             // Leave the monitors in order
    -        __leave_dtor_monitor_desc( this.m );
    +        __dtor_leave( this.m );

             // Restore thread context
    …
             thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
             /* paranoid */ verify( signallee->next == 0p );
    -        set_owner( monitors, count, signallee );
    +        __set_owner( monitors, count, signallee );

             __cfaabi_dbg_print_buffer_decl( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee );
    …

                                     // Set the owners to be the next thread
    -                                set_owner( monitors, count, next );
    +                                __set_owner( monitors, count, next );

                                     // unlock all the monitors
    …
     // Utilities

    -static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
    +static inline void __set_owner( monitor_desc * this, thread_desc * owner ) {
             /* paranoid */ verify( this->lock.lock );

    …
     }

    -static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
    +static inline void __set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
             /* paranoid */ verify ( monitors[0]->lock.lock );
             /* paranoid */ verifyf( monitors[0]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[0]->owner, monitors[0]->recursion, monitors[0] );
    …
                     //we need to set the monitor as in use
                     /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    -                set_owner( this,  urgent->owner->waiting_thread );
    +                __set_owner( this,  urgent->owner->waiting_thread );

                     return check_condition( urgent );
    …
             /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
             /* paranoid */ verify( !new_owner || new_owner->next == 0p );
    -        set_owner( this, new_owner );
    +        __set_owner( this, new_owner );

             return new_owner;
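The __enter/__leave pair maintains an owner-plus-recursion-count invariant under the monitor's internal spinlock. Below is a condensed stand-alone model of just that invariant, with a hypothetical mini_monitor type; the real code parks and unparks threads instead of spinning, and hands ownership to next_thread on release:

    #include <pthread.h>

    typedef struct {
            pthread_mutex_t lock;     // protects the two fields below
            pthread_t       owner;    // current owner, meaningful only when recursion > 0
            int             recursion;
    } mini_monitor;              // initialise with { PTHREAD_MUTEX_INITIALIZER, 0, 0 }

    static void mini_enter( mini_monitor * m ) {
            for( ;; ) {
                    pthread_mutex_lock( &m->lock );
                    if( m->recursion == 0 ) {                     // monitor is free: take it
                            m->owner = pthread_self();
                            m->recursion = 1;
                            pthread_mutex_unlock( &m->lock );
                            return;
                    }
                    if( pthread_equal( m->owner, pthread_self() ) ) {
                            m->recursion += 1;                    // nested entry by the owner
                            pthread_mutex_unlock( &m->lock );
                            return;
                    }
                    pthread_mutex_unlock( &m->lock );             // contended: retry (real code blocks)
            }
    }

    static void mini_leave( mini_monitor * m ) {
            pthread_mutex_lock( &m->lock );
            m->recursion -= 1;                // leave one nesting level; at zero the
            pthread_mutex_unlock( &m->lock ); // real code wakes the next waiting thread
    }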
  • libcfa/src/concurrency/preemption.cfa

    a505021 → c7a900a


             // Enable interrupts by decrementing the counter
    -        // If counter reaches 0, execute any pending CtxSwitch
    +        // If counter reaches 0, execute any pending __cfactx_switch
             void enable_interrupts( __cfaabi_dbg_ctx_param ) {
                     processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
    …

             // Disable interrupts by incrementint the counter
    -        // Don't execute any pending CtxSwitch even if counter reaches 0
    +        // Don't execute any pending __cfactx_switch even if counter reaches 0
             void enable_interrupts_noPoll() {
                     unsigned short prev = kernelTLS.preemption_state.disable_count;
    …

     // KERNEL ONLY
    -// Check if a CtxSwitch signal handler shoud defer
    +// Check if a __cfactx_switch signal handler shoud defer
     // If true  : preemption is safe
     // If false : preemption is unsafe and marked as pending
    …

             // Setup proper signal handlers
    -        __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // CtxSwitch handler
    +        __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // __cfactx_switch handler

             signal_block( SIGALRM );
    …
             // Preemption can occur here

    -        force_yield( __ALARM_PREEMPTION ); // Do the actual CtxSwitch
    +        force_yield( __ALARM_PREEMPTION ); // Do the actual __cfactx_switch
     }

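The comments in this hunk describe a per-thread nesting counter: a deferred preemption is honoured only when the outermost enable_interrupts brings the count back to zero, and enable_interrupts_noPoll skips the poll entirely. A minimal stand-alone model with hypothetical thread-local names:

    // Each disable/enable pair nests; only the outermost enable may act.
    static _Thread_local unsigned short disable_count;
    static _Thread_local int pending_preemption;

    static void disable_interrupts( void ) {
            disable_count += 1;
    }

    static void enable_interrupts( void ) {
            disable_count -= 1;
            if( disable_count == 0 && pending_preemption ) {
                    pending_preemption = 0;
                    // run the deferred context switch here (i.e. force a yield)
            }
    }

    static void enable_interrupts_noPoll( void ) {
            disable_count -= 1;   // never polls, even when the count reaches zero
    }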
  • libcfa/src/concurrency/thread.cfa

    a505021 → c7a900a


             disable_interrupts();
    -        CtxStart(main_p, get_coroutine(this), this, CtxInvokeThread);
    +        __cfactx_start(main_p, get_coroutine(this), this, __cfactx_invoke_thread);

             this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];