Changeset b2f6113


Ignore:
Timestamp:
Apr 4, 2019, 3:37:55 PM (3 years ago)
Author:
tdelisle <tdelisle@…>
Branches:
arm-eh, cleanup-dtors, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr
Children:
8c01e1b
Parents:
2fabdc02
Message:

Swapped memory storage for context and stack information inside the coroutine implementation

Location:
libcfa/src/concurrency
Files:
9 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/CtxSwitch-i386.S

    r2fabdc02 rb2f6113  
    5353        // Save floating & SSE control words on the stack.
    5454
    55         sub    $8,%esp
    56         stmxcsr 0(%esp)         // 4 bytes
    57         fnstcw  4(%esp)         // 2 bytes
     55        sub    $8,%esp
     56        stmxcsr 0(%esp)         // 4 bytes
     57        fnstcw  4(%esp)         // 2 bytes
    5858
    5959        // Save volatile registers on the stack.
     
    6767        movl %esp,SP_OFFSET(%eax)
    6868        movl %ebp,FP_OFFSET(%eax)
    69 //      movl 4(%ebp),%ebx       // save previous eip for debugger
    70 //      movl %ebx,PC_OFFSET(%eax)
    7169
    7270        // Copy the "to" context argument from the stack to register eax
     
    8987        // Load floating & SSE control words from the stack.
    9088
    91         fldcw   4(%esp)
    92         ldmxcsr 0(%esp)
    93         add    $8,%esp
     89        fldcw   4(%esp)
     90        ldmxcsr 0(%esp)
     91        add    $8,%esp
    9492
    9593        // Return to thread.
  • libcfa/src/concurrency/CtxSwitch-x86_64.S

    r2fabdc02 rb2f6113  
    3939#define SP_OFFSET       ( 0 * PTR_BYTE )
    4040#define FP_OFFSET       ( 1 * PTR_BYTE )
    41 #define PC_OFFSET       ( 2 * PTR_BYTE )
    4241
    4342.text
  • libcfa/src/concurrency/coroutine.cfa

    r2fabdc02 rb2f6113  
    3535
    3636extern "C" {
    37       void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
    38       static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
    39       static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
    40             abort();
    41       }
     37        void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
     38        static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
     39        static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
     40                abort();
     41        }
    4242}
    4343
     
    4747// minimum feasible stack size in bytes
    4848#define MinStackSize 1000
    49 static size_t pageSize = 0;                             // architecture pagesize HACK, should go in proper runtime singleton
     49extern size_t __page_size;                              // architecture pagesize HACK, should go in proper runtime singleton
     50
     51void __stack_prepare( __stack_info_t * this, size_t create_size );
    5052
    5153//-----------------------------------------------------------------------------
    5254// Coroutine ctors and dtors
    53 void ?{}( coStack_t & this, void * storage, size_t storageSize ) with( this ) {
    54       size               = storageSize == 0 ? 65000 : storageSize; // size of stack
    55       this.storage = storage;                                // pointer to stack
    56       limit              = NULL;                                   // stack grows towards stack limit
    57       base               = NULL;                                   // base of stack
    58       context    = NULL;                                   // address of cfa_context_t
    59       top                = NULL;                                   // address of top of storage
    60       userStack  = storage != NULL;
    61 }
    62 
    63 void ^?{}(coStack_t & this) {
    64       if ( ! this.userStack && this.storage ) {
    65             __cfaabi_dbg_debug_do(
    66                   if ( mprotect( this.storage, pageSize, PROT_READ | PROT_WRITE ) == -1 ) {
    67                         abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
    68                   }
    69             );
    70             free( this.storage );
    71       }
     55void ?{}( __stack_info_t & this, void * storage, size_t storageSize ) {
     56        this.storage   = (__stack_t *)storage;
     57
     58        // Did we get a piece of storage ?
     59        if (this.storage || storageSize != 0) {
     60                // We either got a piece of storage or the user asked for a specific size
     61                // Immediately create the stack
     62                // (This is slightly unintuitive that non-default sized coroutines are created eagerly,
     63                // but it avoids having every coroutine carry an unnecessary size field)
     64                verify( storageSize != 0 );
     65                __stack_prepare( &this, storageSize );
     66        }
     67}
     68
     69void ^?{}(__stack_info_t & this) {
     70        if ( ! this.userStack && this.storage ) {
     71                void * storage = (char*)(this.storage) - this.storage->size;
     72                __cfaabi_dbg_debug_do(
     73                        storage = (char*)(storage) - __page_size;
     74                        if ( mprotect( storage, __page_size, PROT_READ | PROT_WRITE ) == -1 ) {
     75                                abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
     76                        }
     77                );
     78                __cfaabi_dbg_print_safe("Kernel : Deleting stack %p\n", storage);
     79                free( storage );
     80        }
    7281}
    7382
    7483void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize ) with( this ) {
    75       (this.stack){storage, storageSize};
    76       this.name = name;
    77       errno_ = 0;
    78       state = Start;
    79       starter = NULL;
    80       last = NULL;
    81       cancellation = NULL;
     84        (this.stack){storage, storageSize};
     85        this.name = name;
     86        state = Start;
     87        starter = NULL;
     88        last = NULL;
     89        cancellation = NULL;
    8290}
    8391
    8492void ^?{}(coroutine_desc& this) {
    85       if(this.state != Halted && this.state != Start) {
    86             coroutine_desc * src = TL_GET( this_thread )->curr_cor;
    87             coroutine_desc * dst = &this;
    88 
    89             struct _Unwind_Exception storage;
    90             storage.exception_class = -1;
    91             storage.exception_cleanup = _CtxCoroutine_UnwindCleanup;
    92             this.cancellation = &storage;
    93             this.last = src;
    94 
    95               // not resuming self ?
    96               if ( src == dst ) {
    97                       abort( "Attempt by coroutine %.256s (%p) to terminate itself.\n", src->name, src );
    98             }
    99 
    100               CoroutineCtxSwitch( src, dst );
    101       }
     93        if(this.state != Halted && this.state != Start) {
     94                coroutine_desc * src = TL_GET( this_thread )->curr_cor;
     95                coroutine_desc * dst = &this;
     96
     97                struct _Unwind_Exception storage;
     98                storage.exception_class = -1;
     99                storage.exception_cleanup = _CtxCoroutine_UnwindCleanup;
     100                this.cancellation = &storage;
     101                this.last = src;
     102
     103                // not resuming self ?
     104                if ( src == dst ) {
     105                        abort( "Attempt by coroutine %.256s (%p) to terminate itself.\n", src->name, src );
     106                }
     107
     108                CoroutineCtxSwitch( src, dst );
     109        }
    102110}
    103111
     
    106114forall(dtype T | is_coroutine(T))
    107115void prime(T& cor) {
    108       coroutine_desc* this = get_coroutine(cor);
    109       assert(this->state == Start);
    110 
    111       this->state = Primed;
    112       resume(cor);
     116        coroutine_desc* this = get_coroutine(cor);
     117        assert(this->state == Start);
     118
     119        this->state = Primed;
     120        resume(cor);
    113121}
    114122
    115123// Wrapper for co
    116124void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
    117       // Safety note : Preemption must be disabled since there is a race condition
    118       // kernelTLS.this_thread->curr_cor and $rsp/$rbp must agree at all times
    119       verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
    120       disable_interrupts();
    121 
    122       // set state of current coroutine to inactive
    123       src->state = src->state == Halted ? Halted : Inactive;
    124 
    125       // set new coroutine that task is executing
    126       TL_GET( this_thread )->curr_cor = dst;
    127 
    128       // context switch to specified coroutine
    129       assert( src->stack.context );
    130       CtxSwitch( src->stack.context, dst->stack.context );
    131       // when CtxSwitch returns we are back in the src coroutine
    132 
    133       // set state of new coroutine to active
    134       src->state = Active;
    135 
    136       enable_interrupts( __cfaabi_dbg_ctx );
    137       verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
    138 
    139 
    140       if( unlikely(src->cancellation != NULL) ) {
    141             _CtxCoroutine_Unwind(src->cancellation, src);
    142       }
    143 } //ctxSwitchDirect
    144 
    145 void create_stack( coStack_t* this, unsigned int storageSize ) with( *this ) {
    146       //TEMP HACK do this on proper kernel startup
    147       if(pageSize == 0ul) pageSize = sysconf( _SC_PAGESIZE );
    148 
    149       size_t cxtSize = libCeiling( sizeof(machine_context_t), 8 ); // minimum alignment
    150 
    151       if ( !storage ) {
    152             __cfaabi_dbg_print_safe("Kernel : Creating stack of size %zu for stack obj %p\n", cxtSize + size + 8, this);
    153 
    154             userStack = false;
    155             size = libCeiling( storageSize, 16 );
    156             // use malloc/memalign because "new" raises an exception for out-of-memory
    157 
    158             // assume malloc has 8 byte alignment so add 8 to allow rounding up to 16 byte alignment
    159             __cfaabi_dbg_debug_do( storage = memalign( pageSize, cxtSize + size + pageSize ) );
    160             __cfaabi_dbg_no_debug_do( storage = malloc( cxtSize + size + 8 ) );
    161 
    162             __cfaabi_dbg_debug_do(
    163                   if ( mprotect( storage, pageSize, PROT_NONE ) == -1 ) {
    164                         abort( "(uMachContext &)%p.createContext() : internal error, mprotect failure, error(%d) %s.", this, (int)errno, strerror( (int)errno ) );
    165                   } // if
    166             );
    167 
    168             if ( (intptr_t)storage == 0 ) {
    169                   abort( "Attempt to allocate %zd bytes of storage for coroutine or task execution-state but insufficient memory available.", size );
    170             } // if
    171 
    172             __cfaabi_dbg_debug_do( limit = (char *)storage + pageSize );
    173             __cfaabi_dbg_no_debug_do( limit = (char *)libCeiling( (unsigned long)storage, 16 ) ); // minimum alignment
    174 
    175       } else {
    176             __cfaabi_dbg_print_safe("Kernel : stack obj %p using user stack %p(%u bytes)\n", this, storage, storageSize);
    177 
    178             assertf( ((size_t)storage & (libAlign() - 1)) == 0ul, "Stack storage %p for task/coroutine must be aligned on %d byte boundary.", storage, (int)libAlign() );
    179             userStack = true;
    180             size = storageSize - cxtSize;
    181 
    182             if ( size % 16 != 0u ) size -= 8;
    183 
    184             limit = (char *)libCeiling( (unsigned long)storage, 16 ); // minimum alignment
    185       } // if
    186       assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize );
    187 
    188       base = (char *)limit + size;
    189       context = base;
    190       top = (char *)context + cxtSize;
     125        // Safety note : Preemption must be disabled since there is a race condition
     126        // kernelTLS.this_thread->curr_cor and $rsp/$rbp must agree at all times
     127        verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
     128        disable_interrupts();
     129
     130        // set state of current coroutine to inactive
     131        src->state = src->state == Halted ? Halted : Inactive;
     132
     133        // set new coroutine that task is executing
     134        TL_GET( this_thread )->curr_cor = dst;
     135
     136        // context switch to specified coroutine
     137        CtxSwitch( &src->context, &dst->context );
     138        // when CtxSwitch returns we are back in the src coroutine
     139
     140        // set state of new coroutine to active
     141        src->state = Active;
     142
     143        enable_interrupts( __cfaabi_dbg_ctx );
     144        verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
     145
     146        if( unlikely(src->cancellation != NULL) ) {
     147                _CtxCoroutine_Unwind(src->cancellation, src);
     148        }
     149}
     150
     151[void *, size_t] __stack_alloc( size_t storageSize ) {
     152        static const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
     153        assert(__page_size != 0l);
     154        size_t size = libCeiling( storageSize, 16 ) + stack_data_size;
     155
     156        // If we are running debug, we also need to allocate a guardpage to catch stack overflows.
     157        void * storage;
     158        __cfaabi_dbg_debug_do(
     159                storage = memalign( __page_size, size + __page_size );
     160        );
     161        __cfaabi_dbg_no_debug_do(
     162                storage = (void*)malloc(size);
     163        );
     164
     165        __cfaabi_dbg_print_safe("Kernel : Created stack %p of size %zu\n", storage, size);
     166        __cfaabi_dbg_debug_do(
     167                if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
     168                        abort( "__stack_alloc : internal error, mprotect failure, error(%d) %s.", (int)errno, strerror( (int)errno ) );
     169                }
     170                storage = (void *)(((intptr_t)storage) + __page_size);
     171        );
     172
     173        verify( ((intptr_t)storage & (libAlign() - 1)) == 0ul );
     174        return [storage, size];
     175}
     176
     177void __stack_prepare( __stack_info_t * this, size_t create_size ) {
     178        static const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
     179        bool userStack;
     180        void * storage;
     181        size_t size;
     182        if ( !this->storage ) {
     183                userStack = false;
     184                [storage, size] = __stack_alloc( create_size );
     185        } else {
     186                userStack = true;
     187                __cfaabi_dbg_print_safe("Kernel : stack obj %p using user stack %p(%zu bytes)\n", this, this->storage, this->storage->size);
     188
     189                // The stack must be aligned, advance the pointer to the next align data
     190                storage = (void*)libCeiling( (intptr_t)this->storage, libAlign());
     191
     192                // The size needs to be shrunk to fit all the extra data structures and to be aligned
     193                ptrdiff_t diff = (intptr_t)storage - (intptr_t)this->storage;
     194                size = libFloor(create_size - stack_data_size - diff, libAlign());
     195        } // if
     196        assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize );
     197
     198        this->storage = (__stack_t *)((intptr_t)storage + size);
     199        this->storage->size  = size;
     200        this->storage->limit = storage;
     201        this->storage->base  = (void*)((intptr_t)storage + size);
     202        this->userStack = userStack;
    191203}
    192204
     
    194206// is not inline (We can't inline Cforall in C)
    195207extern "C" {
    196       void __suspend_internal(void) {
    197             suspend();
    198       }
    199 
    200       void __leave_coroutine( coroutine_desc * src ) {
    201             coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter;
    202 
    203             src->state = Halted;
    204 
    205             assertf( starter != 0,
    206                   "Attempt to suspend/leave coroutine \"%.256s\" (%p) that has never been resumed.\n"
    207                   "Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",
    208                   src->name, src );
    209             assertf( starter->state != Halted,
    210                   "Attempt by coroutine \"%.256s\" (%p) to suspend/leave back to terminated coroutine \"%.256s\" (%p).\n"
    211                   "Possible cause is terminated coroutine's main routine has already returned.",
    212                   src->name, src, starter->name, starter );
    213 
    214             CoroutineCtxSwitch( src, starter );
    215       }
     208        void __suspend_internal(void) {
     209                suspend();
     210        }
     211
     212        void __leave_coroutine( coroutine_desc * src ) {
     213                coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter;
     214
     215                src->state = Halted;
     216
     217                assertf( starter != 0,
     218                        "Attempt to suspend/leave coroutine \"%.256s\" (%p) that has never been resumed.\n"
     219                        "Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",
     220                        src->name, src );
     221                assertf( starter->state != Halted,
     222                        "Attempt by coroutine \"%.256s\" (%p) to suspend/leave back to terminated coroutine \"%.256s\" (%p).\n"
     223                        "Possible cause is terminated coroutine's main routine has already returned.",
     224                        src->name, src, starter->name, starter );
     225
     226                CoroutineCtxSwitch( src, starter );
     227        }
    216228}
    217229
  • libcfa/src/concurrency/coroutine.hfa

    r2fabdc02 rb2f6113  
    6868// Private wrappers for context switch and stack creation
    6969extern void CoroutineCtxSwitch(coroutine_desc * src, coroutine_desc * dst);
    70 extern void create_stack( coStack_t * this, unsigned int storageSize );
     70extern void __stack_prepare   ( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
    7171
    7272// Suspend implementation inlined for performance
     
    102102        coroutine_desc * dst = get_coroutine(cor);
    103103
    104         if( unlikely(!dst->stack.base) ) {
    105                 create_stack(&dst->stack, dst->stack.size);
     104        if( unlikely(!dst->stack.storage || !dst->stack.storage->base) ) {
     105                __stack_prepare(&dst->stack, 65000);
    106106                CtxStart(&cor, CtxInvokeCoroutine);
    107107        }
  • libcfa/src/concurrency/invoke.c

    r2fabdc02 rb2f6113  
    122122        void (*invoke)(void *)
    123123) {
    124         struct coStack_t* stack = &get_coroutine( this )->stack;
     124        struct coroutine_desc * cor = get_coroutine( this );
     125        struct __stack_t * stack = cor->stack.storage;
    125126
    126127#if defined( __i386 )
     
    136137        };
    137138
    138         ((struct machine_context_t *)stack->context)->SP = (char *)stack->base - sizeof( struct FakeStack );
    139         ((struct machine_context_t *)stack->context)->FP = NULL;                // terminate stack with NULL fp
     139        cor->context.SP = (char *)stack->base - sizeof( struct FakeStack );
     140        cor->context.FP = NULL;         // terminate stack with NULL fp
    140141
    141         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->dummyReturn = NULL;
    142         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->argument[0] = this;     // argument to invoke
    143         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = invoke;
    144         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520
    145         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7
     142        struct FakeStack *fs = (struct FakeStack *)cor->context.SP;
     143
     144        fs->dummyReturn = NULL;
     145        fs->argument[0] = this;     // argument to invoke
     146        fs->rturn = invoke;
     147        fs->mxcr = 0x1F80; //Vol. 2A 3-520
     148        fs->fcw = 0x037F;  //Vol. 1 8-7
    146149
    147150#elif defined( __x86_64 )
     
    155158        };
    156159
    157         ((struct machine_context_t *)stack->context)->SP = (char *)stack->base - sizeof( struct FakeStack );
    158         ((struct machine_context_t *)stack->context)->FP = NULL;                // terminate stack with NULL fp
     160        cor->context.SP = (char *)stack->base - sizeof( struct FakeStack );
     161        cor->context.FP = NULL;         // terminate stack with NULL fp
    159162
    160         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->dummyReturn = NULL;
    161         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = CtxInvokeStub;
    162         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fixedRegisters[0] = this;
    163         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fixedRegisters[1] = invoke;
    164         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520
    165         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7
     163        struct FakeStack *fs = (struct FakeStack *)cor->context.SP;
     164
     165        fs->dummyReturn = NULL;
     166        fs->rturn = CtxInvokeStub;
     167        fs->fixedRegisters[0] = this;
     168        fs->fixedRegisters[1] = invoke;
     169        fs->mxcr = 0x1F80; //Vol. 2A 3-520
     170        fs->fcw = 0x037F;  //Vol. 1 8-7
    166171
    167172#elif defined( __ARM_ARCH )
     
    173178        };
    174179
    175         ((struct machine_context_t *)stack->context)->SP = (char *)stack->base - sizeof( struct FakeStack );
    176         ((struct machine_context_t *)stack->context)->FP = NULL;
     180        cor->context.SP = (char *)stack->base - sizeof( struct FakeStack );
     181        cor->context.FP = NULL;
    177182
    178         struct FakeStack *fs = (struct FakeStack *)((struct machine_context_t *)stack->context)->SP;
     183        struct FakeStack *fs = (struct FakeStack *)cor->context.SP;
    179184
    180185        fs->intRegs[8] = CtxInvokeStub;
  • libcfa/src/concurrency/invoke.h

    r2fabdc02 rb2f6113  
    6262        #endif
    6363
    64         struct coStack_t {
    65                 size_t size;                                                                    // size of stack
    66                 void * storage;                                                                 // pointer to stack
    67                 void * limit;                                                                   // stack grows towards stack limit
    68                 void * base;                                                                    // base of stack
    69                 void * context;                                                                 // address of cfa_context_t
    70                 void * top;                                                                             // address of top of storage
    71                 bool userStack;                                                                 // whether or not the user allocated the stack
     64        struct __stack_context_t {
     65                void * SP;
     66
     67                void * FP;
     68                // copy of global UNIX variable errno
     69                int errno_;
     70        };
     71
     72        // low addresses :           +----------------------+ <- start of allocation
     73        //                           |  optional guard page |
     74        //                           +----------------------+ <- __stack_t.limit
     75        //                           |                      |
     76        //                           |       /\ /\ /\       |
     77        //                           |       || || ||       |
     78        //                           |                      |
     79        //                           |    program  stack    |
     80        //                           |                      |
     81        // __stack_info_t.storage -> +----------------------+ <- __stack_t.base
     82        //                           |      __stack_t       |
     83        // high addresses :          +----------------------+ <- end of allocation
     84
     85        struct __stack_t {
     86                // size of stack
     87                size_t size;
     88
     89                // stack grows towards stack limit
     90                void * limit;
     91
     92                // base of stack
     93                void * base;
     94        };
     95
     96        struct __stack_info_t {
     97                // pointer to stack
     98                struct __stack_t * storage;
     99
     100                // whether or not the user allocated the stack
     101                bool userStack;
    72102        };
    73103
     
    75105
    76106        struct coroutine_desc {
     107                // context that is switch during a CtxSwitch
     108                struct __stack_context_t context;
     109
    77110                // stack information of the coroutine
    78                 struct coStack_t stack;
     111                struct __stack_info_t stack;
    79112
    80113                // textual name for coroutine/task, initialized by uC++ generated code
    81114                const char * name;
    82115
    83                 // copy of global UNIX variable errno
    84                 int errno_;
    85 
    86116                // current execution status for coroutine
    87117                enum coroutine_state state;
     118
    88119                // first coroutine to resume this one
    89120                struct coroutine_desc * starter;
     
    230261        // assembler routines that performs the context switch
    231262        extern void CtxInvokeStub( void );
    232         void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
     263        void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
    233264        // void CtxStore ( void * this ) asm ("CtxStore");
    234265        // void CtxRet   ( void * dst  ) asm ("CtxRet");
  • libcfa/src/concurrency/kernel.cfa

    r2fabdc02 rb2f6113  
    4242//-----------------------------------------------------------------------------
    4343// Kernel storage
    44 KERNEL_STORAGE(cluster,           mainCluster);
    45 KERNEL_STORAGE(processor,         mainProcessor);
    46 KERNEL_STORAGE(thread_desc,       mainThread);
    47 KERNEL_STORAGE(machine_context_t, mainThreadCtx);
     44KERNEL_STORAGE(cluster,         mainCluster);
     45KERNEL_STORAGE(processor,       mainProcessor);
     46KERNEL_STORAGE(thread_desc,     mainThread);
     47KERNEL_STORAGE(__stack_t,       mainThreadCtx);
    4848
    4949cluster     * mainCluster;
     
    5454struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
    5555}
     56
     57size_t __page_size = 0;
    5658
    5759//-----------------------------------------------------------------------------
     
    6668// Struct to steal stack
    6769struct current_stack_info_t {
    68         machine_context_t ctx;
     70        __stack_t * storage;            // pointer to stack object
    6971        unsigned int size;              // size of stack
    7072        void *base;                             // base of stack
    71         void *storage;                  // pointer to stack
    7273        void *limit;                    // stack grows towards stack limit
    7374        void *context;                  // address of cfa_context_t
    74         void *top;                              // address of top of storage
    7575};
    7676
    7777void ?{}( current_stack_info_t & this ) {
    78         CtxGet( this.ctx );
    79         this.base = this.ctx.FP;
    80         this.storage = this.ctx.SP;
     78        __stack_context_t ctx;
     79        CtxGet( ctx );
     80        this.base = ctx.FP;
    8181
    8282        rlimit r;
     
    8686        this.limit = (void *)(((intptr_t)this.base) - this.size);
    8787        this.context = &storage_mainThreadCtx;
    88         this.top = this.base;
    8988}
    9089
    9190//-----------------------------------------------------------------------------
    9291// Main thread construction
    93 void ?{}( coStack_t & this, current_stack_info_t * info) with( this ) {
    94         size      = info->size;
    95         storage   = info->storage;
    96         limit     = info->limit;
    97         base      = info->base;
    98         context   = info->context;
    99         top       = info->top;
    100         userStack = true;
    101 }
    10292
    10393void ?{}( coroutine_desc & this, current_stack_info_t * info) with( this ) {
    104         stack{ info };
     94        context.errno_ = 0;
     95        stack.storage = info->storage;
     96        stack.userStack = true;
     97        with(*stack.storage) {
     98                size      = info->size;
     99                limit     = info->limit;
     100                base      = info->base;
     101        }
    105102        name = "Main Thread";
    106         errno_ = 0;
    107103        state = Start;
    108104        starter = NULL;
     105        last = NULL;
     106        cancellation = NULL;
    109107}
    110108
     
    312310        // to waste the perfectly valid stack create by pthread.
    313311        current_stack_info_t info;
    314         machine_context_t ctx;
    315         info.context = &ctx;
     312        __stack_t ctx;
     313        info.storage = &ctx;
    316314        (proc->runner){ proc, &info };
    317315
    318         __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.base);
     316        __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);
    319317
    320318        //Set global state
     
    353351        verify( ! kernelTLS.preemption_state.enabled );
    354352
    355         create_stack(&dst->stack, dst->stack.size);
     353        __stack_prepare( &dst->stack, 65000 );
    356354        CtxStart(&this->runner, CtxInvokeCoroutine);
    357355
     
    372370
    373371        // context switch to specified coroutine
    374         assert( src->stack.context );
    375         CtxSwitch( src->stack.context, dst->stack.context );
     372        CtxSwitch( &src->context, &dst->context );
    376373        // when CtxSwitch returns we are back in the src coroutine
    377374
     
    545542        __cfaabi_dbg_print_safe("Kernel : Starting\n");
    546543
     544        __page_size = sysconf( _SC_PAGESIZE );
     545
    547546        __cfa_dbg_global_clusters.list{ __get };
    548547        __cfa_dbg_global_clusters.lock{};
     
    559558        mainThread = (thread_desc *)&storage_mainThread;
    560559        current_stack_info_t info;
     560        info.storage = (__stack_t*)&storage_mainThreadCtx;
    561561        (*mainThread){ &info };
    562562
  • libcfa/src/concurrency/thread.cfa

    r2fabdc02 rb2f6113  
    8080
    8181        disable_interrupts();
    82         create_stack(&thrd_c->stack, thrd_c->stack.size);
     82        assert( thrd_c->stack.storage );
    8383        CtxStart(&this, CtxInvokeThread);
    84         assert( thrd_c->last->stack.context );
    85         CtxSwitch( thrd_c->last->stack.context, thrd_c->stack.context );
     84        CtxSwitch( &thrd_c->last->context, &thrd_c->context );
    8685
    8786        ScheduleThread(thrd_h);
     
    118117        // set new coroutine that the processor is executing
    119118        // and context switch to it
    120         assert( src->stack.context );
    121         CtxSwitch( src->stack.context, dst->stack.context );
     119        CtxSwitch( &src->context, &dst->context );
    122120
    123121        // set state of new coroutine to active
  • libcfa/src/concurrency/thread.hfa

    r2fabdc02 rb2f6113  
    6161void ^?{}(thread_desc & this);
    6262
    63 static inline void ?{}(thread_desc & this)                                                                  { this{ "Anonymous Thread", *mainCluster, NULL, 0 }; }
     63static inline void ?{}(thread_desc & this)                                                                  { this{ "Anonymous Thread", *mainCluster, NULL, 65000 }; }
    6464static inline void ?{}(thread_desc & this, size_t stackSize )                                               { this{ "Anonymous Thread", *mainCluster, NULL, stackSize }; }
    6565static inline void ?{}(thread_desc & this, void * storage, size_t storageSize )                             { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
    66 static inline void ?{}(thread_desc & this, struct cluster & cl )                                            { this{ "Anonymous Thread", cl, NULL, 0 }; }
    67 static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize )                          { this{ "Anonymous Thread", cl, 0, stackSize }; }
     66static inline void ?{}(thread_desc & this, struct cluster & cl )                                            { this{ "Anonymous Thread", cl, NULL, 65000 }; }
     67static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize )                          { this{ "Anonymous Thread", cl, NULL, stackSize }; }
    6868static inline void ?{}(thread_desc & this, struct cluster & cl, void * storage, size_t storageSize )        { this{ "Anonymous Thread", cl, storage, storageSize }; }
    69 static inline void ?{}(thread_desc & this, const char * const name)                                         { this{ name, *mainCluster, NULL, 0 }; }
    70 static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl )                   { this{ name, cl, NULL, 0 }; }
     69static inline void ?{}(thread_desc & this, const char * const name)                                         { this{ name, *mainCluster, NULL, 65000 }; }
     70static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl )                   { this{ name, cl, NULL, 65000 }; }
    7171static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, NULL, stackSize }; }
    7272
Note: See TracChangeset for help on using the changeset viewer.