Ignore:
Timestamp:
Jul 13, 2017, 3:57:04 PM (9 years ago)
Author:
Rob Schluntz <rschlunt@…>
Branches:
ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, stuck-waitfor-destruct, with_gc
Children:
0720e049, 9a1e509
Parents:
55a68c3 (diff), d6ff3ff (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:/u/cforall/software/cfa/cfa-cc

Location:
src/libcfa/concurrency
Files:
15 edited

Legend:

Unmodified
Added
Removed
  • src/libcfa/concurrency/CtxSwitch-i386.S

    r55a68c3 r3d4b23fa  
    9898        ret
    9999
    100 .text
    101         .align 2
    102 .globl  CtxGet
    103 CtxGet:
    104         movl %esp,SP_OFFSET(%eax)
    105         movl %ebp,FP_OFFSET(%eax)
    106 
    107         ret
    108 
    109100// Local Variables: //
    110101// compile-command: "make install" //
  • src/libcfa/concurrency/CtxSwitch-x86_64.S

    r55a68c3 r3d4b23fa  
    1 //                               -*- Mode: Asm -*- 
     1//                               -*- Mode: Asm -*-
    22//
    33// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
     
    1818// Free Software  Foundation; either  version 2.1 of  the License, or  (at your
    1919// option) any later version.
    20 // 
     20//
    2121// This library is distributed in the  hope that it will be useful, but WITHOUT
    2222// ANY  WARRANTY;  without even  the  implied  warranty  of MERCHANTABILITY  or
    2323// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
    2424// for more details.
    25 // 
     25//
    2626// You should  have received a  copy of the  GNU Lesser General  Public License
    2727// along  with this library.
    28 // 
     28//
    2929
    3030// This context switch routine depends on the fact that the stack of a new
     
    9393.globl  CtxInvokeStub
    9494CtxInvokeStub:
    95         movq %rbx, %rdi 
     95        movq %rbx, %rdi
    9696        jmp *%r12
    97 
    98 .text
    99         .align 2
    100 .globl  CtxGet
    101 CtxGet:
    102         movq %rsp,SP_OFFSET(%rdi)
    103         movq %rbp,FP_OFFSET(%rdi)
    104 
    105         ret
    10697
    10798// Local Variables: //
  • src/libcfa/concurrency/alarm.c

    r55a68c3 r3d4b23fa  
    1616
    1717extern "C" {
     18#include <errno.h>
     19#include <stdio.h>
     20#include <string.h>
    1821#include <time.h>
     22#include <unistd.h>
    1923#include <sys/time.h>
    2024}
     25
     26#include "libhdr.h"
    2127
    2228#include "alarm.h"
     
    3137        timespec curr;
    3238        clock_gettime( CLOCK_REALTIME, &curr );
    33         return ((__cfa_time_t)curr.tv_sec * TIMEGRAN) + curr.tv_nsec;
     39        __cfa_time_t curr_time = ((__cfa_time_t)curr.tv_sec * TIMEGRAN) + curr.tv_nsec;
     40        // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : current time is %lu\n", curr_time );
     41        return curr_time;
    3442}
    3543
    3644void __kernel_set_timer( __cfa_time_t alarm ) {
     45        LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : set timer to %lu\n", (__cfa_time_t)alarm );
    3746        itimerval val;
    3847        val.it_value.tv_sec = alarm / TIMEGRAN;                 // seconds
     
    7180}
    7281
     82LIB_DEBUG_DO( bool validate( alarm_list_t * this ) {
     83        alarm_node_t ** it = &this->head;
     84        while( (*it) ) {
     85                it = &(*it)->next;
     86        }
     87
     88        return it == this->tail;
     89})
     90
    7391static inline void insert_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t p ) {
    74         assert( !n->next );
     92        verify( !n->next );
    7593        if( p == this->tail ) {
    7694                this->tail = &n->next;
     
    8098        }
    8199        *p = n;
     100
     101        verify( validate( this ) );
    82102}
    83103
     
    89109
    90110        insert_at( this, n, it );
     111
     112        verify( validate( this ) );
    91113}
    92114
     
    100122                head->next = NULL;
    101123        }
     124        verify( validate( this ) );
    102125        return head;
    103126}
     
    105128static inline void remove_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t it ) {
    106129        verify( it );
    107         verify( (*it)->next == n );
     130        verify( (*it) == n );
    108131
    109         (*it)->next = n->next;
     132        (*it) = n->next;
    110133        if( !n-> next ) {
    111134                this->tail = it;
    112135        }
    113136        n->next = NULL;
     137
     138        verify( validate( this ) );
    114139}
    115140
    116141static inline void remove( alarm_list_t * this, alarm_node_t * n ) {
    117142        alarm_node_t ** it = &this->head;
    118         while( (*it) && (*it)->next != n ) {
     143        while( (*it) && (*it) != n ) {
    119144                it = &(*it)->next;
    120145        }
    121146
     147        verify( validate( this ) );
     148
    122149        if( *it ) { remove_at( this, n, it ); }
     150
     151        verify( validate( this ) );
    123152}
    124153
    125154void register_self( alarm_node_t * this ) {
    126155        disable_interrupts();
    127         assert( !systemProcessor->pending_alarm );
    128         lock( &systemProcessor->alarm_lock );
     156        verify( !systemProcessor->pending_alarm );
     157        lock( &systemProcessor->alarm_lock DEBUG_CTX2 );
    129158        {
     159                verify( validate( &systemProcessor->alarms ) );
     160                bool first = !systemProcessor->alarms.head;
     161
    130162                insert( &systemProcessor->alarms, this );
    131163                if( systemProcessor->pending_alarm ) {
    132164                        tick_preemption();
    133165                }
     166                if( first ) {
     167                        __kernel_set_timer( systemProcessor->alarms.head->alarm - __kernel_get_time() );
     168                }
    134169        }
    135170        unlock( &systemProcessor->alarm_lock );
    136171        this->set = true;
    137         enable_interrupts();
     172        enable_interrupts( DEBUG_CTX );
    138173}
    139174
    140175void unregister_self( alarm_node_t * this ) {
     176        // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : unregister %p start\n", this );
    141177        disable_interrupts();
    142         lock( &systemProcessor->alarm_lock );
    143         remove( &systemProcessor->alarms, this );
     178        lock( &systemProcessor->alarm_lock DEBUG_CTX2 );
     179        {
     180                verify( validate( &systemProcessor->alarms ) );
     181                remove( &systemProcessor->alarms, this );
     182        }
    144183        unlock( &systemProcessor->alarm_lock );
    145         disable_interrupts();
     184        enable_interrupts( DEBUG_CTX );
    146185        this->set = false;
     186        // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Kernel : unregister %p end\n", this );
    147187}
  • src/libcfa/concurrency/coroutine

    r55a68c3 r3d4b23fa  
    6363
    6464// Get current coroutine
    65 coroutine_desc * this_coroutine(void);
     65extern volatile thread_local coroutine_desc * this_coroutine;
    6666
    6767// Private wrappers for context switch and stack creation
     
    7171// Suspend implementation inlined for performance
    7272static inline void suspend() {
    73         coroutine_desc * src = this_coroutine();                // optimization
     73        coroutine_desc * src = this_coroutine;          // optimization
    7474
    7575        assertf( src->last != 0,
     
    8888forall(dtype T | is_coroutine(T))
    8989static inline void resume(T * cor) {
    90         coroutine_desc * src = this_coroutine();                // optimization
     90        coroutine_desc * src = this_coroutine;          // optimization
    9191        coroutine_desc * dst = get_coroutine(cor);
    9292
     
    112112
    113113static inline void resume(coroutine_desc * dst) {
    114         coroutine_desc * src = this_coroutine();                // optimization
     114        coroutine_desc * src = this_coroutine;          // optimization
    115115
    116116        // not resuming self ?
  • src/libcfa/concurrency/coroutine.c

    r55a68c3 r3d4b23fa  
    3232#include "invoke.h"
    3333
    34 extern thread_local processor * this_processor;
     34extern volatile thread_local processor * this_processor;
    3535
    3636//-----------------------------------------------------------------------------
     
    4444// Coroutine ctors and dtors
    4545void ?{}(coStack_t* this) {
    46         this->size              = 10240;        // size of stack
     46        this->size              = 65000;        // size of stack
    4747        this->storage   = NULL; // pointer to stack
    4848        this->limit             = NULL; // stack grows towards stack limit
     
    5050        this->context   = NULL; // address of cfa_context_t
    5151        this->top               = NULL; // address of top of storage
    52         this->userStack = false;       
     52        this->userStack = false;
    5353}
    5454
     
    106106
    107107        // set state of current coroutine to inactive
    108         src->state = Inactive;
     108        src->state = src->state == Halted ? Halted : Inactive;
    109109
    110110        // set new coroutine that task is executing
    111         this_processor->current_coroutine = dst;
     111        this_coroutine = dst;
    112112
    113113        // context switch to specified coroutine
     114        assert( src->stack.context );
    114115        CtxSwitch( src->stack.context, dst->stack.context );
    115         // when CtxSwitch returns we are back in the src coroutine             
     116        // when CtxSwitch returns we are back in the src coroutine
    116117
    117118        // set state of new coroutine to active
     
    131132                this->size = libCeiling( storageSize, 16 );
    132133                // use malloc/memalign because "new" raises an exception for out-of-memory
    133                
     134
    134135                // assume malloc has 8 byte alignment so add 8 to allow rounding up to 16 byte alignment
    135136                LIB_DEBUG_DO( this->storage = memalign( pageSize, cxtSize + this->size + pageSize ) );
  • src/libcfa/concurrency/invoke.c

    r55a68c3 r3d4b23fa  
    2929
    3030extern void __suspend_internal(void);
    31 extern void __leave_monitor_desc( struct monitor_desc * this );
     31extern void __leave_thread_monitor( struct thread_desc * this );
     32extern void disable_interrupts();
     33extern void enable_interrupts( DEBUG_CTX_PARAM );
    3234
    3335void CtxInvokeCoroutine(
    34       void (*main)(void *), 
    35       struct coroutine_desc *(*get_coroutine)(void *), 
     36      void (*main)(void *),
     37      struct coroutine_desc *(*get_coroutine)(void *),
    3638      void *this
    3739) {
     
    5658
    5759void CtxInvokeThread(
    58       void (*dtor)(void *), 
    59       void (*main)(void *), 
    60       struct thread_desc *(*get_thread)(void *), 
     60      void (*dtor)(void *),
     61      void (*main)(void *),
     62      struct thread_desc *(*get_thread)(void *),
    6163      void *this
    6264) {
     65      // First suspend, once the thread arrives here,
     66      // the function pointer to main can be invalidated without risk
    6367      __suspend_internal();
    6468
     69      // Fetch the thread handle from the user defined thread structure
    6570      struct thread_desc* thrd = get_thread( this );
    66       struct coroutine_desc* cor = &thrd->cor;
    67       struct monitor_desc* mon = &thrd->mon;
    68       cor->state = Active;
    6971
    70       // LIB_DEBUG_PRINTF("Invoke Thread : invoking main %p (args %p)\n", main, this);
     72      // Officially start the thread by enabling preemption
     73      enable_interrupts( DEBUG_CTX );
     74
     75      // Call the main of the thread
    7176      main( this );
    7277
    73       __leave_monitor_desc( mon );
    74 
     78      // To exit a thread we must :
     79      // 1 - Mark it as halted
     80      // 2 - Leave its monitor
     82      // 3 - Disable the interrupts
     82      // 4 - Final suspend
     83      // The order of these 4 operations is very important
    7584      //Final suspend, should never return
    76       __suspend_internal();
     85      __leave_thread_monitor( thrd );
    7786      abortf("Resumed dead thread");
    7887}
     
    8089
    8190void CtxStart(
    82       void (*main)(void *), 
    83       struct coroutine_desc *(*get_coroutine)(void *), 
    84       void *this, 
     91      void (*main)(void *),
     92      struct coroutine_desc *(*get_coroutine)(void *),
     93      void *this,
    8594      void (*invoke)(void *)
    8695) {
     
    108117        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = invoke;
    109118      ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520
    110       ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7 
     119      ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7
    111120
    112121#elif defined( __x86_64__ )
     
    128137      ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fixedRegisters[1] = invoke;
    129138      ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520
    130       ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7 
     139      ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7
    131140#else
    132141      #error Only __i386__ and __x86_64__ is supported for threads in cfa
  • src/libcfa/concurrency/invoke.h

    r55a68c3 r3d4b23fa  
    3131      struct spinlock {
    3232            volatile int lock;
     33            #ifdef __CFA_DEBUG__
     34                  const char * prev_name;
     35                  void* prev_thrd;
     36            #endif
    3337      };
    3438
     
    8387            struct __thread_queue_t entry_queue;      // queue of threads that are blocked waiting for the monitor
    8488            struct __condition_stack_t signal_stack;  // stack of conditions to run next once we exit the monitor
    85             struct monitor_desc * stack_owner;        // if bulk acquiring was used we need to synchronize signals with an other monitor
    8689            unsigned int recursion;                   // monitor routines can be called recursively, we need to keep track of that
    8790      };
     
    99102#ifndef _INVOKE_PRIVATE_H_
    100103#define _INVOKE_PRIVATE_H_
    101      
     104
    102105      struct machine_context_t {
    103106            void *SP;
     
    109112      extern void CtxInvokeStub( void );
    110113      void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
    111       void CtxGet( void * this ) asm ("CtxGet");
     114
     115      #if   defined( __x86_64__ )
     116      #define CtxGet( ctx ) __asm__ ( \
     117                  "movq %%rsp,%0\n"   \
     118                  "movq %%rbp,%1\n"   \
     119            : "=rm" (ctx.SP), "=rm" (ctx.FP) )
     120      #elif defined( __i386__ )
     121      #define CtxGet( ctx ) __asm__ ( \
     122                  "movl %%esp,%0\n"   \
     123                  "movl %%ebp,%1\n"   \
     124            : "=rm" (ctx.SP), "=rm" (ctx.FP) )
     125      #endif
    112126
    113127#endif //_INVOKE_PRIVATE_H_
  • src/libcfa/concurrency/kernel

    r55a68c3 r3d4b23fa  
    2828//-----------------------------------------------------------------------------
    2929// Locks
    30 bool try_lock( spinlock * );
    31 void lock( spinlock * );
    32 void unlock( spinlock * );
     30bool try_lock  ( spinlock * DEBUG_CTX_PARAM2 );
     31void lock      ( spinlock * DEBUG_CTX_PARAM2 );
     32void lock_yield( spinlock * DEBUG_CTX_PARAM2 );
     33void unlock    ( spinlock * );
    3334
    34 struct signal_once {
    35         volatile bool cond;
    36         struct spinlock lock;
    37         struct __thread_queue_t blocked;
     35struct semaphore {
     36        spinlock lock;
     37        int count;
     38        __thread_queue_t waiting;
    3839};
    3940
    40 void ?{}(signal_once * this);
    41 void ^?{}(signal_once * this);
     41void  ?{}(semaphore * this, int count = 1);
     42void ^?{}(semaphore * this);
     43void P(semaphore * this);
     44void V(semaphore * this);
    4245
    43 void wait( signal_once * );
    44 void signal( signal_once * );
    4546
    4647//-----------------------------------------------------------------------------
     
    6869        unsigned short thrd_count;
    6970};
    70 static inline void ?{}(FinishAction * this) { 
     71static inline void ?{}(FinishAction * this) {
    7172        this->action_code = No_Action;
    7273        this->thrd = NULL;
     
    7879        struct processorCtx_t * runner;
    7980        cluster * cltr;
    80         coroutine_desc * current_coroutine;
    81         thread_desc * current_thread;
    8281        pthread_t kernel_thread;
    83        
    84         signal_once terminated;
     82
     83        semaphore terminated;
    8584        volatile bool is_terminated;
    8685
     
    9089        unsigned int preemption;
    9190
    92         unsigned short disable_preempt_count;
     91        bool pending_preemption;
    9392
    94         bool pending_preemption;
     93        char * last_enable;
    9594};
    9695
  • src/libcfa/concurrency/kernel.c

    r55a68c3 r3d4b23fa  
    1515//
    1616
    17 #include "startup.h"
    18 
    19 //Start and stop routine for the kernel, declared first to make sure they run first
    20 void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
    21 void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
    22 
    23 //Header
    24 #include "kernel_private.h"
     17#include "libhdr.h"
    2518
    2619//C Includes
     
    3528
    3629//CFA Includes
    37 #include "libhdr.h"
     30#include "kernel_private.h"
    3831#include "preemption.h"
     32#include "startup.h"
    3933
    4034//Private includes
     
    4236#include "invoke.h"
    4337
     38//Start and stop routine for the kernel, declared first to make sure they run first
     39void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
     40void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
     41
    4442//-----------------------------------------------------------------------------
    4543// Kernel storage
    46 #define KERNEL_STORAGE(T,X) static char X##_storage[sizeof(T)]
     44#define KERNEL_STORAGE(T,X) static char X##Storage[sizeof(T)]
    4745
    4846KERNEL_STORAGE(processorCtx_t, systemProcessorCtx);
     
    5048KERNEL_STORAGE(system_proc_t, systemProcessor);
    5149KERNEL_STORAGE(thread_desc, mainThread);
    52 KERNEL_STORAGE(machine_context_t, mainThread_context);
     50KERNEL_STORAGE(machine_context_t, mainThreadCtx);
    5351
    5452cluster * systemCluster;
     
    5957// Global state
    6058
    61 thread_local processor * this_processor;
    62 
    63 coroutine_desc * this_coroutine(void) {
    64         return this_processor->current_coroutine;
    65 }
    66 
    67 thread_desc * this_thread(void) {
    68         return this_processor->current_thread;
    69 }
     59volatile thread_local processor * this_processor;
     60volatile thread_local coroutine_desc * this_coroutine;
     61volatile thread_local thread_desc * this_thread;
     62volatile thread_local bool preemption_in_progress = 0;
     63volatile thread_local unsigned short disable_preempt_count = 1;
    7064
    7165//-----------------------------------------------------------------------------
    7266// Main thread construction
    7367struct current_stack_info_t {
    74         machine_context_t ctx; 
     68        machine_context_t ctx;
    7569        unsigned int size;              // size of stack
    7670        void *base;                             // base of stack
     
    8276
    8377void ?{}( current_stack_info_t * this ) {
    84         CtxGet( &this->ctx );
     78        CtxGet( this->ctx );
    8579        this->base = this->ctx.FP;
    8680        this->storage = this->ctx.SP;
     
    9185
    9286        this->limit = (void *)(((intptr_t)this->base) - this->size);
    93         this->context = &mainThread_context_storage;
     87        this->context = &mainThreadCtxStorage;
    9488        this->top = this->base;
    9589}
     
    106100
    107101void ?{}( coroutine_desc * this, current_stack_info_t * info) {
    108         (&this->stack){ info }; 
     102        (&this->stack){ info };
    109103        this->name = "Main Thread";
    110104        this->errno_ = 0;
     
    136130void ?{}(processor * this, cluster * cltr) {
    137131        this->cltr = cltr;
    138         this->current_coroutine = NULL;
    139         this->current_thread = NULL;
    140         (&this->terminated){};
     132        (&this->terminated){ 0 };
    141133        this->is_terminated = false;
    142134        this->preemption_alarm = NULL;
    143135        this->preemption = default_preemption();
    144         this->disable_preempt_count = 1;                //Start with interrupts disabled
    145136        this->pending_preemption = false;
    146137
     
    150141void ?{}(processor * this, cluster * cltr, processorCtx_t * runner) {
    151142        this->cltr = cltr;
    152         this->current_coroutine = NULL;
    153         this->current_thread = NULL;
    154         (&this->terminated){};
     143        (&this->terminated){ 0 };
    155144        this->is_terminated = false;
    156         this->disable_preempt_count = 0;
     145        this->preemption_alarm = NULL;
     146        this->preemption = default_preemption();
    157147        this->pending_preemption = false;
     148        this->kernel_thread = pthread_self();
    158149
    159150        this->runner = runner;
    160         LIB_DEBUG_PRINT_SAFE("Kernel : constructing processor context %p\n", runner);
     151        LIB_DEBUG_PRINT_SAFE("Kernel : constructing system processor context %p\n", runner);
    161152        runner{ this };
    162153}
     154
     155LIB_DEBUG_DO( bool validate( alarm_list_t * this ); )
    163156
    164157void ?{}(system_proc_t * this, cluster * cltr, processorCtx_t * runner) {
     
    168161
    169162        (&this->proc){ cltr, runner };
     163
     164        verify( validate( &this->alarms ) );
    170165}
    171166
     
    174169                LIB_DEBUG_PRINT_SAFE("Kernel : core %p signaling termination\n", this);
    175170                this->is_terminated = true;
    176                 wait( &this->terminated );
     171                P( &this->terminated );
     172                pthread_join( this->kernel_thread, NULL );
    177173        }
    178174}
     
    184180
    185181void ^?{}(cluster * this) {
    186        
     182
    187183}
    188184
     
    203199
    204200                thread_desc * readyThread = NULL;
    205                 for( unsigned int spin_count = 0; ! this->is_terminated; spin_count++ ) 
     201                for( unsigned int spin_count = 0; ! this->is_terminated; spin_count++ )
    206202                {
    207203                        readyThread = nextThread( this->cltr );
     
    209205                        if(readyThread)
    210206                        {
     207                                verify( disable_preempt_count > 0 );
     208
    211209                                runThread(this, readyThread);
     210
     211                                verify( disable_preempt_count > 0 );
    212212
    213213                                //Some actions need to be taken from the kernel
     
    225225        }
    226226
    227         signal( &this->terminated );
     227        V( &this->terminated );
     228
    228229        LIB_DEBUG_PRINT_SAFE("Kernel : core %p terminated\n", this);
    229230}
    230231
    231 // runThread runs a thread by context switching 
    232 // from the processor coroutine to the target thread 
     232// runThread runs a thread by context switching
     233// from the processor coroutine to the target thread
    233234void runThread(processor * this, thread_desc * dst) {
    234235        coroutine_desc * proc_cor = get_coroutine(this->runner);
    235236        coroutine_desc * thrd_cor = get_coroutine(dst);
    236        
     237
    237238        //Reset the terminating actions here
    238239        this->finish.action_code = No_Action;
    239240
    240241        //Update global state
    241         this->current_thread = dst;
     242        this_thread = dst;
    242243
    243244        // Context Switch to the thread
     
    246247}
    247248
    248 // Once a thread has finished running, some of 
     249// Once a thread has finished running, some of
    249250// its final actions must be executed from the kernel
    250251void finishRunning(processor * this) {
     
    256257        }
    257258        else if( this->finish.action_code == Release_Schedule ) {
    258                 unlock( this->finish.lock );           
     259                unlock( this->finish.lock );
    259260                ScheduleThread( this->finish.thrd );
    260261        }
     
    289290        processor * proc = (processor *) arg;
    290291        this_processor = proc;
     292        this_coroutine = NULL;
     293        this_thread = NULL;
     294        disable_preempt_count = 1;
    291295        // SKULLDUGGERY: We want to create a context for the processor coroutine
    292296        // which is needed for the 2-step context switch. However, there is no reason
    293         // to waste the perfectly valid stack create by pthread. 
     297        // to waste the perfectly valid stack created by pthread.
    294298        current_stack_info_t info;
    295299        machine_context_t ctx;
     
    300304
    301305        //Set global state
    302         proc->current_coroutine = &proc->runner->__cor;
    303         proc->current_thread = NULL;
     306        this_coroutine = &proc->runner->__cor;
     307        this_thread = NULL;
    304308
    305309        //We now have a proper context from which to schedule threads
    306310        LIB_DEBUG_PRINT_SAFE("Kernel : core %p created (%p, %p)\n", proc, proc->runner, &ctx);
    307311
    308         // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't 
    309         // resume it to start it like it normally would, it will just context switch 
    310         // back to here. Instead directly call the main since we already are on the 
     312        // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
     313        // resume it to start it like it normally would, it will just context switch
     314        // back to here. Instead directly call the main since we already are on the
    311315        // appropriate stack.
    312316        proc_cor_storage.__cor.state = Active;
     
    315319
    316320        // Main routine of the core returned, the core is now fully terminated
    317         LIB_DEBUG_PRINT_SAFE("Kernel : core %p main ended (%p)\n", proc, proc->runner); 
     321        LIB_DEBUG_PRINT_SAFE("Kernel : core %p main ended (%p)\n", proc, proc->runner);
    318322
    319323        return NULL;
     
    322326void start(processor * this) {
    323327        LIB_DEBUG_PRINT_SAFE("Kernel : Starting core %p\n", this);
    324        
     328
    325329        pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this );
    326330
    327         LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this);       
     331        LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this);
    328332}
    329333
     
    331335// Scheduler routines
    332336void ScheduleThread( thread_desc * thrd ) {
    333         if( !thrd ) return;
     337        // if( !thrd ) return;
     338        assert( thrd );
     339        assert( thrd->cor.state != Halted );
     340
     341        verify( disable_preempt_count > 0 );
    334342
    335343        verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );
    336        
    337         lock( &systemProcessor->proc.cltr->lock );
     344
     345        lock( &systemProcessor->proc.cltr->lock DEBUG_CTX2 );
    338346        append( &systemProcessor->proc.cltr->ready_queue, thrd );
    339347        unlock( &systemProcessor->proc.cltr->lock );
     348
     349        verify( disable_preempt_count > 0 );
    340350}
    341351
    342352thread_desc * nextThread(cluster * this) {
    343         lock( &this->lock );
     353        verify( disable_preempt_count > 0 );
     354        lock( &this->lock DEBUG_CTX2 );
    344355        thread_desc * head = pop_head( &this->ready_queue );
    345356        unlock( &this->lock );
     357        verify( disable_preempt_count > 0 );
    346358        return head;
    347359}
    348360
    349 void ScheduleInternal() {
     361void BlockInternal() {
     362        disable_interrupts();
     363        verify( disable_preempt_count > 0 );
    350364        suspend();
    351 }
    352 
    353 void ScheduleInternal( spinlock * lock ) {
     365        verify( disable_preempt_count > 0 );
     366        enable_interrupts( DEBUG_CTX );
     367}
     368
     369void BlockInternal( spinlock * lock ) {
     370        disable_interrupts();
    354371        this_processor->finish.action_code = Release;
    355372        this_processor->finish.lock = lock;
     373
     374        verify( disable_preempt_count > 0 );
    356375        suspend();
    357 }
    358 
    359 void ScheduleInternal( thread_desc * thrd ) {
     376        verify( disable_preempt_count > 0 );
     377
     378        enable_interrupts( DEBUG_CTX );
     379}
     380
     381void BlockInternal( thread_desc * thrd ) {
     382        disable_interrupts();
     383        assert( thrd->cor.state != Halted );
    360384        this_processor->finish.action_code = Schedule;
    361385        this_processor->finish.thrd = thrd;
     386
     387        verify( disable_preempt_count > 0 );
    362388        suspend();
    363 }
    364 
    365 void ScheduleInternal( spinlock * lock, thread_desc * thrd ) {
     389        verify( disable_preempt_count > 0 );
     390
     391        enable_interrupts( DEBUG_CTX );
     392}
     393
     394void BlockInternal( spinlock * lock, thread_desc * thrd ) {
     395        disable_interrupts();
    366396        this_processor->finish.action_code = Release_Schedule;
    367397        this_processor->finish.lock = lock;
    368398        this_processor->finish.thrd = thrd;
     399
     400        verify( disable_preempt_count > 0 );
    369401        suspend();
    370 }
    371 
    372 void ScheduleInternal(spinlock ** locks, unsigned short count) {
     402        verify( disable_preempt_count > 0 );
     403
     404        enable_interrupts( DEBUG_CTX );
     405}
     406
     407void BlockInternal(spinlock ** locks, unsigned short count) {
     408        disable_interrupts();
    373409        this_processor->finish.action_code = Release_Multi;
    374410        this_processor->finish.locks = locks;
    375411        this_processor->finish.lock_count = count;
     412
     413        verify( disable_preempt_count > 0 );
    376414        suspend();
    377 }
    378 
    379 void ScheduleInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) {
     415        verify( disable_preempt_count > 0 );
     416
     417        enable_interrupts( DEBUG_CTX );
     418}
     419
     420void BlockInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) {
     421        disable_interrupts();
    380422        this_processor->finish.action_code = Release_Multi_Schedule;
    381423        this_processor->finish.locks = locks;
     
    383425        this_processor->finish.thrds = thrds;
    384426        this_processor->finish.thrd_count = thrd_count;
     427
     428        verify( disable_preempt_count > 0 );
     429        suspend();
     430        verify( disable_preempt_count > 0 );
     431
     432        enable_interrupts( DEBUG_CTX );
     433}
     434
     435void LeaveThread(spinlock * lock, thread_desc * thrd) {
     436        verify( disable_preempt_count > 0 );
     437        this_processor->finish.action_code = thrd ? Release_Schedule : Release;
     438        this_processor->finish.lock = lock;
     439        this_processor->finish.thrd = thrd;
     440
    385441        suspend();
    386442}
     
    392448// Kernel boot procedures
    393449void kernel_startup(void) {
    394         LIB_DEBUG_PRINT_SAFE("Kernel : Starting\n");   
     450        LIB_DEBUG_PRINT_SAFE("Kernel : Starting\n");
    395451
    396452        // Start by initializing the main thread
    397         // SKULLDUGGERY: the mainThread steals the process main thread 
     453        // SKULLDUGGERY: the mainThread steals the process main thread
    398454        // which will then be scheduled by the systemProcessor normally
    399         mainThread = (thread_desc *)&mainThread_storage;
     455        mainThread = (thread_desc *)&mainThreadStorage;
    400456        current_stack_info_t info;
    401457        mainThread{ &info };
     
    403459        LIB_DEBUG_PRINT_SAFE("Kernel : Main thread ready\n");
    404460
     461        // Initialize the system cluster
     462        systemCluster = (cluster *)&systemClusterStorage;
     463        systemCluster{};
     464
     465        LIB_DEBUG_PRINT_SAFE("Kernel : System cluster ready\n");
     466
     467        // Initialize the system processor and the system processor ctx
     468        // (the coroutine that contains the processing control flow)
     469        systemProcessor = (system_proc_t *)&systemProcessorStorage;
     470        systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtxStorage };
     471
     472        // Add the main thread to the ready queue
     473        // once resume is called on systemProcessor->runner the mainThread needs to be scheduled like any normal thread
     474        ScheduleThread(mainThread);
     475
     476        //initialize the global state variables
     477        this_processor = &systemProcessor->proc;
     478        this_thread = mainThread;
     479        this_coroutine = &mainThread->cor;
     480        disable_preempt_count = 1;
     481
    405482        // Enable preemption
    406483        kernel_start_preemption();
    407484
    408         // Initialize the system cluster
    409         systemCluster = (cluster *)&systemCluster_storage;
    410         systemCluster{};
    411 
    412         LIB_DEBUG_PRINT_SAFE("Kernel : System cluster ready\n");
    413 
    414         // Initialize the system processor and the system processor ctx
    415         // (the coroutine that contains the processing control flow)
    416         systemProcessor = (system_proc_t *)&systemProcessor_storage;
    417         systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtx_storage };
    418 
    419         // Add the main thread to the ready queue
    420         // once resume is called on systemProcessor->runner the mainThread needs to be scheduled like any normal thread
    421         ScheduleThread(mainThread);
    422 
    423         //initialize the global state variables
    424         this_processor = &systemProcessor->proc;
    425         this_processor->current_thread = mainThread;
    426         this_processor->current_coroutine = &mainThread->cor;
    427 
    428485        // SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX
    429486        // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
    430         // mainThread is on the ready queue when this call is made. 
     487        // mainThread is on the ready queue when this call is made.
    431488        resume( systemProcessor->proc.runner );
    432489
     
    435492        // THE SYSTEM IS NOW COMPLETELY RUNNING
    436493        LIB_DEBUG_PRINT_SAFE("Kernel : Started\n--------------------------------------------------\n\n");
     494
     495        enable_interrupts( DEBUG_CTX );
    437496}
    438497
    439498void kernel_shutdown(void) {
    440499        LIB_DEBUG_PRINT_SAFE("\n--------------------------------------------------\nKernel : Shutting down\n");
     500
     501        disable_interrupts();
    441502
    442503        // SKULLDUGGERY: Notify the systemProcessor it needs to terminates.
     
    448509        // THE SYSTEM IS NOW COMPLETELY STOPPED
    449510
     511        // Disable preemption
     512        kernel_stop_preemption();
     513
    450514        // Destroy the system processor and its context in reverse order of construction
    451515        // These were manually constructed so we need manually destroy them
     
    457521        ^(mainThread){};
    458522
    459         LIB_DEBUG_PRINT_SAFE("Kernel : Shutdown complete\n");   
     523        LIB_DEBUG_PRINT_SAFE("Kernel : Shutdown complete\n");
    460524}
    461525
     
    467531        // abort cannot be recursively entered by the same or different processors because all signal handlers return when
    468532        // the globalAbort flag is true.
    469         lock( &kernel_abort_lock );
     533        lock( &kernel_abort_lock DEBUG_CTX2 );
    470534
    471535        // first task to abort ?
     
    473537                kernel_abort_called = true;
    474538                unlock( &kernel_abort_lock );
    475         } 
     539        }
    476540        else {
    477541                unlock( &kernel_abort_lock );
    478                
     542
    479543                sigset_t mask;
    480544                sigemptyset( &mask );
     
    482546                sigaddset( &mask, SIGUSR1 );                    // block SIGUSR1 signals
    483547                sigsuspend( &mask );                            // block the processor to prevent further damage during abort
    484                 _exit( EXIT_FAILURE );                          // if processor unblocks before it is killed, terminate it             
    485         }
    486 
    487         return this_thread();
     548                _exit( EXIT_FAILURE );                          // if processor unblocks before it is killed, terminate it
     549        }
     550
     551        return this_thread;
    488552}
    489553
     
    494558        __lib_debug_write( STDERR_FILENO, abort_text, len );
    495559
    496         if ( thrd != this_coroutine() ) {
    497                 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine()->name, this_coroutine() );
     560        if ( thrd != this_coroutine ) {
     561                len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine->name, this_coroutine );
    498562                __lib_debug_write( STDERR_FILENO, abort_text, len );
    499         } 
     563        }
    500564        else {
    501565                __lib_debug_write( STDERR_FILENO, ".\n", 2 );
     
    505569extern "C" {
    506570        void __lib_debug_acquire() {
    507                 lock(&kernel_debug_lock);
     571                lock( &kernel_debug_lock DEBUG_CTX2 );
    508572        }
    509573
    510574        void __lib_debug_release() {
    511                 unlock(&kernel_debug_lock);
     575                unlock( &kernel_debug_lock );
    512576        }
    513577}
     
    525589}
    526590
    527 bool try_lock( spinlock * this ) {
     591bool try_lock( spinlock * this DEBUG_CTX_PARAM2 ) {
    528592        return this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0;
    529593}
    530594
    531 void lock( spinlock * this ) {
     595void lock( spinlock * this DEBUG_CTX_PARAM2 ) {
    532596        for ( unsigned int i = 1;; i += 1 ) {
    533                 if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) break;
    534         }
    535 }
     597                if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) { break; }
     598        }
     599        LIB_DEBUG_DO(
     600                this->prev_name = caller;
     601                this->prev_thrd = this_thread;
     602        )
     603}
     604
     605void lock_yield( spinlock * this DEBUG_CTX_PARAM2 ) {
     606        for ( unsigned int i = 1;; i += 1 ) {
     607                if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) { break; }
     608                yield();
     609        }
     610        LIB_DEBUG_DO(
     611                this->prev_name = caller;
     612                this->prev_thrd = this_thread;
     613        )
     614}
     615
    536616
    537617void unlock( spinlock * this ) {
     
    539619}
    540620
    541 void ?{}( signal_once * this ) {
    542         this->cond = false;
    543 }
    544 void ^?{}( signal_once * this ) {
    545 
    546 }
    547 
    548 void wait( signal_once * this ) {
    549         lock( &this->lock );
    550         if( !this->cond ) {
    551                 append( &this->blocked, this_thread() );
    552                 ScheduleInternal( &this->lock );
    553                 lock( &this->lock );
    554         }
     621void  ?{}( semaphore * this, int count = 1 ) {
     622        (&this->lock){};
     623        this->count = count;
     624        (&this->waiting){};
     625}
     626void ^?{}(semaphore * this) {}
     627
     628void P(semaphore * this) {
     629        lock( &this->lock DEBUG_CTX2 );
     630        this->count -= 1;
     631        if ( this->count < 0 ) {
     632                // queue current task
     633                append( &this->waiting, (thread_desc *)this_thread );
     634
     635                // atomically release spin lock and block
     636                BlockInternal( &this->lock );
     637        }
     638        else {
     639            unlock( &this->lock );
     640        }
     641}
     642
     643void V(semaphore * this) {
     644        thread_desc * thrd = NULL;
     645        lock( &this->lock DEBUG_CTX2 );
     646        this->count += 1;
     647        if ( this->count <= 0 ) {
     648                // remove task at head of waiting list
     649                thrd = pop_head( &this->waiting );
     650        }
     651
    555652        unlock( &this->lock );
    556 }
    557 
    558 void signal( signal_once * this ) {
    559         lock( &this->lock );
    560         {
    561                 this->cond = true;
    562 
    563                 thread_desc * it;
    564                 while( it = pop_head( &this->blocked) ) {
    565                         ScheduleThread( it );
    566                 }
    567         }
    568         unlock( &this->lock );
     653
     654        // make new owner
     655        WakeThread( thrd );
    569656}
    570657
     
    590677                }
    591678                head->next = NULL;
    592         }       
     679        }
    593680        return head;
    594681}
     
    609696                this->top = top->next;
    610697                top->next = NULL;
    611         }       
     698        }
    612699        return top;
    613700}
  • src/libcfa/concurrency/kernel_private.h

    r55a68c3 r3d4b23fa  
    1818#define KERNEL_PRIVATE_H
    1919
     20#include "libhdr.h"
     21
    2022#include "kernel"
    2123#include "thread"
     
    2325#include "alarm.h"
    2426
    25 #include "libhdr.h"
    2627
    2728//-----------------------------------------------------------------------------
    2829// Scheduler
     30
     31extern "C" {
     32        void disable_interrupts();
     33        void enable_interrupts_noRF();
     34        void enable_interrupts( DEBUG_CTX_PARAM );
     35}
     36
    2937void ScheduleThread( thread_desc * );
     38static inline void WakeThread( thread_desc * thrd ) {
     39        if( !thrd ) return;
     40
     41        disable_interrupts();
     42        ScheduleThread( thrd );
     43        enable_interrupts( DEBUG_CTX );
     44}
    3045thread_desc * nextThread(cluster * this);
    3146
    32 void ScheduleInternal(void);
    33 void ScheduleInternal(spinlock * lock);
    34 void ScheduleInternal(thread_desc * thrd);
    35 void ScheduleInternal(spinlock * lock, thread_desc * thrd);
    36 void ScheduleInternal(spinlock ** locks, unsigned short count);
    37 void ScheduleInternal(spinlock ** locks, unsigned short count, thread_desc ** thrds, unsigned short thrd_count);
     47void BlockInternal(void);
     48void BlockInternal(spinlock * lock);
     49void BlockInternal(thread_desc * thrd);
     50void BlockInternal(spinlock * lock, thread_desc * thrd);
     51void BlockInternal(spinlock ** locks, unsigned short count);
     52void BlockInternal(spinlock ** locks, unsigned short count, thread_desc ** thrds, unsigned short thrd_count);
     53void LeaveThread(spinlock * lock, thread_desc * thrd);
    3854
    3955//-----------------------------------------------------------------------------
     
    6076extern cluster * systemCluster;
    6177extern system_proc_t * systemProcessor;
    62 extern thread_local processor * this_processor;
    63 
    64 static inline void disable_interrupts() {
    65         __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, 1, __ATOMIC_SEQ_CST );
    66         assert( prev != (unsigned short) -1 );
    67 }
    68 
    69 static inline void enable_interrupts_noRF() {
    70         __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST );
    71         verify( prev != (unsigned short) 0 );
    72 }
    73 
    74 static inline void enable_interrupts() {
    75         __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST );
    76         verify( prev != (unsigned short) 0 );
    77         if( prev == 1 && this_processor->pending_preemption ) {
    78                 ScheduleInternal( this_processor->current_thread );
    79                 this_processor->pending_preemption = false;
    80         }
    81 }
     78extern volatile thread_local processor * this_processor;
     79extern volatile thread_local coroutine_desc * this_coroutine;
     80extern volatile thread_local thread_desc * this_thread;
     81extern volatile thread_local bool preemption_in_progress;
     82extern volatile thread_local unsigned short disable_preempt_count;
    8283
    8384//-----------------------------------------------------------------------------
  • src/libcfa/concurrency/monitor

    r55a68c3 r3d4b23fa  
    2626static inline void ?{}(monitor_desc * this) {
    2727        this->owner = NULL;
    28         this->stack_owner = NULL;
    2928        this->recursion = 0;
    3029}
  • src/libcfa/concurrency/monitor.c

    r55a68c3 r3d4b23fa  
    1919#include <stdlib>
    2020
     21#include "libhdr.h"
    2122#include "kernel_private.h"
    22 #include "libhdr.h"
    2323
    2424//-----------------------------------------------------------------------------
     
    4444
    4545extern "C" {
    46         void __enter_monitor_desc(monitor_desc * this) {
    47                 lock( &this->lock );
    48                 thread_desc * thrd = this_thread();
    49 
    50                 LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
     46        void __enter_monitor_desc( monitor_desc * this ) {
     47                lock_yield( &this->lock DEBUG_CTX2 );
     48                thread_desc * thrd = this_thread;
     49
     50                // LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
    5151
    5252                if( !this->owner ) {
     
    6262                        //Some one else has the monitor, wait in line for it
    6363                        append( &this->entry_queue, thrd );
    64                         LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
    65                         ScheduleInternal( &this->lock );
    66 
    67                         //ScheduleInternal will unlock spinlock, no need to unlock ourselves
    68                         return; 
     64                        // LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
     65                        BlockInternal( &this->lock );
     66
     67                        //BlockInternal will unlock spinlock, no need to unlock ourselves
     68                        return;
    6969                }
    7070
     
    7575        // leave pseudo code :
    7676        //      TODO
    77         void __leave_monitor_desc(monitor_desc * this) {
    78                 lock( &this->lock );
    79 
    80                 LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
    81                 verifyf( this_thread() == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread(), this->owner, this->recursion );
     77        void __leave_monitor_desc( monitor_desc * this ) {
     78                lock_yield( &this->lock DEBUG_CTX2 );
     79
     80                // LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i). ", this_thread, this, this->owner, this->recursion);
     81                verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion );
    8282
    8383                //Leaving a recursion level, decrement the counter
     
    9696                unlock( &this->lock );
    9797
    98                 LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);
     98                // LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);
    9999
    100100                //We need to wake-up the thread
    101                 ScheduleThread( new_owner );
     101                WakeThread( new_owner );
     102        }
     103
     104        void __leave_thread_monitor( thread_desc * thrd ) {
     105                monitor_desc * this = &thrd->mon;
     106                lock_yield( &this->lock DEBUG_CTX2 );
     107
     108                disable_interrupts();
     109
     110                thrd->cor.state = Halted;
     111
     112                verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );
     113
     114                //Leaving a recursion level, decrement the counter
     115                this->recursion -= 1;
     116
     117                //If we haven't left the last level of recursion
     118                //it means we don't need to do anything
     119                if( this->recursion != 0) {
     120                        unlock( &this->lock );
     121                        return;
     122                }
     123
     124                thread_desc * new_owner = next_thread( this );
     125
     126                LeaveThread( &this->lock, new_owner );
    102127        }
    103128}
     
    121146        enter( this->m, this->count );
    122147
    123         this->prev_mntrs = this_thread()->current_monitors;
    124         this->prev_count = this_thread()->current_monitor_count;
    125 
    126         this_thread()->current_monitors      = m;
    127         this_thread()->current_monitor_count = count;
     148        this->prev_mntrs = this_thread->current_monitors;
     149        this->prev_count = this_thread->current_monitor_count;
     150
     151        this_thread->current_monitors      = m;
     152        this_thread->current_monitor_count = count;
    128153}
    129154
     
    131156        leave( this->m, this->count );
    132157
    133         this_thread()->current_monitors      = this->prev_mntrs;
    134         this_thread()->current_monitor_count = this->prev_count;
     158        this_thread->current_monitors      = this->prev_mntrs;
     159        this_thread->current_monitor_count = this->prev_count;
    135160}
    136161
     
    159184// Internal scheduling
    160185void wait( condition * this, uintptr_t user_info = 0 ) {
    161         LIB_DEBUG_PRINT_SAFE("Waiting\n");
     186        // LIB_DEBUG_PRINT_SAFE("Waiting\n");
    162187
    163188        brand_condition( this );
     
    170195        unsigned short count = this->monitor_count;
    171196        unsigned int recursions[ count ];               //Save the current recursion levels to restore them later
    172         spinlock *   locks     [ count ];               //We need to pass-in an array of locks to ScheduleInternal
    173 
    174         LIB_DEBUG_PRINT_SAFE("count %i\n", count);
    175 
    176         __condition_node_t waiter = { this_thread(), count, user_info };
     197        spinlock *   locks     [ count ];               //We need to pass-in an array of locks to BlockInternal
     198
     199        // LIB_DEBUG_PRINT_SAFE("count %i\n", count);
     200
     201        __condition_node_t waiter = { (thread_desc*)this_thread, count, user_info };
    177202
    178203        __condition_criterion_t criteria[count];
    179204        for(int i = 0; i < count; i++) {
    180205                (&criteria[i]){ this->monitors[i], &waiter };
    181                 LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
     206                // LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
    182207        }
    183208
     
    201226        }
    202227
    203         LIB_DEBUG_PRINT_SAFE("Will unblock: ");
     228        // LIB_DEBUG_PRINT_SAFE("Will unblock: ");
    204229        for(int i = 0; i < thread_count; i++) {
    205                 LIB_DEBUG_PRINT_SAFE("%p ", threads[i]);
    206         }
    207         LIB_DEBUG_PRINT_SAFE("\n");
     230                // LIB_DEBUG_PRINT_SAFE("%p ", threads[i]);
     231        }
     232        // LIB_DEBUG_PRINT_SAFE("\n");
    208233
    209234        // Everything is ready to go to sleep
    210         ScheduleInternal( locks, count, threads, thread_count );
     235        BlockInternal( locks, count, threads, thread_count );
    211236
    212237
     
    222247bool signal( condition * this ) {
    223248        if( is_empty( this ) ) {
    224                 LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
     249                // LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
    225250                return false;
    226251        }
     
    231256
    232257        unsigned short count = this->monitor_count;
    233        
     258
    234259        //Some more checking in debug
    235260        LIB_DEBUG_DO(
    236                 thread_desc * this_thrd = this_thread();
     261                thread_desc * this_thrd = this_thread;
    237262                if ( this->monitor_count != this_thrd->current_monitor_count ) {
    238263                        abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count );
     
    248273        //Lock all the monitors
    249274        lock_all( this->monitors, NULL, count );
    250         LIB_DEBUG_PRINT_SAFE("Signalling");
     275        // LIB_DEBUG_PRINT_SAFE("Signalling");
    251276
    252277        //Pop the head of the waiting queue
     
    256281        for(int i = 0; i < count; i++) {
    257282                __condition_criterion_t * crit = &node->criteria[i];
    258                 LIB_DEBUG_PRINT_SAFE(" %p", crit->target);
     283                // LIB_DEBUG_PRINT_SAFE(" %p", crit->target);
    259284                assert( !crit->ready );
    260285                push( &crit->target->signal_stack, crit );
    261286        }
    262287
    263         LIB_DEBUG_PRINT_SAFE("\n");
     288        // LIB_DEBUG_PRINT_SAFE("\n");
    264289
    265290        //Release
     
    281306        unsigned short count = this->monitor_count;
    282307        unsigned int recursions[ count ];               //Save the current recursion levels to restore them later
    283         spinlock *   locks     [ count ];               //We need to pass-in an array of locks to ScheduleInternal
     308        spinlock *   locks     [ count ];               //We need to pass-in an array of locks to BlockInternal
    284309
    285310        lock_all( this->monitors, locks, count );
    286311
    287312        //create creteria
    288         __condition_node_t waiter = { this_thread(), count, 0 };
     313        __condition_node_t waiter = { (thread_desc*)this_thread, count, 0 };
    289314
    290315        __condition_criterion_t criteria[count];
    291316        for(int i = 0; i < count; i++) {
    292317                (&criteria[i]){ this->monitors[i], &waiter };
    293                 LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
     318                // LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
    294319                push( &criteria[i].target->signal_stack, &criteria[i] );
    295320        }
     
    309334
    310335        //Everything is ready to go to sleep
    311         ScheduleInternal( locks, count, &signallee, 1 );
     336        BlockInternal( locks, count, &signallee, 1 );
    312337
    313338
     
    325350
    326351uintptr_t front( condition * this ) {
    327         verifyf( !is_empty(this), 
     352        verifyf( !is_empty(this),
    328353                "Attempt to access user data on an empty condition.\n"
    329354                "Possible cause is not checking if the condition is empty before reading stored data."
     
    335360// Internal scheduling
    336361void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) ) {
    337         // thread_desc * this = this_thread();
     362        // thread_desc * this = this_thread;
    338363
    339364        // unsigned short count = this->current_monitor_count;
    340365        // unsigned int recursions[ count ];            //Save the current recursion levels to restore them later
    341         // spinlock *   locks     [ count ];            //We need to pass-in an array of locks to ScheduleInternal
     366        // spinlock *   locks     [ count ];            //We need to pass-in an array of locks to BlockInternal
    342367
    343368        // lock_all( this->current_monitors, locks, count );
     
    348373
    349374        // // // Everything is ready to go to sleep
    350         // // ScheduleInternal( locks, count, threads, thread_count );
     375        // // BlockInternal( locks, count, threads, thread_count );
    351376
    352377
     
    393418static inline void lock_all( spinlock ** locks, unsigned short count ) {
    394419        for( int i = 0; i < count; i++ ) {
    395                 lock( locks[i] );
     420                lock_yield( locks[i] DEBUG_CTX2 );
    396421        }
    397422}
     
    400425        for( int i = 0; i < count; i++ ) {
    401426                spinlock * l = &source[i]->lock;
    402                 lock( l );
     427                lock_yield( l DEBUG_CTX2 );
    403428                if(locks) locks[i] = l;
    404429        }
     
    443468        for(    int i = 0; i < count; i++ ) {
    444469
    445                 LIB_DEBUG_PRINT_SAFE( "Checking %p for %p\n", &criteria[i], target );
     470                // LIB_DEBUG_PRINT_SAFE( "Checking %p for %p\n", &criteria[i], target );
    446471                if( &criteria[i] == target ) {
    447472                        criteria[i].ready = true;
    448                         LIB_DEBUG_PRINT_SAFE( "True\n" );
     473                        // LIB_DEBUG_PRINT_SAFE( "True\n" );
    449474                }
    450475
     
    452477        }
    453478
    454         LIB_DEBUG_PRINT_SAFE( "Runing %i\n", ready2run );
     479        // LIB_DEBUG_PRINT_SAFE( "Runing %i\n", ready2run );
    455480        return ready2run ? node->waiting_thread : NULL;
    456481}
    457482
    458483static inline void brand_condition( condition * this ) {
    459         thread_desc * thrd = this_thread();
     484        thread_desc * thrd = this_thread;
    460485        if( !this->monitors ) {
    461                 LIB_DEBUG_PRINT_SAFE("Branding\n");
     486                // LIB_DEBUG_PRINT_SAFE("Branding\n");
    462487                assertf( thrd->current_monitors != NULL, "No current monitor to brand condition", thrd->current_monitors );
    463488                this->monitor_count = thrd->current_monitor_count;
  • src/libcfa/concurrency/preemption.c

    r55a68c3 r3d4b23fa  
    1515//
    1616
     17#include "libhdr.h"
    1718#include "preemption.h"
    1819
    1920extern "C" {
     21#include <errno.h>
     22#include <execinfo.h>
     23#define __USE_GNU
    2024#include <signal.h>
    21 }
    22 
    23 #define __CFA_DEFAULT_PREEMPTION__ 10
     25#undef __USE_GNU
     26#include <stdio.h>
     27#include <string.h>
     28#include <unistd.h>
     29}
     30
     31
     32#ifdef __USE_STREAM__
     33#include "fstream"
     34#endif
     35
     36#define __CFA_DEFAULT_PREEMPTION__ 10000
    2437
    2538__attribute__((weak)) unsigned int default_preemption() {
     
    2740}
    2841
     42#define __CFA_SIGCXT__ ucontext_t *
     43#define __CFA_SIGPARMS__ __attribute__((unused)) int sig, __attribute__((unused)) siginfo_t *sfp, __attribute__((unused)) __CFA_SIGCXT__ cxt
     44
    2945static void preempt( processor   * this );
    3046static void timeout( thread_desc * this );
    3147
     48void sigHandler_ctxSwitch( __CFA_SIGPARMS__ );
     49void sigHandler_alarm    ( __CFA_SIGPARMS__ );
     50void sigHandler_segv     ( __CFA_SIGPARMS__ );
     51void sigHandler_abort    ( __CFA_SIGPARMS__ );
     52
     53static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags );
     54LIB_DEBUG_DO( bool validate( alarm_list_t * this ); )
     55
     56#ifdef __x86_64__
     57#define CFA_REG_IP REG_RIP
     58#else
     59#define CFA_REG_IP REG_EIP
     60#endif
     61
     62
    3263//=============================================================================================
    3364// Kernel Preemption logic
    3465//=============================================================================================
    3566
    36 void kernel_start_preemption() {
    37 
    38 }
    39 
    4067void tick_preemption() {
     68        // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Ticking preemption\n" );
     69
    4170        alarm_list_t * alarms = &systemProcessor->alarms;
    4271        __cfa_time_t currtime = __kernel_get_time();
    4372        while( alarms->head && alarms->head->alarm < currtime ) {
    4473                alarm_node_t * node = pop(alarms);
     74                // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking %p\n", node );
     75
    4576                if( node->kernel_alarm ) {
    4677                        preempt( node->proc );
     
    5081                }
    5182
     83                verify( validate( alarms ) );
     84
    5285                if( node->period > 0 ) {
    53                         node->alarm += node->period;
     86                        node->alarm = currtime + node->period;
    5487                        insert( alarms, node );
    5588                }
     
    6295                __kernel_set_timer( alarms->head->alarm - currtime );
    6396        }
     97
     98        verify( validate( alarms ) );
     99        // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking preemption done\n" );
    64100}
    65101
    66102void update_preemption( processor * this, __cfa_time_t duration ) {
    67         //     assert( THREAD_GETMEM( disableInt ) && THREAD_GETMEM( disableIntCnt ) == 1 );
     103        LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Processor : %p updating preemption to %lu\n", this, duration );
     104
    68105        alarm_node_t * alarm = this->preemption_alarm;
     106        duration *= 1000;
    69107
    70108        // Alarms need to be enabled
     
    89127}
    90128
     129//=============================================================================================
     130// Kernel Signal Tools
     131//=============================================================================================
     132
     133LIB_DEBUG_DO( static thread_local void * last_interrupt = 0; )
     134
     135extern "C" {
     136        void disable_interrupts() {
     137                __attribute__((unused)) unsigned short new_val = __atomic_add_fetch_2( &disable_preempt_count, 1, __ATOMIC_SEQ_CST );
     138                verify( new_val < (unsigned short)65_000 );
     139                verify( new_val != (unsigned short) 0 );
     140        }
     141
     142        void enable_interrupts_noRF() {
     143                __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
     144                verify( prev != (unsigned short) 0 );
     145        }
     146
     147        void enable_interrupts( DEBUG_CTX_PARAM ) {
     148                processor * proc   = this_processor;
     149                thread_desc * thrd = this_thread;
     150                unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
     151                verify( prev != (unsigned short) 0 );
     152                if( prev == 1 && proc->pending_preemption ) {
     153                        proc->pending_preemption = false;
     154                        BlockInternal( thrd );
     155                }
     156
     157                LIB_DEBUG_DO( proc->last_enable = caller; )
     158        }
     159}
     160
     161static inline void signal_unblock( int sig ) {
     162        sigset_t mask;
     163        sigemptyset( &mask );
     164        sigaddset( &mask, sig );
     165
     166        if ( pthread_sigmask( SIG_UNBLOCK, &mask, NULL ) == -1 ) {
     167            abortf( "internal error, pthread_sigmask" );
     168        }
     169}
     170
     171static inline void signal_block( int sig ) {
     172        sigset_t mask;
     173        sigemptyset( &mask );
     174        sigaddset( &mask, sig );
     175
     176        if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
     177            abortf( "internal error, pthread_sigmask" );
     178        }
     179}
     180
     181static inline bool preemption_ready() {
     182        return disable_preempt_count == 0 && !preemption_in_progress;
     183}
     184
     185static inline void defer_ctxSwitch() {
     186        this_processor->pending_preemption = true;
     187}
     188
     189static inline void defer_alarm() {
     190        systemProcessor->pending_alarm = true;
     191}
     192
     193static void preempt( processor * this ) {
     194        pthread_kill( this->kernel_thread, SIGUSR1 );
     195}
     196
     197static void timeout( thread_desc * this ) {
     198        //TODO : implement waking threads
     199}
     200
     201//=============================================================================================
     202// Kernel Signal Startup/Shutdown logic
     203//=============================================================================================
     204
     205static pthread_t alarm_thread;
     206void * alarm_loop( __attribute__((unused)) void * args );
     207
     208void kernel_start_preemption() {
     209        LIB_DEBUG_PRINT_SAFE("Kernel : Starting preemption\n");
     210        __kernel_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO );
     211        // __kernel_sigaction( SIGSEGV, sigHandler_segv     , SA_SIGINFO );
     212        // __kernel_sigaction( SIGBUS , sigHandler_segv     , SA_SIGINFO );
     213
     214        signal_block( SIGALRM );
     215
     216        pthread_create( &alarm_thread, NULL, alarm_loop, NULL );
     217}
     218
     219void kernel_stop_preemption() {
     220        LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopping\n");
     221
     222        sigset_t mask;
     223        sigfillset( &mask );
     224        sigprocmask( SIG_BLOCK, &mask, NULL );
     225
     226        sigval val = { 1 };
     227        pthread_sigqueue( alarm_thread, SIGALRM, val );
     228        pthread_join( alarm_thread, NULL );
     229        LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopped\n");
     230}
     231
    91232void ?{}( preemption_scope * this, processor * proc ) {
    92233        (&this->alarm){ proc };
     
    97238
    98239void ^?{}( preemption_scope * this ) {
     240        disable_interrupts();
     241
    99242        update_preemption( this->proc, 0 );
    100243}
    101244
    102245//=============================================================================================
    103 // Kernel Signal logic
    104 //=============================================================================================
    105 
    106 static inline bool preemption_ready() {
    107         return this_processor->disable_preempt_count == 0;
    108 }
    109 
    110 static inline void defer_ctxSwitch() {
    111         this_processor->pending_preemption = true;
    112 }
    113 
    114 static inline void defer_alarm() {
    115         systemProcessor->pending_alarm = true;
    116 }
    117 
    118 void sigHandler_ctxSwitch( __attribute__((unused)) int sig ) {
     246// Kernel Signal Handlers
     247//=============================================================================================
     248
     249void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) {
     250        LIB_DEBUG_DO( last_interrupt = (void *)(cxt->uc_mcontext.gregs[CFA_REG_IP]); )
    119251        if( preemption_ready() ) {
    120                 ScheduleInternal( this_processor->current_thread );
     252                preemption_in_progress = true;
     253                signal_unblock( SIGUSR1 );
     254                this_processor->pending_preemption = false;
     255                preemption_in_progress = false;
     256                BlockInternal( (thread_desc*)this_thread );
    121257        }
    122258        else {
     
    125261}
    126262
    127 void sigHandler_alarm( __attribute__((unused)) int sig ) {
    128         if( try_lock( &systemProcessor->alarm_lock ) ) {
    129                 tick_preemption();
    130                 unlock( &systemProcessor->alarm_lock );
    131         }
    132         else {
    133                 defer_alarm();
    134         }
    135 }
    136 
    137 static void preempt( processor * this ) {
    138         pthread_kill( this->kernel_thread, SIGUSR1 );
    139 }
    140 
    141 static void timeout( thread_desc * this ) {
    142         //TODO : implement waking threads
    143 }
     263void * alarm_loop( __attribute__((unused)) void * args ) {
     264        sigset_t mask;
     265        sigemptyset( &mask );
     266        sigaddset( &mask, SIGALRM );
     267
     268        if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
     269            abortf( "internal error, pthread_sigmask" );
     270        }
     271
     272        while( true ) {
     273                siginfo_t info;
     274                int sig = sigwaitinfo( &mask, &info );
     275                if( sig < 0 ) {
     276                        abortf( "internal error, sigwait" );
     277                }
     278                else if( sig == SIGALRM )
     279                {
     280                        LIB_DEBUG_PRINT_SAFE("Kernel : Caught signal %d (%d)\n", sig, info.si_value.sival_int );
     281                        if( info.si_value.sival_int == 0 )
     282                        {
     283                                LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread tick\n");
     284                                lock( &systemProcessor->alarm_lock DEBUG_CTX2 );
     285                                tick_preemption();
     286                                unlock( &systemProcessor->alarm_lock );
     287                        }
     288                        else if( info.si_value.sival_int == 1 )
     289                        {
     290                                break;
     291                        }
     292                }
     293                else
     294                {
     295                        LIB_DEBUG_PRINT_SAFE("Kernel : Unexpected signal %d (%d)\n", sig, info.si_value.sival_int);
     296                }
     297        }
     298
     299        LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread stopping\n");
     300        return NULL;
     301}
     302
     303static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags ) {
     304        struct sigaction act;
     305
     306        act.sa_sigaction = (void (*)(int, siginfo_t *, void *))handler;
     307        act.sa_flags = flags;
     308
     309        if ( sigaction( sig, &act, NULL ) == -1 ) {
     310                LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO,
     311                        " __kernel_sigaction( sig:%d, handler:%p, flags:%d ), problem installing signal handler, error(%d) %s.\n",
     312                        sig, handler, flags, errno, strerror( errno )
     313                );
     314                _exit( EXIT_FAILURE );
     315        }
     316}
     317
     318typedef void (*sa_handler_t)(int);
     319
     320static void __kernel_sigdefault( int sig ) {
     321        struct sigaction act;
     322
     323        // act.sa_handler = SIG_DFL;
     324        act.sa_flags = 0;
     325        sigemptyset( &act.sa_mask );
     326
     327        if ( sigaction( sig, &act, NULL ) == -1 ) {
     328                LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO,
     329                        " __kernel_sigdefault( sig:%d ), problem reseting signal handler, error(%d) %s.\n",
     330                        sig, errno, strerror( errno )
     331                );
     332                _exit( EXIT_FAILURE );
     333        }
     334}
     335
     336//=============================================================================================
     337// Terminating Signals logic
     338//=============================================================================================
     339
     340LIB_DEBUG_DO(
     341        static void __kernel_backtrace( int start ) {
     342                // skip first N stack frames
     343
     344                enum { Frames = 50 };
     345                void * array[Frames];
     346                int size = backtrace( array, Frames );
     347                char ** messages = backtrace_symbols( array, size );
     348
     349                // find executable name
     350                *index( messages[0], '(' ) = '\0';
     351                #ifdef __USE_STREAM__
     352                serr | "Stack back trace for:" | messages[0] | endl;
     353                #else
     354                fprintf( stderr, "Stack back trace for: %s\n", messages[0]);
     355                #endif
     356
     357                // skip last 2 stack frames after main
     358                for ( int i = start; i < size && messages != NULL; i += 1 ) {
     359                        char * name = NULL;
     360                        char * offset_begin = NULL;
     361                        char * offset_end = NULL;
     362
     363                        for ( char *p = messages[i]; *p; ++p ) {
     364                                // find parantheses and +offset
     365                                if ( *p == '(' ) {
     366                                        name = p;
     367                                }
     368                                else if ( *p == '+' ) {
     369                                        offset_begin = p;
     370                                }
     371                                else if ( *p == ')' ) {
     372                                        offset_end = p;
     373                                        break;
     374                                }
     375                        }
     376
     377                        // if line contains symbol print it
     378                        int frameNo = i - start;
     379                        if ( name && offset_begin && offset_end && name < offset_begin ) {
     380                                // delimit strings
     381                                *name++ = '\0';
     382                                *offset_begin++ = '\0';
     383                                *offset_end++ = '\0';
     384
     385                                #ifdef __USE_STREAM__
     386                                serr    | "("  | frameNo | ")" | messages[i] | ":"
     387                                        | name | "+" | offset_begin | offset_end | endl;
     388                                #else
     389                                fprintf( stderr, "(%i) %s : %s + %s %s\n", frameNo, messages[i], name, offset_begin, offset_end);
     390                                #endif
     391                        }
     392                        // otherwise, print the whole line
     393                        else {
     394                                #ifdef __USE_STREAM__
     395                                serr | "(" | frameNo | ")" | messages[i] | endl;
     396                                #else
     397                                fprintf( stderr, "(%i) %s\n", frameNo, messages[i] );
     398                                #endif
     399                        }
     400                }
     401
     402                free( messages );
     403        }
     404)
     405
     406// void sigHandler_segv( __CFA_SIGPARMS__ ) {
     407//      LIB_DEBUG_DO(
     408//              #ifdef __USE_STREAM__
     409//              serr    | "*CFA runtime error* program cfa-cpp terminated with"
     410//                      | (sig == SIGSEGV ? "segment fault." : "bus error.")
     411//                      | endl;
     412//              #else
     413//              fprintf( stderr, "*CFA runtime error* program cfa-cpp terminated with %s\n", sig == SIGSEGV ? "segment fault." : "bus error." );
     414//              #endif
     415
     416//              // skip first 2 stack frames
     417//              __kernel_backtrace( 1 );
     418//      )
     419//      exit( EXIT_FAILURE );
     420// }
     421
     422// void sigHandler_abort( __CFA_SIGPARMS__ ) {
     423//      // skip first 6 stack frames
     424//      LIB_DEBUG_DO( __kernel_backtrace( 6 ); )
     425
     426//      // reset default signal handler
     427//      __kernel_sigdefault( SIGABRT );
     428
     429//      raise( SIGABRT );
     430// }
  • src/libcfa/concurrency/thread

    r55a68c3 r3d4b23fa  
    5454}
    5555
    56 thread_desc * this_thread(void);
     56extern volatile thread_local thread_desc * this_thread;
    5757
    5858forall( dtype T | is_thread(T) )
  • src/libcfa/concurrency/thread.c

    r55a68c3 r3d4b23fa  
    2828}
    2929
    30 extern thread_local processor * this_processor;
     30extern volatile thread_local processor * this_processor;
    3131
    3232//-----------------------------------------------------------------------------
     
    7171        coroutine_desc* thrd_c = get_coroutine(this);
    7272        thread_desc*  thrd_h = get_thread   (this);
    73         thrd_c->last = this_coroutine();
    74         this_processor->current_coroutine = thrd_c;
     73        thrd_c->last = this_coroutine;
    7574
    76         LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
     75        // LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
    7776
     77        disable_interrupts();
    7878        create_stack(&thrd_c->stack, thrd_c->stack.size);
     79        this_coroutine = thrd_c;
    7980        CtxStart(this, CtxInvokeThread);
     81        assert( thrd_c->last->stack.context );
    8082        CtxSwitch( thrd_c->last->stack.context, thrd_c->stack.context );
    8183
    8284        ScheduleThread(thrd_h);
     85        enable_interrupts( DEBUG_CTX );
    8386}
    8487
    8588void yield( void ) {
    86         ScheduleInternal( this_processor->current_thread );
     89        BlockInternal( (thread_desc *)this_thread );
    8790}
    8891
     
    9598void ThreadCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
    9699        // set state of current coroutine to inactive
    97         src->state = Inactive;
     100        src->state = src->state == Halted ? Halted : Inactive;
    98101        dst->state = Active;
    99102
     
    103106        // set new coroutine that the processor is executing
    104107        // and context switch to it
    105         this_processor->current_coroutine = dst;
     108        this_coroutine = dst;
     109        assert( src->stack.context );
    106110        CtxSwitch( src->stack.context, dst->stack.context );
    107         this_processor->current_coroutine = src;
     111        this_coroutine = src;
    108112
    109113        // set state of new coroutine to active
    110         dst->state = Inactive;
     114        dst->state = dst->state == Halted ? Halted : Inactive;
    111115        src->state = Active;
    112116}
Note: See TracChangeset for help on using the changeset viewer.