Changes in / [4c03e63:7bbba76]


Ignore:
Location:
src
Files:
1 added
18 edited

Legend:

Unmodified
Added
Removed
  • src/benchmark/CorCtxSwitch.c

    r4c03e63 r7bbba76  
    3131
    3232        StartTime = Time();
    33         // for ( volatile unsigned int i = 0; i < NoOfTimes; i += 1 ) {
    34         //      resume( this_coroutine() );
    35         //      // resume( &s );       
    36         // }
    3733        resumer( &s, NoOfTimes );
    3834        EndTime = Time();
  • src/benchmark/csv-data.c

    r4c03e63 r7bbba76  
    3838
    3939        StartTime = Time();
    40         // for ( volatile unsigned int i = 0; i < NoOfTimes; i += 1 ) {
    41         //      resume( this_coroutine() );
    42         //      // resume( &s );
    43         // }
    4440        resumer( &s, NoOfTimes );
    4541        EndTime = Time();
  • src/libcfa/concurrency/alarm.c

    r4c03e63 r7bbba76  
    1616
    1717extern "C" {
     18#include <errno.h>
     19#include <stdio.h>
     20#include <string.h>
    1821#include <time.h>
     22#include <unistd.h>
    1923#include <sys/time.h>
    2024}
     25
     26#include "libhdr.h"
    2127
    2228#include "alarm.h"
     
    3137        timespec curr;
    3238        clock_gettime( CLOCK_REALTIME, &curr );
    33         return ((__cfa_time_t)curr.tv_sec * TIMEGRAN) + curr.tv_nsec;
     39        __cfa_time_t curr_time = ((__cfa_time_t)curr.tv_sec * TIMEGRAN) + curr.tv_nsec;
     40        LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : current time is %lu\n", curr_time );
     41        return curr_time;
    3442}
    3543
    3644void __kernel_set_timer( __cfa_time_t alarm ) {
     45        LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : set timer to %lu\n", (__cfa_time_t)alarm );
    3746        itimerval val;
    3847        val.it_value.tv_sec = alarm / TIMEGRAN;                 // seconds
     
    7180}
    7281
     82LIB_DEBUG_DO( bool validate( alarm_list_t * this ) {
     83        alarm_node_t ** it = &this->head;
     84        while( (*it) ) {
     85                it = &(*it)->next;
     86        }
     87
     88        return it == this->tail;
     89})
     90
    7391static inline void insert_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t p ) {
    74         assert( !n->next );
     92        verify( !n->next );
    7593        if( p == this->tail ) {
    7694                this->tail = &n->next;
     
    8098        }
    8199        *p = n;
     100
     101        verify( validate( this ) );
    82102}
    83103
     
    89109
    90110        insert_at( this, n, it );
     111
     112        verify( validate( this ) );
    91113}
    92114
     
    100122                head->next = NULL;
    101123        }
     124        verify( validate( this ) );
    102125        return head;
    103126}
     
    105128static inline void remove_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t it ) {
    106129        verify( it );
    107         verify( (*it)->next == n );
     130        verify( (*it) == n );
    108131
    109         (*it)->next = n->next;
     132        (*it) = n->next;
    110133        if( !n-> next ) {
    111134                this->tail = it;
    112135        }
    113136        n->next = NULL;
     137
     138        verify( validate( this ) );
    114139}
    115140
    116141static inline void remove( alarm_list_t * this, alarm_node_t * n ) {
    117142        alarm_node_t ** it = &this->head;
    118         while( (*it) && (*it)->next != n ) {
     143        while( (*it) && (*it) != n ) {
    119144                it = &(*it)->next;
    120145        }
    121146
     147        verify( validate( this ) );
     148
    122149        if( *it ) { remove_at( this, n, it ); }
     150
     151        verify( validate( this ) );
    123152}
    124153
    125154void register_self( alarm_node_t * this ) {
    126155        disable_interrupts();
    127         assert( !systemProcessor->pending_alarm );
    128         lock( &systemProcessor->alarm_lock );
     156        verify( !systemProcessor->pending_alarm );
     157        lock( &systemProcessor->alarm_lock DEBUG_CTX2 );
    129158        {
     159                verify( validate( &systemProcessor->alarms ) );
     160                bool first = !systemProcessor->alarms.head;
     161
    130162                insert( &systemProcessor->alarms, this );
    131163                if( systemProcessor->pending_alarm ) {
    132164                        tick_preemption();
    133165                }
     166                if( first ) {
     167                        __kernel_set_timer( systemProcessor->alarms.head->alarm - __kernel_get_time() );
     168                }
    134169        }
    135170        unlock( &systemProcessor->alarm_lock );
    136171        this->set = true;
    137         enable_interrupts();
     172        enable_interrupts( DEBUG_CTX );
    138173}
    139174
    140175void unregister_self( alarm_node_t * this ) {
     176        // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : unregister %p start\n", this );
    141177        disable_interrupts();
    142         lock( &systemProcessor->alarm_lock );
    143         remove( &systemProcessor->alarms, this );
     178        lock( &systemProcessor->alarm_lock DEBUG_CTX2 );
     179        {
     180                verify( validate( &systemProcessor->alarms ) );
     181                remove( &systemProcessor->alarms, this );
     182        }
    144183        unlock( &systemProcessor->alarm_lock );
    145         disable_interrupts();
     184        enable_interrupts( DEBUG_CTX );
    146185        this->set = false;
     186        // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Kernel : unregister %p end\n", this );
    147187}
  • src/libcfa/concurrency/coroutine

    r4c03e63 r7bbba76  
    6363
    6464// Get current coroutine
    65 coroutine_desc * this_coroutine(void);
     65extern volatile thread_local coroutine_desc * this_coroutine;
    6666
    6767// Private wrappers for context switch and stack creation
     
    7171// Suspend implementation inlined for performance
    7272static inline void suspend() {
    73         coroutine_desc * src = this_coroutine();                // optimization
     73        coroutine_desc * src = this_coroutine;          // optimization
    7474
    7575        assertf( src->last != 0,
     
    8888forall(dtype T | is_coroutine(T))
    8989static inline void resume(T * cor) {
    90         coroutine_desc * src = this_coroutine();                // optimization
     90        coroutine_desc * src = this_coroutine;          // optimization
    9191        coroutine_desc * dst = get_coroutine(cor);
    9292
     
    112112
    113113static inline void resume(coroutine_desc * dst) {
    114         coroutine_desc * src = this_coroutine();                // optimization
     114        coroutine_desc * src = this_coroutine;          // optimization
    115115
    116116        // not resuming self ?
  • src/libcfa/concurrency/coroutine.c

    r4c03e63 r7bbba76  
    3232#include "invoke.h"
    3333
    34 extern thread_local processor * this_processor;
     34extern volatile thread_local processor * this_processor;
    3535
    3636//-----------------------------------------------------------------------------
     
    106106
    107107        // set state of current coroutine to inactive
    108         src->state = Inactive;
     108        src->state = src->state == Halted ? Halted : Inactive;
    109109
    110110        // set new coroutine that task is executing
    111         this_processor->current_coroutine = dst;
     111        this_coroutine = dst;
    112112
    113113        // context switch to specified coroutine
     114        assert( src->stack.context );
    114115        CtxSwitch( src->stack.context, dst->stack.context );
    115116        // when CtxSwitch returns we are back in the src coroutine             
  • src/libcfa/concurrency/invoke.c

    r4c03e63 r7bbba76  
    2929
    3030extern void __suspend_internal(void);
    31 extern void __leave_monitor_desc( struct monitor_desc * this );
     31extern void __leave_thread_monitor( struct thread_desc * this );
     32extern void disable_interrupts();
     33extern void enable_interrupts( DEBUG_CTX_PARAM );
    3234
    3335void CtxInvokeCoroutine(
    34       void (*main)(void *), 
    35       struct coroutine_desc *(*get_coroutine)(void *), 
     36      void (*main)(void *),
     37      struct coroutine_desc *(*get_coroutine)(void *),
    3638      void *this
    3739) {
     
    5658
    5759void CtxInvokeThread(
    58       void (*dtor)(void *), 
    59       void (*main)(void *), 
    60       struct thread_desc *(*get_thread)(void *), 
     60      void (*dtor)(void *),
     61      void (*main)(void *),
     62      struct thread_desc *(*get_thread)(void *),
    6163      void *this
    6264) {
     65      // First suspend, once the thread arrives here,
     66      // the function pointer to main can be invalidated without risk
    6367      __suspend_internal();
    6468
     69      // Fetch the thread handle from the user defined thread structure
    6570      struct thread_desc* thrd = get_thread( this );
    66       struct coroutine_desc* cor = &thrd->cor;
    67       struct monitor_desc* mon = &thrd->mon;
    68       cor->state = Active;
    6971
    70       // LIB_DEBUG_PRINTF("Invoke Thread : invoking main %p (args %p)\n", main, this);
     72      // Officially start the thread by enabling preemption
     73      enable_interrupts( DEBUG_CTX );
     74
     75      // Call the main of the thread
    7176      main( this );
    7277
    73       __leave_monitor_desc( mon );
     78      // To exit a thread we must :
     79      // 1 - Mark it as halted
     80      // 2 - Leave its monitor
     82      // 3 - Disable the interrupts
     82      // The order of these 3 operations is very important
     83      __leave_thread_monitor( thrd );
    7484
    7585      //Final suspend, should never return
     
    8090
    8191void CtxStart(
    82       void (*main)(void *), 
    83       struct coroutine_desc *(*get_coroutine)(void *), 
    84       void *this, 
     92      void (*main)(void *),
     93      struct coroutine_desc *(*get_coroutine)(void *),
     94      void *this,
    8595      void (*invoke)(void *)
    8696) {
     
    108118        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = invoke;
    109119      ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520
    110       ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7 
     120      ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7
    111121
    112122#elif defined( __x86_64__ )
     
    128138      ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fixedRegisters[1] = invoke;
    129139      ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520
    130       ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7 
     140      ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7
    131141#else
    132142      #error Only __i386__ and __x86_64__ is supported for threads in cfa
  • src/libcfa/concurrency/invoke.h

    r4c03e63 r7bbba76  
    3131      struct spinlock {
    3232            volatile int lock;
     33            #ifdef __CFA_DEBUG__
     34                  const char * prev;
     35            #endif
    3336      };
    3437
     
    8386            struct __thread_queue_t entry_queue;      // queue of threads that are blocked waiting for the monitor
    8487            struct __condition_stack_t signal_stack;  // stack of conditions to run next once we exit the monitor
    85             struct monitor_desc * stack_owner;        // if bulk acquiring was used we need to synchronize signals with an other monitor
    8688            unsigned int recursion;                   // monitor routines can be called recursively, we need to keep track of that
    8789      };
  • src/libcfa/concurrency/kernel

    r4c03e63 r7bbba76  
    2828//-----------------------------------------------------------------------------
    2929// Locks
    30 bool try_lock( spinlock * );
    31 void lock( spinlock * );
    32 void unlock( spinlock * );
     30bool try_lock( spinlock * DEBUG_CTX_PARAM2 );
     31void lock    ( spinlock * DEBUG_CTX_PARAM2 );
     32void unlock  ( spinlock * );
    3333
    3434struct signal_once {
     
    6868        unsigned short thrd_count;
    6969};
    70 static inline void ?{}(FinishAction * this) { 
     70static inline void ?{}(FinishAction * this) {
    7171        this->action_code = No_Action;
    7272        this->thrd = NULL;
     
    7878        struct processorCtx_t * runner;
    7979        cluster * cltr;
    80         coroutine_desc * current_coroutine;
    81         thread_desc * current_thread;
    8280        pthread_t kernel_thread;
    83        
     81
    8482        signal_once terminated;
    8583        volatile bool is_terminated;
     
    9088        unsigned int preemption;
    9189
    92         unsigned short disable_preempt_count;
     90        bool pending_preemption;
    9391
    94         bool pending_preemption;
     92        char * last_enable;
    9593};
    9694
  • src/libcfa/concurrency/kernel.c

    r4c03e63 r7bbba76  
    1515//
    1616
    17 #include "startup.h"
    18 
    19 //Start and stop routine for the kernel, declared first to make sure they run first
    20 void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
    21 void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
    22 
    23 //Header
    24 #include "kernel_private.h"
     17#include "libhdr.h"
    2518
    2619//C Includes
     
    3528
    3629//CFA Includes
    37 #include "libhdr.h"
     30#include "kernel_private.h"
    3831#include "preemption.h"
     32#include "startup.h"
    3933
    4034//Private includes
    4135#define __CFA_INVOKE_PRIVATE__
    4236#include "invoke.h"
     37
     38//Start and stop routine for the kernel, declared first to make sure they run first
     39void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
     40void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
    4341
    4442//-----------------------------------------------------------------------------
     
    5957// Global state
    6058
    61 thread_local processor * this_processor;
    62 
    63 coroutine_desc * this_coroutine(void) {
    64         return this_processor->current_coroutine;
    65 }
    66 
    67 thread_desc * this_thread(void) {
    68         return this_processor->current_thread;
    69 }
     59volatile thread_local processor * this_processor;
     60volatile thread_local coroutine_desc * this_coroutine;
     61volatile thread_local thread_desc * this_thread;
     62volatile thread_local unsigned short disable_preempt_count = 1;
    7063
    7164//-----------------------------------------------------------------------------
    7265// Main thread construction
    7366struct current_stack_info_t {
    74         machine_context_t ctx; 
     67        machine_context_t ctx;
    7568        unsigned int size;              // size of stack
    7669        void *base;                             // base of stack
     
    10699
    107100void ?{}( coroutine_desc * this, current_stack_info_t * info) {
    108         (&this->stack){ info }; 
     101        (&this->stack){ info };
    109102        this->name = "Main Thread";
    110103        this->errno_ = 0;
     
    136129void ?{}(processor * this, cluster * cltr) {
    137130        this->cltr = cltr;
    138         this->current_coroutine = NULL;
    139         this->current_thread = NULL;
    140131        (&this->terminated){};
    141132        this->is_terminated = false;
    142133        this->preemption_alarm = NULL;
    143134        this->preemption = default_preemption();
    144         this->disable_preempt_count = 1;                //Start with interrupts disabled
    145135        this->pending_preemption = false;
    146136
     
    150140void ?{}(processor * this, cluster * cltr, processorCtx_t * runner) {
    151141        this->cltr = cltr;
    152         this->current_coroutine = NULL;
    153         this->current_thread = NULL;
    154142        (&this->terminated){};
    155143        this->is_terminated = false;
    156         this->disable_preempt_count = 0;
     144        this->preemption_alarm = NULL;
     145        this->preemption = default_preemption();
    157146        this->pending_preemption = false;
     147        this->kernel_thread = pthread_self();
    158148
    159149        this->runner = runner;
    160         LIB_DEBUG_PRINT_SAFE("Kernel : constructing processor context %p\n", runner);
     150        LIB_DEBUG_PRINT_SAFE("Kernel : constructing system processor context %p\n", runner);
    161151        runner{ this };
    162152}
     153
     154LIB_DEBUG_DO( bool validate( alarm_list_t * this ); )
    163155
    164156void ?{}(system_proc_t * this, cluster * cltr, processorCtx_t * runner) {
     
    168160
    169161        (&this->proc){ cltr, runner };
     162
     163        verify( validate( &this->alarms ) );
    170164}
    171165
     
    184178
    185179void ^?{}(cluster * this) {
    186        
     180
    187181}
    188182
     
    203197
    204198                thread_desc * readyThread = NULL;
    205                 for( unsigned int spin_count = 0; ! this->is_terminated; spin_count++ ) 
     199                for( unsigned int spin_count = 0; ! this->is_terminated; spin_count++ )
    206200                {
    207201                        readyThread = nextThread( this->cltr );
     
    209203                        if(readyThread)
    210204                        {
     205                                verify( disable_preempt_count > 0 );
     206
    211207                                runThread(this, readyThread);
     208
     209                                verify( disable_preempt_count > 0 );
    212210
    213211                                //Some actions need to be taken from the kernel
     
    229227}
    230228
    231 // runThread runs a thread by context switching 
    232 // from the processor coroutine to the target thread 
     229// runThread runs a thread by context switching
     230// from the processor coroutine to the target thread
    233231void runThread(processor * this, thread_desc * dst) {
    234232        coroutine_desc * proc_cor = get_coroutine(this->runner);
    235233        coroutine_desc * thrd_cor = get_coroutine(dst);
    236        
     234
    237235        //Reset the terminating actions here
    238236        this->finish.action_code = No_Action;
    239237
    240238        //Update global state
    241         this->current_thread = dst;
     239        this_thread = dst;
    242240
    243241        // Context Switch to the thread
     
    246244}
    247245
    248 // Once a thread has finished running, some of 
     246// Once a thread has finished running, some of
    249247// its final actions must be executed from the kernel
    250248void finishRunning(processor * this) {
     
    256254        }
    257255        else if( this->finish.action_code == Release_Schedule ) {
    258                 unlock( this->finish.lock );           
     256                unlock( this->finish.lock );
    259257                ScheduleThread( this->finish.thrd );
    260258        }
     
    289287        processor * proc = (processor *) arg;
    290288        this_processor = proc;
     289        this_coroutine = NULL;
     290        this_thread = NULL;
     291        disable_preempt_count = 1;
    291292        // SKULLDUGGERY: We want to create a context for the processor coroutine
    292293        // which is needed for the 2-step context switch. However, there is no reason
    293         // to waste the perfectly valid stack create by pthread. 
     294        // to waste the perfectly valid stack create by pthread.
    294295        current_stack_info_t info;
    295296        machine_context_t ctx;
     
    300301
    301302        //Set global state
    302         proc->current_coroutine = &proc->runner->__cor;
    303         proc->current_thread = NULL;
     303        this_coroutine = &proc->runner->__cor;
     304        this_thread = NULL;
    304305
    305306        //We now have a proper context from which to schedule threads
    306307        LIB_DEBUG_PRINT_SAFE("Kernel : core %p created (%p, %p)\n", proc, proc->runner, &ctx);
    307308
    308         // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't 
    309         // resume it to start it like it normally would, it will just context switch 
    310         // back to here. Instead directly call the main since we already are on the 
     309        // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
     310        // resume it to start it like it normally would, it will just context switch
     311        // back to here. Instead directly call the main since we already are on the
    311312        // appropriate stack.
    312313        proc_cor_storage.__cor.state = Active;
     
    315316
    316317        // Main routine of the core returned, the core is now fully terminated
    317         LIB_DEBUG_PRINT_SAFE("Kernel : core %p main ended (%p)\n", proc, proc->runner); 
     318        LIB_DEBUG_PRINT_SAFE("Kernel : core %p main ended (%p)\n", proc, proc->runner);
    318319
    319320        return NULL;
     
    322323void start(processor * this) {
    323324        LIB_DEBUG_PRINT_SAFE("Kernel : Starting core %p\n", this);
    324        
     325
     326        // SIGALRM must only be caught by the system processor
     327        sigset_t old_mask;
     328        bool is_system_proc = this_processor == &systemProcessor->proc;
     329        if ( is_system_proc ) {
     330                // Child kernel-thread inherits the signal mask from the parent kernel-thread. So one special case for the
     331                // system processor creating the user processor => toggle the blocking SIGALRM on system processor, create user
     332                // processor, and toggle back (below) previous signal mask of the system processor.
     333
     334                sigset_t new_mask;
     335                sigemptyset( &new_mask );
     336                sigemptyset( &old_mask );
     337                sigaddset( &new_mask, SIGALRM );
     338
     339                if ( sigprocmask( SIG_BLOCK, &new_mask, &old_mask ) == -1 ) {
     340                        abortf( "internal error, sigprocmask" );
     341                }
     342
     343                assert( ! sigismember( &old_mask, SIGALRM ) );
     344        }
     345
    325346        pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this );
    326347
    327         LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this);       
     348        // Toggle back previous signal mask of system processor.
     349        if ( is_system_proc ) {
     350                if ( sigprocmask( SIG_SETMASK, &old_mask, NULL ) == -1 ) {
     351                        abortf( "internal error, sigprocmask" );
     352                } // if
     353        } // if
     354
     355        LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this);
    328356}
    329357
     
    331359// Scheduler routines
    332360void ScheduleThread( thread_desc * thrd ) {
    333         if( !thrd ) return;
     361        // if( !thrd ) return;
     362        assert( thrd );
     363        assert( thrd->cor.state != Halted );
     364
     365        verify( disable_preempt_count > 0 );
    334366
    335367        verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );
    336        
    337         lock( &systemProcessor->proc.cltr->lock );
     368
     369        lock( &systemProcessor->proc.cltr->lock DEBUG_CTX2 );
    338370        append( &systemProcessor->proc.cltr->ready_queue, thrd );
    339371        unlock( &systemProcessor->proc.cltr->lock );
     372
     373        verify( disable_preempt_count > 0 );
    340374}
    341375
    342376thread_desc * nextThread(cluster * this) {
    343         lock( &this->lock );
     377        verify( disable_preempt_count > 0 );
     378        lock( &this->lock DEBUG_CTX2 );
    344379        thread_desc * head = pop_head( &this->ready_queue );
    345380        unlock( &this->lock );
     381        verify( disable_preempt_count > 0 );
    346382        return head;
    347383}
    348384
    349 void ScheduleInternal() {
     385void BlockInternal() {
     386        disable_interrupts();
     387        verify( disable_preempt_count > 0 );
    350388        suspend();
    351 }
    352 
    353 void ScheduleInternal( spinlock * lock ) {
     389        verify( disable_preempt_count > 0 );
     390        enable_interrupts( DEBUG_CTX );
     391}
     392
     393void BlockInternal( spinlock * lock ) {
     394        disable_interrupts();
    354395        this_processor->finish.action_code = Release;
    355396        this_processor->finish.lock = lock;
     397
     398        verify( disable_preempt_count > 0 );
    356399        suspend();
    357 }
    358 
    359 void ScheduleInternal( thread_desc * thrd ) {
     400        verify( disable_preempt_count > 0 );
     401
     402        enable_interrupts( DEBUG_CTX );
     403}
     404
     405void BlockInternal( thread_desc * thrd ) {
     406        disable_interrupts();
     407        assert( thrd->cor.state != Halted );
    360408        this_processor->finish.action_code = Schedule;
    361409        this_processor->finish.thrd = thrd;
     410
     411        verify( disable_preempt_count > 0 );
    362412        suspend();
    363 }
    364 
    365 void ScheduleInternal( spinlock * lock, thread_desc * thrd ) {
     413        verify( disable_preempt_count > 0 );
     414
     415        enable_interrupts( DEBUG_CTX );
     416}
     417
     418void BlockInternal( spinlock * lock, thread_desc * thrd ) {
     419        disable_interrupts();
    366420        this_processor->finish.action_code = Release_Schedule;
    367421        this_processor->finish.lock = lock;
    368422        this_processor->finish.thrd = thrd;
     423
     424        verify( disable_preempt_count > 0 );
    369425        suspend();
    370 }
    371 
    372 void ScheduleInternal(spinlock ** locks, unsigned short count) {
     426        verify( disable_preempt_count > 0 );
     427
     428        enable_interrupts( DEBUG_CTX );
     429}
     430
     431void BlockInternal(spinlock ** locks, unsigned short count) {
     432        disable_interrupts();
    373433        this_processor->finish.action_code = Release_Multi;
    374434        this_processor->finish.locks = locks;
    375435        this_processor->finish.lock_count = count;
     436
     437        verify( disable_preempt_count > 0 );
    376438        suspend();
    377 }
    378 
    379 void ScheduleInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) {
     439        verify( disable_preempt_count > 0 );
     440
     441        enable_interrupts( DEBUG_CTX );
     442}
     443
     444void BlockInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) {
     445        disable_interrupts();
    380446        this_processor->finish.action_code = Release_Multi_Schedule;
    381447        this_processor->finish.locks = locks;
     
    383449        this_processor->finish.thrds = thrds;
    384450        this_processor->finish.thrd_count = thrd_count;
     451
     452        verify( disable_preempt_count > 0 );
    385453        suspend();
     454        verify( disable_preempt_count > 0 );
     455
     456        enable_interrupts( DEBUG_CTX );
    386457}
    387458
     
    392463// Kernel boot procedures
    393464void kernel_startup(void) {
    394         LIB_DEBUG_PRINT_SAFE("Kernel : Starting\n");   
     465        LIB_DEBUG_PRINT_SAFE("Kernel : Starting\n");
    395466
    396467        // Start by initializing the main thread
    397         // SKULLDUGGERY: the mainThread steals the process main thread 
     468        // SKULLDUGGERY: the mainThread steals the process main thread
    398469        // which will then be scheduled by the systemProcessor normally
    399470        mainThread = (thread_desc *)&mainThread_storage;
     
    403474        LIB_DEBUG_PRINT_SAFE("Kernel : Main thread ready\n");
    404475
    405         // Enable preemption
    406         kernel_start_preemption();
    407 
    408476        // Initialize the system cluster
    409477        systemCluster = (cluster *)&systemCluster_storage;
     
    417485        systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtx_storage };
    418486
    419         // Add the main thread to the ready queue 
     487        // Add the main thread to the ready queue
    420488        // once resume is called on systemProcessor->runner the mainThread needs to be scheduled like any normal thread
    421489        ScheduleThread(mainThread);
     
    423491        //initialize the global state variables
    424492        this_processor = &systemProcessor->proc;
    425         this_processor->current_thread = mainThread;
    426         this_processor->current_coroutine = &mainThread->cor;
     493        this_thread = mainThread;
     494        this_coroutine = &mainThread->cor;
     495        disable_preempt_count = 1;
     496
     497        // Enable preemption
     498        kernel_start_preemption();
    427499
    428500        // SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX
    429501        // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
    430         // mainThread is on the ready queue when this call is made. 
     502        // mainThread is on the ready queue when this call is made.
    431503        resume( systemProcessor->proc.runner );
    432504
     
    435507        // THE SYSTEM IS NOW COMPLETELY RUNNING
    436508        LIB_DEBUG_PRINT_SAFE("Kernel : Started\n--------------------------------------------------\n\n");
     509
     510        enable_interrupts( DEBUG_CTX );
    437511}
    438512
    439513void kernel_shutdown(void) {
    440514        LIB_DEBUG_PRINT_SAFE("\n--------------------------------------------------\nKernel : Shutting down\n");
     515
     516        disable_interrupts();
    441517
    442518        // SKULLDUGGERY: Notify the systemProcessor it needs to terminates.
     
    448524        // THE SYSTEM IS NOW COMPLETELY STOPPED
    449525
     526        // Disable preemption
     527        kernel_stop_preemption();
     528
    450529        // Destroy the system processor and its context in reverse order of construction
    451530        // These were manually constructed so we need manually destroy them
     
    457536        ^(mainThread){};
    458537
    459         LIB_DEBUG_PRINT_SAFE("Kernel : Shutdown complete\n");   
     538        LIB_DEBUG_PRINT_SAFE("Kernel : Shutdown complete\n");
    460539}
    461540
     
    467546        // abort cannot be recursively entered by the same or different processors because all signal handlers return when
    468547        // the globalAbort flag is true.
    469         lock( &kernel_abort_lock );
     548        lock( &kernel_abort_lock DEBUG_CTX2 );
    470549
    471550        // first task to abort ?
     
    473552                kernel_abort_called = true;
    474553                unlock( &kernel_abort_lock );
    475         } 
     554        }
    476555        else {
    477556                unlock( &kernel_abort_lock );
    478                
     557
    479558                sigset_t mask;
    480559                sigemptyset( &mask );
     
    482561                sigaddset( &mask, SIGUSR1 );                    // block SIGUSR1 signals
    483562                sigsuspend( &mask );                            // block the processor to prevent further damage during abort
    484                 _exit( EXIT_FAILURE );                          // if processor unblocks before it is killed, terminate it             
    485         }
    486 
    487         return this_thread();
     563                _exit( EXIT_FAILURE );                          // if processor unblocks before it is killed, terminate it
     564        }
     565
     566        return this_thread;
    488567}
    489568
     
    494573        __lib_debug_write( STDERR_FILENO, abort_text, len );
    495574
    496         if ( thrd != this_coroutine() ) {
    497                 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine()->name, this_coroutine() );
     575        if ( thrd != this_coroutine ) {
     576                len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine->name, this_coroutine );
    498577                __lib_debug_write( STDERR_FILENO, abort_text, len );
    499         } 
     578        }
    500579        else {
    501580                __lib_debug_write( STDERR_FILENO, ".\n", 2 );
     
    505584extern "C" {
    506585        void __lib_debug_acquire() {
    507                 lock(&kernel_debug_lock);
     586                lock( &kernel_debug_lock DEBUG_CTX2 );
    508587        }
    509588
    510589        void __lib_debug_release() {
    511                 unlock(&kernel_debug_lock);
     590                unlock( &kernel_debug_lock );
    512591        }
    513592}
     
    525604}
    526605
    527 bool try_lock( spinlock * this ) {
    528         return this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0;
    529 }
    530 
    531 void lock( spinlock * this ) {
     606bool try_lock( spinlock * this DEBUG_CTX_PARAM2 ) {
     607        bool ret = this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0;
     608        LIB_DEBUG_DO( this->prev = caller; )
     609        return ret;
     610}
     611
     612void lock( spinlock * this DEBUG_CTX_PARAM2 ) {
    532613        for ( unsigned int i = 1;; i += 1 ) {
    533614                if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) break;
    534615        }
     616        LIB_DEBUG_DO( this->prev = caller; )
    535617}
    536618
     
    547629
    548630void wait( signal_once * this ) {
    549         lock( &this->lock );
     631        lock( &this->lock DEBUG_CTX2 );
    550632        if( !this->cond ) {
    551                 append( &this->blocked, this_thread() );
    552                 ScheduleInternal( &this->lock );
    553                 lock( &this->lock );
    554         }
    555         unlock( &this->lock );
     633                append( &this->blocked, (thread_desc*)this_thread );
     634                BlockInternal( &this->lock );
     635        }
     636        else {
     637                unlock( &this->lock );
     638        }
    556639}
    557640
    558641void signal( signal_once * this ) {
    559         lock( &this->lock );
     642        lock( &this->lock DEBUG_CTX2 );
    560643        {
    561644                this->cond = true;
    562645
     646                disable_interrupts();
    563647                thread_desc * it;
    564648                while( it = pop_head( &this->blocked) ) {
    565649                        ScheduleThread( it );
    566650                }
     651                enable_interrupts( DEBUG_CTX );
    567652        }
    568653        unlock( &this->lock );
     
    590675                }
    591676                head->next = NULL;
    592         }       
     677        }
    593678        return head;
    594679}
     
    609694                this->top = top->next;
    610695                top->next = NULL;
    611         }       
     696        }
    612697        return top;
    613698}
  • src/libcfa/concurrency/kernel_private.h

    r4c03e63 r7bbba76  
    1818#define KERNEL_PRIVATE_H
    1919
     20#include "libhdr.h"
     21
    2022#include "kernel"
    2123#include "thread"
     
    2325#include "alarm.h"
    2426
    25 #include "libhdr.h"
    2627
    2728//-----------------------------------------------------------------------------
    2829// Scheduler
     30
     31extern "C" {
     32        void disable_interrupts();
     33        void enable_interrupts_noRF();
     34        void enable_interrupts( DEBUG_CTX_PARAM );
     35}
     36
    2937void ScheduleThread( thread_desc * );
     38static inline void WakeThread( thread_desc * thrd ) {
     39        if( !thrd ) return;
     40
     41        disable_interrupts();
     42        ScheduleThread( thrd );
     43        enable_interrupts( DEBUG_CTX );
     44}
    3045thread_desc * nextThread(cluster * this);
    3146
    32 void ScheduleInternal(void);
    33 void ScheduleInternal(spinlock * lock);
    34 void ScheduleInternal(thread_desc * thrd);
    35 void ScheduleInternal(spinlock * lock, thread_desc * thrd);
    36 void ScheduleInternal(spinlock ** locks, unsigned short count);
    37 void ScheduleInternal(spinlock ** locks, unsigned short count, thread_desc ** thrds, unsigned short thrd_count);
     47void BlockInternal(void);
     48void BlockInternal(spinlock * lock);
     49void BlockInternal(thread_desc * thrd);
     50void BlockInternal(spinlock * lock, thread_desc * thrd);
     51void BlockInternal(spinlock ** locks, unsigned short count);
     52void BlockInternal(spinlock ** locks, unsigned short count, thread_desc ** thrds, unsigned short thrd_count);
    3853
    3954//-----------------------------------------------------------------------------
     
    6075extern cluster * systemCluster;
    6176extern system_proc_t * systemProcessor;
    62 extern thread_local processor * this_processor;
    63 
    64 static inline void disable_interrupts() {
    65         __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, 1, __ATOMIC_SEQ_CST );
    66         assert( prev != (unsigned short) -1 );
    67 }
    68 
    69 static inline void enable_interrupts_noRF() {
    70         __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST );
    71         verify( prev != (unsigned short) 0 );
    72 }
    73 
    74 static inline void enable_interrupts() {
    75         __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST );
    76         verify( prev != (unsigned short) 0 );
    77         if( prev == 1 && this_processor->pending_preemption ) {
    78                 ScheduleInternal( this_processor->current_thread );
    79                 this_processor->pending_preemption = false;
    80         }
    81 }
     77extern volatile thread_local processor * this_processor;
     78extern volatile thread_local coroutine_desc * this_coroutine;
     79extern volatile thread_local thread_desc * this_thread;
     80extern volatile thread_local unsigned short disable_preempt_count;
    8281
    8382//-----------------------------------------------------------------------------
  • src/libcfa/concurrency/monitor

    r4c03e63 r7bbba76  
    2626static inline void ?{}(monitor_desc * this) {
    2727        this->owner = NULL;
    28         this->stack_owner = NULL;
    2928        this->recursion = 0;
    3029}
  • src/libcfa/concurrency/monitor.c

    r4c03e63 r7bbba76  
    1919#include <stdlib>
    2020
     21#include "libhdr.h"
    2122#include "kernel_private.h"
    22 #include "libhdr.h"
    2323
    2424//-----------------------------------------------------------------------------
     
    4444
    4545extern "C" {
    46         void __enter_monitor_desc(monitor_desc * this) {
    47                 lock( &this->lock );
    48                 thread_desc * thrd = this_thread();
    49 
    50                 LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
     46        void __enter_monitor_desc( monitor_desc * this ) {
     47                lock( &this->lock DEBUG_CTX2 );
     48                thread_desc * thrd = this_thread;
     49
     50                // LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
    5151
    5252                if( !this->owner ) {
     
    6262                        //Some one else has the monitor, wait in line for it
    6363                        append( &this->entry_queue, thrd );
    64                         LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
    65                         ScheduleInternal( &this->lock );
    66 
    67                         //ScheduleInternal will unlock spinlock, no need to unlock ourselves
    68                         return; 
     64                        // LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
     65                        BlockInternal( &this->lock );
     66
     67                        //BlockInternal will unlock spinlock, no need to unlock ourselves
     68                        return;
    6969                }
    7070
     
    7575        // leave pseudo code :
    7676        //      TODO
    77         void __leave_monitor_desc(monitor_desc * this) {
    78                 lock( &this->lock );
    79 
    80                 LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
    81                 verifyf( this_thread() == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread(), this->owner, this->recursion );
     77        void __leave_monitor_desc( monitor_desc * this ) {
     78                lock( &this->lock DEBUG_CTX2 );
     79
     80                // LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i). ", this_thread, this, this->owner, this->recursion);
     81                verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion );
    8282
    8383                //Leaving a recursion level, decrement the counter
     
    9696                unlock( &this->lock );
    9797
    98                 LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);
     98                // LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);
    9999
    100100                //We need to wake-up the thread
    101                 ScheduleThread( new_owner );
     101                WakeThread( new_owner );
     102        }
     103
     104        void __leave_thread_monitor( thread_desc * thrd ) {
     105                monitor_desc * this = &thrd->mon;
     106                lock( &this->lock DEBUG_CTX2 );
     107
     108                disable_interrupts();
     109
     110                thrd->cor.state = Halted;
     111
     112                verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );
     113
     114                //Leaving a recursion level, decrement the counter
     115                this->recursion -= 1;
     116
     117                //If we haven't left the last level of recursion
     118                //it means we don't need to do anything
     119                if( this->recursion != 0) {
     120                        unlock( &this->lock );
     121                        return;
     122                }
     123
     124                thread_desc * new_owner = next_thread( this );
     125
     126                //We can now let other threads in safely
     127                unlock( &this->lock );
     128
     129                //We need to wake-up the thread
     130                if( new_owner) ScheduleThread( new_owner );
    102131        }
    103132}
     
    121150        enter( this->m, this->count );
    122151
    123         this->prev_mntrs = this_thread()->current_monitors;
    124         this->prev_count = this_thread()->current_monitor_count;
    125 
    126         this_thread()->current_monitors      = m;
    127         this_thread()->current_monitor_count = count;
     152        this->prev_mntrs = this_thread->current_monitors;
     153        this->prev_count = this_thread->current_monitor_count;
     154
     155        this_thread->current_monitors      = m;
     156        this_thread->current_monitor_count = count;
    128157}
    129158
     
    131160        leave( this->m, this->count );
    132161
    133         this_thread()->current_monitors      = this->prev_mntrs;
    134         this_thread()->current_monitor_count = this->prev_count;
     162        this_thread->current_monitors      = this->prev_mntrs;
     163        this_thread->current_monitor_count = this->prev_count;
    135164}
    136165
     
    170199        unsigned short count = this->monitor_count;
    171200        unsigned int recursions[ count ];               //Save the current recursion levels to restore them later
    172         spinlock *   locks     [ count ];               //We need to pass-in an array of locks to ScheduleInternal
     201        spinlock *   locks     [ count ];               //We need to pass-in an array of locks to BlockInternal
    173202
    174203        LIB_DEBUG_PRINT_SAFE("count %i\n", count);
    175204
    176         __condition_node_t waiter = { this_thread(), count, user_info };
     205        __condition_node_t waiter = { (thread_desc*)this_thread, count, user_info };
    177206
    178207        __condition_criterion_t criteria[count];
     
    208237
    209238        // Everything is ready to go to sleep
    210         ScheduleInternal( locks, count, threads, thread_count );
     239        BlockInternal( locks, count, threads, thread_count );
    211240
    212241
     
    231260
    232261        unsigned short count = this->monitor_count;
    233        
     262
    234263        //Some more checking in debug
    235264        LIB_DEBUG_DO(
    236                 thread_desc * this_thrd = this_thread();
     265                thread_desc * this_thrd = this_thread;
    237266                if ( this->monitor_count != this_thrd->current_monitor_count ) {
    238267                        abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count );
     
    281310        unsigned short count = this->monitor_count;
    282311        unsigned int recursions[ count ];               //Save the current recursion levels to restore them later
    283         spinlock *   locks     [ count ];               //We need to pass-in an array of locks to ScheduleInternal
     312        spinlock *   locks     [ count ];               //We need to pass-in an array of locks to BlockInternal
    284313
    285314        lock_all( this->monitors, locks, count );
    286315
    287316        //create creteria
    288         __condition_node_t waiter = { this_thread(), count, 0 };
     317        __condition_node_t waiter = { (thread_desc*)this_thread, count, 0 };
    289318
    290319        __condition_criterion_t criteria[count];
     
    309338
    310339        //Everything is ready to go to sleep
    311         ScheduleInternal( locks, count, &signallee, 1 );
     340        BlockInternal( locks, count, &signallee, 1 );
    312341
    313342
     
    325354
    326355uintptr_t front( condition * this ) {
    327         verifyf( !is_empty(this), 
     356        verifyf( !is_empty(this),
    328357                "Attempt to access user data on an empty condition.\n"
    329358                "Possible cause is not checking if the condition is empty before reading stored data."
     
    335364// Internal scheduling
    336365void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) ) {
    337         // thread_desc * this = this_thread();
     366        // thread_desc * this = this_thread;
    338367
    339368        // unsigned short count = this->current_monitor_count;
    340369        // unsigned int recursions[ count ];            //Save the current recursion levels to restore them later
    341         // spinlock *   locks     [ count ];            //We need to pass-in an array of locks to ScheduleInternal
     370        // spinlock *   locks     [ count ];            //We need to pass-in an array of locks to BlockInternal
    342371
    343372        // lock_all( this->current_monitors, locks, count );
     
    348377
    349378        // // // Everything is ready to go to sleep
    350         // // ScheduleInternal( locks, count, threads, thread_count );
     379        // // BlockInternal( locks, count, threads, thread_count );
    351380
    352381
     
    393422static inline void lock_all( spinlock ** locks, unsigned short count ) {
    394423        for( int i = 0; i < count; i++ ) {
    395                 lock( locks[i] );
     424                lock( locks[i] DEBUG_CTX2 );
    396425        }
    397426}
     
    400429        for( int i = 0; i < count; i++ ) {
    401430                spinlock * l = &source[i]->lock;
    402                 lock( l );
     431                lock( l DEBUG_CTX2 );
    403432                if(locks) locks[i] = l;
    404433        }
     
    457486
    458487static inline void brand_condition( condition * this ) {
    459         thread_desc * thrd = this_thread();
     488        thread_desc * thrd = this_thread;
    460489        if( !this->monitors ) {
    461490                LIB_DEBUG_PRINT_SAFE("Branding\n");
  • src/libcfa/concurrency/preemption.c

    r4c03e63 r7bbba76  
    1515//
    1616
     17#include "libhdr.h"
    1718#include "preemption.h"
    1819
    1920extern "C" {
     21#include <errno.h>
     22#include <execinfo.h>
     23#define __USE_GNU
    2024#include <signal.h>
    21 }
    22 
    23 #define __CFA_DEFAULT_PREEMPTION__ 10
     25#undef __USE_GNU
     26#include <stdio.h>
     27#include <string.h>
     28#include <unistd.h>
     29}
     30
     31
     32#ifdef __USE_STREAM__
     33#include "fstream"
     34#endif
     35
     36#define __CFA_DEFAULT_PREEMPTION__ 10000
    2437
    2538__attribute__((weak)) unsigned int default_preemption() {
     
    2740}
    2841
     42#define __CFA_SIGCXT__ ucontext_t *
     43#define __CFA_SIGPARMS__ __attribute__((unused)) int sig, __attribute__((unused)) siginfo_t *sfp, __attribute__((unused)) __CFA_SIGCXT__ cxt
     44
    2945static void preempt( processor   * this );
    3046static void timeout( thread_desc * this );
    3147
     48void sigHandler_ctxSwitch( __CFA_SIGPARMS__ );
     49void sigHandler_alarm    ( __CFA_SIGPARMS__ );
     50void sigHandler_segv     ( __CFA_SIGPARMS__ );
     51void sigHandler_abort    ( __CFA_SIGPARMS__ );
     52
     53static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags );
     54
    3255//=============================================================================================
    3356// Kernel Preemption logic
     
    3558
    3659void kernel_start_preemption() {
    37 
    38 }
     60        LIB_DEBUG_PRINT_SAFE("Kernel : Starting preemption\n");
     61        __kernel_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO );
     62        __kernel_sigaction( SIGALRM, sigHandler_alarm    , SA_SIGINFO );
     63        __kernel_sigaction( SIGSEGV, sigHandler_segv     , SA_SIGINFO );
     64        __kernel_sigaction( SIGBUS , sigHandler_segv     , SA_SIGINFO );
     65        // __kernel_sigaction( SIGABRT, sigHandler_abort    , SA_SIGINFO );
     66}
     67
     68void kernel_stop_preemption() {
     69        //Block all signals, we are no longer in a position to handle them
     70        sigset_t mask;
     71        sigfillset( &mask );
     72        sigprocmask( SIG_BLOCK, &mask, NULL );
     73        LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopped\n");
     74
     75        // assert( !systemProcessor->alarms.head );
     76        // assert( systemProcessor->alarms.tail == &systemProcessor->alarms.head );
     77}
     78
     79LIB_DEBUG_DO( bool validate( alarm_list_t * this ); )
    3980
    4081void tick_preemption() {
     82        // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Ticking preemption\n" );
     83
    4184        alarm_list_t * alarms = &systemProcessor->alarms;
    4285        __cfa_time_t currtime = __kernel_get_time();
    4386        while( alarms->head && alarms->head->alarm < currtime ) {
    4487                alarm_node_t * node = pop(alarms);
     88                // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking %p\n", node );
     89
    4590                if( node->kernel_alarm ) {
    4691                        preempt( node->proc );
     
    5095                }
    5196
     97                verify( validate( alarms ) );
     98
    5299                if( node->period > 0 ) {
    53                         node->alarm += node->period;
     100                        node->alarm = currtime + node->period;
    54101                        insert( alarms, node );
    55102                }
     
    62109                __kernel_set_timer( alarms->head->alarm - currtime );
    63110        }
     111
     112        verify( validate( alarms ) );
     113        // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking preemption done\n" );
    64114}
    65115
    66116void update_preemption( processor * this, __cfa_time_t duration ) {
    67         //     assert( THREAD_GETMEM( disableInt ) && THREAD_GETMEM( disableIntCnt ) == 1 );
     117        LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Processor : %p updating preemption to %lu\n", this, duration );
     118
    68119        alarm_node_t * alarm = this->preemption_alarm;
     120        duration *= 1000;
    69121
    70122        // Alarms need to be enabled
     
    97149
    98150void ^?{}( preemption_scope * this ) {
     151        disable_interrupts();
     152
    99153        update_preemption( this->proc, 0 );
    100154}
     
    104158//=============================================================================================
    105159
     160extern "C" {
     161        void disable_interrupts() {
     162                __attribute__((unused)) unsigned short new_val = __atomic_add_fetch_2( &disable_preempt_count, 1, __ATOMIC_SEQ_CST );
     163                verify( new_val < (unsigned short)65_000 );
     164                verify( new_val != (unsigned short) 0 );
     165        }
     166
     167        void enable_interrupts_noRF() {
     168                __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
     169                verify( prev != (unsigned short) 0 );
     170        }
     171
     172        void enable_interrupts( DEBUG_CTX_PARAM ) {
     173                processor * proc   = this_processor;
     174                thread_desc * thrd = this_thread;
     175                unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
     176                verify( prev != (unsigned short) 0 );
     177                if( prev == 1 && proc->pending_preemption ) {
     178                        proc->pending_preemption = false;
     179                        LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Executing deferred CtxSwitch on %p\n", this_processor );
     180                        BlockInternal( thrd );
     181                        LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Executing deferred back\n" );
     182                }
     183
     184                LIB_DEBUG_DO( proc->last_enable = caller; )
     185        }
     186}
     187
     188static inline void signal_unblock( int sig ) {
     189        LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Processor : %p unblocking sig %i\n", this_processor, sig );
     190
     191        sigset_t mask;
     192        sigemptyset( &mask );
     193        sigaddset( &mask, sig );
     194
     195        if ( sigprocmask( SIG_UNBLOCK, &mask, NULL ) == -1 ) {
     196            abortf( "internal error, sigprocmask" );
     197        } // if
     198}
     199
    106200static inline bool preemption_ready() {
    107         return this_processor->disable_preempt_count == 0;
     201        return disable_preempt_count == 0;
    108202}
    109203
     
    116210}
    117211
    118 void sigHandler_ctxSwitch( __attribute__((unused)) int sig ) {
     212extern "C" {
     213        __attribute__((noinline)) void __debug_break() {
     214                pthread_kill( pthread_self(), SIGTRAP );
     215        }
     216}
     217
     218void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) {
     219        LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Ctx Switch IRH %p running %p @ %p\n", this_processor, this_thread, (void *)(cxt->uc_mcontext.gregs[REG_RIP]) );
     220
    119221        if( preemption_ready() ) {
    120                 ScheduleInternal( this_processor->current_thread );
     222                LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ctx Switch IRH : Blocking thread %p on %p\n", this_thread, this_processor );
     223                signal_unblock( SIGUSR1 );
     224                BlockInternal( (thread_desc*)this_thread );
     225                LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ctx Switch IRH : Back\n\n");
     226        }
     227        else {
     228                LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ctx Switch IRH : Defering\n" );
     229                defer_ctxSwitch();
     230                signal_unblock( SIGUSR1 );
     231        }
     232}
     233
     234void sigHandler_alarm( __CFA_SIGPARMS__ ) {
     235        LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "\nAlarm IRH %p running %p @ %p\n", this_processor, this_thread, (void *)(cxt->uc_mcontext.gregs[REG_RIP]) );
     236
     237        // if( ((intptr_t)cxt->uc_mcontext.gregs[REG_RIP]) > 0xFFFFFF ) __debug_break();
     238
     239        if( try_lock( &systemProcessor->alarm_lock DEBUG_CTX2 ) ) {
     240                tick_preemption();
     241                unlock( &systemProcessor->alarm_lock );
     242        }
     243        else {
     244                defer_alarm();
     245        }
     246
     247        signal_unblock( SIGALRM );
     248
     249        if( preemption_ready() && this_processor->pending_preemption ) {
     250                LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Alarm IRH : Blocking thread %p on %p\n", this_thread, this_processor );
     251                this_processor->pending_preemption = false;
     252                BlockInternal( (thread_desc*)this_thread );
     253                LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Alarm Switch IRH : Back\n\n");
     254        }
     255}
     256
     257static void preempt( processor * this ) {
     258        // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Processor : signalling %p\n", this );
     259
     260        if( this != systemProcessor ) {
     261                pthread_kill( this->kernel_thread, SIGUSR1 );
    121262        }
    122263        else {
     
    125266}
    126267
    127 void sigHandler_alarm( __attribute__((unused)) int sig ) {
    128         if( try_lock( &systemProcessor->alarm_lock ) ) {
    129                 tick_preemption();
    130                 unlock( &systemProcessor->alarm_lock );
    131         }
    132         else {
    133                 defer_alarm();
    134         }
    135 }
    136 
    137 static void preempt( processor * this ) {
    138         pthread_kill( this->kernel_thread, SIGUSR1 );
    139 }
    140 
    141268static void timeout( thread_desc * this ) {
    142269        //TODO : implement waking threads
    143270}
     271
     272static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags ) {
     273        struct sigaction act;
     274
     275        act.sa_sigaction = (void (*)(int, siginfo_t *, void *))handler;
     276        act.sa_flags = flags;
     277
     278        // disabled during signal handler
     279        sigemptyset( &act.sa_mask );
     280        sigaddset( &act.sa_mask, sig );
     281
     282        if ( sigaction( sig, &act, NULL ) == -1 ) {
     283                LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO,
     284                        " __kernel_sigaction( sig:%d, handler:%p, flags:%d ), problem installing signal handler, error(%d) %s.\n",
     285                        sig, handler, flags, errno, strerror( errno )
     286                );
     287                _exit( EXIT_FAILURE );
     288        }
     289}
     290
     291typedef void (*sa_handler_t)(int);
     292
     293static void __kernel_sigdefault( int sig ) {
     294        struct sigaction act;
     295
     296        // act.sa_handler = SIG_DFL;
     297        act.sa_flags = 0;
     298        sigemptyset( &act.sa_mask );
     299
     300        if ( sigaction( sig, &act, NULL ) == -1 ) {
     301                LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO,
     302                        " __kernel_sigdefault( sig:%d ), problem reseting signal handler, error(%d) %s.\n",
     303                        sig, errno, strerror( errno )
     304                );
     305                _exit( EXIT_FAILURE );
     306        }
     307}
     308
     309//=============================================================================================
     310// Terminating Signals logic
     311//=============================================================================================
     312
     313LIB_DEBUG_DO(
     314        static void __kernel_backtrace( int start ) {
     315                // skip first N stack frames
     316
     317                enum { Frames = 50 };
     318                void * array[Frames];
     319                int size = backtrace( array, Frames );
     320                char ** messages = backtrace_symbols( array, size );
     321
     322                // find executable name
     323                *index( messages[0], '(' ) = '\0';
     324                #ifdef __USE_STREAM__
     325                serr | "Stack back trace for:" | messages[0] | endl;
     326                #else
     327                fprintf( stderr, "Stack back trace for: %s\n", messages[0]);
     328                #endif
     329
     330                // skip last 2 stack frames after main
     331                for ( int i = start; i < size && messages != NULL; i += 1 ) {
     332                        char * name = NULL;
     333                        char * offset_begin = NULL;
     334                        char * offset_end = NULL;
     335
     336                        for ( char *p = messages[i]; *p; ++p ) {
     337                                // find parantheses and +offset
     338                                if ( *p == '(' ) {
     339                                        name = p;
     340                                }
     341                                else if ( *p == '+' ) {
     342                                        offset_begin = p;
     343                                }
     344                                else if ( *p == ')' ) {
     345                                        offset_end = p;
     346                                        break;
     347                                }
     348                        }
     349
     350                        // if line contains symbol print it
     351                        int frameNo = i - start;
     352                        if ( name && offset_begin && offset_end && name < offset_begin ) {
     353                                // delimit strings
     354                                *name++ = '\0';
     355                                *offset_begin++ = '\0';
     356                                *offset_end++ = '\0';
     357
     358                                #ifdef __USE_STREAM__
     359                                serr    | "("  | frameNo | ")" | messages[i] | ":"
     360                                        | name | "+" | offset_begin | offset_end | endl;
     361                                #else
     362                                fprintf( stderr, "(%i) %s : %s + %s %s\n", frameNo, messages[i], name, offset_begin, offset_end);
     363                                #endif
     364                        }
     365                        // otherwise, print the whole line
     366                        else {
     367                                #ifdef __USE_STREAM__
     368                                serr | "(" | frameNo | ")" | messages[i] | endl;
     369                                #else
     370                                fprintf( stderr, "(%i) %s\n", frameNo, messages[i] );
     371                                #endif
     372                        }
     373                }
     374
     375                free( messages );
     376        }
     377)
     378
     379void sigHandler_segv( __CFA_SIGPARMS__ ) {
     380        LIB_DEBUG_DO(
     381                #ifdef __USE_STREAM__
     382                serr    | "*CFA runtime error* program cfa-cpp terminated with"
     383                        | (sig == SIGSEGV ? "segment fault." : "bus error.")
     384                        | endl;
     385                #else
     386                fprintf( stderr, "*CFA runtime error* program cfa-cpp terminated with %s\n", sig == SIGSEGV ? "segment fault." : "bus error." );
     387                #endif
     388
     389                // skip first 2 stack frames
     390                __kernel_backtrace( 1 );
     391        )
     392        exit( EXIT_FAILURE );
     393}
     394
     395// void sigHandler_abort( __CFA_SIGPARMS__ ) {
     396//      // skip first 6 stack frames
     397//      LIB_DEBUG_DO( __kernel_backtrace( 6 ); )
     398
     399//      // reset default signal handler
     400//      __kernel_sigdefault( SIGABRT );
     401
     402//      raise( SIGABRT );
     403// }
  • src/libcfa/concurrency/thread

    r4c03e63 r7bbba76  
    5454}
    5555
    56 thread_desc * this_thread(void);
     56extern volatile thread_local thread_desc * this_thread;
    5757
    5858forall( dtype T | is_thread(T) )
  • src/libcfa/concurrency/thread.c

    r4c03e63 r7bbba76  
    2828}
    2929
    30 extern thread_local processor * this_processor;
     30extern volatile thread_local processor * this_processor;
    3131
    3232//-----------------------------------------------------------------------------
     
    7171        coroutine_desc* thrd_c = get_coroutine(this);
    7272        thread_desc*  thrd_h = get_thread   (this);
    73         thrd_c->last = this_coroutine();
    74         this_processor->current_coroutine = thrd_c;
     73        thrd_c->last = this_coroutine;
    7574
    76         LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
     75        // LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
    7776
     77        disable_interrupts();
    7878        create_stack(&thrd_c->stack, thrd_c->stack.size);
     79        this_coroutine = thrd_c;
    7980        CtxStart(this, CtxInvokeThread);
     81        assert( thrd_c->last->stack.context );
    8082        CtxSwitch( thrd_c->last->stack.context, thrd_c->stack.context );
    8183
    8284        ScheduleThread(thrd_h);
     85        enable_interrupts( DEBUG_CTX );
    8386}
    8487
    8588void yield( void ) {
    86         ScheduleInternal( this_processor->current_thread );
     89        BlockInternal( (thread_desc *)this_thread );
    8790}
    8891
     
    9598void ThreadCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
    9699        // set state of current coroutine to inactive
    97         src->state = Inactive;
     100        src->state = src->state == Halted ? Halted : Inactive;
    98101        dst->state = Active;
    99102
     
    103106        // set new coroutine that the processor is executing
    104107        // and context switch to it
    105         this_processor->current_coroutine = dst;
     108        this_coroutine = dst;
     109        assert( src->stack.context );
    106110        CtxSwitch( src->stack.context, dst->stack.context );
    107         this_processor->current_coroutine = src;
     111        this_coroutine = src;
    108112
    109113        // set state of new coroutine to active
    110         dst->state = Inactive;
     114        dst->state = dst->state == Halted ? Halted : Inactive;
    111115        src->state = Active;
    112116}
  • src/libcfa/libhdr/libalign.h

    r4c03e63 r7bbba76  
    1 //                              -*- Mode: C++ -*- 
     1//                              -*- Mode: C++ -*-
    22//
    33// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
     
    1818// Free Software  Foundation; either  version 2.1 of  the License, or  (at your
    1919// option) any later version.
    20 // 
     20//
    2121// This library is distributed in the  hope that it will be useful, but WITHOUT
    2222// ANY  WARRANTY;  without even  the  implied  warranty  of MERCHANTABILITY  or
    2323// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
    2424// for more details.
    25 // 
     25//
    2626// You should  have received a  copy of the  GNU Lesser General  Public License
    2727// along  with this library.
    28 // 
     28//
    2929
    3030
     
    3333
    3434#include "assert"
     35#include <stdbool.h>
    3536
    36 // Minimum size used to align memory boundaries for memory allocations. 
     37// Minimum size used to align memory boundaries for memory allocations.
    3738#define libAlign() (sizeof(double))
    3839
  • src/libcfa/libhdr/libdebug.h

    r4c03e63 r7bbba76  
    1818
    1919#ifdef __CFA_DEBUG__
    20         #define LIB_DEBUG_DO(x) x
    21         #define LIB_NO_DEBUG_DO(x) ((void)0)
     20        #define LIB_DEBUG_DO(...) __VA_ARGS__
     21        #define LIB_NO_DEBUG_DO(...)
     22        #define DEBUG_CTX __PRETTY_FUNCTION__
     23        #define DEBUG_CTX2 , __PRETTY_FUNCTION__
     24        #define DEBUG_CTX_PARAM const char * caller
     25        #define DEBUG_CTX_PARAM2 , const char * caller
    2226#else
    23         #define LIB_DEBUG_DO(x) ((void)0)
    24         #define LIB_NO_DEBUG_DO(x) x     
     27        #define LIB_DEBUG_DO(...)
     28        #define LIB_NO_DEBUG_DO(...) __VA_ARGS__
     29        #define DEBUG_CTX
     30        #define DEBUG_CTX2
     31        #define DEBUG_CTX_PARAM
     32        #define DEBUG_CTX_PARAM2
    2533#endif
    2634
     
    5159
    5260#ifdef __CFA_DEBUG_PRINT__
    53       #define LIB_DEBUG_WRITE( fd, buffer, len )  __lib_debug_write( fd, buffer, len )
    54       #define LIB_DEBUG_ACQUIRE()                 __lib_debug_acquire()
    55       #define LIB_DEBUG_RELEASE()                 __lib_debug_release()
    56       #define LIB_DEBUG_PRINT_SAFE(...)           __lib_debug_print_safe   (__VA_ARGS__)
    57       #define LIB_DEBUG_PRINT_NOLOCK(...)         __lib_debug_print_nolock (__VA_ARGS__)
    58       #define LIB_DEBUG_PRINT_BUFFER(...)         __lib_debug_print_buffer (__VA_ARGS__)
     61        #define LIB_DEBUG_WRITE( fd, buffer, len )     __lib_debug_write( fd, buffer, len )
     62        #define LIB_DEBUG_ACQUIRE()                    __lib_debug_acquire()
     63        #define LIB_DEBUG_RELEASE()                    __lib_debug_release()
     64        #define LIB_DEBUG_PRINT_SAFE(...)              __lib_debug_print_safe   (__VA_ARGS__)
     65        #define LIB_DEBUG_PRINT_NOLOCK(...)            __lib_debug_print_nolock (__VA_ARGS__)
     66        #define LIB_DEBUG_PRINT_BUFFER(...)            __lib_debug_print_buffer (__VA_ARGS__)
     67        #define LIB_DEBUG_PRINT_BUFFER_DECL(fd, ...)   char text[256]; int len = snprintf( text, 256, __VA_ARGS__ ); __lib_debug_write( fd, text, len );
     68        #define LIB_DEBUG_PRINT_BUFFER_LOCAL(fd, ...)  len = snprintf( text, 256, __VA_ARGS__ ); __lib_debug_write( fd, text, len );
    5969#else
    60       #define LIB_DEBUG_WRITE(...)          ((void)0)
    61       #define LIB_DEBUG_ACQUIRE()           ((void)0)
    62       #define LIB_DEBUG_RELEASE()           ((void)0)
    63       #define LIB_DEBUG_PRINT_SAFE(...)     ((void)0)
    64       #define LIB_DEBUG_PRINT_NOLOCK(...)   ((void)0)
    65       #define LIB_DEBUG_PRINT_BUFFER(...)   ((void)0)
     70        #define LIB_DEBUG_WRITE(...)               ((void)0)
     71        #define LIB_DEBUG_ACQUIRE()                ((void)0)
     72        #define LIB_DEBUG_RELEASE()                ((void)0)
     73        #define LIB_DEBUG_PRINT_SAFE(...)          ((void)0)
     74        #define LIB_DEBUG_PRINT_NOLOCK(...)        ((void)0)
     75        #define LIB_DEBUG_PRINT_BUFFER(...)        ((void)0)
     76        #define LIB_DEBUG_PRINT_BUFFER_DECL(...)   ((void)0)
     77        #define LIB_DEBUG_PRINT_BUFFER_LOCAL(...)  ((void)0)
    6678#endif
    6779
  • src/tests/sched-int-block.c

    r4c03e63 r7bbba76  
    3131//------------------------------------------------------------------------------
    3232void wait_op( global_data_t * mutex a, global_data_t * mutex b, unsigned i ) {
    33         wait( &cond, (uintptr_t)this_thread() );
     33        wait( &cond, (uintptr_t)this_thread );
    3434
    3535        yield( ((unsigned)rand48()) % 10 );
     
    4040        }
    4141
    42         a->last_thread = b->last_thread = this_thread();
     42        a->last_thread = b->last_thread = this_thread;
    4343
    4444        yield( ((unsigned)rand48()) % 10 );
     
    5656        yield( ((unsigned)rand48()) % 10 );
    5757
    58         a->last_thread = b->last_thread = a->last_signaller = b->last_signaller = this_thread();
     58        a->last_thread = b->last_thread = a->last_signaller = b->last_signaller = this_thread;
    5959
    6060        if( !is_empty( &cond ) ) {
     
    8686//------------------------------------------------------------------------------
    8787void barge_op( global_data_t * mutex a ) {
    88         a->last_thread = this_thread();
     88        a->last_thread = this_thread;
    8989}
    9090
Note: See TracChangeset for help on using the changeset viewer.