Changes in / [14cebb7a:36354b1]


Ignore:
Files:
3 added
23 edited

Legend:

Unmodified
Added
Removed
  • benchmark/ctxswitch/cfa_cor.cfa

    r14cebb7a r36354b1  
    1111}
    1212
    13 void main( GreatSuspender & this ) {
     13void main( __attribute__((unused)) GreatSuspender & this ) {
    1414        while( true ) {
    1515                suspend();
  • benchmark/ctxswitch/cfa_thrd2.cfa

    r14cebb7a r36354b1  
    88thread Fibre {};
    99
    10 void main(Fibre & this) {
     10void main(__attribute__((unused)) Fibre & this) {
    1111        while(!done) {
    1212                yield();
  • configure

    r14cebb7a r36354b1  
    637637LIBOBJS
    638638CFA_BACKEND_CC
     639WITH_LIBTCMALLOC_FALSE
     640WITH_LIBTCMALLOC_TRUE
     641WITH_LIBPROFILER_FALSE
     642WITH_LIBPROFILER_TRUE
    639643WITH_LIBFIBRE_FALSE
    640644WITH_LIBFIBRE_TRUE
     
    33983402                "debug") ;;
    33993403                "nolib") ;;
     3404                "profile") ;;
    34003405                *)
    34013406                        >&2 echo "Configuration must be 'debug', 'nodebug' or 'nolib'"
     
    1665716662
    1665816663
     16664{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ProfilingIsEnabledForAllThreads in -lprofiler" >&5
     16665$as_echo_n "checking for ProfilingIsEnabledForAllThreads in -lprofiler... " >&6; }
     16666if ${ac_cv_lib_profiler_ProfilingIsEnabledForAllThreads+:} false; then :
     16667  $as_echo_n "(cached) " >&6
     16668else
     16669  ac_check_lib_save_LIBS=$LIBS
     16670LIBS="-lprofiler  $LIBS"
     16671cat confdefs.h - <<_ACEOF >conftest.$ac_ext
     16672/* end confdefs.h.  */
     16673
     16674/* Override any GCC internal prototype to avoid an error.
     16675   Use char because int might match the return type of a GCC
     16676   builtin and then its argument prototype would still apply.  */
     16677#ifdef __cplusplus
     16678extern "C"
     16679#endif
     16680char ProfilingIsEnabledForAllThreads ();
     16681int
     16682main ()
     16683{
     16684return ProfilingIsEnabledForAllThreads ();
     16685  ;
     16686  return 0;
     16687}
     16688_ACEOF
     16689if ac_fn_c_try_link "$LINENO"; then :
     16690  ac_cv_lib_profiler_ProfilingIsEnabledForAllThreads=yes
     16691else
     16692  ac_cv_lib_profiler_ProfilingIsEnabledForAllThreads=no
     16693fi
     16694rm -f core conftest.err conftest.$ac_objext \
     16695    conftest$ac_exeext conftest.$ac_ext
     16696LIBS=$ac_check_lib_save_LIBS
     16697fi
     16698{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_profiler_ProfilingIsEnabledForAllThreads" >&5
     16699$as_echo "$ac_cv_lib_profiler_ProfilingIsEnabledForAllThreads" >&6; }
     16700if test "x$ac_cv_lib_profiler_ProfilingIsEnabledForAllThreads" = xyes; then :
     16701  HAVE_LIBPROFILER=1
     16702else
     16703  HAVE_LIBPROFILER=0
     16704fi
     16705
     16706 if test "$HAVE_LIBPROFILER" -eq 1; then
     16707  WITH_LIBPROFILER_TRUE=
     16708  WITH_LIBPROFILER_FALSE='#'
     16709else
     16710  WITH_LIBPROFILER_TRUE='#'
     16711  WITH_LIBPROFILER_FALSE=
     16712fi
     16713
     16714
     16715{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for malloc in -ltcmalloc" >&5
     16716$as_echo_n "checking for malloc in -ltcmalloc... " >&6; }
     16717if ${ac_cv_lib_tcmalloc_malloc+:} false; then :
     16718  $as_echo_n "(cached) " >&6
     16719else
     16720  ac_check_lib_save_LIBS=$LIBS
     16721LIBS="-ltcmalloc  $LIBS"
     16722cat confdefs.h - <<_ACEOF >conftest.$ac_ext
     16723/* end confdefs.h.  */
     16724
     16725/* Override any GCC internal prototype to avoid an error.
     16726   Use char because int might match the return type of a GCC
     16727   builtin and then its argument prototype would still apply.  */
     16728#ifdef __cplusplus
     16729extern "C"
     16730#endif
     16731char malloc ();
     16732int
     16733main ()
     16734{
     16735return malloc ();
     16736  ;
     16737  return 0;
     16738}
     16739_ACEOF
     16740if ac_fn_c_try_link "$LINENO"; then :
     16741  ac_cv_lib_tcmalloc_malloc=yes
     16742else
     16743  ac_cv_lib_tcmalloc_malloc=no
     16744fi
     16745rm -f core conftest.err conftest.$ac_objext \
     16746    conftest$ac_exeext conftest.$ac_ext
     16747LIBS=$ac_check_lib_save_LIBS
     16748fi
     16749{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_tcmalloc_malloc" >&5
     16750$as_echo "$ac_cv_lib_tcmalloc_malloc" >&6; }
     16751if test "x$ac_cv_lib_tcmalloc_malloc" = xyes; then :
     16752  HAVE_LIBTCMALLOC=1
     16753else
     16754  HAVE_LIBTCMALLOC=0
     16755fi
     16756
     16757 if test "$HAVE_LIBTCMALLOC" -eq 1; then
     16758  WITH_LIBTCMALLOC_TRUE=
     16759  WITH_LIBTCMALLOC_FALSE='#'
     16760else
     16761  WITH_LIBTCMALLOC_TRUE='#'
     16762  WITH_LIBTCMALLOC_FALSE=
     16763fi
     16764
     16765
    1665916766# Checks for header files.
    1666016767for ac_header in libintl.h malloc.h unistd.h
     
    1688916996if test -z "${WITH_LIBFIBRE_TRUE}" && test -z "${WITH_LIBFIBRE_FALSE}"; then
    1689016997  as_fn_error $? "conditional \"WITH_LIBFIBRE\" was never defined.
     16998Usually this means the macro was only invoked conditionally." "$LINENO" 5
     16999fi
     17000if test -z "${WITH_LIBPROFILER_TRUE}" && test -z "${WITH_LIBPROFILER_FALSE}"; then
     17001  as_fn_error $? "conditional \"WITH_LIBPROFILER\" was never defined.
     17002Usually this means the macro was only invoked conditionally." "$LINENO" 5
     17003fi
     17004if test -z "${WITH_LIBTCMALLOC_TRUE}" && test -z "${WITH_LIBTCMALLOC_FALSE}"; then
     17005  as_fn_error $? "conditional \"WITH_LIBTCMALLOC\" was never defined.
    1689117006Usually this means the macro was only invoked conditionally." "$LINENO" 5
    1689217007fi
  • configure.ac

    r14cebb7a r36354b1  
    139139                "debug") ;;
    140140                "nolib") ;;
     141                "profile") ;;
    141142                *)
    142143                        >&2 echo "Configuration must be 'debug', 'nodebug' or 'nolib'"
     
    190191AM_CONDITIONAL([WITH_LIBFIBRE], [test "$HAVE_LIBFIBRE" -eq 1])
    191192
     193AC_CHECK_LIB([profiler], [ProfilingIsEnabledForAllThreads], [HAVE_LIBPROFILER=1], [HAVE_LIBPROFILER=0])
     194AM_CONDITIONAL([WITH_LIBPROFILER], [test "$HAVE_LIBPROFILER" -eq 1])
     195
     196AC_CHECK_LIB([tcmalloc], [malloc], [HAVE_LIBTCMALLOC=1], [HAVE_LIBTCMALLOC=0])
     197AM_CONDITIONAL([WITH_LIBTCMALLOC], [test "$HAVE_LIBTCMALLOC" -eq 1])
     198
    192199# Checks for header files.
    193200AC_CHECK_HEADERS([libintl.h malloc.h unistd.h], [], [echo "Error: Missing required header"; exit 1])
  • libcfa/configure

    r14cebb7a r36354b1  
    29592959case $CONFIGURATION in
    29602960        "debug"   )
    2961                 CONFIG_CFLAGS="-O0 -g"
     2961                CONFIG_CFLAGS="-Og -g"
    29622962                CONFIG_CFAFLAGS="-debug"
    29632963                CONFIG_BUILDLIB="yes"
    29642964        ;;
    29652965        "nodebug" )
    2966                 CONFIG_CFLAGS="-O2 -s"
     2966                CONFIG_CFLAGS="-O3 -s"
    29672967                CONFIG_CFAFLAGS="-nodebug"
    29682968                CONFIG_BUILDLIB="yes"
    29692969        ;;
    29702970        "nolib"   )
    2971                 CONFIG_CFLAGS="-O2 -s"
     2971                CONFIG_CFLAGS="-O3 -s"
    29722972                CONFIG_CFAFLAGS="-nolib"
    29732973                CONFIG_BUILDLIB="no"
     2974        ;;
     2975        "profile" )
     2976                CONFIG_CFLAGS="-O3 -g -fno-omit-frame-pointer"
     2977                CONFIG_CFAFLAGS="-nodebug"
     2978                CONFIG_BUILDLIB="yes"
    29742979        ;;
    29752980        *)
  • libcfa/configure.ac

    r14cebb7a r36354b1  
    4545case $CONFIGURATION in
    4646        "debug"   )
    47                 CONFIG_CFLAGS="-O0 -g"
     47                CONFIG_CFLAGS="-Og -g"
    4848                CONFIG_CFAFLAGS="-debug"
    4949                CONFIG_BUILDLIB="yes"
    5050        ;;
    5151        "nodebug" )
    52                 CONFIG_CFLAGS="-O2 -s"
     52                CONFIG_CFLAGS="-O3 -s"
    5353                CONFIG_CFAFLAGS="-nodebug"
    5454                CONFIG_BUILDLIB="yes"
    5555        ;;
    5656        "nolib"   )
    57                 CONFIG_CFLAGS="-O2 -s"
     57                CONFIG_CFLAGS="-O3 -s"
    5858                CONFIG_CFAFLAGS="-nolib"
    5959                CONFIG_BUILDLIB="no"
     60        ;;
     61        "profile" )
     62                CONFIG_CFLAGS="-O3 -g -fno-omit-frame-pointer"
     63                CONFIG_CFAFLAGS="-nodebug"
     64                CONFIG_BUILDLIB="yes"
    6065        ;;
    6166        *)
  • libcfa/src/bits/containers.hfa

    r14cebb7a r36354b1  
    186186
    187187        forall(dtype T | is_node(T))
    188         static inline bool ?!=?( __queue(T) & this, zero_t zero ) {
     188        static inline bool ?!=?( __queue(T) & this, __attribute__((unused)) zero_t zero ) {
    189189                return this.head != 0;
    190190        }
     
    196196//-----------------------------------------------------------------------------
    197197#ifdef __cforall
    198         forall(dtype TYPE | sized(TYPE))
     198        forall(dtype TYPE)
    199199        #define T TYPE
    200200        #define __getter_t * [T * & next, T * & prev] ( T & )
     
    268268
    269269        forall(dtype T | sized(T))
    270         static inline bool ?!=?( __dllist(T) & this, zero_t zero ) {
     270        static inline bool ?!=?( __dllist(T) & this, __attribute__((unused)) zero_t zero ) {
    271271                return this.head != 0;
    272272        }
  • libcfa/src/concurrency/CtxSwitch-i386.S

    r14cebb7a r36354b1  
    4141#define PC_OFFSET       ( 2 * PTR_BYTE )
    4242
    43 .text
     43        .text
    4444        .align 2
    45 .globl  CtxSwitch
     45        .globl CtxSwitch
     46        .type  CtxSwitch, @function
    4647CtxSwitch:
    4748
     
    5051
    5152        movl 4(%esp),%eax
    52 
    53         // Save floating & SSE control words on the stack.
    54 
    55         sub    $8,%esp
    56         stmxcsr 0(%esp)         // 4 bytes
    57         fnstcw  4(%esp)         // 2 bytes
    5853
    5954        // Save volatile registers on the stack.
     
    6762        movl %esp,SP_OFFSET(%eax)
    6863        movl %ebp,FP_OFFSET(%eax)
    69 //      movl 4(%ebp),%ebx       // save previous eip for debugger
    70 //      movl %ebx,PC_OFFSET(%eax)
    7164
    7265        // Copy the "to" context argument from the stack to register eax
     
    8780        popl %ebx
    8881
    89         // Load floating & SSE control words from the stack.
    90 
    91         fldcw   4(%esp)
    92         ldmxcsr 0(%esp)
    93         add    $8,%esp
    94 
    9582        // Return to thread.
    9683
    9784        ret
     85        .size  CtxSwitch, .-CtxSwitch
    9886
    9987// Local Variables: //
  • libcfa/src/concurrency/CtxSwitch-x86_64.S

    r14cebb7a r36354b1  
    3939#define SP_OFFSET       ( 0 * PTR_BYTE )
    4040#define FP_OFFSET       ( 1 * PTR_BYTE )
    41 #define PC_OFFSET       ( 2 * PTR_BYTE )
    4241
    43 .text
     42//-----------------------------------------------------------------------------
     43// Regular context switch routine which enables switching from one context to another
     44        .text
    4445        .align 2
    45 .globl  CtxSwitch
     46        .globl CtxSwitch
     47        .type  CtxSwitch, @function
    4648CtxSwitch:
    47 
    48         // Save floating & SSE control words on the stack.
    49 
    50         subq   $8,%rsp
    51         stmxcsr 0(%rsp)         // 4 bytes
    52         fnstcw  4(%rsp)         // 2 bytes
    5349
    5450        // Save volatile registers on the stack.
     
    7874        popq %r15
    7975
    80         // Load floating & SSE control words from the stack.
    81 
    82         fldcw   4(%rsp)
    83         ldmxcsr 0(%rsp)
    84         addq   $8,%rsp
    85 
    8676        // Return to thread.
    8777
    8878        ret
     79        .size  CtxSwitch, .-CtxSwitch
    8980
    90 //.text
    91 //      .align 2
    92 //.globl        CtxStore
    93 //CtxStore:
    94 //      // Save floating & SSE control words on the stack.
    95 //
    96 //      subq   $8,%rsp
    97 //      stmxcsr 0(%rsp)         // 4 bytes
    98 //      fnstcw  4(%rsp)         // 2 bytes
    99 //
    100 //      // Save volatile registers on the stack.
    101 //
    102 //      pushq %r15
    103 //      pushq %r14
    104 //      pushq %r13
    105 //      pushq %r12
    106 //      pushq %rbx
    107 //
    108 //      // Save old context in the "from" area.
    109 //
    110 //      movq %rsp,SP_OFFSET(%rdi)
    111 //      movq %rbp,FP_OFFSET(%rdi)
    112 //
    113 //      // Return to thread
    114 //
    115 //      ret
    116 //
    117 //.text
    118 //      .align 2
    119 //.globl        CtxRet
    120 //CtxRet:
    121 //      // Load new context from the "to" area.
    122 //
    123 //      movq SP_OFFSET(%rdi),%rsp
    124 //      movq FP_OFFSET(%rdi),%rbp
    125 //
    126 //      // Load volatile registers from the stack.
    127 //
    128 //      popq %rbx
    129 //      popq %r12
    130 //      popq %r13
    131 //      popq %r14
    132 //      popq %r15
    133 //
    134 //      // Load floating & SSE control words from the stack.
    135 //
    136 //      fldcw   4(%rsp)
    137 //      ldmxcsr 0(%rsp)
    138 //      addq   $8,%rsp
    139 //
    140 //      // Return to thread.
    141 //
    142 //      ret
    143 
    144 
    145 .text
     81//-----------------------------------------------------------------------------
     82// Stub used to create new stacks which are ready to be context switched to
     83        .text
    14684        .align 2
    147 .globl  CtxInvokeStub
     85        .globl CtxInvokeStub
     86        .type    CtxInvokeStub, @function
    14887CtxInvokeStub:
    14988        movq %rbx, %rdi
    15089        jmp *%r12
     90        .size  CtxInvokeStub, .-CtxInvokeStub
    15191
    15292// Local Variables: //
  • libcfa/src/concurrency/coroutine.cfa

    r14cebb7a r36354b1  
    3535
    3636extern "C" {
    37       void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
    38       static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
    39       static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
    40             abort();
    41       }
     37        void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
     38        static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
     39        static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
     40                abort();
     41        }
     42
     43        extern void CtxRet( struct __stack_context_t * to ) asm ("CtxRet") __attribute__ ((__noreturn__));
    4244}
    4345
     
    4749// minimum feasible stack size in bytes
    4850#define MinStackSize 1000
    49 static size_t pageSize = 0;                             // architecture pagesize HACK, should go in proper runtime singleton
     51extern size_t __page_size;                              // architecture pagesize HACK, should go in proper runtime singleton
     52
     53void __stack_prepare( __stack_info_t * this, size_t create_size );
    5054
    5155//-----------------------------------------------------------------------------
    5256// Coroutine ctors and dtors
    53 void ?{}( coStack_t & this, void * storage, size_t storageSize ) with( this ) {
    54       size               = storageSize == 0 ? 65000 : storageSize; // size of stack
    55       this.storage = storage;                                // pointer to stack
    56       limit              = NULL;                                   // stack grows towards stack limit
    57       base               = NULL;                                   // base of stack
    58       context    = NULL;                                   // address of cfa_context_t
    59       top                = NULL;                                   // address of top of storage
    60       userStack  = storage != NULL;
    61 }
    62 
    63 void ^?{}(coStack_t & this) {
    64       if ( ! this.userStack && this.storage ) {
    65             __cfaabi_dbg_debug_do(
    66                   if ( mprotect( this.storage, pageSize, PROT_READ | PROT_WRITE ) == -1 ) {
    67                         abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
    68                   }
    69             );
    70             free( this.storage );
    71       }
     57void ?{}( __stack_info_t & this, void * storage, size_t storageSize ) {
     58        this.storage   = (__stack_t *)storage;
     59
     60        // Did we get a piece of storage ?
     61        if (this.storage || storageSize != 0) {
     62                // We either got a piece of storage or the user asked for a specific size
     63                // Immediately create the stack
     64                // (This is slightly unintuitive that non-default sized coroutines are eagerly created
     65                // but it avoids that all coroutines carry an unnecessary size)
     66                verify( storageSize != 0 );
     67                __stack_prepare( &this, storageSize );
     68        }
     69}
     70
     71void ^?{}(__stack_info_t & this) {
     72        bool userStack = ((intptr_t)this.storage & 0x1) != 0;
     73        if ( ! userStack && this.storage ) {
     74                __attribute__((may_alias)) intptr_t * istorage = (intptr_t *)&this.storage;
     75                *istorage &= (intptr_t)-1;
     76
     77                void * storage = this.storage->limit;
     78                __cfaabi_dbg_debug_do(
     79                        storage = (char*)(storage) - __page_size;
     80                        if ( mprotect( storage, __page_size, PROT_READ | PROT_WRITE ) == -1 ) {
     81                                abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
     82                        }
     83                );
     84                __cfaabi_dbg_print_safe("Kernel : Deleting stack %p\n", storage);
     85                free( storage );
     86        }
    7287}
    7388
    7489void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize ) with( this ) {
    75       (this.stack){storage, storageSize};
    76       this.name = name;
    77       errno_ = 0;
    78       state = Start;
    79       starter = NULL;
    80       last = NULL;
    81       cancellation = NULL;
     90        (this.context){NULL, NULL};
     91        (this.stack){storage, storageSize};
     92        this.name = name;
     93        state = Start;
     94        starter = NULL;
     95        last = NULL;
     96        cancellation = NULL;
    8297}
    8398
    8499void ^?{}(coroutine_desc& this) {
    85       if(this.state != Halted && this.state != Start) {
    86             coroutine_desc * src = TL_GET( this_thread )->curr_cor;
    87             coroutine_desc * dst = &this;
    88 
    89             struct _Unwind_Exception storage;
    90             storage.exception_class = -1;
    91             storage.exception_cleanup = _CtxCoroutine_UnwindCleanup;
    92             this.cancellation = &storage;
    93             this.last = src;
    94 
    95               // not resuming self ?
    96               if ( src == dst ) {
    97                       abort( "Attempt by coroutine %.256s (%p) to terminate itself.\n", src->name, src );
    98             }
    99 
    100               CoroutineCtxSwitch( src, dst );
    101       }
     100        if(this.state != Halted && this.state != Start) {
     101                coroutine_desc * src = TL_GET( this_thread )->curr_cor;
     102                coroutine_desc * dst = &this;
     103
     104                struct _Unwind_Exception storage;
     105                storage.exception_class = -1;
     106                storage.exception_cleanup = _CtxCoroutine_UnwindCleanup;
     107                this.cancellation = &storage;
     108                this.last = src;
     109
     110                // not resuming self ?
     111                if ( src == dst ) {
     112                        abort( "Attempt by coroutine %.256s (%p) to terminate itself.\n", src->name, src );
     113                }
     114
     115                CoroutineCtxSwitch( src, dst );
     116        }
    102117}
    103118
     
    106121forall(dtype T | is_coroutine(T))
    107122void prime(T& cor) {
    108       coroutine_desc* this = get_coroutine(cor);
    109       assert(this->state == Start);
    110 
    111       this->state = Primed;
    112       resume(cor);
    113 }
    114 
    115 // Wrapper for co
    116 void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
    117       // Safety note : Preemption must be disabled since there is a race condition
    118       // kernelTLS.this_thread->curr_cor and $rsp/$rbp must agree at all times
    119       verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
    120       disable_interrupts();
    121 
    122       // set state of current coroutine to inactive
    123       src->state = src->state == Halted ? Halted : Inactive;
    124 
    125       // set new coroutine that task is executing
    126       TL_GET( this_thread )->curr_cor = dst;
    127 
    128       // context switch to specified coroutine
    129       assert( src->stack.context );
    130       CtxSwitch( src->stack.context, dst->stack.context );
    131       // when CtxSwitch returns we are back in the src coroutine
    132 
    133       // set state of new coroutine to active
    134       src->state = Active;
    135 
    136       enable_interrupts( __cfaabi_dbg_ctx );
    137       verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
    138 
    139 
    140       if( unlikely(src->cancellation != NULL) ) {
    141             _CtxCoroutine_Unwind(src->cancellation, src);
    142       }
    143 } //ctxSwitchDirect
    144 
    145 void create_stack( coStack_t* this, unsigned int storageSize ) with( *this ) {
    146       //TEMP HACK do this on proper kernel startup
    147       if(pageSize == 0ul) pageSize = sysconf( _SC_PAGESIZE );
    148 
    149       size_t cxtSize = libCeiling( sizeof(machine_context_t), 8 ); // minimum alignment
    150 
    151       if ( !storage ) {
    152             __cfaabi_dbg_print_safe("Kernel : Creating stack of size %zu for stack obj %p\n", cxtSize + size + 8, this);
    153 
    154             userStack = false;
    155             size = libCeiling( storageSize, 16 );
    156             // use malloc/memalign because "new" raises an exception for out-of-memory
    157 
    158             // assume malloc has 8 byte alignment so add 8 to allow rounding up to 16 byte alignment
    159             __cfaabi_dbg_debug_do( storage = memalign( pageSize, cxtSize + size + pageSize ) );
    160             __cfaabi_dbg_no_debug_do( storage = malloc( cxtSize + size + 8 ) );
    161 
    162             __cfaabi_dbg_debug_do(
    163                   if ( mprotect( storage, pageSize, PROT_NONE ) == -1 ) {
    164                         abort( "(uMachContext &)%p.createContext() : internal error, mprotect failure, error(%d) %s.", this, (int)errno, strerror( (int)errno ) );
    165                   } // if
    166             );
    167 
    168             if ( (intptr_t)storage == 0 ) {
    169                   abort( "Attempt to allocate %zd bytes of storage for coroutine or task execution-state but insufficient memory available.", size );
    170             } // if
    171 
    172             __cfaabi_dbg_debug_do( limit = (char *)storage + pageSize );
    173             __cfaabi_dbg_no_debug_do( limit = (char *)libCeiling( (unsigned long)storage, 16 ) ); // minimum alignment
    174 
    175       } else {
    176             __cfaabi_dbg_print_safe("Kernel : stack obj %p using user stack %p(%u bytes)\n", this, storage, storageSize);
    177 
    178             assertf( ((size_t)storage & (libAlign() - 1)) == 0ul, "Stack storage %p for task/coroutine must be aligned on %d byte boundary.", storage, (int)libAlign() );
    179             userStack = true;
    180             size = storageSize - cxtSize;
    181 
    182             if ( size % 16 != 0u ) size -= 8;
    183 
    184             limit = (char *)libCeiling( (unsigned long)storage, 16 ); // minimum alignment
    185       } // if
    186       assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize );
    187 
    188       base = (char *)limit + size;
    189       context = base;
    190       top = (char *)context + cxtSize;
     123        coroutine_desc* this = get_coroutine(cor);
     124        assert(this->state == Start);
     125
     126        this->state = Primed;
     127        resume(cor);
     128}
     129
     130[void *, size_t] __stack_alloc( size_t storageSize ) {
     131        static const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
     132        assert(__page_size != 0l);
     133        size_t size = libCeiling( storageSize, 16 ) + stack_data_size;
     134
     135        // If we are running debug, we also need to allocate a guardpage to catch stack overflows.
     136        void * storage;
     137        __cfaabi_dbg_debug_do(
     138                storage = memalign( __page_size, size + __page_size );
     139        );
     140        __cfaabi_dbg_no_debug_do(
     141                storage = (void*)malloc(size);
     142        );
     143
     144        __cfaabi_dbg_print_safe("Kernel : Created stack %p of size %zu\n", storage, size);
     145        __cfaabi_dbg_debug_do(
     146                if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
     147                        abort( "__stack_alloc : internal error, mprotect failure, error(%d) %s.", (int)errno, strerror( (int)errno ) );
     148                }
     149                storage = (void *)(((intptr_t)storage) + __page_size);
     150        );
     151
     152        verify( ((intptr_t)storage & (libAlign() - 1)) == 0ul );
     153        return [storage, size];
     154}
     155
     156void __stack_prepare( __stack_info_t * this, size_t create_size ) {
     157        static const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
     158        bool userStack;
     159        void * storage;
     160        size_t size;
     161        if ( !this->storage ) {
     162                userStack = false;
     163                [storage, size] = __stack_alloc( create_size );
     164        } else {
     165                userStack = true;
     166                __cfaabi_dbg_print_safe("Kernel : stack obj %p using user stack %p(%zd bytes)\n", this, this->storage, (intptr_t)this->storage->limit - (intptr_t)this->storage->base);
     167
     168                // The stack must be aligned, advance the pointer to the next align data
     169                storage = (void*)libCeiling( (intptr_t)this->storage, libAlign());
     170
     171                // The size needs to be shrinked to fit all the extra data structure and be aligned
     172                ptrdiff_t diff = (intptr_t)storage - (intptr_t)this->storage;
     173                size = libFloor(create_size - stack_data_size - diff, libAlign());
     174        } // if
     175        assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize );
     176
     177        this->storage = (__stack_t *)((intptr_t)storage + size);
     178        this->storage->limit = storage;
     179        this->storage->base  = (void*)((intptr_t)storage + size);
     180        __attribute__((may_alias)) intptr_t * istorage = (intptr_t*)&this->storage;
     181        *istorage |= userStack ? 0x1 : 0x0;
    191182}
    192183
     
    194185// is not inline (We can't inline Cforall in C)
    195186extern "C" {
    196       void __suspend_internal(void) {
    197             suspend();
    198       }
    199 
    200       void __leave_coroutine( coroutine_desc * src ) {
    201             coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter;
    202 
    203             src->state = Halted;
    204 
    205             assertf( starter != 0,
    206                   "Attempt to suspend/leave coroutine \"%.256s\" (%p) that has never been resumed.\n"
    207                   "Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",
    208                   src->name, src );
    209             assertf( starter->state != Halted,
    210                   "Attempt by coroutine \"%.256s\" (%p) to suspend/leave back to terminated coroutine \"%.256s\" (%p).\n"
    211                   "Possible cause is terminated coroutine's main routine has already returned.",
    212                   src->name, src, starter->name, starter );
    213 
    214             CoroutineCtxSwitch( src, starter );
    215       }
     187        void __suspend_internal(void) {
     188                suspend();
     189        }
     190
     191        void __leave_coroutine( coroutine_desc * src ) {
     192                coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter;
     193
     194                src->state = Halted;
     195
     196                assertf( starter != 0,
     197                        "Attempt to suspend/leave coroutine \"%.256s\" (%p) that has never been resumed.\n"
     198                        "Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",
     199                        src->name, src );
     200                assertf( starter->state != Halted,
     201                        "Attempt by coroutine \"%.256s\" (%p) to suspend/leave back to terminated coroutine \"%.256s\" (%p).\n"
     202                        "Possible cause is terminated coroutine's main routine has already returned.",
     203                        src->name, src, starter->name, starter );
     204
     205                CoroutineCtxSwitch( src, starter );
     206        }
    216207}
    217208
  • libcfa/src/concurrency/coroutine.hfa

    r14cebb7a r36354b1  
    6464      forall(dtype T | is_coroutine(T))
    6565      void CtxStart(T * this, void ( *invoke)(T *));
     66
     67        extern void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
     68
     69        extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
    6670}
    6771
    6872// Private wrappers for context switch and stack creation
    69 extern void CoroutineCtxSwitch(coroutine_desc * src, coroutine_desc * dst);
    70 extern void create_stack( coStack_t * this, unsigned int storageSize );
     73// Wrapper for co
     74static inline void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
     75        // set state of current coroutine to inactive
     76        src->state = src->state == Halted ? Halted : Inactive;
     77
     78        // set new coroutine that task is executing
     79        TL_GET( this_thread )->curr_cor = dst;
     80
     81        // context switch to specified coroutine
     82        verify( dst->context.SP );
     83        CtxSwitch( &src->context, &dst->context );
     84        // when CtxSwitch returns we are back in the src coroutine
     85
     86        // set state of new coroutine to active
     87        src->state = Active;
     88
     89        if( unlikely(src->cancellation != NULL) ) {
     90                _CtxCoroutine_Unwind(src->cancellation, src);
     91        }
     92}
     93
     94extern void __stack_prepare   ( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
    7195
    7296// Suspend implementation inlined for performance
     
    102126        coroutine_desc * dst = get_coroutine(cor);
    103127
    104         if( unlikely(!dst->stack.base) ) {
    105                 create_stack(&dst->stack, dst->stack.size);
     128        if( unlikely(dst->context.SP == NULL) ) {
     129                __stack_prepare(&dst->stack, 65000);
    106130                CtxStart(&cor, CtxInvokeCoroutine);
    107131        }
     
    146170}
    147171
    148 
    149 
    150 // static inline bool suspend_checkpoint(void) {
    151 //      // optimization : read TLS once and reuse it
    152 //      // Safety note: this is preemption safe since if
    153 //      // preemption occurs after this line, the pointer
    154 //      // will also migrate which means this value will
    155 //      // stay in syn with the TLS
    156 //      // set state of current coroutine to inactive
    157 //       this->state = Checkpoint;
    158 
    159 //       // context switch to specified coroutine
    160 //       assert( src->stack.context );
    161 
    162 //       CtxStore(src->stack.context);
    163 
    164 //      bool ret = this->state == Checkpoint;
    165 
    166 //       // set state of new coroutine to active
    167 //       src->state = Active;
    168 
    169 //       enable_interrupts( __cfaabi_dbg_ctx );
    170 //       // Safety note : This could cause some false positives due to preemption
    171 //       verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
    172 
    173 //       if( unlikely(src->cancellation != NULL) ) {
    174 //             _CtxCoroutine_Unwind(src->cancellation);
    175 //       }
    176 
    177 //      return ret;
    178 // }
    179 
    180 // static inline void suspend_return(void) {
    181 //      // optimization : read TLS once and reuse it
    182 //      // Safety note: this is preemption safe since if
    183 //      // preemption occurs after this line, the pointer
    184 //      // will also migrate which means this value will
    185 //      // stay in syn with the TLS
    186 //      coroutine_desc * src = TL_GET( this_thread )->curr_cor;
    187 
    188 //      assertf( src->last != 0,
    189 //              "Attempt to suspend coroutine \"%.256s\" (%p) that has never been resumed.\n"
    190 //              "Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",
    191 //              src->name, src );
    192 //      assertf( src->last->state != Halted,
    193 //              "Attempt by coroutine \"%.256s\" (%p) to suspend back to terminated coroutine \"%.256s\" (%p).\n"
    194 //              "Possible cause is terminated coroutine's main routine has already returned.",
    195 //              src->name, src, src->last->name, src->last );
    196 
    197 //      // Safety note : Preemption must be disabled here since kernelTLS.this_coroutine must always be up to date
    198 //       verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
    199 //       disable_interrupts();
    200 
    201 //       // set state of current coroutine to inactive
    202 //       src->state = src->state == Halted ? Halted : Inactive;
    203 
    204 //       // set new coroutine that task is executing
    205 //       kernelTLS.this_coroutine = dst;
    206 
    207 //       // context switch to specified coroutine
    208 //       assert( src->stack.context );
    209 //      CtxRet( src->stack.context );
    210 
    211 //      abort();
    212 // }
    213 
    214172// Local Variables: //
    215173// mode: c //
  • libcfa/src/concurrency/invoke.c

    r14cebb7a r36354b1  
    2929extern void __suspend_internal(void);
    3030extern void __leave_coroutine( struct coroutine_desc * );
    31 extern void __finish_creation( struct coroutine_desc * );
     31extern void __finish_creation( struct thread_desc * );
    3232extern void __leave_thread_monitor( struct thread_desc * this );
    3333extern void disable_interrupts();
     
    4646
    4747        cor->state = Active;
    48 
    49         enable_interrupts( __cfaabi_dbg_ctx );
    5048
    5149        main( this );
     
    9391        // First suspend, once the thread arrives here,
    9492        // the function pointer to main can be invalidated without risk
    95         __finish_creation(&thrd->self_cor);
    96 
    97         // Restore the last to NULL, we clobbered because of the thunk problem
    98         thrd->self_cor.last = NULL;
     93        __finish_creation( thrd );
    9994
    10095        // Officially start the thread by enabling preemption
     
    122117        void (*invoke)(void *)
    123118) {
    124         struct coStack_t* stack = &get_coroutine( this )->stack;
     119        struct coroutine_desc * cor = get_coroutine( this );
     120        struct __stack_t * stack = cor->stack.storage;
    125121
    126122#if defined( __i386 )
     
    128124        struct FakeStack {
    129125            void *fixedRegisters[3];                    // fixed registers ebx, edi, esi (popped on 1st uSwitch, values unimportant)
    130             uint32_t mxcr;                        // SSE Status and Control bits (control bits are preserved across function calls)
    131             uint16_t fcw;                         // X97 FPU control word (preserved across function calls)
    132126            void *rturn;                          // where to go on return from uSwitch
    133127            void *dummyReturn;                          // fake return compiler would have pushed on call to uInvoke
     
    136130        };
    137131
    138         ((struct machine_context_t *)stack->context)->SP = (char *)stack->base - sizeof( struct FakeStack );
    139         ((struct machine_context_t *)stack->context)->FP = NULL;                // terminate stack with NULL fp
     132        cor->context.SP = (char *)stack->base - sizeof( struct FakeStack );
     133        cor->context.FP = NULL;         // terminate stack with NULL fp
    140134
    141         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->dummyReturn = NULL;
    142         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->argument[0] = this;     // argument to invoke
    143         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = invoke;
    144         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520
    145         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7
     135        struct FakeStack *fs = (struct FakeStack *)cor->context.SP;
     136
     137        fs->dummyReturn = NULL;
     138        fs->argument[0] = this;     // argument to invoke
     139        fs->rturn = invoke;
    146140
    147141#elif defined( __x86_64 )
     
    149143        struct FakeStack {
    150144                void *fixedRegisters[5];            // fixed registers rbx, r12, r13, r14, r15
    151                 uint32_t mxcr;                      // SSE Status and Control bits (control bits are preserved across function calls)
    152                 uint16_t fcw;                       // X97 FPU control word (preserved across function calls)
    153145                void *rturn;                        // where to go on return from uSwitch
    154146                void *dummyReturn;                  // NULL return address to provide proper alignment
    155147        };
    156148
    157         ((struct machine_context_t *)stack->context)->SP = (char *)stack->base - sizeof( struct FakeStack );
    158         ((struct machine_context_t *)stack->context)->FP = NULL;                // terminate stack with NULL fp
     149        cor->context.SP = (char *)stack->base - sizeof( struct FakeStack );
     150        cor->context.FP = NULL;         // terminate stack with NULL fp
    159151
    160         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->dummyReturn = NULL;
    161         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = CtxInvokeStub;
    162         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fixedRegisters[0] = this;
    163         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fixedRegisters[1] = invoke;
    164         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520
    165         ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7
     152        struct FakeStack *fs = (struct FakeStack *)cor->context.SP;
     153
     154        fs->dummyReturn = NULL;
     155        fs->rturn = CtxInvokeStub;
     156        fs->fixedRegisters[0] = this;
     157        fs->fixedRegisters[1] = invoke;
    166158
    167159#elif defined( __ARM_ARCH )
     
    173165        };
    174166
    175         ((struct machine_context_t *)stack->context)->SP = (char *)stack->base - sizeof( struct FakeStack );
    176         ((struct machine_context_t *)stack->context)->FP = NULL;
     167        cor->context.SP = (char *)stack->base - sizeof( struct FakeStack );
     168        cor->context.FP = NULL;
    177169
    178         struct FakeStack *fs = (struct FakeStack *)((struct machine_context_t *)stack->context)->SP;
     170        struct FakeStack *fs = (struct FakeStack *)cor->context.SP;
    179171
    180172        fs->intRegs[8] = CtxInvokeStub;
  • libcfa/src/concurrency/invoke.h

    r14cebb7a r36354b1  
    6262        #endif
    6363
    64         struct coStack_t {
    65                 size_t size;                                                                    // size of stack
    66                 void * storage;                                                                 // pointer to stack
    67                 void * limit;                                                                   // stack grows towards stack limit
    68                 void * base;                                                                    // base of stack
    69                 void * context;                                                                 // address of cfa_context_t
    70                 void * top;                                                                             // address of top of storage
    71                 bool userStack;                                                                 // whether or not the user allocated the stack
     64        struct __stack_context_t {
     65                void * SP;
     66                void * FP;
     67        };
     68
     69        // low adresses  :           +----------------------+ <- start of allocation
     70        //                           |  optional guard page |
     71        //                           +----------------------+ <- __stack_t.limit
     72        //                           |                      |
     73        //                           |       /\ /\ /\       |
     74        //                           |       || || ||       |
     75        //                           |                      |
     76        //                           |    program  stack    |
     77        //                           |                      |
     78        // __stack_info_t.storage -> +----------------------+ <- __stack_t.base
     79        //                           |      __stack_t       |
     80        // high adresses :           +----------------------+ <- end of allocation
     81
     82        struct __stack_t {
     83                // stack grows towards stack limit
     84                void * limit;
     85
     86                // base of stack
     87                void * base;
     88        };
     89
     90        struct __stack_info_t {
     91                // pointer to stack
     92                struct __stack_t * storage;
    7293        };
    7394
     
    7596
    7697        struct coroutine_desc {
     98                // context that is switch during a CtxSwitch
     99                struct __stack_context_t context;
     100
    77101                // stack information of the coroutine
    78                 struct coStack_t stack;
    79 
    80                 // textual name for coroutine/task, initialized by uC++ generated code
     102                struct __stack_info_t stack;
     103
     104                // textual name for coroutine/task
    81105                const char * name;
    82 
    83                 // copy of global UNIX variable errno
    84                 int errno_;
    85106
    86107                // current execution status for coroutine
    87108                enum coroutine_state state;
     109
    88110                // first coroutine to resume this one
    89111                struct coroutine_desc * starter;
     
    139161        struct thread_desc {
    140162                // Core threading fields
     163                // context that is switch during a CtxSwitch
     164                struct __stack_context_t context;
     165
     166                // current execution status for coroutine
     167                enum coroutine_state state;
     168
     169                //SKULLDUGGERY errno is not save in the thread data structure because returnToKernel appears to be the only function to require saving and restoring it
     170
    141171                // coroutine body used to store context
    142172                struct coroutine_desc  self_cor;
     
    169199        #ifdef __cforall
    170200        extern "Cforall" {
    171                 static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_thread )->curr_cor; }
    172                 static inline struct thread_desc    * volatile active_thread   () { return TL_GET( this_thread    ); }
    173                 static inline struct processor      * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
     201                static inline struct coroutine_desc * active_coroutine() { return TL_GET( this_thread )->curr_cor; }
     202                static inline struct thread_desc    * active_thread   () { return TL_GET( this_thread    ); }
     203                static inline struct processor      * active_processor() { return TL_GET( this_processor ); } // UNSAFE
    174204
    175205                static inline thread_desc * & get_next( thread_desc & this ) {
     
    230260        // assembler routines that performs the context switch
    231261        extern void CtxInvokeStub( void );
    232         void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
     262        extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
    233263        // void CtxStore ( void * this ) asm ("CtxStore");
    234264        // void CtxRet   ( void * dst  ) asm ("CtxRet");
    235 
    236         #if   defined( __i386 )
    237         #define CtxGet( ctx ) __asm__ ( \
    238                         "movl %%esp,%0\n"   \
    239                         "movl %%ebp,%1\n"   \
    240                 : "=rm" (ctx.SP), "=rm" (ctx.FP) )
    241         #elif defined( __x86_64 )
    242         #define CtxGet( ctx ) __asm__ ( \
    243                         "movq %%rsp,%0\n"   \
    244                         "movq %%rbp,%1\n"   \
    245                 : "=rm" (ctx.SP), "=rm" (ctx.FP) )
    246         #elif defined( __ARM_ARCH )
    247         #define CtxGet( ctx ) __asm__ ( \
    248                         "mov %0,%%sp\n"   \
    249                         "mov %1,%%r11\n"   \
    250                 : "=rm" (ctx.SP), "=rm" (ctx.FP) )
    251         #else
    252                 #error unknown hardware architecture
    253         #endif
    254265
    255266#endif //_INVOKE_PRIVATE_H_
  • libcfa/src/concurrency/kernel.cfa

    r14cebb7a r36354b1  
    3636#include "invoke.h"
    3737
     38//-----------------------------------------------------------------------------
     39// Some assembly required
     40#if   defined( __i386 )
     41        #define CtxGet( ctx )        \
     42                __asm__ volatile (     \
     43                        "movl %%esp,%0\n"\
     44                        "movl %%ebp,%1\n"\
     45                        : "=rm" (ctx.SP),\
     46                                "=rm" (ctx.FP) \
     47                )
     48
     49        // mxcr : SSE Status and Control bits (control bits are preserved across function calls)
     50        // fcw  : X87 FPU control word (preserved across function calls)
     51        #define __x87_store         \
     52                uint32_t __mxcr;      \
     53                uint16_t __fcw;       \
     54                __asm__ volatile (    \
     55                        "stmxcsr %0\n"  \
     56                        "fnstcw  %1\n"  \
     57                        : "=m" (__mxcr),\
     58                                "=m" (__fcw)  \
     59                )
     60
     61        #define __x87_load         \
     62                __asm__ volatile (   \
     63                        "fldcw  %1\n"  \
     64                        "ldmxcsr %0\n" \
     65                        ::"m" (__mxcr),\
     66                                "m" (__fcw)  \
     67                )
     68
     69#elif defined( __x86_64 )
     70        #define CtxGet( ctx )        \
     71                __asm__ volatile (     \
     72                        "movq %%rsp,%0\n"\
     73                        "movq %%rbp,%1\n"\
     74                        : "=rm" (ctx.SP),\
     75                                "=rm" (ctx.FP) \
     76                )
     77
     78        #define __x87_store         \
     79                uint32_t __mxcr;      \
     80                uint16_t __fcw;       \
     81                __asm__ volatile (    \
     82                        "stmxcsr %0\n"  \
     83                        "fnstcw  %1\n"  \
     84                        : "=m" (__mxcr),\
     85                                "=m" (__fcw)  \
     86                )
     87
     88        #define __x87_load          \
     89                __asm__ volatile (    \
     90                        "fldcw  %1\n"   \
     91                        "ldmxcsr %0\n"  \
     92                        :: "m" (__mxcr),\
     93                                "m" (__fcw)  \
     94                )
     95
     96
     97#elif defined( __ARM_ARCH )
     98#define CtxGet( ctx ) __asm__ ( \
     99                "mov %0,%%sp\n"   \
     100                "mov %1,%%r11\n"   \
     101        : "=rm" (ctx.SP), "=rm" (ctx.FP) )
     102#else
     103        #error unknown hardware architecture
     104#endif
     105
     106//-----------------------------------------------------------------------------
    38107//Start and stop routine for the kernel, declared first to make sure they run first
    39108static void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
     
    42111//-----------------------------------------------------------------------------
    43112// Kernel storage
    44 KERNEL_STORAGE(cluster,           mainCluster);
    45 KERNEL_STORAGE(processor,         mainProcessor);
    46 KERNEL_STORAGE(thread_desc,       mainThread);
    47 KERNEL_STORAGE(machine_context_t, mainThreadCtx);
     113KERNEL_STORAGE(cluster,         mainCluster);
     114KERNEL_STORAGE(processor,       mainProcessor);
     115KERNEL_STORAGE(thread_desc,     mainThread);
     116KERNEL_STORAGE(__stack_t,       mainThreadCtx);
    48117
    49118cluster     * mainCluster;
     
    54123struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
    55124}
     125
     126size_t __page_size = 0;
    56127
    57128//-----------------------------------------------------------------------------
     
    66137// Struct to steal stack
    67138struct current_stack_info_t {
    68         machine_context_t ctx;
    69         unsigned int size;              // size of stack
     139        __stack_t * storage;            // pointer to stack object
    70140        void *base;                             // base of stack
    71         void *storage;                  // pointer to stack
    72141        void *limit;                    // stack grows towards stack limit
    73142        void *context;                  // address of cfa_context_t
    74         void *top;                              // address of top of storage
    75143};
    76144
    77145void ?{}( current_stack_info_t & this ) {
    78         CtxGet( this.ctx );
    79         this.base = this.ctx.FP;
    80         this.storage = this.ctx.SP;
     146        __stack_context_t ctx;
     147        CtxGet( ctx );
     148        this.base = ctx.FP;
    81149
    82150        rlimit r;
    83151        getrlimit( RLIMIT_STACK, &r);
    84         this.size = r.rlim_cur;
    85 
    86         this.limit = (void *)(((intptr_t)this.base) - this.size);
     152        size_t size = r.rlim_cur;
     153
     154        this.limit = (void *)(((intptr_t)this.base) - size);
    87155        this.context = &storage_mainThreadCtx;
    88         this.top = this.base;
    89156}
    90157
    91158//-----------------------------------------------------------------------------
    92159// Main thread construction
    93 void ?{}( coStack_t & this, current_stack_info_t * info) with( this ) {
    94         size      = info->size;
    95         storage   = info->storage;
    96         limit     = info->limit;
    97         base      = info->base;
    98         context   = info->context;
    99         top       = info->top;
    100         userStack = true;
    101 }
    102160
    103161void ?{}( coroutine_desc & this, current_stack_info_t * info) with( this ) {
    104         stack{ info };
     162        stack.storage = info->storage;
     163        with(*stack.storage) {
     164                limit     = info->limit;
     165                base      = info->base;
     166        }
     167        __attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage;
     168        *istorage |= 0x1;
    105169        name = "Main Thread";
    106         errno_ = 0;
    107170        state = Start;
    108171        starter = NULL;
     172        last = NULL;
     173        cancellation = NULL;
    109174}
    110175
    111176void ?{}( thread_desc & this, current_stack_info_t * info) with( this ) {
     177        state = Start;
    112178        self_cor{ info };
    113179        curr_cor = &self_cor;
     
    240306}
    241307
     308static int * __volatile_errno() __attribute__((noinline));
     309static int * __volatile_errno() { asm(""); return &errno; }
     310
    242311// KERNEL ONLY
    243312// runThread runs a thread by context switching
    244313// from the processor coroutine to the target thread
    245 static void runThread(processor * this, thread_desc * dst) {
    246         assert(dst->curr_cor);
     314static void runThread(processor * this, thread_desc * thrd_dst) {
    247315        coroutine_desc * proc_cor = get_coroutine(this->runner);
    248         coroutine_desc * thrd_cor = dst->curr_cor;
    249316
    250317        // Reset the terminating actions here
     
    252319
    253320        // Update global state
    254         kernelTLS.this_thread = dst;
    255 
    256         // Context Switch to the thread
    257         ThreadCtxSwitch(proc_cor, thrd_cor);
    258         // when ThreadCtxSwitch returns we are back in the processor coroutine
     321        kernelTLS.this_thread = thrd_dst;
     322
     323        // set state of processor coroutine to inactive and the thread to active
     324        proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
     325        thrd_dst->state = Active;
     326
     327        // set context switch to the thread that the processor is executing
     328        verify( thrd_dst->context.SP );
     329        CtxSwitch( &proc_cor->context, &thrd_dst->context );
     330        // when CtxSwitch returns we are back in the processor coroutine
     331
     332        // set state of processor coroutine to active and the thread to inactive
     333        thrd_dst->state = thrd_dst->state == Halted ? Halted : Inactive;
     334        proc_cor->state = Active;
    259335}
    260336
     
    262338static void returnToKernel() {
    263339        coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
    264         coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor;
    265         ThreadCtxSwitch(thrd_cor, proc_cor);
     340        thread_desc * thrd_src = kernelTLS.this_thread;
     341
     342        // set state of current coroutine to inactive
     343        thrd_src->state = thrd_src->state == Halted ? Halted : Inactive;
     344        proc_cor->state = Active;
     345        int local_errno = *__volatile_errno();
     346        #if defined( __i386 ) || defined( __x86_64 )
     347                __x87_store;
     348        #endif
     349
     350        // set new coroutine that the processor is executing
     351        // and context switch to it
     352        verify( proc_cor->context.SP );
     353        CtxSwitch( &thrd_src->context, &proc_cor->context );
     354
     355        // set state of new coroutine to active
     356        proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
     357        thrd_src->state = Active;
     358
     359        #if defined( __i386 ) || defined( __x86_64 )
     360                __x87_load;
     361        #endif
     362        *__volatile_errno() = local_errno;
    266363}
    267364
     
    312409        // to waste the perfectly valid stack create by pthread.
    313410        current_stack_info_t info;
    314         machine_context_t ctx;
    315         info.context = &ctx;
     411        __stack_t ctx;
     412        info.storage = &ctx;
    316413        (proc->runner){ proc, &info };
    317414
    318         __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.base);
     415        __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);
    319416
    320417        //Set global state
     
    347444
    348445// KERNEL_ONLY
    349 void kernel_first_resume(processor * this) {
    350         coroutine_desc * src = mainThread->curr_cor;
     446void kernel_first_resume( processor * this ) {
     447        thread_desc * src = mainThread;
    351448        coroutine_desc * dst = get_coroutine(this->runner);
    352449
    353450        verify( ! kernelTLS.preemption_state.enabled );
    354451
    355         create_stack(&dst->stack, dst->stack.size);
     452        __stack_prepare( &dst->stack, 65000 );
    356453        CtxStart(&this->runner, CtxInvokeCoroutine);
    357454
    358455        verify( ! kernelTLS.preemption_state.enabled );
    359456
    360         dst->last = src;
    361         dst->starter = dst->starter ? dst->starter : src;
     457        dst->last = &src->self_cor;
     458        dst->starter = dst->starter ? dst->starter : &src->self_cor;
    362459
    363460        // set state of current coroutine to inactive
    364461        src->state = src->state == Halted ? Halted : Inactive;
    365462
    366         // SKULLDUGGERY normally interrupts are enable before leaving a coroutine ctxswitch.
    367         // Therefore, when first creating a coroutine, interrupts are enable before calling the main.
    368         // This is consistent with thread creation. However, when creating the main processor coroutine,
    369         // we wan't interrupts to be disabled. Therefore, we double-disable interrupts here so they will
    370         // stay disabled.
    371         disable_interrupts();
    372 
    373463        // context switch to specified coroutine
    374         assert( src->stack.context );
    375         CtxSwitch( src->stack.context, dst->stack.context );
     464        verify( dst->context.SP );
     465        CtxSwitch( &src->context, &dst->context );
    376466        // when CtxSwitch returns we are back in the src coroutine
    377467
     
    380470
    381471        verify( ! kernelTLS.preemption_state.enabled );
     472}
     473
     474// KERNEL_ONLY
     475void kernel_last_resume( processor * this ) {
     476        coroutine_desc * src = &mainThread->self_cor;
     477        coroutine_desc * dst = get_coroutine(this->runner);
     478
     479        verify( ! kernelTLS.preemption_state.enabled );
     480        verify( dst->starter == src );
     481        verify( dst->context.SP );
     482
     483        // context switch to the processor
     484        CtxSwitch( &src->context, &dst->context );
    382485}
    383486
     
    388491void ScheduleThread( thread_desc * thrd ) {
    389492        verify( thrd );
    390         verify( thrd->self_cor.state != Halted );
     493        verify( thrd->state != Halted );
    391494
    392495        verify( ! kernelTLS.preemption_state.enabled );
     
    545648        __cfaabi_dbg_print_safe("Kernel : Starting\n");
    546649
     650        __page_size = sysconf( _SC_PAGESIZE );
     651
    547652        __cfa_dbg_global_clusters.list{ __get };
    548653        __cfa_dbg_global_clusters.lock{};
     
    559664        mainThread = (thread_desc *)&storage_mainThread;
    560665        current_stack_info_t info;
     666        info.storage = (__stack_t*)&storage_mainThreadCtx;
    561667        (*mainThread){ &info };
    562668
     
    627733        // which is currently here
    628734        __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
    629         returnToKernel();
     735        kernel_last_resume( kernelTLS.this_processor );
    630736        mainThread->self_cor.state = Halted;
    631737
  • libcfa/src/concurrency/thread.cfa

    r14cebb7a r36354b1  
    3131// Thread ctors and dtors
    3232void ?{}(thread_desc & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
     33        context{ NULL, NULL };
    3334        self_cor{ name, storage, storageSize };
    34         verify(&self_cor);
     35        state = Start;
    3536        curr_cor = &self_cor;
    3637        self_mon.owner = &this;
     
    7374forall( dtype T | is_thread(T) )
    7475void __thrd_start( T& this ) {
    75         coroutine_desc* thrd_c = get_coroutine(this);
    76         thread_desc   * thrd_h = get_thread   (this);
    77         thrd_c->last = TL_GET( this_thread )->curr_cor;
    78 
    79         // __cfaabi_dbg_print_safe("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
     76        thread_desc * this_thrd = get_thread(this);
     77        thread_desc * curr_thrd = TL_GET( this_thread );
    8078
    8179        disable_interrupts();
    82         create_stack(&thrd_c->stack, thrd_c->stack.size);
    8380        CtxStart(&this, CtxInvokeThread);
    84         assert( thrd_c->last->stack.context );
    85         CtxSwitch( thrd_c->last->stack.context, thrd_c->stack.context );
     81        this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
     82        verify( this_thrd->context.SP );
     83        CtxSwitch( &curr_thrd->context, &this_thrd->context );
    8684
    87         ScheduleThread(thrd_h);
     85        ScheduleThread(this_thrd);
    8886        enable_interrupts( __cfaabi_dbg_ctx );
    8987}
     
    9189extern "C" {
    9290        // KERNEL ONLY
    93         void __finish_creation(coroutine_desc * thrd_c) {
    94                 ThreadCtxSwitch( thrd_c, thrd_c->last );
     91        void __finish_creation(thread_desc * this) {
     92                // set new coroutine that the processor is executing
     93                // and context switch to it
     94                verify( kernelTLS.this_thread != this );
     95                verify( kernelTLS.this_thread->context.SP );
     96                CtxSwitch( &this->context, &kernelTLS.this_thread->context );
    9597        }
    9698}
     
    110112}
    111113
    112 // KERNEL ONLY
    113 void ThreadCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
    114         // set state of current coroutine to inactive
    115         src->state = src->state == Halted ? Halted : Inactive;
    116         dst->state = Active;
    117 
    118         // set new coroutine that the processor is executing
    119         // and context switch to it
    120         assert( src->stack.context );
    121         CtxSwitch( src->stack.context, dst->stack.context );
    122 
    123         // set state of new coroutine to active
    124         dst->state = dst->state == Halted ? Halted : Inactive;
    125         src->state = Active;
    126 }
    127 
    128114// Local Variables: //
    129115// mode: c //
  • libcfa/src/concurrency/thread.hfa

    r14cebb7a r36354b1  
    6161void ^?{}(thread_desc & this);
    6262
    63 static inline void ?{}(thread_desc & this)                                                                  { this{ "Anonymous Thread", *mainCluster, NULL, 0 }; }
     63static inline void ?{}(thread_desc & this)                                                                  { this{ "Anonymous Thread", *mainCluster, NULL, 65000 }; }
    6464static inline void ?{}(thread_desc & this, size_t stackSize )                                               { this{ "Anonymous Thread", *mainCluster, NULL, stackSize }; }
    6565static inline void ?{}(thread_desc & this, void * storage, size_t storageSize )                             { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
    66 static inline void ?{}(thread_desc & this, struct cluster & cl )                                            { this{ "Anonymous Thread", cl, NULL, 0 }; }
    67 static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize )                          { this{ "Anonymous Thread", cl, 0, stackSize }; }
     66static inline void ?{}(thread_desc & this, struct cluster & cl )                                            { this{ "Anonymous Thread", cl, NULL, 65000 }; }
     67static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize )                          { this{ "Anonymous Thread", cl, NULL, stackSize }; }
    6868static inline void ?{}(thread_desc & this, struct cluster & cl, void * storage, size_t storageSize )        { this{ "Anonymous Thread", cl, storage, storageSize }; }
    69 static inline void ?{}(thread_desc & this, const char * const name)                                         { this{ name, *mainCluster, NULL, 0 }; }
    70 static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl )                   { this{ name, cl, NULL, 0 }; }
     69static inline void ?{}(thread_desc & this, const char * const name)                                         { this{ name, *mainCluster, NULL, 65000 }; }
     70static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl )                   { this{ name, cl, NULL, 65000 }; }
    7171static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, NULL, stackSize }; }
    7272
  • libcfa/src/time.hfa

    r14cebb7a r36354b1  
    3030
    3131static inline {
    32         Duration ?=?( Duration & dur, zero_t ) { return dur{ 0 }; }
     32        Duration ?=?( Duration & dur, __attribute__((unused)) zero_t ) { return dur{ 0 }; }
    3333
    3434        Duration +?( Duration rhs ) with( rhs ) {       return (Duration)@{ +tv }; }
     
    5959        bool ?>=?( Duration lhs, Duration rhs ) { return lhs.tv >= rhs.tv; }
    6060
    61         bool ?==?( Duration lhs, zero_t ) { return lhs.tv == 0; }
    62         bool ?!=?( Duration lhs, zero_t ) { return lhs.tv != 0; }
    63         bool ?<? ( Duration lhs, zero_t ) { return lhs.tv <  0; }
    64         bool ?<=?( Duration lhs, zero_t ) { return lhs.tv <= 0; }
    65         bool ?>? ( Duration lhs, zero_t ) { return lhs.tv >  0; }
    66         bool ?>=?( Duration lhs, zero_t ) { return lhs.tv >= 0; }
     61        bool ?==?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv == 0; }
     62        bool ?!=?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv != 0; }
     63        bool ?<? ( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv <  0; }
     64        bool ?<=?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv <= 0; }
     65        bool ?>? ( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv >  0; }
     66        bool ?>=?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv >= 0; }
    6767
    6868        Duration abs( Duration rhs ) { return rhs.tv >= 0 ? rhs : -rhs; }
     
    101101        void ?{}( timeval & t, time_t sec, suseconds_t usec ) { t.tv_sec = sec; t.tv_usec = usec; }
    102102        void ?{}( timeval & t, time_t sec ) { t{ sec, 0 }; }
    103         void ?{}( timeval & t, zero_t ) { t{ 0, 0 }; }
    104 
    105         timeval ?=?( timeval & t, zero_t ) { return t{ 0 }; }
     103        void ?{}( timeval & t, __attribute__((unused)) zero_t ) { t{ 0, 0 }; }
     104
     105        timeval ?=?( timeval & t, __attribute__((unused)) zero_t ) { return t{ 0 }; }
    106106        timeval ?+?( timeval lhs, timeval rhs ) { return (timeval)@{ lhs.tv_sec + rhs.tv_sec, lhs.tv_usec + rhs.tv_usec }; }
    107107        timeval ?-?( timeval lhs, timeval rhs ) { return (timeval)@{ lhs.tv_sec - rhs.tv_sec, lhs.tv_usec - rhs.tv_usec }; }
     
    116116        void ?{}( timespec & t, time_t sec, __syscall_slong_t nsec ) { t.tv_sec = sec; t.tv_nsec = nsec; }
    117117        void ?{}( timespec & t, time_t sec ) { t{ sec, 0}; }
    118         void ?{}( timespec & t, zero_t ) { t{ 0, 0 }; }
    119 
    120         timespec ?=?( timespec & t, zero_t ) { return t{ 0 }; }
     118        void ?{}( timespec & t, __attribute__((unused)) zero_t ) { t{ 0, 0 }; }
     119
     120        timespec ?=?( timespec & t, __attribute__((unused)) zero_t ) { return t{ 0 }; }
    121121        timespec ?+?( timespec lhs, timespec rhs ) { return (timespec)@{ lhs.tv_sec + rhs.tv_sec, lhs.tv_nsec + rhs.tv_nsec }; }
    122122        timespec ?-?( timespec lhs, timespec rhs ) { return (timespec)@{ lhs.tv_sec - rhs.tv_sec, lhs.tv_nsec - rhs.tv_nsec }; }
     
    145145void ?{}( Time & time, int year, int month = 0, int day = 0, int hour = 0, int min = 0, int sec = 0, int nsec = 0 );
    146146static inline {
    147         Time ?=?( Time & time, zero_t ) { return time{ 0 }; }
     147        Time ?=?( Time & time, __attribute__((unused)) zero_t ) { return time{ 0 }; }
    148148
    149149        void ?{}( Time & time, timeval t ) with( time ) { tv = (int64_t)t.tv_sec * TIMEGRAN + t.tv_usec * 1000; }
  • libcfa/src/time_t.hfa

    r14cebb7a r36354b1  
    2424
    2525static inline void ?{}( Duration & dur ) with( dur ) { tv = 0; }
    26 static inline void ?{}( Duration & dur, zero_t ) with( dur ) { tv = 0; }
     26static inline void ?{}( Duration & dur, __attribute__((unused)) zero_t ) with( dur ) { tv = 0; }
    2727
    2828
     
    3434
    3535static inline void ?{}( Time & time ) with( time ) { tv = 0; }
    36 static inline void ?{}( Time & time, zero_t ) with( time ) { tv = 0; }
     36static inline void ?{}( Time & time, __attribute__((unused)) zero_t ) with( time ) { tv = 0; }
    3737
    3838// Local Variables: //
  • src/Common/Stats/Heap.cc

    r14cebb7a r36354b1  
    2121#include <iostream>
    2222
    23 #if defined( NO_STATISTICS )
     23#if defined( NO_STATISTICS ) || defined( TCMALLOC )
    2424        #define NO_HEAP_STATISTICS
    2525#endif
  • src/Makefile.am

    r14cebb7a r36354b1  
    2828MOSTLYCLEANFILES =
    2929
     30if WITH_LIBPROFILER
     31LIBPROFILER = -lprofiler
     32endif
     33
     34if WITH_LIBTCMALLOC
     35LIBTCMALLOC = -ltcmalloc
     36TCMALLOCFLAG = -DTCMALLOC
     37endif
     38
    3039include CodeGen/module.mk
    3140include CodeTools/module.mk
     
    5463cfa_cpplib_PROGRAMS = ../driver/cfa-cpp demangler
    5564___driver_cfa_cpp_SOURCES = $(SRC)
    56 ___driver_cfa_cpp_LDADD = -ldl                  # yywrap
     65___driver_cfa_cpp_LDADD = -ldl $(LIBPROFILER) $(LIBTCMALLOC)
    5766
    58 AM_CXXFLAGS = @HOST_FLAGS@ -Wno-deprecated -Wall -Wextra -DDEBUG_ALL -I./Parser -I$(srcdir)/Parser -I$(srcdir)/include -DYY_NO_INPUT -O2 -g -std=c++14
     67AM_CXXFLAGS = @HOST_FLAGS@ -Wno-deprecated -Wall -Wextra -DDEBUG_ALL -I./Parser -I$(srcdir)/Parser -I$(srcdir)/include -DYY_NO_INPUT -O3 -g -std=c++14 $(TCMALLOCFLAG)
    5968AM_LDFLAGS  = @HOST_FLAGS@ -Xlinker -export-dynamic
    6069ARFLAGS     = cr
  • src/Makefile.in

    r14cebb7a r36354b1  
    258258am____driver_cfa_cpp_OBJECTS = $(am__objects_8)
    259259___driver_cfa_cpp_OBJECTS = $(am____driver_cfa_cpp_OBJECTS)
    260 ___driver_cfa_cpp_DEPENDENCIES =
     260am__DEPENDENCIES_1 =
     261___driver_cfa_cpp_DEPENDENCIES = $(am__DEPENDENCIES_1) \
     262        $(am__DEPENDENCIES_1)
    261263AM_V_lt = $(am__v_lt_@AM_V@)
    262264am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
     
    554556MOSTLYCLEANFILES = Parser/lex.cc Parser/parser.cc Parser/parser.hh \
    555557        Parser/parser.output
     558@WITH_LIBPROFILER_TRUE@LIBPROFILER = -lprofiler
     559@WITH_LIBTCMALLOC_TRUE@LIBTCMALLOC = -ltcmalloc
     560@WITH_LIBTCMALLOC_TRUE@TCMALLOCFLAG = -DTCMALLOC
    556561SRC_CODEGEN = \
    557562        CodeGen/CodeGenerator.cc \
     
    650655cfa_cpplibdir = $(CFA_LIBDIR)
    651656___driver_cfa_cpp_SOURCES = $(SRC)
    652 ___driver_cfa_cpp_LDADD = -ldl                  # yywrap
    653 AM_CXXFLAGS = @HOST_FLAGS@ -Wno-deprecated -Wall -Wextra -DDEBUG_ALL -I./Parser -I$(srcdir)/Parser -I$(srcdir)/include -DYY_NO_INPUT -O2 -g -std=c++14
     657___driver_cfa_cpp_LDADD = -ldl $(LIBPROFILER) $(LIBTCMALLOC)
     658AM_CXXFLAGS = @HOST_FLAGS@ -Wno-deprecated -Wall -Wextra -DDEBUG_ALL -I./Parser -I$(srcdir)/Parser -I$(srcdir)/include -DYY_NO_INPUT -O3 -g -std=c++14 $(TCMALLOCFLAG)
    654659AM_LDFLAGS = @HOST_FLAGS@ -Xlinker -export-dynamic
    655660ARFLAGS = cr
  • tests/Makefile.am

    r14cebb7a r36354b1  
    2222debug=yes
    2323installed=no
     24
     25INSTALL_FLAGS=-in-tree
     26DEBUG_FLAGS=-debug -O0
    2427
    2528quick_test=avl_test operators numericConstants expression enum array typeof cast raii/dtor-early-exit raii/init_once attributes
  • tests/Makefile.in

    r14cebb7a r36354b1  
    375375debug = yes
    376376installed = no
     377INSTALL_FLAGS = -in-tree
     378DEBUG_FLAGS = -debug -O0
    377379quick_test = avl_test operators numericConstants expression enum array typeof cast raii/dtor-early-exit raii/init_once attributes
    378380concurrent =
Note: See TracChangeset for help on using the changeset viewer.