Changes in / [1bc5975:673cd63]
Files: 16 edited

- benchmark/ctxswitch/cfa_cor.cfa (modified) (1 diff)
- benchmark/ctxswitch/cfa_thrd2.cfa (modified) (1 diff)
- libcfa/src/bits/containers.hfa (modified) (3 diffs)
- libcfa/src/concurrency/CtxSwitch-i386.S (modified) (4 diffs)
- libcfa/src/concurrency/CtxSwitch-x86_64.S (modified) (2 diffs)
- libcfa/src/concurrency/coroutine.cfa (modified) (4 diffs)
- libcfa/src/concurrency/coroutine.hfa (modified) (2 diffs)
- libcfa/src/concurrency/invoke.c (modified) (8 diffs)
- libcfa/src/concurrency/invoke.h (modified) (5 diffs)
- libcfa/src/concurrency/kernel.cfa (modified) (14 diffs)
- libcfa/src/concurrency/thread.cfa (modified) (4 diffs)
- libcfa/src/concurrency/thread.hfa (modified) (1 diff)
- libcfa/src/time.hfa (modified) (5 diffs)
- libcfa/src/time_t.hfa (modified) (2 diffs)
- tests/Makefile.am (modified) (1 diff)
- tests/Makefile.in (modified) (1 diff)
Legend: lines beginning with - were removed in r673cd63, lines beginning with + were added, and unprefixed lines are unchanged context.
benchmark/ctxswitch/cfa_cor.cfa

--- benchmark/ctxswitch/cfa_cor.cfa	r1bc5975
+++ benchmark/ctxswitch/cfa_cor.cfa	r673cd63
@@ -11,5 +11,5 @@
 }
 
-void main( GreatSuspender & this ) {
+void main( __attribute__((unused)) GreatSuspender & this ) {
 	while( true ) {
 		suspend();
benchmark/ctxswitch/cfa_thrd2.cfa

--- benchmark/ctxswitch/cfa_thrd2.cfa	r1bc5975
+++ benchmark/ctxswitch/cfa_thrd2.cfa	r673cd63
@@ -8,5 +8,5 @@
 thread Fibre {};
 
-void main( Fibre & this) {
+void main(__attribute__((unused)) Fibre & this) {
 	while(!done) {
 		yield();
libcfa/src/bits/containers.hfa

--- libcfa/src/bits/containers.hfa	r1bc5975
+++ libcfa/src/bits/containers.hfa	r673cd63
@@ -186,5 +186,5 @@
 
 forall(dtype T | is_node(T))
-static inline bool ?!=?( __queue(T) & this, zero_t zero ) {
+static inline bool ?!=?( __queue(T) & this, __attribute__((unused)) zero_t zero ) {
 	return this.head != 0;
 }
@@ -196,5 +196,5 @@
 //-----------------------------------------------------------------------------
 #ifdef __cforall
-forall(dtype TYPE | sized(TYPE))
+forall(dtype TYPE)
 #define T TYPE
 #define __getter_t * [T * & next, T * & prev] ( T & )
@@ -268,5 +268,5 @@
 
 forall(dtype T | sized(T))
-static inline bool ?!=?( __dllist(T) & this, zero_t zero ) {
+static inline bool ?!=?( __dllist(T) & this, __attribute__((unused)) zero_t zero ) {
 	return this.head != 0;
 }
libcfa/src/concurrency/CtxSwitch-i386.S

--- libcfa/src/concurrency/CtxSwitch-i386.S	r1bc5975
+++ libcfa/src/concurrency/CtxSwitch-i386.S	r673cd63
@@ -41,7 +41,8 @@
 #define PC_OFFSET ( 2 * PTR_BYTE )
 
-.text
+	.text
 	.align 2
-.globl CtxSwitch
+	.globl CtxSwitch
+	.type CtxSwitch, @function
 CtxSwitch:
 
@@ -50,10 +51,4 @@
 
 	movl 4(%esp),%eax
-
-	// Save floating & SSE control words on the stack.
-
-	sub $8,%esp
-	stmxcsr 0(%esp)		// 4 bytes
-	fnstcw 4(%esp)		// 2 bytes
 
 	// Save volatile registers on the stack.
@@ -67,6 +62,4 @@
 	movl %esp,SP_OFFSET(%eax)
 	movl %ebp,FP_OFFSET(%eax)
-	// movl 4(%ebp),%ebx	// save previous eip for debugger
-	// movl %ebx,PC_OFFSET(%eax)
 
 	// Copy the "to" context argument from the stack to register eax
@@ -87,13 +80,8 @@
 	popl %ebx
 
-	// Load floating & SSE control words from the stack.
-
-	fldcw 4(%esp)
-	ldmxcsr 0(%esp)
-	add $8,%esp
-
 	// Return to thread.
 
 	ret
+	.size CtxSwitch, .-CtxSwitch
 
 // Local Variables: //
libcfa/src/concurrency/CtxSwitch-x86_64.S

--- libcfa/src/concurrency/CtxSwitch-x86_64.S	r1bc5975
+++ libcfa/src/concurrency/CtxSwitch-x86_64.S	r673cd63
@@ -39,16 +39,10 @@
 #define SP_OFFSET ( 0 * PTR_BYTE )
 #define FP_OFFSET ( 1 * PTR_BYTE )
-#define PC_OFFSET ( 2 * PTR_BYTE )
 
-.text
+	.text
 	.align 2
-.globl CtxSwitch
+	.globl CtxSwitch
+	.type CtxSwitch, @function
 CtxSwitch:
-
-	// Save floating & SSE control words on the stack.
-
-	subq $8,%rsp
-	stmxcsr 0(%rsp)		// 4 bytes
-	fnstcw 4(%rsp)		// 2 bytes
 
 	// Save volatile registers on the stack.
@@ -78,68 +72,8 @@
 	popq %r15
 
-	// Load floating & SSE control words from the stack.
-
-	fldcw 4(%rsp)
-	ldmxcsr 0(%rsp)
-	addq $8,%rsp
-
 	// Return to thread.
 
 	ret
-
-//.text
-//	.align 2
-//.globl CtxStore
-//CtxStore:
-//	// Save floating & SSE control words on the stack.
-//
-//	subq $8,%rsp
-//	stmxcsr 0(%rsp)		// 4 bytes
-//	fnstcw 4(%rsp)		// 2 bytes
-//
-//	// Save volatile registers on the stack.
-//
-//	pushq %r15
-//	pushq %r14
-//	pushq %r13
-//	pushq %r12
-//	pushq %rbx
-//
-//	// Save old context in the "from" area.
-//
-//	movq %rsp,SP_OFFSET(%rdi)
-//	movq %rbp,FP_OFFSET(%rdi)
-//
-//	// Return to thread
-//
-//	ret
-//
-//.text
-//	.align 2
-//.globl CtxRet
-//CtxRet:
-//	// Load new context from the "to" area.
-//
-//	movq SP_OFFSET(%rdi),%rsp
-//	movq FP_OFFSET(%rdi),%rbp
-//
-//	// Load volatile registers from the stack.
-//
-//	popq %rbx
-//	popq %r12
-//	popq %r13
-//	popq %r14
-//	popq %r15
-//
-//	// Load floating & SSE control words from the stack.
-//
-//	fldcw 4(%rsp)
-//	ldmxcsr 0(%rsp)
-//	addq $8,%rsp
-//
-//	// Return to thread.
-//
-//	ret
-
+	.size CtxSwitch, .-CtxSwitch
 
 	.text
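On both i386 and x86-64 the switch routine now saves exactly what the C calling convention does not already preserve: the callee-saved integer registers plus the stack and frame pointers. The sketch below is a C-level model of the new contract, not the shipped routine; the struct mirrors __stack_context_t from invoke.h further down, and the comment paraphrases the assembly above.

	#include <stddef.h>

	/* Mirror of __stack_context_t (invoke.h): everything CtxSwitch persists. */
	struct __stack_context_t {
		void * SP;	/* saved stack pointer */
		void * FP;	/* saved frame pointer */
	};

	/* Binding to the assembly symbol, as declared in invoke.h / coroutine.hfa. */
	extern void CtxSwitch( struct __stack_context_t * from,
	                       struct __stack_context_t * to ) asm ("CtxSwitch");

	/*
	 * On x86-64 the routine now performs only:
	 *   push rbx, r12, r13, r14, r15     ; callee-saved registers
	 *   from->SP = rsp; from->FP = rbp   ; park the current context
	 *   rsp = to->SP;   rbp = to->FP     ; adopt the target context
	 *   pop  r15, r14, r13, r12, rbx
	 *   ret                              ; resume at the target's saved return address
	 * mxcsr and the x87 control word are no longer touched here; kernel.cfa
	 * below saves them (__x87_store/__x87_load) only when crossing into the kernel.
	 */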
libcfa/src/concurrency/coroutine.cfa

--- libcfa/src/concurrency/coroutine.cfa	r1bc5975
+++ libcfa/src/concurrency/coroutine.cfa	r673cd63
@@ -35,9 +35,9 @@ (re-indentation only; text unchanged)
 
 extern "C" {
-	void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
-	static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
-	static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
-		abort();
-	}
+	void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
+	static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
+	static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
+		abort();
+	}
 }
 
@@ -47,56 +47,69 @@
 // minimum feasible stack size in bytes
 #define MinStackSize 1000
-static size_t pageSize = 0;		// architecture pagesize HACK, should go in proper runtime singleton
+extern size_t __page_size;		// architecture pagesize HACK, should go in proper runtime singleton
+
+void __stack_prepare( __stack_info_t * this, size_t create_size );
 
 //-----------------------------------------------------------------------------
 // Coroutine ctors and dtors
-void ?{}( coStack_t & this, void * storage, size_t storageSize ) with( this ) {
-	size = storageSize == 0 ? 65000 : storageSize;	// size of stack
-	this.storage = storage;		// pointer to stack
-	limit = NULL;			// stack grows towards stack limit
-	base = NULL;			// base of stack
-	context = NULL;			// address of cfa_context_t
-	top = NULL;			// address of top of storage
-	userStack = storage != NULL;
-}
-
-void ^?{}(coStack_t & this) {
-	if ( ! this.userStack && this.storage ) {
-		__cfaabi_dbg_debug_do(
-			if ( mprotect( this.storage, pageSize, PROT_READ | PROT_WRITE ) == -1 ) {
-				abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
-			}
-		);
-		free( this.storage );
-	}
+void ?{}( __stack_info_t & this, void * storage, size_t storageSize ) {
+	this.storage = (__stack_t *)storage;
+
+	// Did we get a piece of storage ?
+	if (this.storage || storageSize != 0) {
+		// We either got a piece of storage or the user asked for a specific size
+		// Immediately create the stack
+		// (This is slightly unintuitive that non-default sized coroutines create are eagerly created
+		//  but it avoids that all coroutines carry an unnecessary size)
+		verify( storageSize != 0 );
+		__stack_prepare( &this, storageSize );
+	}
+}
+
+void ^?{}(__stack_info_t & this) {
+	bool userStack = ((intptr_t)this.storage & 0x1) != 0;
+	if ( ! userStack && this.storage ) {
+		__attribute__((may_alias)) intptr_t * istorage = (intptr_t *)&this.storage;
+		*istorage &= (intptr_t)-1;
+
+		void * storage = this.storage->limit;
+		__cfaabi_dbg_debug_do(
+			storage = (char*)(storage) - __page_size;
+			if ( mprotect( storage, __page_size, PROT_READ | PROT_WRITE ) == -1 ) {
+				abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
+			}
+		);
+		__cfaabi_dbg_print_safe("Kernel : Deleting stack %p\n", storage);
+		free( storage );
+	}
 }
 
 void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize ) with( this ) {
-	(this.stack){storage, storageSize};
-	this.name = name;
-	errno_ = 0;
-	state = Start;
-	starter = NULL;
-	last = NULL;
-	cancellation = NULL;
+	(this.context){NULL, NULL};
+	(this.stack){storage, storageSize};
+	this.name = name;
+	state = Start;
+	starter = NULL;
+	last = NULL;
+	cancellation = NULL;
 }
 
 void ^?{}(coroutine_desc& this) {
-	if(this.state != Halted && this.state != Start) {
-		coroutine_desc * src = TL_GET( this_thread )->curr_cor;
-		coroutine_desc * dst = &this;
-
-		struct _Unwind_Exception storage;
-		storage.exception_class = -1;
-		storage.exception_cleanup = _CtxCoroutine_UnwindCleanup;
-		this.cancellation = &storage;
-		this.last = src;
-
-		// not resuming self ?
-		if ( src == dst ) {
-			abort( "Attempt by coroutine %.256s (%p) to terminate itself.\n", src->name, src );
-		}
-
-		CoroutineCtxSwitch( src, dst );
-	}
+	if(this.state != Halted && this.state != Start) {
+		coroutine_desc * src = TL_GET( this_thread )->curr_cor;
+		coroutine_desc * dst = &this;
+
+		struct _Unwind_Exception storage;
+		storage.exception_class = -1;
+		storage.exception_cleanup = _CtxCoroutine_UnwindCleanup;
+		this.cancellation = &storage;
+		this.last = src;
+
+		// not resuming self ?
+		if ( src == dst ) {
+			abort( "Attempt by coroutine %.256s (%p) to terminate itself.\n", src->name, src );
+		}
+
+		CoroutineCtxSwitch( src, dst );
+	}
 }
@@ -106,86 +119,62 @@
 forall(dtype T | is_coroutine(T))
 void prime(T& cor) {
-	coroutine_desc* this = get_coroutine(cor);
-	assert(this->state == Start);
-
-	this->state = Primed;
-	resume(cor);
-}
-
-// Wrapper for co
-void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
-	// Safety note : Preemption must be disabled since there is a race condition
-	// kernelTLS.this_thread->curr_cor and $rsp/$rbp must agree at all times
-	verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
-	disable_interrupts();
-
-	// set state of current coroutine to inactive
-	src->state = src->state == Halted ? Halted : Inactive;
-
-	// set new coroutine that task is executing
-	TL_GET( this_thread )->curr_cor = dst;
-
-	// context switch to specified coroutine
-	assert( src->stack.context );
-	CtxSwitch( src->stack.context, dst->stack.context );
-	// when CtxSwitch returns we are back in the src coroutine
-
-	// set state of new coroutine to active
-	src->state = Active;
-
-	enable_interrupts( __cfaabi_dbg_ctx );
-	verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
-
-
-	if( unlikely(src->cancellation != NULL) ) {
-		_CtxCoroutine_Unwind(src->cancellation, src);
-	}
-} //ctxSwitchDirect
-
-void create_stack( coStack_t* this, unsigned int storageSize ) with( *this ) {
-	//TEMP HACK do this on proper kernel startup
-	if(pageSize == 0ul) pageSize = sysconf( _SC_PAGESIZE );
-
-	size_t cxtSize = libCeiling( sizeof(machine_context_t), 8 );	// minimum alignment
-
-	if ( !storage ) {
-		__cfaabi_dbg_print_safe("Kernel : Creating stack of size %zu for stack obj %p\n", cxtSize + size + 8, this);
-
-		userStack = false;
-		size = libCeiling( storageSize, 16 );
-		// use malloc/memalign because "new" raises an exception for out-of-memory
-
-		// assume malloc has 8 byte alignment so add 8 to allow rounding up to 16 byte alignment
-		__cfaabi_dbg_debug_do( storage = memalign( pageSize, cxtSize + size + pageSize ) );
-		__cfaabi_dbg_no_debug_do( storage = malloc( cxtSize + size + 8 ) );
-
-		__cfaabi_dbg_debug_do(
-			if ( mprotect( storage, pageSize, PROT_NONE ) == -1 ) {
-				abort( "(uMachContext &)%p.createContext() : internal error, mprotect failure, error(%d) %s.", this, (int)errno, strerror( (int)errno ) );
-			} // if
-		);
-
-		if ( (intptr_t)storage == 0 ) {
-			abort( "Attempt to allocate %zd bytes of storage for coroutine or task execution-state but insufficient memory available.", size );
-		} // if
-
-		__cfaabi_dbg_debug_do( limit = (char *)storage + pageSize );
-		__cfaabi_dbg_no_debug_do( limit = (char *)libCeiling( (unsigned long)storage, 16 ) );	// minimum alignment
-
-	} else {
-		__cfaabi_dbg_print_safe("Kernel : stack obj %p using user stack %p(%u bytes)\n", this, storage, storageSize);
-
-		assertf( ((size_t)storage & (libAlign() - 1)) == 0ul, "Stack storage %p for task/coroutine must be aligned on %d byte boundary.", storage, (int)libAlign() );
-		userStack = true;
-		size = storageSize - cxtSize;
-
-		if ( size % 16 != 0u ) size -= 8;
-
-		limit = (char *)libCeiling( (unsigned long)storage, 16 );	// minimum alignment
-	} // if
-	assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize );
-
-	base = (char *)limit + size;
-	context = base;
-	top = (char *)context + cxtSize;
+	coroutine_desc* this = get_coroutine(cor);
+	assert(this->state == Start);
+
+	this->state = Primed;
+	resume(cor);
+}
+
+[void *, size_t] __stack_alloc( size_t storageSize ) {
+	static const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 );	// minimum alignment
+	assert(__page_size != 0l);
+	size_t size = libCeiling( storageSize, 16 ) + stack_data_size;
+
+	// If we are running debug, we also need to allocate a guardpage to catch stack overflows.
+	void * storage;
+	__cfaabi_dbg_debug_do(
+		storage = memalign( __page_size, size + __page_size );
+	);
+	__cfaabi_dbg_no_debug_do(
+		storage = (void*)malloc(size);
+	);
+
+	__cfaabi_dbg_print_safe("Kernel : Created stack %p of size %zu\n", storage, size);
+	__cfaabi_dbg_debug_do(
+		if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
+			abort( "__stack_alloc : internal error, mprotect failure, error(%d) %s.", (int)errno, strerror( (int)errno ) );
+		}
+		storage = (void *)(((intptr_t)storage) + __page_size);
+	);
+
+	verify( ((intptr_t)storage & (libAlign() - 1)) == 0ul );
+	return [storage, size];
+}
+
+void __stack_prepare( __stack_info_t * this, size_t create_size ) {
+	static const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 );	// minimum alignment
+	bool userStack;
+	void * storage;
+	size_t size;
+	if ( !this->storage ) {
+		userStack = false;
+		[storage, size] = __stack_alloc( create_size );
+	} else {
+		userStack = true;
+		__cfaabi_dbg_print_safe("Kernel : stack obj %p using user stack %p(%zd bytes)\n", this, this->storage, (intptr_t)this->storage->limit - (intptr_t)this->storage->base);
+
+		// The stack must be aligned, advance the pointer to the next align data
+		storage = (void*)libCeiling( (intptr_t)this->storage, libAlign());
+
+		// The size needs to be shrinked to fit all the extra data structure and be aligned
+		ptrdiff_t diff = (intptr_t)storage - (intptr_t)this->storage;
+		size = libFloor(create_size - stack_data_size - diff, libAlign());
+	} // if
+	assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize );
+
+	this->storage = (__stack_t *)((intptr_t)storage + size);
+	this->storage->limit = storage;
+	this->storage->base = (void*)((intptr_t)storage + size);
+	__attribute__((may_alias)) intptr_t * istorage = (intptr_t*)&this->storage;
+	*istorage |= userStack ? 0x1 : 0x0;
 }
@@ -194,23 +183,23 @@ (re-indentation only; text unchanged)
 // is not inline (We can't inline Cforall in C)
 extern "C" {
-	void __suspend_internal(void) {
-		suspend();
-	}
-
-	void __leave_coroutine( coroutine_desc * src ) {
-		coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter;
-
-		src->state = Halted;
-
-		assertf( starter != 0,
-			"Attempt to suspend/leave coroutine \"%.256s\" (%p) that has never been resumed.\n"
-			"Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",
-			src->name, src );
-		assertf( starter->state != Halted,
-			"Attempt by coroutine \"%.256s\" (%p) to suspend/leave back to terminated coroutine \"%.256s\" (%p).\n"
-			"Possible cause is terminated coroutine's main routine has already returned.",
-			src->name, src, starter->name, starter );
-
-		CoroutineCtxSwitch( src, starter );
-	}
+	void __suspend_internal(void) {
+		suspend();
+	}
+
+	void __leave_coroutine( coroutine_desc * src ) {
+		coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter;
+
+		src->state = Halted;
+
+		assertf( starter != 0,
+			"Attempt to suspend/leave coroutine \"%.256s\" (%p) that has never been resumed.\n"
+			"Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",
+			src->name, src );
+		assertf( starter->state != Halted,
+			"Attempt by coroutine \"%.256s\" (%p) to suspend/leave back to terminated coroutine \"%.256s\" (%p).\n"
+			"Possible cause is terminated coroutine's main routine has already returned.",
+			src->name, src, starter->name, starter );
+
+		CoroutineCtxSwitch( src, starter );
+	}
 }
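One detail of the rewritten destructor deserves a note: ^?{}(__stack_info_t &) recovers the "user supplied this stack" flag from bit 0 of the storage pointer, which the constructor and __stack_prepare set. The tag is safe because __stack_t lives at a 16-byte aligned address, so the low bits of a genuine pointer are always zero. The following self-contained model shows the scheme; the helper names are ours, not libcfa's.

	#include <stdint.h>
	#include <stdbool.h>

	struct __stack_t { void * limit; void * base; };
	struct __stack_info_t { struct __stack_t * storage; };

	/* Record that the stack memory is owned by the user (must not be freed). */
	static inline void tag_user_stack( struct __stack_info_t * si ) {
		si->storage = (struct __stack_t *)((intptr_t)si->storage | 0x1);
	}

	/* Test the ownership bit, as the destructor above does. */
	static inline bool is_user_stack( const struct __stack_info_t * si ) {
		return ((intptr_t)si->storage & 0x1) != 0;
	}

	/* Strip the tag before dereferencing. */
	static inline struct __stack_t * stack_data( const struct __stack_info_t * si ) {
		return (struct __stack_t *)((intptr_t)si->storage & ~(intptr_t)0x1);
	}

Packing the flag into the pointer replaces the old coStack_t.userStack bool, which is part of how the per-coroutine descriptor shrinks in this changeset.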
libcfa/src/concurrency/coroutine.hfa

--- libcfa/src/concurrency/coroutine.hfa	r1bc5975
+++ libcfa/src/concurrency/coroutine.hfa	r673cd63
@@ -64,9 +64,35 @@
 forall(dtype T | is_coroutine(T))
 void CtxStart(T * this, void ( *invoke)(T *));
+
+extern void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));
+
+extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
+// void CtxStore ( void * this ) asm ("CtxStore");
+// void CtxRet ( void * dst ) asm ("CtxRet");
 }
 
 // Private wrappers for context switch and stack creation
-extern void CoroutineCtxSwitch(coroutine_desc * src, coroutine_desc * dst);
-extern void create_stack( coStack_t * this, unsigned int storageSize );
+// Wrapper for co
+static inline void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
+	// set state of current coroutine to inactive
+	src->state = src->state == Halted ? Halted : Inactive;
+
+	// set new coroutine that task is executing
+	TL_GET( this_thread )->curr_cor = dst;
+
+	// context switch to specified coroutine
+	verify( dst->context.SP );
+	CtxSwitch( &src->context, &dst->context );
+	// when CtxSwitch returns we are back in the src coroutine
+
+	// set state of new coroutine to active
+	src->state = Active;
+
+	if( unlikely(src->cancellation != NULL) ) {
+		_CtxCoroutine_Unwind(src->cancellation, src);
+	}
+}
+
+extern void __stack_prepare ( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
 
 // Suspend implementation inlined for performance
@@ -102,6 +128,6 @@
 	coroutine_desc * dst = get_coroutine(cor);
 
-	if( unlikely( !dst->stack.base ) ) {
-		create_stack(&dst->stack, dst->stack.size);
+	if( unlikely(dst->context.SP == NULL) ) {
+		__stack_prepare(&dst->stack, 65000);
 		CtxStart(&cor, CtxInvokeCoroutine);
 	}
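The resume path above also changes its lazy-initialization test: instead of consulting a stored size field, it treats a NULL context.SP as "no stack yet" and creates a default 65000-byte stack on the first resume. A minimal C rendering of that pattern (the _sk names are ours):

	#include <stddef.h>

	struct context_sk { void * SP, * FP; };

	struct coroutine_sk {
		struct context_sk context;	/* SP == NULL until the first resume */
		/* ... stack info, name, state ... */
	};

	static void resume_sk( struct coroutine_sk * dst ) {
		if ( dst->context.SP == NULL ) {
			/* first resume only:
			 *   __stack_prepare( &dst->stack, 65000 );  -- allocate the default stack
			 *   CtxStart( ... );                        -- fabricate the startup frame
			 */
		}
		/* CoroutineCtxSwitch( src, dst ); */
	}

Using the saved stack pointer as the sentinel means default-sized coroutines carry no size field at all and pay for a stack only if they actually run.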
libcfa/src/concurrency/invoke.c

--- libcfa/src/concurrency/invoke.c	r1bc5975
+++ libcfa/src/concurrency/invoke.c	r673cd63
@@ -29,5 +29,5 @@
 extern void __suspend_internal(void);
 extern void __leave_coroutine( struct coroutine_desc * );
-extern void __finish_creation( struct coroutine_desc * );
+extern void __finish_creation( struct thread_desc * );
 extern void __leave_thread_monitor( struct thread_desc * this );
 extern void disable_interrupts();
@@ -46,6 +46,4 @@
 
 	cor->state = Active;
-
-	enable_interrupts( __cfaabi_dbg_ctx );
 
 	main( this );
@@ -93,8 +91,5 @@
 	// First suspend, once the thread arrives here,
 	// the function pointer to main can be invalidated without risk
-	__finish_creation(&thrd->self_cor);
-
-	// Restore the last to NULL, we clobbered because of the thunk problem
-	thrd->self_cor.last = NULL;
+	__finish_creation( thrd );
 
 	// Officially start the thread by enabling preemption
@@ -122,5 +117,6 @@
 	void (*invoke)(void *)
 ) {
-	struct coStack_t* stack = &get_coroutine( this )->stack;
+	struct coroutine_desc * cor = get_coroutine( this );
+	struct __stack_t * stack = cor->stack.storage;
 
 #if defined( __i386 )
@@ -128,6 +124,4 @@
 		struct FakeStack {
 			void *fixedRegisters[3];	// fixed registers ebx, edi, esi (popped on 1st uSwitch, values unimportant)
-			uint32_t mxcr;			// SSE Status and Control bits (control bits are preserved across function calls)
-			uint16_t fcw;			// X97 FPU control word (preserved across function calls)
 			void *rturn;			// where to go on return from uSwitch
 			void *dummyReturn;		// fake return compiler would have pushed on call to uInvoke
@@ -136,12 +130,12 @@
 		};
 
-		((struct machine_context_t *)stack->context)->SP = (char *)stack->base - sizeof( struct FakeStack );
-		((struct machine_context_t *)stack->context)->FP = NULL;	// terminate stack with NULL fp
+		cor->context.SP = (char *)stack->base - sizeof( struct FakeStack );
+		cor->context.FP = NULL;		// terminate stack with NULL fp
+
+		struct FakeStack *fs = (struct FakeStack *)cor->context.SP;
 
-		((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->dummyReturn = NULL;
-		((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->argument[0] = this;	// argument to invoke
-		((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = invoke;
-		((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80;	//Vol. 2A 3-520
-		((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;	//Vol. 1 8-7
+		fs->dummyReturn = NULL;
+		fs->argument[0] = this;		// argument to invoke
+		fs->rturn = invoke;
 
 #elif defined( __x86_64 )
@@ -149,19 +143,17 @@
 		struct FakeStack {
 			void *fixedRegisters[5];	// fixed registers rbx, r12, r13, r14, r15
-			uint32_t mxcr;			// SSE Status and Control bits (control bits are preserved across function calls)
-			uint16_t fcw;			// X97 FPU control word (preserved across function calls)
 			void *rturn;			// where to go on return from uSwitch
 			void *dummyReturn;		// NULL return address to provide proper alignment
 		};
 
-		((struct machine_context_t *)stack->context)->SP = (char *)stack->base - sizeof( struct FakeStack );
-		((struct machine_context_t *)stack->context)->FP = NULL;	// terminate stack with NULL fp
+		cor->context.SP = (char *)stack->base - sizeof( struct FakeStack );
+		cor->context.FP = NULL;		// terminate stack with NULL fp
 
-		((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->dummyReturn = NULL;
-		((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = CtxInvokeStub;
-		((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fixedRegisters[0] = this;
-		((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fixedRegisters[1] = invoke;
-		((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80;	//Vol. 2A 3-520
-		((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;	//Vol. 1 8-7
+		struct FakeStack *fs = (struct FakeStack *)cor->context.SP;
+
+		fs->dummyReturn = NULL;
+		fs->rturn = CtxInvokeStub;
+		fs->fixedRegisters[0] = this;
+		fs->fixedRegisters[1] = invoke;
 
 #elif defined( __ARM_ARCH )
@@ -173,8 +165,8 @@
 		};
 
-		((struct machine_context_t *)stack->context)->SP = (char *)stack->base - sizeof( struct FakeStack );
-		((struct machine_context_t *)stack->context)->FP = NULL;
+		cor->context.SP = (char *)stack->base - sizeof( struct FakeStack );
+		cor->context.FP = NULL;
 
-		struct FakeStack *fs = (struct FakeStack *)((struct machine_context_t *)stack->context)->SP;
+		struct FakeStack *fs = (struct FakeStack *)cor->context.SP;
 
 		fs->intRegs[8] = CtxInvokeStub;
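All three architectures share the same trick: CtxStart fabricates a frame at the top of the fresh stack so that the first CtxSwitch into the coroutine pops predictable register values and then "returns" into the invoke stub. With the control words gone from the frame, the x86-64 layout is just the five callee-saved slots plus the two return slots. Below is a stand-alone sketch of that priming step, simplified to the x86-64 shape; the function name is ours.

	#include <stddef.h>

	/* x86-64 shape of the fabricated first frame, matching the diff above. */
	struct FakeStack {
		void * fixedRegisters[5];	/* popped as rbx, r12, r13, r14, r15 */
		void * rturn;			/* consumed by "ret": the stub's entry point */
		void * dummyReturn;		/* NULL: keeps alignment and stops unwinders */
	};

	/* Prime SP/FP so the first CtxSwitch lands in 'entry' with arg0/arg1
	 * already sitting in callee-saved registers (this/invoke in the real code). */
	static void prime_context( void ** SP, void ** FP, void * stack_base,
	                           void * entry, void * arg0, void * arg1 ) {
		struct FakeStack * fs =
			(struct FakeStack *)((char *)stack_base - sizeof(struct FakeStack));
		fs->dummyReturn       = NULL;
		fs->rturn             = entry;	/* CtxInvokeStub in libcfa */
		fs->fixedRegisters[0] = arg0;	/* popped into rbx */
		fs->fixedRegisters[1] = arg1;	/* popped into r12 */
		*SP = fs;
		*FP = NULL;			/* NULL frame pointer terminates backtraces */
	}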
libcfa/src/concurrency/invoke.h

--- libcfa/src/concurrency/invoke.h	r1bc5975
+++ libcfa/src/concurrency/invoke.h	r673cd63
@@ -62,12 +62,33 @@
 #endif
 
-struct coStack_t {
-	size_t size;		// size of stack
-	void * storage;		// pointer to stack
-	void * limit;		// stack grows towards stack limit
-	void * base;		// base of stack
-	void * context;		// address of cfa_context_t
-	void * top;		// address of top of storage
-	bool userStack;		// whether or not the user allocated the stack
+struct __stack_context_t {
+	void * SP;
+	void * FP;
+};
+
+// low adresses  :           +----------------------+ <- start of allocation
+//                           |  optional guard page |
+//                           +----------------------+ <- __stack_t.limit
+//                           |                      |
+//                           |       /\ /\ /\       |
+//                           |       || || ||       |
+//                           |                      |
+//                           |    program  stack    |
+//                           |                      |
+// __stack_info_t.storage -> +----------------------+ <- __stack_t.base
+//                           |       __stack_t      |
+// high adresses :           +----------------------+ <- end of allocation
+
+struct __stack_t {
+	// stack grows towards stack limit
+	void * limit;
+
+	// base of stack
+	void * base;
+};
+
+struct __stack_info_t {
+	// pointer to stack
+	struct __stack_t * storage;
 };
 
@@ -76,14 +97,15 @@
 struct coroutine_desc {
+	// context that is switch during a CtxSwitch
+	struct __stack_context_t context;
+
 	// stack information of the coroutine
-	struct coStack_t stack;
-
-	// textual name for coroutine/task, initialized by uC++ generated code
+	struct __stack_info_t stack;
+
+	// textual name for coroutine/task
 	const char * name;
-
-	// copy of global UNIX variable errno
-	int errno_;
 
 	// current execution status for coroutine
 	enum coroutine_state state;
+
 	// first coroutine to resume this one
 	struct coroutine_desc * starter;
@@ -139,4 +161,12 @@
 struct thread_desc {
 	// Core threading fields
+	// context that is switch during a CtxSwitch
+	struct __stack_context_t context;
+
+	// current execution status for coroutine
+	enum coroutine_state state;
+
+	//SKULLDUGGERY errno is not save in the thread data structure because returnToKernel appears to be the only function to require saving and restoring it
+
 	// coroutine body used to store context
 	struct coroutine_desc self_cor;
@@ -169,7 +199,7 @@
 #ifdef __cforall
 extern "Cforall" {
-	static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_thread )->curr_cor; }
-	static inline struct thread_desc * volatile active_thread () { return TL_GET( this_thread ); }
-	static inline struct processor * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
+	static inline struct coroutine_desc * active_coroutine() { return TL_GET( this_thread )->curr_cor; }
+	static inline struct thread_desc * active_thread () { return TL_GET( this_thread ); }
+	static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE
 
 	static inline thread_desc * & get_next( thread_desc & this ) {
@@ -230,26 +260,7 @@
 // assembler routines that performs the context switch
 extern void CtxInvokeStub( void );
-void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
+extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
 // void CtxStore ( void * this ) asm ("CtxStore");
 // void CtxRet ( void * dst ) asm ("CtxRet");
-
-#if defined( __i386 )
-#define CtxGet( ctx ) __asm__ ( \
-		"movl %%esp,%0\n" \
-		"movl %%ebp,%1\n" \
-	: "=rm" (ctx.SP), "=rm" (ctx.FP) )
-#elif defined( __x86_64 )
-#define CtxGet( ctx ) __asm__ ( \
-		"movq %%rsp,%0\n" \
-		"movq %%rbp,%1\n" \
-	: "=rm" (ctx.SP), "=rm" (ctx.FP) )
-#elif defined( __ARM_ARCH )
-#define CtxGet( ctx ) __asm__ ( \
-		"mov %0,%%sp\n" \
-		"mov %1,%%r11\n" \
-	: "=rm" (ctx.SP), "=rm" (ctx.FP) )
-#else
-#error unknown hardware architecture
-#endif
 
 #endif //_INVOKE_PRIVATE_H_
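The ASCII diagram is the heart of the new design: a single allocation holds an optional guard page, the program stack, and finally the __stack_t record at the high end — the address __stack_info_t.storage points at. The following is a hedged C rendering of that carving step; the helper name is ours, and __stack_prepare in coroutine.cfa is the real implementation.

	#include <stddef.h>

	struct __stack_t { void * limit; void * base; };

	/* 'alloc' must provide usable + sizeof(struct __stack_t) bytes.  The record
	 * is placed at the high end so the stack, growing downward from base, runs
	 * toward limit (and the guard page, in debug builds) instead of into it. */
	static struct __stack_t * carve_stack( void * alloc, size_t usable ) {
		struct __stack_t * s = (struct __stack_t *)((char *)alloc + usable);
		s->limit = alloc;		/* lowest valid stack address */
		s->base  = (void *)s;		/* first frame starts just below the record */
		return s;			/* what __stack_info_t.storage points at */
	}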
libcfa/src/concurrency/kernel.cfa

--- libcfa/src/concurrency/kernel.cfa	r1bc5975
+++ libcfa/src/concurrency/kernel.cfa	r673cd63
@@ -36,4 +36,73 @@
 #include "invoke.h"
 
+//-----------------------------------------------------------------------------
+// Some assembly required
+#if defined( __i386 )
+	#define CtxGet( ctx ) \
+		__asm__ volatile ( \
+			"movl %%esp,%0\n"\
+			"movl %%ebp,%1\n"\
+			: "=rm" (ctx.SP),\
+			"=rm" (ctx.FP) \
+		)
+
+	// mxcr : SSE Status and Control bits (control bits are preserved across function calls)
+	// fcw : X87 FPU control word (preserved across function calls)
+	#define __x87_store \
+		uint32_t __mxcr; \
+		uint16_t __fcw; \
+		__asm__ volatile ( \
+			"stmxcsr %0\n" \
+			"fnstcw %1\n" \
+			: "=m" (__mxcr),\
+			"=m" (__fcw) \
+		)
+
+	#define __x87_load \
+		__asm__ volatile ( \
+			"fldcw %1\n" \
+			"ldmxcsr %0\n" \
+			::"m" (__mxcr),\
+			"m" (__fcw) \
+		)
+
+#elif defined( __x86_64 )
+	#define CtxGet( ctx ) \
+		__asm__ volatile ( \
+			"movq %%rsp,%0\n"\
+			"movq %%rbp,%1\n"\
+			: "=rm" (ctx.SP),\
+			"=rm" (ctx.FP) \
+		)
+
+	#define __x87_store \
+		uint32_t __mxcr; \
+		uint16_t __fcw; \
+		__asm__ volatile ( \
+			"stmxcsr %0\n" \
+			"fnstcw %1\n" \
+			: "=m" (__mxcr),\
+			"=m" (__fcw) \
+		)
+
+	#define __x87_load \
+		__asm__ volatile ( \
+			"fldcw %1\n" \
+			"ldmxcsr %0\n" \
+			:: "m" (__mxcr),\
+			"m" (__fcw) \
+		)
+
+
+#elif defined( __ARM_ARCH )
+#define CtxGet( ctx ) __asm__ ( \
+		"mov %0,%%sp\n" \
+		"mov %1,%%r11\n" \
+	: "=rm" (ctx.SP), "=rm" (ctx.FP) )
+#else
+#error unknown hardware architecture
+#endif
+
+//-----------------------------------------------------------------------------
 //Start and stop routine for the kernel, declared first to make sure they run first
 static void kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
@@ -42,8 +111,8 @@
 //-----------------------------------------------------------------------------
 // Kernel storage
-KERNEL_STORAGE(cluster, mainCluster);
-KERNEL_STORAGE(processor, mainProcessor);
-KERNEL_STORAGE(thread_desc, mainThread);
-KERNEL_STORAGE(machine_context_t, mainThreadCtx);
+KERNEL_STORAGE(cluster, mainCluster);
+KERNEL_STORAGE(processor, mainProcessor);
+KERNEL_STORAGE(thread_desc, mainThread);
+KERNEL_STORAGE(__stack_t, mainThreadCtx);
 
 cluster * mainCluster;
@@ -54,4 +123,6 @@
 	struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
 }
+
+size_t __page_size = 0;
 
 //-----------------------------------------------------------------------------
@@ -66,24 +137,20 @@
 // Struct to steal stack
 struct current_stack_info_t {
-	machine_context_t ctx;
-	unsigned int size;	// size of stack
+	__stack_t * storage;	// pointer to stack object
 	void *base;		// base of stack
-	void *storage;		// pointer to stack
 	void *limit;		// stack grows towards stack limit
 	void *context;		// address of cfa_context_t
-	void *top;		// address of top of storage
 };
 
 void ?{}( current_stack_info_t & this ) {
-	CtxGet( this.ctx );
-	this.base = this.ctx.FP;
-	this.storage = this.ctx.SP;
+	__stack_context_t ctx;
+	CtxGet( ctx );
+	this.base = ctx.FP;
 
 	rlimit r;
 	getrlimit( RLIMIT_STACK, &r);
-	this.size = r.rlim_cur;
-
-	this.limit = (void *)(((intptr_t)this.base) - this.size);
+	size_t size = r.rlim_cur;
+
+	this.limit = (void *)(((intptr_t)this.base) - size);
 	this.context = &storage_mainThreadCtx;
-	this.top = this.base;
 }
@@ -91,24 +158,22 @@
 //-----------------------------------------------------------------------------
 // Main thread construction
-void ?{}( coStack_t & this, current_stack_info_t * info) with( this ) {
-	size = info->size;
-	storage = info->storage;
-	limit = info->limit;
-	base = info->base;
-	context = info->context;
-	top = info->top;
-	userStack = true;
-}
-
 
 void ?{}( coroutine_desc & this, current_stack_info_t * info) with( this ) {
-	stack{ info };
+	stack.storage = info->storage;
+	with(*stack.storage) {
+		limit = info->limit;
+		base = info->base;
+	}
+	__attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage;
+	*istorage |= 0x1;
 	name = "Main Thread";
-	errno_ = 0;
 	state = Start;
 	starter = NULL;
+	last = NULL;
+	cancellation = NULL;
 }
 
 void ?{}( thread_desc & this, current_stack_info_t * info) with( this ) {
+	state = Start;
 	self_cor{ info };
 	curr_cor = &self_cor;
@@ -240,11 +306,12 @@
 }
 
+static int * __volatile_errno() __attribute__((noinline));
+static int * __volatile_errno() { asm(""); return &errno; }
+
 // KERNEL ONLY
 // runThread runs a thread by context switching
 // from the processor coroutine to the target thread
-static void runThread(processor * this, thread_desc * dst) {
-	assert(dst->curr_cor);
+static void runThread(processor * this, thread_desc * thrd_dst) {
 	coroutine_desc * proc_cor = get_coroutine(this->runner);
-	coroutine_desc * thrd_cor = dst->curr_cor;
 
 	// Reset the terminating actions here
@@ -252,8 +319,17 @@
 
 	// Update global state
-	kernelTLS.this_thread = dst;
-
-	// Context Switch to the thread
-	ThreadCtxSwitch(proc_cor, thrd_cor);
-	// when ThreadCtxSwitch returns we are back in the processor coroutine
+	kernelTLS.this_thread = thrd_dst;
+
+	// set state of processor coroutine to inactive and the thread to active
+	proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
+	thrd_dst->state = Active;
+
+	// set context switch to the thread that the processor is executing
+	verify( thrd_dst->context.SP );
+	CtxSwitch( &proc_cor->context, &thrd_dst->context );
+	// when CtxSwitch returns we are back in the processor coroutine
+
+	// set state of processor coroutine to active and the thread to inactive
+	thrd_dst->state = thrd_dst->state == Halted ? Halted : Inactive;
+	proc_cor->state = Active;
 }
@@ -262,5 +338,26 @@
 static void returnToKernel() {
 	coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
-	coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor;
-	ThreadCtxSwitch(thrd_cor, proc_cor);
+	thread_desc * thrd_src = kernelTLS.this_thread;
+
+	// set state of current coroutine to inactive
+	thrd_src->state = thrd_src->state == Halted ? Halted : Inactive;
+	proc_cor->state = Active;
+	int local_errno = *__volatile_errno();
+	#if defined( __i386 ) || defined( __x86_64 )
+		__x87_store;
+	#endif
+
+	// set new coroutine that the processor is executing
+	// and context switch to it
+	verify( proc_cor->context.SP );
+	CtxSwitch( &thrd_src->context, &proc_cor->context );
+
+	// set state of new coroutine to active
+	proc_cor->state = proc_cor->state == Halted ? Halted : Inactive;
+	thrd_src->state = Active;
+
+	#if defined( __i386 ) || defined( __x86_64 )
+		__x87_load;
+	#endif
+	*__volatile_errno() = local_errno;
 }
@@ -312,9 +409,9 @@
 	// to waste the perfectly valid stack create by pthread.
 	current_stack_info_t info;
-	machine_context_t ctx;
-	info.context = &ctx;
+	__stack_t ctx;
+	info.storage = &ctx;
 	(proc->runner){ proc, &info };
 
-	__cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.base);
+	__cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);
 
 	//Set global state
@@ -347,30 +444,23 @@
 
 // KERNEL_ONLY
-void kernel_first_resume(processor * this) {
-	coroutine_desc * src = mainThread->curr_cor;
+void kernel_first_resume( processor * this ) {
+	thread_desc * src = mainThread;
 	coroutine_desc * dst = get_coroutine(this->runner);
 
 	verify( ! kernelTLS.preemption_state.enabled );
 
-	create_stack(&dst->stack, dst->stack.size);
+	__stack_prepare( &dst->stack, 65000 );
 	CtxStart(&this->runner, CtxInvokeCoroutine);
 
 	verify( ! kernelTLS.preemption_state.enabled );
 
-	dst->last = src;
-	dst->starter = dst->starter ? dst->starter : src;
+	dst->last = &src->self_cor;
+	dst->starter = dst->starter ? dst->starter : &src->self_cor;
 
 	// set state of current coroutine to inactive
 	src->state = src->state == Halted ? Halted : Inactive;
 
-	// SKULLDUGGERY normally interrupts are enable before leaving a coroutine ctxswitch.
-	// Therefore, when first creating a coroutine, interrupts are enable before calling the main.
-	// This is consistent with thread creation. However, when creating the main processor coroutine,
-	// we wan't interrupts to be disabled. Therefore, we double-disable interrupts here so they will
-	// stay disabled.
-	disable_interrupts();
-
 	// context switch to specified coroutine
-	assert( src->stack.context );
-	CtxSwitch( src->stack.context, dst->stack.context );
+	verify( dst->context.SP );
+	CtxSwitch( &src->context, &dst->context );
 	// when CtxSwitch returns we are back in the src coroutine
@@ -380,3 +470,16 @@
 
 	verify( ! kernelTLS.preemption_state.enabled );
+}
+
+// KERNEL_ONLY
+void kernel_last_resume( processor * this ) {
+	coroutine_desc * src = &mainThread->self_cor;
+	coroutine_desc * dst = get_coroutine(this->runner);
+
+	verify( ! kernelTLS.preemption_state.enabled );
+	verify( dst->starter == src );
+	verify( dst->context.SP );
+
+	// context switch to the processor
+	CtxSwitch( &src->context, &dst->context );
 }
@@ -388,5 +491,5 @@
 void ScheduleThread( thread_desc * thrd ) {
 	verify( thrd );
-	verify( thrd->self_cor.state != Halted );
+	verify( thrd->state != Halted );
 
 	verify( ! kernelTLS.preemption_state.enabled );
@@ -545,4 +648,6 @@
 	__cfaabi_dbg_print_safe("Kernel : Starting\n");
 
+	__page_size = sysconf( _SC_PAGESIZE );
+
 	__cfa_dbg_global_clusters.list{ __get };
 	__cfa_dbg_global_clusters.lock{};
@@ -559,3 +664,4 @@
 	mainThread = (thread_desc *)&storage_mainThread;
 	current_stack_info_t info;
+	info.storage = (__stack_t*)&storage_mainThreadCtx;
 	(*mainThread){ &info };
@@ -627,4 +733,4 @@
 	// which is currently here
 	__atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
-	returnToKernel();
+	kernel_last_resume( kernelTLS.this_processor );
 	mainThread->self_cor.state = Halted;
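Two pieces of per-thread state now survive a kernel switch by hand in returnToKernel. The __volatile_errno helper is noinline with an empty asm body so the compiler cannot cache the thread-local address behind errno (its underlying __errno_location() call) across the CtxSwitch, and the __x87_store/__x87_load macros bracket the switch because the FPU control words were deliberately dropped from the assembly routine. A stand-alone C sketch of the same discipline (function names ours):

	#include <errno.h>

	/* Re-fetch &errno after every call; the barrier stops the compiler from
	 * reusing a pre-switch thread-local address on the post-switch thread. */
	__attribute__((noinline)) static int * volatile_errno( void ) {
		asm ( "" );
		return &errno;
	}

	static void kernel_boundary( void ) {
		int saved_errno = *volatile_errno();	/* errno belongs to the user thread */
		unsigned int   mxcr;			/* SSE control/status */
		unsigned short fcw;			/* x87 control word */
		__asm__ volatile ( "stmxcsr %0\n\t" "fnstcw %1" : "=m" (mxcr), "=m" (fcw) );

		/* CtxSwitch( &thread->context, &processor->context ); */

		__asm__ volatile ( "fldcw %1\n\t" "ldmxcsr %0" :: "m" (mxcr), "m" (fcw) );
		*volatile_errno() = saved_errno;	/* restore on the way back */
	}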
libcfa/src/concurrency/thread.cfa

--- libcfa/src/concurrency/thread.cfa	r1bc5975
+++ libcfa/src/concurrency/thread.cfa	r673cd63
@@ -31,6 +31,7 @@
 // Thread ctors and dtors
 void ?{}(thread_desc & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
+	context{ NULL, NULL };
 	self_cor{ name, storage, storageSize };
-	verify(&self_cor);
+	state = Start;
 	curr_cor = &self_cor;
 	self_mon.owner = &this;
@@ -73,17 +74,14 @@
 forall( dtype T | is_thread(T) )
 void __thrd_start( T& this ) {
-	coroutine_desc* thrd_c = get_coroutine(this);
-	thread_desc * thrd_h = get_thread (this);
-	thrd_c->last = TL_GET( this_thread )->curr_cor;
-
-	// __cfaabi_dbg_print_safe("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
+	thread_desc * this_thrd = get_thread(this);
+	thread_desc * curr_thrd = TL_GET( this_thread );
 
 	disable_interrupts();
-	create_stack(&thrd_c->stack, thrd_c->stack.size);
 	CtxStart(&this, CtxInvokeThread);
-	assert( thrd_c->last->stack.context );
-	CtxSwitch( thrd_c->last->stack.context, thrd_c->stack.context );
+	this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
+	verify( this_thrd->context.SP );
+	CtxSwitch( &curr_thrd->context, &this_thrd->context );
 
-	ScheduleThread(thrd_h);
+	ScheduleThread(this_thrd);
 	enable_interrupts( __cfaabi_dbg_ctx );
 }
@@ -91,6 +89,10 @@
 extern "C" {
 	// KERNEL ONLY
-	void __finish_creation(coroutine_desc * thrd_c) {
-		ThreadCtxSwitch( thrd_c, thrd_c->last );
+	void __finish_creation(thread_desc * this) {
+		// set new coroutine that the processor is executing
+		// and context switch to it
+		verify( kernelTLS.this_thread != this );
+		verify( kernelTLS.this_thread->context.SP );
+		CtxSwitch( &this->context, &kernelTLS.this_thread->context );
 	}
 }
@@ -110,19 +112,3 @@
 }
 
-// KERNEL ONLY
-void ThreadCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
-	// set state of current coroutine to inactive
-	src->state = src->state == Halted ? Halted : Inactive;
-	dst->state = Active;
-
-	// set new coroutine that the processor is executing
-	// and context switch to it
-	assert( src->stack.context );
-	CtxSwitch( src->stack.context, dst->stack.context );
-
-	// set state of new coroutine to active
-	dst->state = dst->state == Halted ? Halted : Inactive;
-	src->state = Active;
-}
-
 // Local Variables: //
libcfa/src/concurrency/thread.hfa

--- libcfa/src/concurrency/thread.hfa	r1bc5975
+++ libcfa/src/concurrency/thread.hfa	r673cd63
@@ -61,12 +61,12 @@
 void ^?{}(thread_desc & this);
 
-static inline void ?{}(thread_desc & this) { this{ "Anonymous Thread", *mainCluster, NULL, 0 }; }
+static inline void ?{}(thread_desc & this) { this{ "Anonymous Thread", *mainCluster, NULL, 65000 }; }
 static inline void ?{}(thread_desc & this, size_t stackSize ) { this{ "Anonymous Thread", *mainCluster, NULL, stackSize }; }
 static inline void ?{}(thread_desc & this, void * storage, size_t storageSize ) { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
-static inline void ?{}(thread_desc & this, struct cluster & cl ) { this{ "Anonymous Thread", cl, NULL, 0 }; }
-static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, 0, stackSize }; }
+static inline void ?{}(thread_desc & this, struct cluster & cl ) { this{ "Anonymous Thread", cl, NULL, 65000 }; }
+static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, NULL, stackSize }; }
 static inline void ?{}(thread_desc & this, struct cluster & cl, void * storage, size_t storageSize ) { this{ "Anonymous Thread", cl, storage, storageSize }; }
-static inline void ?{}(thread_desc & this, const char * const name) { this{ name, *mainCluster, NULL, 0 }; }
-static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl ) { this{ name, cl, NULL, 0 }; }
+static inline void ?{}(thread_desc & this, const char * const name) { this{ name, *mainCluster, NULL, 65000 }; }
+static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl ) { this{ name, cl, NULL, 65000 }; }
 static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, NULL, stackSize }; }
libcfa/src/time.hfa

--- libcfa/src/time.hfa	r1bc5975
+++ libcfa/src/time.hfa	r673cd63
@@ -30,5 +30,5 @@
 
 static inline {
-	Duration ?=?( Duration & dur, zero_t ) { return dur{ 0 }; }
+	Duration ?=?( Duration & dur, __attribute__((unused)) zero_t ) { return dur{ 0 }; }
 
 	Duration +?( Duration rhs ) with( rhs ) { return (Duration)@{ +tv }; }
@@ -59,10 +59,10 @@
 	bool ?>=?( Duration lhs, Duration rhs ) { return lhs.tv >= rhs.tv; }
 
-	bool ?==?( Duration lhs, zero_t ) { return lhs.tv == 0; }
-	bool ?!=?( Duration lhs, zero_t ) { return lhs.tv != 0; }
-	bool ?<? ( Duration lhs, zero_t ) { return lhs.tv < 0; }
-	bool ?<=?( Duration lhs, zero_t ) { return lhs.tv <= 0; }
-	bool ?>? ( Duration lhs, zero_t ) { return lhs.tv > 0; }
-	bool ?>=?( Duration lhs, zero_t ) { return lhs.tv >= 0; }
+	bool ?==?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv == 0; }
+	bool ?!=?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv != 0; }
+	bool ?<? ( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv < 0; }
+	bool ?<=?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv <= 0; }
+	bool ?>? ( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv > 0; }
+	bool ?>=?( Duration lhs, __attribute__((unused)) zero_t ) { return lhs.tv >= 0; }
 
 	Duration abs( Duration rhs ) { return rhs.tv >= 0 ? rhs : -rhs; }
@@ -101,7 +101,7 @@
 	void ?{}( timeval & t, time_t sec, suseconds_t usec ) { t.tv_sec = sec; t.tv_usec = usec; }
 	void ?{}( timeval & t, time_t sec ) { t{ sec, 0 }; }
-	void ?{}( timeval & t, zero_t ) { t{ 0, 0 }; }
-
-	timeval ?=?( timeval & t, zero_t ) { return t{ 0 }; }
+	void ?{}( timeval & t, __attribute__((unused)) zero_t ) { t{ 0, 0 }; }
+
+	timeval ?=?( timeval & t, __attribute__((unused)) zero_t ) { return t{ 0 }; }
 	timeval ?+?( timeval lhs, timeval rhs ) { return (timeval)@{ lhs.tv_sec + rhs.tv_sec, lhs.tv_usec + rhs.tv_usec }; }
 	timeval ?-?( timeval lhs, timeval rhs ) { return (timeval)@{ lhs.tv_sec - rhs.tv_sec, lhs.tv_usec - rhs.tv_usec }; }
@@ -116,7 +116,7 @@
 	void ?{}( timespec & t, time_t sec, __syscall_slong_t nsec ) { t.tv_sec = sec; t.tv_nsec = nsec; }
 	void ?{}( timespec & t, time_t sec ) { t{ sec, 0}; }
-	void ?{}( timespec & t, zero_t ) { t{ 0, 0 }; }
-
-	timespec ?=?( timespec & t, zero_t ) { return t{ 0 }; }
+	void ?{}( timespec & t, __attribute__((unused)) zero_t ) { t{ 0, 0 }; }
+
+	timespec ?=?( timespec & t, __attribute__((unused)) zero_t ) { return t{ 0 }; }
 	timespec ?+?( timespec lhs, timespec rhs ) { return (timespec)@{ lhs.tv_sec + rhs.tv_sec, lhs.tv_nsec + rhs.tv_nsec }; }
 	timespec ?-?( timespec lhs, timespec rhs ) { return (timespec)@{ lhs.tv_sec - rhs.tv_sec, lhs.tv_nsec - rhs.tv_nsec }; }
@@ -145,5 +145,5 @@
 void ?{}( Time & time, int year, int month = 0, int day = 0, int hour = 0, int min = 0, int sec = 0, int nsec = 0 );
 static inline {
-	Time ?=?( Time & time, zero_t ) { return time{ 0 }; }
+	Time ?=?( Time & time, __attribute__((unused)) zero_t ) { return time{ 0 }; }
 
 	void ?{}( Time & time, timeval t ) with( time ) { tv = (int64_t)t.tv_sec * TIMEGRAN + t.tv_usec * 1000; }
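In Cforall, zero_t is the special type of the literal 0, so these operators are what allow code like `Duration d = 0;` and `if ( d == 0 )` to resolve directly; the parameter exists only to steer overload resolution and its value is never read, hence the new __attribute__((unused)). The same warning-silencing pattern in plain C, for contrast (names ours):

	#include <stdbool.h>

	typedef struct { long long tv; } Duration_sk;	/* model of Duration */
	struct zero_sk { char unused_; };		/* stand-in for zero_t */

	/* The second parameter only selects this function; marking it unused
	 * keeps -Wall -Wextra builds clean, which is all this edit does. */
	static inline bool duration_is_zero( Duration_sk lhs,
	                                     __attribute__((unused)) struct zero_sk zero ) {
		return lhs.tv == 0;
	}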
libcfa/src/time_t.hfa

--- libcfa/src/time_t.hfa	r1bc5975
+++ libcfa/src/time_t.hfa	r673cd63
@@ -24,5 +24,5 @@
 
 static inline void ?{}( Duration & dur ) with( dur ) { tv = 0; }
-static inline void ?{}( Duration & dur, zero_t ) with( dur ) { tv = 0; }
+static inline void ?{}( Duration & dur, __attribute__((unused)) zero_t ) with( dur ) { tv = 0; }
 
 
@@ -34,5 +34,5 @@
 
 static inline void ?{}( Time & time ) with( time ) { tv = 0; }
-static inline void ?{}( Time & time, zero_t ) with( time ) { tv = 0; }
+static inline void ?{}( Time & time, __attribute__((unused)) zero_t ) with( time ) { tv = 0; }
 
 // Local Variables: //
tests/Makefile.am

--- tests/Makefile.am	r1bc5975
+++ tests/Makefile.am	r673cd63
@@ -22,4 +22,7 @@
 debug=yes
 installed=no
+
+INSTALL_FLAGS=-in-tree
+DEBUG_FLAGS=-debug -O0
 
 quick_test=avl_test operators numericConstants expression enum array typeof cast raii/dtor-early-exit raii/init_once attributes
tests/Makefile.in

--- tests/Makefile.in	r1bc5975
+++ tests/Makefile.in	r673cd63
@@ -375,4 +375,6 @@
 debug = yes
 installed = no
+INSTALL_FLAGS = -in-tree
+DEBUG_FLAGS = -debug -O0
 quick_test = avl_test operators numericConstants expression enum array typeof cast raii/dtor-early-exit raii/init_once attributes
 concurrent =