Changeset 9a1e509 for src/libcfa
- Timestamp: Jul 13, 2017, 3:57:23 PM
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children: bf30ab3
- Parents: 1d776fd (diff), 3d4b23fa (diff)
- Note: this is a merge changeset, the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: src/libcfa
- Files: 33 edited
  - Makefile.am (modified) (1 diff)
  - Makefile.in (modified) (3 diffs)
  - concurrency/CtxSwitch-i386.S (modified) (1 diff)
  - concurrency/CtxSwitch-x86_64.S (modified) (3 diffs)
  - concurrency/alarm.c (modified) (7 diffs)
  - concurrency/coroutine (modified) (4 diffs)
  - concurrency/coroutine.c (modified) (5 diffs)
  - concurrency/invoke.c (modified) (5 diffs)
  - concurrency/invoke.h (modified) (4 diffs)
  - concurrency/kernel (modified) (4 diffs)
  - concurrency/kernel.c (modified) (38 diffs)
  - concurrency/kernel_private.h (modified) (3 diffs)
  - concurrency/monitor (modified) (1 diff)
  - concurrency/monitor.c (modified) (23 diffs)
  - concurrency/preemption.c (modified) (7 diffs)
  - concurrency/thread (modified) (1 diff)
  - concurrency/thread.c (modified) (4 diffs)
  - exception.c (modified) (5 diffs)
  - exception.h (modified) (2 diffs)
  - fstream (modified) (3 diffs)
  - fstream.c (modified) (6 diffs)
  - gmp (modified) (1 diff)
  - iostream (modified) (4 diffs)
  - iostream.c (modified) (6 diffs)
  - iterator (modified) (2 diffs)
  - iterator.c (modified) (2 diffs)
  - libhdr/libalign.h (modified) (3 diffs)
  - libhdr/libdebug.h (modified) (2 diffs)
  - limits (modified) (2 diffs)
  - math (modified) (2 diffs)
  - rational (modified) (3 diffs)
  - rational.c (modified) (18 diffs)
  - stdlib (modified) (2 diffs)
-
src/libcfa/Makefile.am
r1d776fd r9a1e509
@@ -17,4 +17,5 @@
 # create object files in directory with source files
 AUTOMAKE_OPTIONS = subdir-objects
+ARFLAGS = cr

 libdir = ${CFA_LIBDIR}
-
src/libcfa/Makefile.in
r1d776fd r9a1e509
@@ -142,5 +142,4 @@
 LIBRARIES = $(lib_LIBRARIES)
 AR = ar
-ARFLAGS = cru
 AM_V_AR = $(am__v_AR_@AM_V@)
 am__v_AR_ = $(am__v_AR_@AM_DEFAULT_V@)
@@ -409,4 +408,5 @@
 # create object files in directory with source files
 AUTOMAKE_OPTIONS = subdir-objects
+ARFLAGS = cr
 lib_LIBRARIES = $(am__append_1) $(am__append_2)
 EXTRA_FLAGS = -g -Wall -Werror -Wno-unused-function -I${abs_top_srcdir}/src/libcfa/libhdr -imacros libcfa-prelude.c @CFA_FLAGS@
@@ -439,7 +439,7 @@
	  esac; \
	done; \
-	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu src/libcfa/Makefile'; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/libcfa/Makefile'; \
	$(am__cd) $(top_srcdir) && \
-	  $(AUTOMAKE) --gnu src/libcfa/Makefile
+	  $(AUTOMAKE) --foreign src/libcfa/Makefile
 Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
	@case '$?' in \
-
src/libcfa/concurrency/CtxSwitch-i386.S
r1d776fd r9a1e509
@@ -98,13 +98,4 @@
 	ret

-	.text
-	.align 2
-	.globl CtxGet
-CtxGet:
-	movl %esp,SP_OFFSET(%eax)
-	movl %ebp,FP_OFFSET(%eax)
-
-	ret
-
 // Local Variables: //
 // compile-command: "make install" //
-
src/libcfa/concurrency/CtxSwitch-x86_64.S
r1d776fd r9a1e509 (the first two hunks only strip trailing whitespace)
@@ -1,3 +1,3 @@
-// -*- Mode: Asm -*- 
+// -*- Mode: Asm -*-
 //
 // Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
@@ -18,13 +18,13 @@
 // Free Software Foundation; either version 2.1 of the License, or (at your
 // option) any later version.
-// 
+//
 // This library is distributed in the hope that it will be useful, but WITHOUT
 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
 // for more details.
-// 
+//
 // You should have received a copy of the GNU Lesser General Public License
 // along with this library.
-// 
+//

 // This context switch routine depends on the fact that the stack of a new
@@ -93,15 +93,6 @@
 	.globl CtxInvokeStub
 CtxInvokeStub:
-	movq %rbx, %rdi 
+	movq %rbx, %rdi
 	jmp *%r12
-
-	.text
-	.align 2
-	.globl CtxGet
-CtxGet:
-	movq %rsp,SP_OFFSET(%rdi)
-	movq %rbp,FP_OFFSET(%rdi)
-
-	ret

 // Local Variables: //
-
src/libcfa/concurrency/alarm.c
r1d776fd r9a1e509 16 16 17 17 extern "C" { 18 #include <errno.h> 19 #include <stdio.h> 20 #include <string.h> 18 21 #include <time.h> 22 #include <unistd.h> 19 23 #include <sys/time.h> 20 24 } 25 26 #include "libhdr.h" 21 27 22 28 #include "alarm.h" … … 31 37 timespec curr; 32 38 clock_gettime( CLOCK_REALTIME, &curr ); 33 return ((__cfa_time_t)curr.tv_sec * TIMEGRAN) + curr.tv_nsec; 39 __cfa_time_t curr_time = ((__cfa_time_t)curr.tv_sec * TIMEGRAN) + curr.tv_nsec; 40 // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : current time is %lu\n", curr_time ); 41 return curr_time; 34 42 } 35 43 36 44 void __kernel_set_timer( __cfa_time_t alarm ) { 45 LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : set timer to %lu\n", (__cfa_time_t)alarm ); 37 46 itimerval val; 38 47 val.it_value.tv_sec = alarm / TIMEGRAN; // seconds … … 71 80 } 72 81 82 LIB_DEBUG_DO( bool validate( alarm_list_t * this ) { 83 alarm_node_t ** it = &this->head; 84 while( (*it) ) { 85 it = &(*it)->next; 86 } 87 88 return it == this->tail; 89 }) 90 73 91 static inline void insert_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t p ) { 74 assert( !n->next );92 verify( !n->next ); 75 93 if( p == this->tail ) { 76 94 this->tail = &n->next; … … 80 98 } 81 99 *p = n; 100 101 verify( validate( this ) ); 82 102 } 83 103 … … 89 109 90 110 insert_at( this, n, it ); 111 112 verify( validate( this ) ); 91 113 } 92 114 … … 100 122 head->next = NULL; 101 123 } 124 verify( validate( this ) ); 102 125 return head; 103 126 } … … 105 128 static inline void remove_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t it ) { 106 129 verify( it ); 107 verify( (*it) ->next== n );130 verify( (*it) == n ); 108 131 109 (*it) ->next= n->next;132 (*it) = n->next; 110 133 if( !n-> next ) { 111 134 this->tail = it; 112 135 } 113 136 n->next = NULL; 137 138 verify( validate( this ) ); 114 139 } 115 140 116 141 static inline void remove( alarm_list_t * this, alarm_node_t * n ) { 117 142 alarm_node_t ** it = &this->head; 118 while( (*it) && (*it) ->next!= n ) {143 while( (*it) && (*it) != n ) { 119 144 it = &(*it)->next; 120 145 } 121 146 147 verify( validate( this ) ); 148 122 149 if( *it ) { remove_at( this, n, it ); } 150 151 verify( validate( this ) ); 123 152 } 124 153 125 154 void register_self( alarm_node_t * this ) { 126 155 disable_interrupts(); 127 assert( !systemProcessor->pending_alarm );128 lock( &systemProcessor->alarm_lock );156 verify( !systemProcessor->pending_alarm ); 157 lock( &systemProcessor->alarm_lock DEBUG_CTX2 ); 129 158 { 159 verify( validate( &systemProcessor->alarms ) ); 160 bool first = !systemProcessor->alarms.head; 161 130 162 insert( &systemProcessor->alarms, this ); 131 163 if( systemProcessor->pending_alarm ) { 132 164 tick_preemption(); 133 165 } 166 if( first ) { 167 __kernel_set_timer( systemProcessor->alarms.head->alarm - __kernel_get_time() ); 168 } 134 169 } 135 170 unlock( &systemProcessor->alarm_lock ); 136 171 this->set = true; 137 enable_interrupts( );172 enable_interrupts( DEBUG_CTX ); 138 173 } 139 174 140 175 void unregister_self( alarm_node_t * this ) { 176 // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : unregister %p start\n", this ); 141 177 disable_interrupts(); 142 lock( &systemProcessor->alarm_lock ); 143 remove( &systemProcessor->alarms, this ); 178 lock( &systemProcessor->alarm_lock DEBUG_CTX2 ); 179 { 180 verify( validate( &systemProcessor->alarms ) ); 181 remove( &systemProcessor->alarms, this ); 182 } 144 183 unlock( &systemProcessor->alarm_lock ); 145 disable_interrupts();184 
enable_interrupts( DEBUG_CTX ); 146 185 this->set = false; 186 // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Kernel : unregister %p end\n", this ); 147 187 } -
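The alarm.c hunks above (insert_at, remove_at, remove, and the new validate) all traverse the sorted alarm list through a pointer to the current link (alarm_node_t **), with tail pointing at the last next field. A minimal stand-alone sketch of that idiom in plain C; node and list below are simplified stand-ins, not the actual alarm_node_t / alarm_list_t types:

    /* Stand-alone sketch of the pointer-to-link traversal used above. */
    #include <stddef.h>

    struct node { struct node * next; unsigned long key; };
    struct list { struct node * head; struct node ** tail; };  /* tail points at the last 'next' field (or at head when empty) */

    /* sorted insert: 'it' always points at the link that reaches the current node */
    static void insert_sorted( struct list * this, struct node * n ) {
    	struct node ** it = &this->head;
    	while ( *it && (*it)->key <= n->key ) it = &(*it)->next;
    	n->next = *it;
    	if ( *it == NULL ) this->tail = &n->next;               /* appended at the end: advance tail */
    	*it = n;
    }

    /* unlink 'n'; preserves the invariant checked by the new validate(): tail reaches the final link */
    static void remove_node( struct list * this, struct node * n ) {
    	struct node ** it = &this->head;
    	while ( *it && *it != n ) it = &(*it)->next;            /* the changeset fixes this comparison */
    	if ( *it ) {
    		*it = n->next;                                      /* a single assignment unlinks the node */
    		if ( n->next == NULL ) this->tail = it;
    		n->next = NULL;
    	}
    }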
src/libcfa/concurrency/coroutine
r1d776fd r9a1e509 63 63 64 64 // Get current coroutine 65 coroutine_desc * this_coroutine(void);65 extern volatile thread_local coroutine_desc * this_coroutine; 66 66 67 67 // Private wrappers for context switch and stack creation … … 71 71 // Suspend implementation inlined for performance 72 72 static inline void suspend() { 73 coroutine_desc * src = this_coroutine (); // optimization73 coroutine_desc * src = this_coroutine; // optimization 74 74 75 75 assertf( src->last != 0, … … 88 88 forall(dtype T | is_coroutine(T)) 89 89 static inline void resume(T * cor) { 90 coroutine_desc * src = this_coroutine (); // optimization90 coroutine_desc * src = this_coroutine; // optimization 91 91 coroutine_desc * dst = get_coroutine(cor); 92 92 … … 112 112 113 113 static inline void resume(coroutine_desc * dst) { 114 coroutine_desc * src = this_coroutine (); // optimization114 coroutine_desc * src = this_coroutine; // optimization 115 115 116 116 // not resuming self ? -
src/libcfa/concurrency/coroutine.c
r1d776fd r9a1e509 32 32 #include "invoke.h" 33 33 34 extern thread_local processor * this_processor;34 extern volatile thread_local processor * this_processor; 35 35 36 36 //----------------------------------------------------------------------------- … … 44 44 // Coroutine ctors and dtors 45 45 void ?{}(coStack_t* this) { 46 this->size = 10240; // size of stack46 this->size = 65000; // size of stack 47 47 this->storage = NULL; // pointer to stack 48 48 this->limit = NULL; // stack grows towards stack limit … … 50 50 this->context = NULL; // address of cfa_context_t 51 51 this->top = NULL; // address of top of storage 52 this->userStack = false; 52 this->userStack = false; 53 53 } 54 54 … … 106 106 107 107 // set state of current coroutine to inactive 108 src->state = Inactive;108 src->state = src->state == Halted ? Halted : Inactive; 109 109 110 110 // set new coroutine that task is executing 111 this_ processor->current_coroutine = dst;111 this_coroutine = dst; 112 112 113 113 // context switch to specified coroutine 114 assert( src->stack.context ); 114 115 CtxSwitch( src->stack.context, dst->stack.context ); 115 // when CtxSwitch returns we are back in the src coroutine 116 // when CtxSwitch returns we are back in the src coroutine 116 117 117 118 // set state of new coroutine to active … … 131 132 this->size = libCeiling( storageSize, 16 ); 132 133 // use malloc/memalign because "new" raises an exception for out-of-memory 133 134 134 135 // assume malloc has 8 byte alignment so add 8 to allow rounding up to 16 byte alignment 135 136 LIB_DEBUG_DO( this->storage = memalign( pageSize, cxtSize + this->size + pageSize ) ); -
src/libcfa/concurrency/invoke.c
r1d776fd r9a1e509 29 29 30 30 extern void __suspend_internal(void); 31 extern void __leave_monitor_desc( struct monitor_desc * this ); 31 extern void __leave_thread_monitor( struct thread_desc * this ); 32 extern void disable_interrupts(); 33 extern void enable_interrupts( DEBUG_CTX_PARAM ); 32 34 33 35 void CtxInvokeCoroutine( 34 void (*main)(void *), 35 struct coroutine_desc *(*get_coroutine)(void *), 36 void (*main)(void *), 37 struct coroutine_desc *(*get_coroutine)(void *), 36 38 void *this 37 39 ) { … … 56 58 57 59 void CtxInvokeThread( 58 void (*dtor)(void *), 59 void (*main)(void *), 60 struct thread_desc *(*get_thread)(void *), 60 void (*dtor)(void *), 61 void (*main)(void *), 62 struct thread_desc *(*get_thread)(void *), 61 63 void *this 62 64 ) { 65 // First suspend, once the thread arrives here, 66 // the function pointer to main can be invalidated without risk 63 67 __suspend_internal(); 64 68 69 // Fetch the thread handle from the user defined thread structure 65 70 struct thread_desc* thrd = get_thread( this ); 66 struct coroutine_desc* cor = &thrd->cor;67 struct monitor_desc* mon = &thrd->mon;68 cor->state = Active;69 71 70 // LIB_DEBUG_PRINTF("Invoke Thread : invoking main %p (args %p)\n", main, this); 72 // Officially start the thread by enabling preemption 73 enable_interrupts( DEBUG_CTX ); 74 75 // Call the main of the thread 71 76 main( this ); 72 77 73 __leave_monitor_desc( mon ); 74 78 // To exit a thread we must : 79 // 1 - Mark it as halted 80 // 2 - Leave its monitor 81 // 3 - Disable the interupts 82 // 4 - Final suspend 83 // The order of these 4 operations is very important 75 84 //Final suspend, should never return 76 __ suspend_internal();85 __leave_thread_monitor( thrd ); 77 86 abortf("Resumed dead thread"); 78 87 } … … 80 89 81 90 void CtxStart( 82 void (*main)(void *), 83 struct coroutine_desc *(*get_coroutine)(void *), 84 void *this, 91 void (*main)(void *), 92 struct coroutine_desc *(*get_coroutine)(void *), 93 void *this, 85 94 void (*invoke)(void *) 86 95 ) { … … 108 117 ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = invoke; 109 118 ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520 110 ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F; //Vol. 1 8-7 119 ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F; //Vol. 1 8-7 111 120 112 121 #elif defined( __x86_64__ ) … … 128 137 ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fixedRegisters[1] = invoke; 129 138 ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520 130 ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F; //Vol. 1 8-7 139 ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F; //Vol. 1 8-7 131 140 #else 132 141 #error Only __i386__ and __x86_64__ is supported for threads in cfa -
src/libcfa/concurrency/invoke.h
r1d776fd r9a1e509
@@ -31,4 +31,8 @@
 struct spinlock {
 	volatile int lock;
+	#ifdef __CFA_DEBUG__
+	const char * prev_name;
+	void* prev_thrd;
+	#endif
 };

@@ -83,5 +87,4 @@
 	struct __thread_queue_t entry_queue;       // queue of threads that are blocked waiting for the monitor
 	struct __condition_stack_t signal_stack;   // stack of conditions to run next once we exit the monitor
-	struct monitor_desc * stack_owner;         // if bulk acquiring was used we need to synchronize signals with an other monitor
 	unsigned int recursion;                    // monitor routines can be called recursively, we need to keep track of that
 };
@@ -99,5 +102,5 @@ (whitespace-only change)
 #ifndef _INVOKE_PRIVATE_H_
 #define _INVOKE_PRIVATE_H_

 struct machine_context_t {
 	void *SP;
@@ -109,5 +112,16 @@
 	extern void CtxInvokeStub( void );
 	void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
-	void CtxGet( void * this ) asm ("CtxGet");
+
+	#if defined( __x86_64__ )
+	#define CtxGet( ctx ) __asm__ ( \
+		"movq %%rsp,%0\n" \
+		"movq %%rbp,%1\n" \
+		: "=rm" (ctx.SP), "=rm" (ctx.FP) )
+	#elif defined( __i386__ )
+	#define CtxGet( ctx ) __asm__ ( \
+		"movl %%esp,%0\n" \
+		"movl %%ebp,%1\n" \
+		: "=rm" (ctx.SP), "=rm" (ctx.FP) )
+	#endif

 	#endif //_INVOKE_PRIVATE_H_
-
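The CtxGet macro added above replaces the assembly routine removed from the CtxSwitch files, presumably so the caller captures its own stack and frame pointers rather than those of a called routine. A small usage sketch, mirroring how the kernel.c hunk below builds a current_stack_info_t; the example function and the trimmed struct are for illustration only:

    /* Sketch: capture the caller's SP/FP the way the new macro is used in kernel.c. */
    struct machine_context_t { void * SP; void * FP; };   /* trimmed to the two fields used here */

    #define CtxGet( ctx ) __asm__ (        \
    		"movq %%rsp,%0\n"              \
    		"movq %%rbp,%1\n"              \
    		: "=rm" (ctx.SP), "=rm" (ctx.FP) )             /* x86_64 case from the hunk above */

    void example_capture( void ) {
    	struct machine_context_t ctx;
    	CtxGet( ctx );        /* ctx.SP / ctx.FP now describe this stack frame */
    	/* ... kernel.c uses ctx.SP as the storage pointer when adopting the pthread stack ... */
    }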
src/libcfa/concurrency/kernel
r1d776fd r9a1e509 28 28 //----------------------------------------------------------------------------- 29 29 // Locks 30 bool try_lock( spinlock * ); 31 void lock( spinlock * ); 32 void unlock( spinlock * ); 30 bool try_lock ( spinlock * DEBUG_CTX_PARAM2 ); 31 void lock ( spinlock * DEBUG_CTX_PARAM2 ); 32 void lock_yield( spinlock * DEBUG_CTX_PARAM2 ); 33 void unlock ( spinlock * ); 33 34 34 struct s ignal_once {35 volatile bool cond;36 struct spinlock lock;37 struct __thread_queue_t blocked;35 struct semaphore { 36 spinlock lock; 37 int count; 38 __thread_queue_t waiting; 38 39 }; 39 40 40 void ?{}(signal_once * this); 41 void ^?{}(signal_once * this); 41 void ?{}(semaphore * this, int count = 1); 42 void ^?{}(semaphore * this); 43 void P(semaphore * this); 44 void V(semaphore * this); 42 45 43 void wait( signal_once * );44 void signal( signal_once * );45 46 46 47 //----------------------------------------------------------------------------- … … 68 69 unsigned short thrd_count; 69 70 }; 70 static inline void ?{}(FinishAction * this) { 71 static inline void ?{}(FinishAction * this) { 71 72 this->action_code = No_Action; 72 73 this->thrd = NULL; … … 78 79 struct processorCtx_t * runner; 79 80 cluster * cltr; 80 coroutine_desc * current_coroutine;81 thread_desc * current_thread;82 81 pthread_t kernel_thread; 83 84 s ignal_once terminated;82 83 semaphore terminated; 85 84 volatile bool is_terminated; 86 85 … … 90 89 unsigned int preemption; 91 90 92 unsigned short disable_preempt_count;91 bool pending_preemption; 93 92 94 bool pending_preemption;93 char * last_enable; 95 94 }; 96 95 -
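In the kernel header above, the one-shot signal_once is replaced by a counting semaphore, and the processor's shutdown handshake becomes P/V plus a pthread_join (see the kernel.c hunks below). A stand-alone sketch of that handshake, with POSIX sem_t standing in for the runtime's semaphore; worker, terminate, and teardown are invented names for the example:

    /* Shutdown handshake sketch: the worker posts a semaphore initialised to 0
     * when its loop exits; the tearing-down thread waits on it, then joins. */
    #include <pthread.h>
    #include <semaphore.h>
    #include <stdbool.h>
    #include <stddef.h>

    static sem_t terminated;                 /* initialised to 0: first wait blocks       */
    static volatile bool terminate = false;  /* plays the role of processor->is_terminated */

    static void * worker( void * arg ) {
    	while ( ! terminate ) {
    		/* ... run ready threads, as the processor main loop does ... */
    	}
    	sem_post( &terminated );             /* the V( &this->terminated ) in the main loop */
    	return NULL;
    }

    int main( void ) {
    	pthread_t kernel_thread;
    	sem_init( &terminated, 0, 0 );
    	pthread_create( &kernel_thread, NULL, worker, NULL );

    	/* ... later, the ^?{}(processor *) path ... */
    	terminate = true;                     /* this->is_terminated = true */
    	sem_wait( &terminated );              /* P( &this->terminated )     */
    	pthread_join( kernel_thread, NULL );  /* join added by this changeset */
    	return 0;
    }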
src/libcfa/concurrency/kernel.c
r1d776fd r9a1e509 15 15 // 16 16 17 #include "startup.h" 18 19 //Start and stop routine for the kernel, declared first to make sure they run first 20 void kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) )); 21 void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) )); 22 23 //Header 24 #include "kernel_private.h" 17 #include "libhdr.h" 25 18 26 19 //C Includes … … 35 28 36 29 //CFA Includes 37 #include " libhdr.h"30 #include "kernel_private.h" 38 31 #include "preemption.h" 32 #include "startup.h" 39 33 40 34 //Private includes … … 42 36 #include "invoke.h" 43 37 38 //Start and stop routine for the kernel, declared first to make sure they run first 39 void kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) )); 40 void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) )); 41 44 42 //----------------------------------------------------------------------------- 45 43 // Kernel storage 46 #define KERNEL_STORAGE(T,X) static char X## _storage[sizeof(T)]44 #define KERNEL_STORAGE(T,X) static char X##Storage[sizeof(T)] 47 45 48 46 KERNEL_STORAGE(processorCtx_t, systemProcessorCtx); … … 50 48 KERNEL_STORAGE(system_proc_t, systemProcessor); 51 49 KERNEL_STORAGE(thread_desc, mainThread); 52 KERNEL_STORAGE(machine_context_t, mainThread _context);50 KERNEL_STORAGE(machine_context_t, mainThreadCtx); 53 51 54 52 cluster * systemCluster; … … 59 57 // Global state 60 58 61 thread_local processor * this_processor; 62 63 coroutine_desc * this_coroutine(void) { 64 return this_processor->current_coroutine; 65 } 66 67 thread_desc * this_thread(void) { 68 return this_processor->current_thread; 69 } 59 volatile thread_local processor * this_processor; 60 volatile thread_local coroutine_desc * this_coroutine; 61 volatile thread_local thread_desc * this_thread; 62 volatile thread_local bool preemption_in_progress = 0; 63 volatile thread_local unsigned short disable_preempt_count = 1; 70 64 71 65 //----------------------------------------------------------------------------- 72 66 // Main thread construction 73 67 struct current_stack_info_t { 74 machine_context_t ctx; 68 machine_context_t ctx; 75 69 unsigned int size; // size of stack 76 70 void *base; // base of stack … … 82 76 83 77 void ?{}( current_stack_info_t * this ) { 84 CtxGet( &this->ctx );78 CtxGet( this->ctx ); 85 79 this->base = this->ctx.FP; 86 80 this->storage = this->ctx.SP; … … 91 85 92 86 this->limit = (void *)(((intptr_t)this->base) - this->size); 93 this->context = &mainThread _context_storage;87 this->context = &mainThreadCtxStorage; 94 88 this->top = this->base; 95 89 } … … 106 100 107 101 void ?{}( coroutine_desc * this, current_stack_info_t * info) { 108 (&this->stack){ info }; 102 (&this->stack){ info }; 109 103 this->name = "Main Thread"; 110 104 this->errno_ = 0; … … 136 130 void ?{}(processor * this, cluster * cltr) { 137 131 this->cltr = cltr; 138 this->current_coroutine = NULL; 139 this->current_thread = NULL; 140 (&this->terminated){}; 132 (&this->terminated){ 0 }; 141 133 this->is_terminated = false; 142 134 this->preemption_alarm = NULL; 143 135 this->preemption = default_preemption(); 144 this->disable_preempt_count = 1; //Start with interrupts disabled145 136 this->pending_preemption = false; 146 137 … … 150 141 void ?{}(processor * this, cluster * cltr, processorCtx_t * runner) { 151 142 this->cltr = cltr; 152 this->current_coroutine = NULL; 153 this->current_thread = NULL; 154 (&this->terminated){}; 143 (&this->terminated){ 0 
}; 155 144 this->is_terminated = false; 156 this->disable_preempt_count = 0; 145 this->preemption_alarm = NULL; 146 this->preemption = default_preemption(); 157 147 this->pending_preemption = false; 148 this->kernel_thread = pthread_self(); 158 149 159 150 this->runner = runner; 160 LIB_DEBUG_PRINT_SAFE("Kernel : constructing processor context %p\n", runner);151 LIB_DEBUG_PRINT_SAFE("Kernel : constructing system processor context %p\n", runner); 161 152 runner{ this }; 162 153 } 154 155 LIB_DEBUG_DO( bool validate( alarm_list_t * this ); ) 163 156 164 157 void ?{}(system_proc_t * this, cluster * cltr, processorCtx_t * runner) { … … 168 161 169 162 (&this->proc){ cltr, runner }; 163 164 verify( validate( &this->alarms ) ); 170 165 } 171 166 … … 174 169 LIB_DEBUG_PRINT_SAFE("Kernel : core %p signaling termination\n", this); 175 170 this->is_terminated = true; 176 wait( &this->terminated ); 171 P( &this->terminated ); 172 pthread_join( this->kernel_thread, NULL ); 177 173 } 178 174 } … … 184 180 185 181 void ^?{}(cluster * this) { 186 182 187 183 } 188 184 … … 203 199 204 200 thread_desc * readyThread = NULL; 205 for( unsigned int spin_count = 0; ! this->is_terminated; spin_count++ ) 201 for( unsigned int spin_count = 0; ! this->is_terminated; spin_count++ ) 206 202 { 207 203 readyThread = nextThread( this->cltr ); … … 209 205 if(readyThread) 210 206 { 207 verify( disable_preempt_count > 0 ); 208 211 209 runThread(this, readyThread); 210 211 verify( disable_preempt_count > 0 ); 212 212 213 213 //Some actions need to be taken from the kernel … … 225 225 } 226 226 227 signal( &this->terminated ); 227 V( &this->terminated ); 228 228 229 LIB_DEBUG_PRINT_SAFE("Kernel : core %p terminated\n", this); 229 230 } 230 231 231 // runThread runs a thread by context switching 232 // from the processor coroutine to the target thread 232 // runThread runs a thread by context switching 233 // from the processor coroutine to the target thread 233 234 void runThread(processor * this, thread_desc * dst) { 234 235 coroutine_desc * proc_cor = get_coroutine(this->runner); 235 236 coroutine_desc * thrd_cor = get_coroutine(dst); 236 237 237 238 //Reset the terminating actions here 238 239 this->finish.action_code = No_Action; 239 240 240 241 //Update global state 241 this ->current_thread = dst;242 this_thread = dst; 242 243 243 244 // Context Switch to the thread … … 246 247 } 247 248 248 // Once a thread has finished running, some of 249 // Once a thread has finished running, some of 249 250 // its final actions must be executed from the kernel 250 251 void finishRunning(processor * this) { … … 256 257 } 257 258 else if( this->finish.action_code == Release_Schedule ) { 258 unlock( this->finish.lock ); 259 unlock( this->finish.lock ); 259 260 ScheduleThread( this->finish.thrd ); 260 261 } … … 289 290 processor * proc = (processor *) arg; 290 291 this_processor = proc; 292 this_coroutine = NULL; 293 this_thread = NULL; 294 disable_preempt_count = 1; 291 295 // SKULLDUGGERY: We want to create a context for the processor coroutine 292 296 // which is needed for the 2-step context switch. However, there is no reason 293 // to waste the perfectly valid stack create by pthread. 297 // to waste the perfectly valid stack create by pthread. 
294 298 current_stack_info_t info; 295 299 machine_context_t ctx; … … 300 304 301 305 //Set global state 302 proc->current_coroutine = &proc->runner->__cor;303 proc->current_thread = NULL;306 this_coroutine = &proc->runner->__cor; 307 this_thread = NULL; 304 308 305 309 //We now have a proper context from which to schedule threads 306 310 LIB_DEBUG_PRINT_SAFE("Kernel : core %p created (%p, %p)\n", proc, proc->runner, &ctx); 307 311 308 // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't 309 // resume it to start it like it normally would, it will just context switch 310 // back to here. Instead directly call the main since we already are on the 312 // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't 313 // resume it to start it like it normally would, it will just context switch 314 // back to here. Instead directly call the main since we already are on the 311 315 // appropriate stack. 312 316 proc_cor_storage.__cor.state = Active; … … 315 319 316 320 // Main routine of the core returned, the core is now fully terminated 317 LIB_DEBUG_PRINT_SAFE("Kernel : core %p main ended (%p)\n", proc, proc->runner); 321 LIB_DEBUG_PRINT_SAFE("Kernel : core %p main ended (%p)\n", proc, proc->runner); 318 322 319 323 return NULL; … … 322 326 void start(processor * this) { 323 327 LIB_DEBUG_PRINT_SAFE("Kernel : Starting core %p\n", this); 324 328 325 329 pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this ); 326 330 327 LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this); 331 LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this); 328 332 } 329 333 … … 331 335 // Scheduler routines 332 336 void ScheduleThread( thread_desc * thrd ) { 333 if( !thrd ) return; 337 // if( !thrd ) return; 338 assert( thrd ); 339 assert( thrd->cor.state != Halted ); 340 341 verify( disable_preempt_count > 0 ); 334 342 335 343 verifyf( thrd->next == NULL, "Expected null got %p", thrd->next ); 336 337 lock( &systemProcessor->proc.cltr->lock );344 345 lock( &systemProcessor->proc.cltr->lock DEBUG_CTX2 ); 338 346 append( &systemProcessor->proc.cltr->ready_queue, thrd ); 339 347 unlock( &systemProcessor->proc.cltr->lock ); 348 349 verify( disable_preempt_count > 0 ); 340 350 } 341 351 342 352 thread_desc * nextThread(cluster * this) { 343 lock( &this->lock ); 353 verify( disable_preempt_count > 0 ); 354 lock( &this->lock DEBUG_CTX2 ); 344 355 thread_desc * head = pop_head( &this->ready_queue ); 345 356 unlock( &this->lock ); 357 verify( disable_preempt_count > 0 ); 346 358 return head; 347 359 } 348 360 349 void ScheduleInternal() { 361 void BlockInternal() { 362 disable_interrupts(); 363 verify( disable_preempt_count > 0 ); 350 364 suspend(); 351 } 352 353 void ScheduleInternal( spinlock * lock ) { 365 verify( disable_preempt_count > 0 ); 366 enable_interrupts( DEBUG_CTX ); 367 } 368 369 void BlockInternal( spinlock * lock ) { 370 disable_interrupts(); 354 371 this_processor->finish.action_code = Release; 355 372 this_processor->finish.lock = lock; 373 374 verify( disable_preempt_count > 0 ); 356 375 suspend(); 357 } 358 359 void ScheduleInternal( thread_desc * thrd ) { 376 verify( disable_preempt_count > 0 ); 377 378 enable_interrupts( DEBUG_CTX ); 379 } 380 381 void BlockInternal( thread_desc * thrd ) { 382 disable_interrupts(); 383 assert( thrd->cor.state != Halted ); 360 384 this_processor->finish.action_code = Schedule; 361 385 this_processor->finish.thrd = thrd; 386 387 verify( disable_preempt_count > 0 ); 362 388 suspend(); 363 } 364 365 void 
ScheduleInternal( spinlock * lock, thread_desc * thrd ) { 389 verify( disable_preempt_count > 0 ); 390 391 enable_interrupts( DEBUG_CTX ); 392 } 393 394 void BlockInternal( spinlock * lock, thread_desc * thrd ) { 395 disable_interrupts(); 366 396 this_processor->finish.action_code = Release_Schedule; 367 397 this_processor->finish.lock = lock; 368 398 this_processor->finish.thrd = thrd; 399 400 verify( disable_preempt_count > 0 ); 369 401 suspend(); 370 } 371 372 void ScheduleInternal(spinlock ** locks, unsigned short count) { 402 verify( disable_preempt_count > 0 ); 403 404 enable_interrupts( DEBUG_CTX ); 405 } 406 407 void BlockInternal(spinlock ** locks, unsigned short count) { 408 disable_interrupts(); 373 409 this_processor->finish.action_code = Release_Multi; 374 410 this_processor->finish.locks = locks; 375 411 this_processor->finish.lock_count = count; 412 413 verify( disable_preempt_count > 0 ); 376 414 suspend(); 377 } 378 379 void ScheduleInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) { 415 verify( disable_preempt_count > 0 ); 416 417 enable_interrupts( DEBUG_CTX ); 418 } 419 420 void BlockInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) { 421 disable_interrupts(); 380 422 this_processor->finish.action_code = Release_Multi_Schedule; 381 423 this_processor->finish.locks = locks; … … 383 425 this_processor->finish.thrds = thrds; 384 426 this_processor->finish.thrd_count = thrd_count; 427 428 verify( disable_preempt_count > 0 ); 429 suspend(); 430 verify( disable_preempt_count > 0 ); 431 432 enable_interrupts( DEBUG_CTX ); 433 } 434 435 void LeaveThread(spinlock * lock, thread_desc * thrd) { 436 verify( disable_preempt_count > 0 ); 437 this_processor->finish.action_code = thrd ? 
Release_Schedule : Release; 438 this_processor->finish.lock = lock; 439 this_processor->finish.thrd = thrd; 440 385 441 suspend(); 386 442 } … … 392 448 // Kernel boot procedures 393 449 void kernel_startup(void) { 394 LIB_DEBUG_PRINT_SAFE("Kernel : Starting\n"); 450 LIB_DEBUG_PRINT_SAFE("Kernel : Starting\n"); 395 451 396 452 // Start by initializing the main thread 397 // SKULLDUGGERY: the mainThread steals the process main thread 453 // SKULLDUGGERY: the mainThread steals the process main thread 398 454 // which will then be scheduled by the systemProcessor normally 399 mainThread = (thread_desc *)&mainThread _storage;455 mainThread = (thread_desc *)&mainThreadStorage; 400 456 current_stack_info_t info; 401 457 mainThread{ &info }; … … 403 459 LIB_DEBUG_PRINT_SAFE("Kernel : Main thread ready\n"); 404 460 461 // Initialize the system cluster 462 systemCluster = (cluster *)&systemClusterStorage; 463 systemCluster{}; 464 465 LIB_DEBUG_PRINT_SAFE("Kernel : System cluster ready\n"); 466 467 // Initialize the system processor and the system processor ctx 468 // (the coroutine that contains the processing control flow) 469 systemProcessor = (system_proc_t *)&systemProcessorStorage; 470 systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtxStorage }; 471 472 // Add the main thread to the ready queue 473 // once resume is called on systemProcessor->runner the mainThread needs to be scheduled like any normal thread 474 ScheduleThread(mainThread); 475 476 //initialize the global state variables 477 this_processor = &systemProcessor->proc; 478 this_thread = mainThread; 479 this_coroutine = &mainThread->cor; 480 disable_preempt_count = 1; 481 405 482 // Enable preemption 406 483 kernel_start_preemption(); 407 484 408 // Initialize the system cluster409 systemCluster = (cluster *)&systemCluster_storage;410 systemCluster{};411 412 LIB_DEBUG_PRINT_SAFE("Kernel : System cluster ready\n");413 414 // Initialize the system processor and the system processor ctx415 // (the coroutine that contains the processing control flow)416 systemProcessor = (system_proc_t *)&systemProcessor_storage;417 systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtx_storage };418 419 // Add the main thread to the ready queue420 // once resume is called on systemProcessor->runner the mainThread needs to be scheduled like any normal thread421 ScheduleThread(mainThread);422 423 //initialize the global state variables424 this_processor = &systemProcessor->proc;425 this_processor->current_thread = mainThread;426 this_processor->current_coroutine = &mainThread->cor;427 428 485 // SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX 429 486 // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that 430 // mainThread is on the ready queue when this call is made. 487 // mainThread is on the ready queue when this call is made. 431 488 resume( systemProcessor->proc.runner ); 432 489 … … 435 492 // THE SYSTEM IS NOW COMPLETELY RUNNING 436 493 LIB_DEBUG_PRINT_SAFE("Kernel : Started\n--------------------------------------------------\n\n"); 494 495 enable_interrupts( DEBUG_CTX ); 437 496 } 438 497 439 498 void kernel_shutdown(void) { 440 499 LIB_DEBUG_PRINT_SAFE("\n--------------------------------------------------\nKernel : Shutting down\n"); 500 501 disable_interrupts(); 441 502 442 503 // SKULLDUGGERY: Notify the systemProcessor it needs to terminates. 
… … 448 509 // THE SYSTEM IS NOW COMPLETELY STOPPED 449 510 511 // Disable preemption 512 kernel_stop_preemption(); 513 450 514 // Destroy the system processor and its context in reverse order of construction 451 515 // These were manually constructed so we need manually destroy them … … 457 521 ^(mainThread){}; 458 522 459 LIB_DEBUG_PRINT_SAFE("Kernel : Shutdown complete\n"); 523 LIB_DEBUG_PRINT_SAFE("Kernel : Shutdown complete\n"); 460 524 } 461 525 … … 467 531 // abort cannot be recursively entered by the same or different processors because all signal handlers return when 468 532 // the globalAbort flag is true. 469 lock( &kernel_abort_lock );533 lock( &kernel_abort_lock DEBUG_CTX2 ); 470 534 471 535 // first task to abort ? … … 473 537 kernel_abort_called = true; 474 538 unlock( &kernel_abort_lock ); 475 } 539 } 476 540 else { 477 541 unlock( &kernel_abort_lock ); 478 542 479 543 sigset_t mask; 480 544 sigemptyset( &mask ); … … 482 546 sigaddset( &mask, SIGUSR1 ); // block SIGUSR1 signals 483 547 sigsuspend( &mask ); // block the processor to prevent further damage during abort 484 _exit( EXIT_FAILURE ); // if processor unblocks before it is killed, terminate it 485 } 486 487 return this_thread ();548 _exit( EXIT_FAILURE ); // if processor unblocks before it is killed, terminate it 549 } 550 551 return this_thread; 488 552 } 489 553 … … 494 558 __lib_debug_write( STDERR_FILENO, abort_text, len ); 495 559 496 if ( thrd != this_coroutine ()) {497 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine ()->name, this_coroutine());560 if ( thrd != this_coroutine ) { 561 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine->name, this_coroutine ); 498 562 __lib_debug_write( STDERR_FILENO, abort_text, len ); 499 } 563 } 500 564 else { 501 565 __lib_debug_write( STDERR_FILENO, ".\n", 2 ); … … 505 569 extern "C" { 506 570 void __lib_debug_acquire() { 507 lock( &kernel_debug_lock);571 lock( &kernel_debug_lock DEBUG_CTX2 ); 508 572 } 509 573 510 574 void __lib_debug_release() { 511 unlock( &kernel_debug_lock);575 unlock( &kernel_debug_lock ); 512 576 } 513 577 } … … 525 589 } 526 590 527 bool try_lock( spinlock * this ) {591 bool try_lock( spinlock * this DEBUG_CTX_PARAM2 ) { 528 592 return this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0; 529 593 } 530 594 531 void lock( spinlock * this ) {595 void lock( spinlock * this DEBUG_CTX_PARAM2 ) { 532 596 for ( unsigned int i = 1;; i += 1 ) { 533 if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) break; 534 } 535 } 597 if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) { break; } 598 } 599 LIB_DEBUG_DO( 600 this->prev_name = caller; 601 this->prev_thrd = this_thread; 602 ) 603 } 604 605 void lock_yield( spinlock * this DEBUG_CTX_PARAM2 ) { 606 for ( unsigned int i = 1;; i += 1 ) { 607 if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) { break; } 608 yield(); 609 } 610 LIB_DEBUG_DO( 611 this->prev_name = caller; 612 this->prev_thrd = this_thread; 613 ) 614 } 615 536 616 537 617 void unlock( spinlock * this ) { … … 539 619 } 540 620 541 void ?{}( signal_once * this ) { 542 this->cond = false; 543 } 544 void ^?{}( signal_once * this ) { 545 546 } 547 548 void wait( signal_once * this ) { 549 lock( &this->lock ); 550 if( !this->cond ) { 551 append( &this->blocked, this_thread() ); 552 ScheduleInternal( &this->lock ); 553 lock( &this->lock ); 554 } 621 void ?{}( semaphore * this, int 
count = 1 ) { 622 (&this->lock){}; 623 this->count = count; 624 (&this->waiting){}; 625 } 626 void ^?{}(semaphore * this) {} 627 628 void P(semaphore * this) { 629 lock( &this->lock DEBUG_CTX2 ); 630 this->count -= 1; 631 if ( this->count < 0 ) { 632 // queue current task 633 append( &this->waiting, (thread_desc *)this_thread ); 634 635 // atomically release spin lock and block 636 BlockInternal( &this->lock ); 637 } 638 else { 639 unlock( &this->lock ); 640 } 641 } 642 643 void V(semaphore * this) { 644 thread_desc * thrd = NULL; 645 lock( &this->lock DEBUG_CTX2 ); 646 this->count += 1; 647 if ( this->count <= 0 ) { 648 // remove task at head of waiting list 649 thrd = pop_head( &this->waiting ); 650 } 651 555 652 unlock( &this->lock ); 556 } 557 558 void signal( signal_once * this ) { 559 lock( &this->lock ); 560 { 561 this->cond = true; 562 563 thread_desc * it; 564 while( it = pop_head( &this->blocked) ) { 565 ScheduleThread( it ); 566 } 567 } 568 unlock( &this->lock ); 653 654 // make new owner 655 WakeThread( thrd ); 569 656 } 570 657 … … 590 677 } 591 678 head->next = NULL; 592 } 679 } 593 680 return head; 594 681 } … … 609 696 this->top = top->next; 610 697 top->next = NULL; 611 } 698 } 612 699 return top; 613 700 } -
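Throughout kernel.c above, lock, lock_yield, try_lock and enable_interrupts now thread a debug argument through DEBUG_CTX / DEBUG_CTX2 / DEBUG_CTX_PARAM / DEBUG_CTX_PARAM2, and the spinlock records it in its new prev_name/prev_thrd fields. Those macros come from libhdr, which is not part of this changeset; purely to make the call sites readable, they plausibly expand along the following lines (an assumption, not the actual libhdr definitions):

    /* Assumed shape of the libhdr debug-context macros (illustration only). */
    #ifdef __CFA_DEBUG__
    	#define DEBUG_CTX            __PRETTY_FUNCTION__    /* extra argument at the call site  */
    	#define DEBUG_CTX2           , __PRETTY_FUNCTION__  /* same, after an existing argument */
    	#define DEBUG_CTX_PARAM      const char * caller    /* matching parameter in the callee */
    	#define DEBUG_CTX_PARAM2     , const char * caller
    #else
    	#define DEBUG_CTX
    	#define DEBUG_CTX2
    	#define DEBUG_CTX_PARAM
    	#define DEBUG_CTX_PARAM2
    #endif

    /* Under this assumption, lock( &this->lock DEBUG_CTX2 ) becomes
     * lock( &this->lock, __PRETTY_FUNCTION__ ) in debug builds and
     * lock( &this->lock ) otherwise, so the spinlock can store the caller
     * in prev_name for post-mortem inspection. */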
src/libcfa/concurrency/kernel_private.h
r1d776fd r9a1e509 18 18 #define KERNEL_PRIVATE_H 19 19 20 #include "libhdr.h" 21 20 22 #include "kernel" 21 23 #include "thread" … … 23 25 #include "alarm.h" 24 26 25 #include "libhdr.h"26 27 27 28 //----------------------------------------------------------------------------- 28 29 // Scheduler 30 31 extern "C" { 32 void disable_interrupts(); 33 void enable_interrupts_noRF(); 34 void enable_interrupts( DEBUG_CTX_PARAM ); 35 } 36 29 37 void ScheduleThread( thread_desc * ); 38 static inline void WakeThread( thread_desc * thrd ) { 39 if( !thrd ) return; 40 41 disable_interrupts(); 42 ScheduleThread( thrd ); 43 enable_interrupts( DEBUG_CTX ); 44 } 30 45 thread_desc * nextThread(cluster * this); 31 46 32 void ScheduleInternal(void); 33 void ScheduleInternal(spinlock * lock); 34 void ScheduleInternal(thread_desc * thrd); 35 void ScheduleInternal(spinlock * lock, thread_desc * thrd); 36 void ScheduleInternal(spinlock ** locks, unsigned short count); 37 void ScheduleInternal(spinlock ** locks, unsigned short count, thread_desc ** thrds, unsigned short thrd_count); 47 void BlockInternal(void); 48 void BlockInternal(spinlock * lock); 49 void BlockInternal(thread_desc * thrd); 50 void BlockInternal(spinlock * lock, thread_desc * thrd); 51 void BlockInternal(spinlock ** locks, unsigned short count); 52 void BlockInternal(spinlock ** locks, unsigned short count, thread_desc ** thrds, unsigned short thrd_count); 53 void LeaveThread(spinlock * lock, thread_desc * thrd); 38 54 39 55 //----------------------------------------------------------------------------- … … 60 76 extern cluster * systemCluster; 61 77 extern system_proc_t * systemProcessor; 62 extern thread_local processor * this_processor; 63 64 static inline void disable_interrupts() { 65 __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, 1, __ATOMIC_SEQ_CST ); 66 assert( prev != (unsigned short) -1 ); 67 } 68 69 static inline void enable_interrupts_noRF() { 70 __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST ); 71 verify( prev != (unsigned short) 0 ); 72 } 73 74 static inline void enable_interrupts() { 75 __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST ); 76 verify( prev != (unsigned short) 0 ); 77 if( prev == 1 && this_processor->pending_preemption ) { 78 ScheduleInternal( this_processor->current_thread ); 79 this_processor->pending_preemption = false; 80 } 81 } 78 extern volatile thread_local processor * this_processor; 79 extern volatile thread_local coroutine_desc * this_coroutine; 80 extern volatile thread_local thread_desc * this_thread; 81 extern volatile thread_local bool preemption_in_progress; 82 extern volatile thread_local unsigned short disable_preempt_count; 82 83 83 84 //----------------------------------------------------------------------------- -
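The renamed BlockInternal overloads above share one contract: the caller sets things up while holding a spinlock (or with a thread to hand off), and BlockInternal atomically releases/schedules it, parks the current thread, and re-enables interrupts when the thread resumes; WakeThread is the interrupt-safe counterpart for unblocking (and tolerates NULL). A schematic sketch of the pattern, condensed from the semaphore and monitor-entry code in this changeset; this->lock, waiters and must_block are placeholders:

    // Blocking side: decide under the spinlock, let BlockInternal release it atomically.
    lock( &this->lock DEBUG_CTX2 );
    if( must_block ) {
    	append( &waiters, (thread_desc *)this_thread );  // enqueue self while holding the lock
    	BlockInternal( &this->lock );                    // releases the lock, parks this thread,
    	                                                 // re-enables interrupts on wake-up
    } else {
    	unlock( &this->lock );
    }

    // Waking side: ScheduleThread now requires interrupts to be disabled,
    // so callers go through WakeThread instead of calling it directly.
    thread_desc * thrd;
    lock( &this->lock DEBUG_CTX2 );
    thrd = pop_head( &waiters );
    unlock( &this->lock );
    WakeThread( thrd );                                  // no-op if thrd is NULL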
src/libcfa/concurrency/monitor
r1d776fd r9a1e509
@@ -26,5 +26,4 @@
 static inline void ?{}(monitor_desc * this) {
 	this->owner = NULL;
-	this->stack_owner = NULL;
 	this->recursion = 0;
 }
-
src/libcfa/concurrency/monitor.c
r1d776fd r9a1e509 19 19 #include <stdlib> 20 20 21 #include "libhdr.h" 21 22 #include "kernel_private.h" 22 #include "libhdr.h"23 23 24 24 //----------------------------------------------------------------------------- … … 44 44 45 45 extern "C" { 46 void __enter_monitor_desc( monitor_desc * this) {47 lock ( &this->lock);48 thread_desc * thrd = this_thread ();49 50 LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);46 void __enter_monitor_desc( monitor_desc * this ) { 47 lock_yield( &this->lock DEBUG_CTX2 ); 48 thread_desc * thrd = this_thread; 49 50 // LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion); 51 51 52 52 if( !this->owner ) { … … 62 62 //Some one else has the monitor, wait in line for it 63 63 append( &this->entry_queue, thrd ); 64 LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);65 ScheduleInternal( &this->lock );66 67 // ScheduleInternal will unlock spinlock, no need to unlock ourselves68 return; 64 // LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd); 65 BlockInternal( &this->lock ); 66 67 //BlockInternal will unlock spinlock, no need to unlock ourselves 68 return; 69 69 } 70 70 … … 75 75 // leave pseudo code : 76 76 // TODO 77 void __leave_monitor_desc( monitor_desc * this) {78 lock ( &this->lock);79 80 LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);81 verifyf( this_thread () == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread(), this->owner, this->recursion );77 void __leave_monitor_desc( monitor_desc * this ) { 78 lock_yield( &this->lock DEBUG_CTX2 ); 79 80 // LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i). ", this_thread, this, this->owner, this->recursion); 81 verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion ); 82 82 83 83 //Leaving a recursion level, decrement the counter … … 96 96 unlock( &this->lock ); 97 97 98 LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);98 // LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner); 99 99 100 100 //We need to wake-up the thread 101 ScheduleThread( new_owner ); 101 WakeThread( new_owner ); 102 } 103 104 void __leave_thread_monitor( thread_desc * thrd ) { 105 monitor_desc * this = &thrd->mon; 106 lock_yield( &this->lock DEBUG_CTX2 ); 107 108 disable_interrupts(); 109 110 thrd->cor.state = Halted; 111 112 verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion ); 113 114 //Leaving a recursion level, decrement the counter 115 this->recursion -= 1; 116 117 //If we haven't left the last level of recursion 118 //it means we don't need to do anything 119 if( this->recursion != 0) { 120 unlock( &this->lock ); 121 return; 122 } 123 124 thread_desc * new_owner = next_thread( this ); 125 126 LeaveThread( &this->lock, new_owner ); 102 127 } 103 128 } … … 121 146 enter( this->m, this->count ); 122 147 123 this->prev_mntrs = this_thread ()->current_monitors;124 this->prev_count = this_thread ()->current_monitor_count;125 126 this_thread ()->current_monitors = m;127 this_thread ()->current_monitor_count = count;148 this->prev_mntrs = this_thread->current_monitors; 149 this->prev_count = this_thread->current_monitor_count; 150 151 this_thread->current_monitors = m; 152 this_thread->current_monitor_count = count; 128 153 } 129 154 … … 131 156 leave( this->m, this->count ); 132 157 133 this_thread ()->current_monitors = this->prev_mntrs;134 this_thread 
()->current_monitor_count = this->prev_count;158 this_thread->current_monitors = this->prev_mntrs; 159 this_thread->current_monitor_count = this->prev_count; 135 160 } 136 161 … … 159 184 // Internal scheduling 160 185 void wait( condition * this, uintptr_t user_info = 0 ) { 161 LIB_DEBUG_PRINT_SAFE("Waiting\n");186 // LIB_DEBUG_PRINT_SAFE("Waiting\n"); 162 187 163 188 brand_condition( this ); … … 170 195 unsigned short count = this->monitor_count; 171 196 unsigned int recursions[ count ]; //Save the current recursion levels to restore them later 172 spinlock * locks [ count ]; //We need to pass-in an array of locks to ScheduleInternal173 174 LIB_DEBUG_PRINT_SAFE("count %i\n", count);175 176 __condition_node_t waiter = { this_thread(), count, user_info };197 spinlock * locks [ count ]; //We need to pass-in an array of locks to BlockInternal 198 199 // LIB_DEBUG_PRINT_SAFE("count %i\n", count); 200 201 __condition_node_t waiter = { (thread_desc*)this_thread, count, user_info }; 177 202 178 203 __condition_criterion_t criteria[count]; 179 204 for(int i = 0; i < count; i++) { 180 205 (&criteria[i]){ this->monitors[i], &waiter }; 181 LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );206 // LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] ); 182 207 } 183 208 … … 201 226 } 202 227 203 LIB_DEBUG_PRINT_SAFE("Will unblock: ");228 // LIB_DEBUG_PRINT_SAFE("Will unblock: "); 204 229 for(int i = 0; i < thread_count; i++) { 205 LIB_DEBUG_PRINT_SAFE("%p ", threads[i]);206 } 207 LIB_DEBUG_PRINT_SAFE("\n");230 // LIB_DEBUG_PRINT_SAFE("%p ", threads[i]); 231 } 232 // LIB_DEBUG_PRINT_SAFE("\n"); 208 233 209 234 // Everything is ready to go to sleep 210 ScheduleInternal( locks, count, threads, thread_count );235 BlockInternal( locks, count, threads, thread_count ); 211 236 212 237 … … 222 247 bool signal( condition * this ) { 223 248 if( is_empty( this ) ) { 224 LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");249 // LIB_DEBUG_PRINT_SAFE("Nothing to signal\n"); 225 250 return false; 226 251 } … … 231 256 232 257 unsigned short count = this->monitor_count; 233 258 234 259 //Some more checking in debug 235 260 LIB_DEBUG_DO( 236 thread_desc * this_thrd = this_thread ();261 thread_desc * this_thrd = this_thread; 237 262 if ( this->monitor_count != this_thrd->current_monitor_count ) { 238 263 abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count ); … … 248 273 //Lock all the monitors 249 274 lock_all( this->monitors, NULL, count ); 250 LIB_DEBUG_PRINT_SAFE("Signalling");275 // LIB_DEBUG_PRINT_SAFE("Signalling"); 251 276 252 277 //Pop the head of the waiting queue … … 256 281 for(int i = 0; i < count; i++) { 257 282 __condition_criterion_t * crit = &node->criteria[i]; 258 LIB_DEBUG_PRINT_SAFE(" %p", crit->target);283 // LIB_DEBUG_PRINT_SAFE(" %p", crit->target); 259 284 assert( !crit->ready ); 260 285 push( &crit->target->signal_stack, crit ); 261 286 } 262 287 263 LIB_DEBUG_PRINT_SAFE("\n");288 // LIB_DEBUG_PRINT_SAFE("\n"); 264 289 265 290 //Release … … 281 306 unsigned short count = this->monitor_count; 282 307 unsigned int recursions[ count ]; //Save the current recursion levels to restore them later 283 spinlock * locks [ count ]; //We need to pass-in an array of locks to ScheduleInternal308 spinlock * locks [ count ]; //We need to pass-in an array of locks to BlockInternal 284 309 285 310 lock_all( this->monitors, locks, count ); 286 311 287 312 //create creteria 288 __condition_node_t waiter = { 
this_thread(), count, 0 };313 __condition_node_t waiter = { (thread_desc*)this_thread, count, 0 }; 289 314 290 315 __condition_criterion_t criteria[count]; 291 316 for(int i = 0; i < count; i++) { 292 317 (&criteria[i]){ this->monitors[i], &waiter }; 293 LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );318 // LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] ); 294 319 push( &criteria[i].target->signal_stack, &criteria[i] ); 295 320 } … … 309 334 310 335 //Everything is ready to go to sleep 311 ScheduleInternal( locks, count, &signallee, 1 );336 BlockInternal( locks, count, &signallee, 1 ); 312 337 313 338 … … 325 350 326 351 uintptr_t front( condition * this ) { 327 verifyf( !is_empty(this), 352 verifyf( !is_empty(this), 328 353 "Attempt to access user data on an empty condition.\n" 329 354 "Possible cause is not checking if the condition is empty before reading stored data." … … 335 360 // Internal scheduling 336 361 void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) ) { 337 // thread_desc * this = this_thread ();362 // thread_desc * this = this_thread; 338 363 339 364 // unsigned short count = this->current_monitor_count; 340 365 // unsigned int recursions[ count ]; //Save the current recursion levels to restore them later 341 // spinlock * locks [ count ]; //We need to pass-in an array of locks to ScheduleInternal366 // spinlock * locks [ count ]; //We need to pass-in an array of locks to BlockInternal 342 367 343 368 // lock_all( this->current_monitors, locks, count ); … … 348 373 349 374 // // // Everything is ready to go to sleep 350 // // ScheduleInternal( locks, count, threads, thread_count );375 // // BlockInternal( locks, count, threads, thread_count ); 351 376 352 377 … … 393 418 static inline void lock_all( spinlock ** locks, unsigned short count ) { 394 419 for( int i = 0; i < count; i++ ) { 395 lock ( locks[i]);420 lock_yield( locks[i] DEBUG_CTX2 ); 396 421 } 397 422 } … … 400 425 for( int i = 0; i < count; i++ ) { 401 426 spinlock * l = &source[i]->lock; 402 lock ( l);427 lock_yield( l DEBUG_CTX2 ); 403 428 if(locks) locks[i] = l; 404 429 } … … 443 468 for( int i = 0; i < count; i++ ) { 444 469 445 LIB_DEBUG_PRINT_SAFE( "Checking %p for %p\n", &criteria[i], target );470 // LIB_DEBUG_PRINT_SAFE( "Checking %p for %p\n", &criteria[i], target ); 446 471 if( &criteria[i] == target ) { 447 472 criteria[i].ready = true; 448 LIB_DEBUG_PRINT_SAFE( "True\n" );473 // LIB_DEBUG_PRINT_SAFE( "True\n" ); 449 474 } 450 475 … … 452 477 } 453 478 454 LIB_DEBUG_PRINT_SAFE( "Runing %i\n", ready2run );479 // LIB_DEBUG_PRINT_SAFE( "Runing %i\n", ready2run ); 455 480 return ready2run ? node->waiting_thread : NULL; 456 481 } 457 482 458 483 static inline void brand_condition( condition * this ) { 459 thread_desc * thrd = this_thread ();484 thread_desc * thrd = this_thread; 460 485 if( !this->monitors ) { 461 LIB_DEBUG_PRINT_SAFE("Branding\n");486 // LIB_DEBUG_PRINT_SAFE("Branding\n"); 462 487 assertf( thrd->current_monitors != NULL, "No current monitor to brand condition", thrd->current_monitors ); 463 488 this->monitor_count = thrd->current_monitor_count; -
src/libcfa/concurrency/preemption.c
r1d776fd r9a1e509 15 15 // 16 16 17 #include "libhdr.h" 17 18 #include "preemption.h" 18 19 19 20 extern "C" { 21 #include <errno.h> 22 #include <execinfo.h> 23 #define __USE_GNU 20 24 #include <signal.h> 21 } 22 23 #define __CFA_DEFAULT_PREEMPTION__ 10 25 #undef __USE_GNU 26 #include <stdio.h> 27 #include <string.h> 28 #include <unistd.h> 29 } 30 31 32 #ifdef __USE_STREAM__ 33 #include "fstream" 34 #endif 35 36 #define __CFA_DEFAULT_PREEMPTION__ 10000 24 37 25 38 __attribute__((weak)) unsigned int default_preemption() { … … 27 40 } 28 41 42 #define __CFA_SIGCXT__ ucontext_t * 43 #define __CFA_SIGPARMS__ __attribute__((unused)) int sig, __attribute__((unused)) siginfo_t *sfp, __attribute__((unused)) __CFA_SIGCXT__ cxt 44 29 45 static void preempt( processor * this ); 30 46 static void timeout( thread_desc * this ); 31 47 48 void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ); 49 void sigHandler_alarm ( __CFA_SIGPARMS__ ); 50 void sigHandler_segv ( __CFA_SIGPARMS__ ); 51 void sigHandler_abort ( __CFA_SIGPARMS__ ); 52 53 static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags ); 54 LIB_DEBUG_DO( bool validate( alarm_list_t * this ); ) 55 56 #ifdef __x86_64__ 57 #define CFA_REG_IP REG_RIP 58 #else 59 #define CFA_REG_IP REG_EIP 60 #endif 61 62 32 63 //============================================================================================= 33 64 // Kernel Preemption logic 34 65 //============================================================================================= 35 66 36 void kernel_start_preemption() {37 38 }39 40 67 void tick_preemption() { 68 // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Ticking preemption\n" ); 69 41 70 alarm_list_t * alarms = &systemProcessor->alarms; 42 71 __cfa_time_t currtime = __kernel_get_time(); 43 72 while( alarms->head && alarms->head->alarm < currtime ) { 44 73 alarm_node_t * node = pop(alarms); 74 // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking %p\n", node ); 75 45 76 if( node->kernel_alarm ) { 46 77 preempt( node->proc ); … … 50 81 } 51 82 83 verify( validate( alarms ) ); 84 52 85 if( node->period > 0 ) { 53 node->alarm +=node->period;86 node->alarm = currtime + node->period; 54 87 insert( alarms, node ); 55 88 } … … 62 95 __kernel_set_timer( alarms->head->alarm - currtime ); 63 96 } 97 98 verify( validate( alarms ) ); 99 // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking preemption done\n" ); 64 100 } 65 101 66 102 void update_preemption( processor * this, __cfa_time_t duration ) { 67 // assert( THREAD_GETMEM( disableInt ) && THREAD_GETMEM( disableIntCnt ) == 1 ); 103 LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Processor : %p updating preemption to %lu\n", this, duration ); 104 68 105 alarm_node_t * alarm = this->preemption_alarm; 106 duration *= 1000; 69 107 70 108 // Alarms need to be enabled … … 89 127 } 90 128 129 //============================================================================================= 130 // Kernel Signal Tools 131 //============================================================================================= 132 133 LIB_DEBUG_DO( static thread_local void * last_interrupt = 0; ) 134 135 extern "C" { 136 void disable_interrupts() { 137 __attribute__((unused)) unsigned short new_val = __atomic_add_fetch_2( &disable_preempt_count, 1, __ATOMIC_SEQ_CST ); 138 verify( new_val < (unsigned short)65_000 ); 139 verify( new_val != (unsigned short) 0 ); 140 } 141 142 void enable_interrupts_noRF() { 143 __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( 
&disable_preempt_count, -1, __ATOMIC_SEQ_CST ); 144 verify( prev != (unsigned short) 0 ); 145 } 146 147 void enable_interrupts( DEBUG_CTX_PARAM ) { 148 processor * proc = this_processor; 149 thread_desc * thrd = this_thread; 150 unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST ); 151 verify( prev != (unsigned short) 0 ); 152 if( prev == 1 && proc->pending_preemption ) { 153 proc->pending_preemption = false; 154 BlockInternal( thrd ); 155 } 156 157 LIB_DEBUG_DO( proc->last_enable = caller; ) 158 } 159 } 160 161 static inline void signal_unblock( int sig ) { 162 sigset_t mask; 163 sigemptyset( &mask ); 164 sigaddset( &mask, sig ); 165 166 if ( pthread_sigmask( SIG_UNBLOCK, &mask, NULL ) == -1 ) { 167 abortf( "internal error, pthread_sigmask" ); 168 } 169 } 170 171 static inline void signal_block( int sig ) { 172 sigset_t mask; 173 sigemptyset( &mask ); 174 sigaddset( &mask, sig ); 175 176 if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) { 177 abortf( "internal error, pthread_sigmask" ); 178 } 179 } 180 181 static inline bool preemption_ready() { 182 return disable_preempt_count == 0 && !preemption_in_progress; 183 } 184 185 static inline void defer_ctxSwitch() { 186 this_processor->pending_preemption = true; 187 } 188 189 static inline void defer_alarm() { 190 systemProcessor->pending_alarm = true; 191 } 192 193 static void preempt( processor * this ) { 194 pthread_kill( this->kernel_thread, SIGUSR1 ); 195 } 196 197 static void timeout( thread_desc * this ) { 198 //TODO : implement waking threads 199 } 200 201 //============================================================================================= 202 // Kernel Signal Startup/Shutdown logic 203 //============================================================================================= 204 205 static pthread_t alarm_thread; 206 void * alarm_loop( __attribute__((unused)) void * args ); 207 208 void kernel_start_preemption() { 209 LIB_DEBUG_PRINT_SAFE("Kernel : Starting preemption\n"); 210 __kernel_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO ); 211 // __kernel_sigaction( SIGSEGV, sigHandler_segv , SA_SIGINFO ); 212 // __kernel_sigaction( SIGBUS , sigHandler_segv , SA_SIGINFO ); 213 214 signal_block( SIGALRM ); 215 216 pthread_create( &alarm_thread, NULL, alarm_loop, NULL ); 217 } 218 219 void kernel_stop_preemption() { 220 LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopping\n"); 221 222 sigset_t mask; 223 sigfillset( &mask ); 224 sigprocmask( SIG_BLOCK, &mask, NULL ); 225 226 sigval val = { 1 }; 227 pthread_sigqueue( alarm_thread, SIGALRM, val ); 228 pthread_join( alarm_thread, NULL ); 229 LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopped\n"); 230 } 231 91 232 void ?{}( preemption_scope * this, processor * proc ) { 92 233 (&this->alarm){ proc }; … … 97 238 98 239 void ^?{}( preemption_scope * this ) { 240 disable_interrupts(); 241 99 242 update_preemption( this->proc, 0 ); 100 243 } 101 244 102 245 //============================================================================================= 103 // Kernel Signal logic 104 //============================================================================================= 105 106 static inline bool preemption_ready() { 107 return this_processor->disable_preempt_count == 0; 108 } 109 110 static inline void defer_ctxSwitch() { 111 this_processor->pending_preemption = true; 112 } 113 114 static inline void defer_alarm() { 115 systemProcessor->pending_alarm = true; 116 } 117 118 void sigHandler_ctxSwitch( __attribute__((unused)) int sig ) 
{ 246 // Kernel Signal Handlers 247 //============================================================================================= 248 249 void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) { 250 LIB_DEBUG_DO( last_interrupt = (void *)(cxt->uc_mcontext.gregs[CFA_REG_IP]); ) 119 251 if( preemption_ready() ) { 120 ScheduleInternal( this_processor->current_thread ); 252 preemption_in_progress = true; 253 signal_unblock( SIGUSR1 ); 254 this_processor->pending_preemption = false; 255 preemption_in_progress = false; 256 BlockInternal( (thread_desc*)this_thread ); 121 257 } 122 258 else { … … 125 261 } 126 262 127 void sigHandler_alarm( __attribute__((unused)) int sig ) { 128 if( try_lock( &systemProcessor->alarm_lock ) ) { 129 tick_preemption(); 130 unlock( &systemProcessor->alarm_lock ); 131 } 132 else { 133 defer_alarm(); 134 } 135 } 136 137 static void preempt( processor * this ) { 138 pthread_kill( this->kernel_thread, SIGUSR1 ); 139 } 140 141 static void timeout( thread_desc * this ) { 142 //TODO : implement waking threads 143 } 263 void * alarm_loop( __attribute__((unused)) void * args ) { 264 sigset_t mask; 265 sigemptyset( &mask ); 266 sigaddset( &mask, SIGALRM ); 267 268 if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) { 269 abortf( "internal error, pthread_sigmask" ); 270 } 271 272 while( true ) { 273 siginfo_t info; 274 int sig = sigwaitinfo( &mask, &info ); 275 if( sig < 0 ) { 276 abortf( "internal error, sigwait" ); 277 } 278 else if( sig == SIGALRM ) 279 { 280 LIB_DEBUG_PRINT_SAFE("Kernel : Caught signal %d (%d)\n", sig, info.si_value.sival_int ); 281 if( info.si_value.sival_int == 0 ) 282 { 283 LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread tick\n"); 284 lock( &systemProcessor->alarm_lock DEBUG_CTX2 ); 285 tick_preemption(); 286 unlock( &systemProcessor->alarm_lock ); 287 } 288 else if( info.si_value.sival_int == 1 ) 289 { 290 break; 291 } 292 } 293 else 294 { 295 LIB_DEBUG_PRINT_SAFE("Kernel : Unexpected signal %d (%d)\n", sig, info.si_value.sival_int); 296 } 297 } 298 299 LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread stopping\n"); 300 return NULL; 301 } 302 303 static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags ) { 304 struct sigaction act; 305 306 act.sa_sigaction = (void (*)(int, siginfo_t *, void *))handler; 307 act.sa_flags = flags; 308 309 if ( sigaction( sig, &act, NULL ) == -1 ) { 310 LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, 311 " __kernel_sigaction( sig:%d, handler:%p, flags:%d ), problem installing signal handler, error(%d) %s.\n", 312 sig, handler, flags, errno, strerror( errno ) 313 ); 314 _exit( EXIT_FAILURE ); 315 } 316 } 317 318 typedef void (*sa_handler_t)(int); 319 320 static void __kernel_sigdefault( int sig ) { 321 struct sigaction act; 322 323 // act.sa_handler = SIG_DFL; 324 act.sa_flags = 0; 325 sigemptyset( &act.sa_mask ); 326 327 if ( sigaction( sig, &act, NULL ) == -1 ) { 328 LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, 329 " __kernel_sigdefault( sig:%d ), problem reseting signal handler, error(%d) %s.\n", 330 sig, errno, strerror( errno ) 331 ); 332 _exit( EXIT_FAILURE ); 333 } 334 } 335 336 //============================================================================================= 337 // Terminating Signals logic 338 //============================================================================================= 339 340 LIB_DEBUG_DO( 341 static void __kernel_backtrace( int start ) { 342 // skip first N stack frames 343 344 enum { Frames = 50 }; 345 void * array[Frames]; 346 int size = backtrace( array, 
Frames ); 347 char ** messages = backtrace_symbols( array, size ); 348 349 // find executable name 350 *index( messages[0], '(' ) = '\0'; 351 #ifdef __USE_STREAM__ 352 serr | "Stack back trace for:" | messages[0] | endl; 353 #else 354 fprintf( stderr, "Stack back trace for: %s\n", messages[0]); 355 #endif 356 357 // skip last 2 stack frames after main 358 for ( int i = start; i < size && messages != NULL; i += 1 ) { 359 char * name = NULL; 360 char * offset_begin = NULL; 361 char * offset_end = NULL; 362 363 for ( char *p = messages[i]; *p; ++p ) { 364 // find parantheses and +offset 365 if ( *p == '(' ) { 366 name = p; 367 } 368 else if ( *p == '+' ) { 369 offset_begin = p; 370 } 371 else if ( *p == ')' ) { 372 offset_end = p; 373 break; 374 } 375 } 376 377 // if line contains symbol print it 378 int frameNo = i - start; 379 if ( name && offset_begin && offset_end && name < offset_begin ) { 380 // delimit strings 381 *name++ = '\0'; 382 *offset_begin++ = '\0'; 383 *offset_end++ = '\0'; 384 385 #ifdef __USE_STREAM__ 386 serr | "(" | frameNo | ")" | messages[i] | ":" 387 | name | "+" | offset_begin | offset_end | endl; 388 #else 389 fprintf( stderr, "(%i) %s : %s + %s %s\n", frameNo, messages[i], name, offset_begin, offset_end); 390 #endif 391 } 392 // otherwise, print the whole line 393 else { 394 #ifdef __USE_STREAM__ 395 serr | "(" | frameNo | ")" | messages[i] | endl; 396 #else 397 fprintf( stderr, "(%i) %s\n", frameNo, messages[i] ); 398 #endif 399 } 400 } 401 402 free( messages ); 403 } 404 ) 405 406 // void sigHandler_segv( __CFA_SIGPARMS__ ) { 407 // LIB_DEBUG_DO( 408 // #ifdef __USE_STREAM__ 409 // serr | "*CFA runtime error* program cfa-cpp terminated with" 410 // | (sig == SIGSEGV ? "segment fault." : "bus error.") 411 // | endl; 412 // #else 413 // fprintf( stderr, "*CFA runtime error* program cfa-cpp terminated with %s\n", sig == SIGSEGV ? "segment fault." : "bus error." ); 414 // #endif 415 416 // // skip first 2 stack frames 417 // __kernel_backtrace( 1 ); 418 // ) 419 // exit( EXIT_FAILURE ); 420 // } 421 422 // void sigHandler_abort( __CFA_SIGPARMS__ ) { 423 // // skip first 6 stack frames 424 // LIB_DEBUG_DO( __kernel_backtrace( 6 ); ) 425 426 // // reset default signal handler 427 // __kernel_sigdefault( SIGABRT ); 428 429 // raise( SIGABRT ); 430 // } -
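Editorial note: the kernel_start_preemption/alarm_loop code above replaces an asynchronous SIGALRM handler with a dedicated pthread that receives SIGALRM synchronously. The standalone C sketch below only illustrates that POSIX pattern; it is not taken from libcfa, the "tick"/sentinel values are placeholders, and it relies on the glibc extension pthread_sigqueue (build with -pthread and _GNU_SOURCE).

#define _GNU_SOURCE                              // for pthread_sigqueue (glibc)
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

// Dedicated signal thread: SIGALRM stays blocked in every thread and is
// consumed synchronously here with sigwaitinfo(), so no async handler runs.
static void * alarm_thread_main( void * arg ) {
    (void)arg;
    sigset_t mask;
    sigemptyset( &mask );
    sigaddset( &mask, SIGALRM );

    for ( ;; ) {
        siginfo_t info;
        int sig = sigwaitinfo( &mask, &info );
        if ( sig < 0 ) { perror( "sigwaitinfo" ); exit( EXIT_FAILURE ); }
        if ( info.si_value.sival_int == 1 ) break;       // sentinel: shut down
        printf( "tick (value %d)\n", info.si_value.sival_int );
    }
    return NULL;
}

int main( void ) {
    sigset_t mask;
    sigemptyset( &mask );
    sigaddset( &mask, SIGALRM );
    pthread_sigmask( SIG_BLOCK, &mask, NULL );           // block before any thread exists

    pthread_t alarm_thread;
    pthread_create( &alarm_thread, NULL, alarm_thread_main, NULL );

    union sigval val = { .sival_int = 0 };
    pthread_sigqueue( alarm_thread, SIGALRM, val );      // deliver one "tick"
    sleep( 1 );                                          // standard signals do not queue; let it drain
    val.sival_int = 1;
    pthread_sigqueue( alarm_thread, SIGALRM, val );      // deliver the stop sentinel

    pthread_join( alarm_thread, NULL );
    return 0;
}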
src/libcfa/concurrency/thread
r1d776fd r9a1e509 54 54 } 55 55 56 thread_desc * this_thread(void);56 extern volatile thread_local thread_desc * this_thread; 57 57 58 58 forall( dtype T | is_thread(T) ) -
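Editorial note: this_thread changes above from an accessor function to a volatile thread_local variable, so each kernel thread carries its own copy. The C11 program below is only a minimal illustration of thread-local storage; my_id and worker are made-up names, not libcfa symbols.

#include <pthread.h>
#include <stdio.h>

static _Thread_local int my_id = 0;              // one instance per kernel thread

static void * worker( void * arg ) {
    my_id = (int)(long)arg;                      // writes only this thread's copy
    printf( "worker sees id %d\n", my_id );
    return NULL;
}

int main( void ) {
    pthread_t t1, t2;
    pthread_create( &t1, NULL, worker, (void *)1L );
    pthread_create( &t2, NULL, worker, (void *)2L );
    pthread_join( t1, NULL );
    pthread_join( t2, NULL );
    printf( "main still sees id %d\n", my_id );  // unchanged: 0
    return 0;
}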
src/libcfa/concurrency/thread.c
r1d776fd r9a1e509 28 28 } 29 29 30 extern thread_local processor * this_processor;30 extern volatile thread_local processor * this_processor; 31 31 32 32 //----------------------------------------------------------------------------- … … 71 71 coroutine_desc* thrd_c = get_coroutine(this); 72 72 thread_desc* thrd_h = get_thread (this); 73 thrd_c->last = this_coroutine(); 74 this_processor->current_coroutine = thrd_c; 73 thrd_c->last = this_coroutine; 75 74 76 LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);75 // LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h); 77 76 77 disable_interrupts(); 78 78 create_stack(&thrd_c->stack, thrd_c->stack.size); 79 this_coroutine = thrd_c; 79 80 CtxStart(this, CtxInvokeThread); 81 assert( thrd_c->last->stack.context ); 80 82 CtxSwitch( thrd_c->last->stack.context, thrd_c->stack.context ); 81 83 82 84 ScheduleThread(thrd_h); 85 enable_interrupts( DEBUG_CTX ); 83 86 } 84 87 85 88 void yield( void ) { 86 ScheduleInternal( this_processor->current_thread );89 BlockInternal( (thread_desc *)this_thread ); 87 90 } 88 91 … … 95 98 void ThreadCtxSwitch(coroutine_desc* src, coroutine_desc* dst) { 96 99 // set state of current coroutine to inactive 97 src->state = Inactive;100 src->state = src->state == Halted ? Halted : Inactive; 98 101 dst->state = Active; 99 102 … … 103 106 // set new coroutine that the processor is executing 104 107 // and context switch to it 105 this_processor->current_coroutine = dst; 108 this_coroutine = dst; 109 assert( src->stack.context ); 106 110 CtxSwitch( src->stack.context, dst->stack.context ); 107 this_ processor->current_coroutine = src;111 this_coroutine = src; 108 112 109 113 // set state of new coroutine to active 110 dst->state = Inactive;114 dst->state = dst->state == Halted ? Halted : Inactive; 111 115 src->state = Active; 112 116 } -
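Editorial note: thread start-up above is now bracketed by disable_interrupts()/enable_interrupts(), whose counting semantics appear in the preemption.c hunk earlier in this changeset. The plain-C sketch below only illustrates the nesting-counter idea (deferred work replayed on the outermost enable); it is not the libcfa implementation, and the printf stands in for the real BlockInternal call.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ushort disable_count = 0;
static bool pending_preemption = false;

static void disable_interrupts( void ) {
    atomic_fetch_add( &disable_count, 1 );
}

static void enable_interrupts( void ) {
    unsigned short prev = atomic_fetch_sub( &disable_count, 1 );
    if ( prev == 1 && pending_preemption ) {     // outermost enable
        pending_preemption = false;
        printf( "replaying deferred preemption\n" );
    }
}

int main( void ) {
    disable_interrupts();
    pending_preemption = true;                   // pretend a timer fired here
    disable_interrupts();
    enable_interrupts();                         // still nested: nothing happens
    enable_interrupts();                         // outermost: deferred work runs
    return 0;
}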
src/libcfa/exception.c
r1d776fd r9a1e509 10 10 // Created On : Mon Jun 26 15:13:00 2017 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Mon Nov 26 15:11:00 201713 // Update Count : 012 // Last Modified On : Tus Jul 11 16:36:00 2017 13 // Update Count : 1 14 14 // 15 15 … … 44 44 // RESUMPTION ================================================================ 45 45 46 void __cfaehm__throw_resum ption(exception * except) {46 void __cfaehm__throw_resume(exception * except) { 47 47 48 48 // DEBUG … … 65 65 66 66 // Fall back to termination: 67 __cfaehm__throw_terminat ion(except);67 __cfaehm__throw_terminate(except); 68 68 // TODO: Default handler for resumption. 69 69 } … … 111 111 } 112 112 113 void __cfaehm__throw_terminat ion( exception * val ) {113 void __cfaehm__throw_terminate( exception * val ) { 114 114 // Store the current exception 115 115 shared_stack.current_exception = *val; … … 147 147 148 148 // Nesting this the other way would probably be faster. 149 void __cfaehm__rethrow_terminat ion(void) {149 void __cfaehm__rethrow_terminate(void) { 150 150 // DEBUG 151 151 printf("Rethrowing termination exception\n"); 152 152 153 __cfaehm__throw_terminat ion(&shared_stack.current_exception);153 __cfaehm__throw_terminate(&shared_stack.current_exception); 154 154 } 155 155 -
src/libcfa/exception.h
r1d776fd r9a1e509 10 10 // Created On : Mon Jun 26 15:11:00 2017 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Mon Nov 26 15:11:00 201713 // Update Count : 012 // Last Modified On : Tus Jul 11 16:31:00 2017 13 // Update Count : 2 14 14 // 15 15 … … 22 22 23 23 #ifdef __CFORALL__ 24 extern " BuiltinC" {24 extern "C" { 25 25 #endif 26 26 27 27 // Used in throw statement translation. 28 void __cfaehm__throw_terminat ion(exception * except) __attribute__((noreturn));29 void __cfaehm__rethrow_terminat ion() __attribute__((noreturn));30 void __cfaehm__throw_resum ption(exception * except);28 void __cfaehm__throw_terminate(exception * except) __attribute__((noreturn)); 29 void __cfaehm__rethrow_terminate() __attribute__((noreturn)); 30 void __cfaehm__throw_resume(exception * except); 31 31 32 32 // Function catches termination exceptions. -
src/libcfa/fstream
r1d776fd r9a1e509 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Jul 1 16:37:53201713 // Update Count : 11 212 // Last Modified On : Fri Jul 7 08:32:38 2017 13 // Update Count : 117 14 14 // 15 15 16 #ifndef __FSTREAM_H__ 17 #define __FSTREAM_H__ 16 #pragma once 18 17 19 18 #include "iostream" 20 19 21 enum { sep arateSize = 16 };20 enum { sepSize = 16 }; 22 21 struct ofstream { 23 22 void * file; 24 23 _Bool sepDefault; 25 24 _Bool sepOnOff; 26 _Bool lastSepOn;25 _Bool sawNL; 27 26 const char * sepCur; 28 char separator[sep arateSize];29 char tupleSeparator[sep arateSize];27 char separator[sepSize]; 28 char tupleSeparator[sepSize]; 30 29 }; // ofstream 31 30 … … 36 35 const char * sepGetCur( ofstream * ); 37 36 void sepSetCur( ofstream *, const char * ); 38 _Bool lastSepOn( ofstream * ); 37 _Bool getNL( ofstream * ); 38 void setNL( ofstream *, _Bool ); 39 39 40 40 // public … … 75 75 extern ifstream * sin; 76 76 77 #endif // __FSTREAM_H__78 79 77 // Local Variables: // 80 78 // mode: c // -
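Editorial note: this header (and most of the headers below) drops its #ifndef guard in favour of #pragma once. The fragment below merely contrasts the two idioms; MYHEADER_H and shared_counter are made-up names, and #pragma once, while non-standard, is supported by gcc and clang.

/* myheader.h -- hypothetical header, classic include guard */
#ifndef MYHEADER_H
#define MYHEADER_H
extern int shared_counter;
#endif // MYHEADER_H

/* myheader.h -- same header, single-line form adopted by this changeset */
#pragma once
extern int shared_counter;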
src/libcfa/fstream.c
r1d776fd r9a1e509 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Jul 1 16:37:54201713 // Update Count : 2 4212 // Last Modified On : Thu Jul 6 18:38:25 2017 13 // Update Count : 251 14 14 // 15 15 … … 33 33 this->sepDefault = sepDefault; 34 34 this->sepOnOff = sepOnOff; 35 this->lastSepOn = false;36 35 sepSet( this, separator ); 37 36 sepSetCur( this, sepGet( this ) ); … … 40 39 41 40 // private 42 _Bool lastSepOn( ofstream * os ) { return os->lastSepOn; } 43 _Bool sepPrt( ofstream * os ) { os->lastSepOn = false; return os->sepOnOff; } 41 _Bool sepPrt( ofstream * os ) { setNL( os, false ); return os->sepOnOff; } 44 42 void sepReset( ofstream * os ) { os->sepOnOff = os->sepDefault; } 45 43 void sepReset( ofstream * os, _Bool reset ) { os->sepDefault = reset; os->sepOnOff = os->sepDefault; } 46 44 const char * sepGetCur( ofstream * os ) { return os->sepCur; } 47 45 void sepSetCur( ofstream * os, const char * sepCur ) { os->sepCur = sepCur; } 46 _Bool getNL( ofstream * os ) { return os->sawNL; } 47 void setNL( ofstream * os, _Bool state ) { os->sawNL = state; } 48 48 49 49 // public 50 void sepOn( ofstream * os ) { os-> lastSepOn = true; os->sepOnOff = true; }51 void sepOff( ofstream * os ) { os-> lastSepOn = false; os->sepOnOff = 0; }50 void sepOn( ofstream * os ) { os->sepOnOff = ! getNL( os ); } 51 void sepOff( ofstream * os ) { os->sepOnOff = false; } 52 52 53 53 _Bool sepDisable( ofstream *os ) { 54 54 _Bool temp = os->sepDefault; 55 55 os->sepDefault = false; 56 os->lastSepOn = false;57 56 sepReset( os ); 58 57 return temp; … … 69 68 void sepSet( ofstream * os, const char * s ) { 70 69 assert( s ); 71 strncpy( os->separator, s, sep arateSize - 1 );72 os->separator[sep arateSize - 1] = '\0';70 strncpy( os->separator, s, sepSize - 1 ); 71 os->separator[sepSize - 1] = '\0'; 73 72 } // sepSet 74 73 … … 76 75 void sepSetTuple( ofstream * os, const char * s ) { 77 76 assert( s ); 78 strncpy( os->tupleSeparator, s, sep arateSize - 1 );79 os->tupleSeparator[sep arateSize - 1] = '\0';77 strncpy( os->tupleSeparator, s, sepSize - 1 ); 78 os->tupleSeparator[sepSize - 1] = '\0'; 80 79 } // sepSet 81 80 … … 153 152 154 153 void open( ifstream * is, const char * name, const char * mode ) { 155 FILE * t= fopen( name, mode );156 if ( t == 0 ) {// do not change unless successful154 FILE *file = fopen( name, mode ); 155 if ( file == 0 ) { // do not change unless successful 157 156 fprintf( stderr, IO_MSG "open input file \"%s\", ", name ); 158 157 perror( 0 ); 159 158 exit( EXIT_FAILURE ); 160 159 } // if 161 is->file = t;160 is->file = file; 162 161 } // open 163 162 -
src/libcfa/gmp
r1d776fd r9a1e509 10 10 // Created On : Tue Apr 19 08:43:43 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat May 27 09:55:51201713 // Update Count : 1 412 // Last Modified On : Fri Jul 7 09:33:20 2017 13 // Update Count : 15 14 14 // 15 15 16 16 // https://gmplib.org/gmp-man-6.1.1.pdf 17 18 #pragma once 17 19 18 20 #include <gmp.h> // GNU multi-precise integers -
src/libcfa/iostream
r1d776fd r9a1e509 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sun Jul 2 08:42:56201713 // Update Count : 11 012 // Last Modified On : Fri Jul 7 08:35:59 2017 13 // Update Count : 118 14 14 // 15 15 16 #ifndef __IOSTREAM_H__ 17 #define __IOSTREAM_H__ 16 #pragma once 18 17 19 18 #include "iterator" … … 26 25 const char * sepGetCur( ostype * ); // get current separator string 27 26 void sepSetCur( ostype *, const char * ); // set current separator string 28 _Bool lastSepOn( ostype * ); // last manipulator is setOn (context sensitive) 27 _Bool getNL( ostype * ); // check newline 28 void setNL( ostype *, _Bool ); // saw newline 29 29 // public 30 30 void sepOn( ostype * ); // turn separator state on … … 82 82 forall( dtype ostype | ostream( ostype ) ) ostype * ?|?( ostype *, ostype * (*)( ostype * ) ); 83 83 forall( dtype ostype | ostream( ostype ) ) ostype * endl( ostype * ); 84 forall( dtype ostype | ostream( ostype ) ) ostype * sep( ostype * ); 85 forall( dtype ostype | ostream( ostype ) ) ostype * sepTuple( ostype * ); 84 86 forall( dtype ostype | ostream( ostype ) ) ostype * sepOn( ostype * ); 85 87 forall( dtype ostype | ostream( ostype ) ) ostype * sepOff( ostype * ); … … 137 139 forall( dtype istype | istream( istype ) ) istype * ?|?( istype *, _Istream_cstrC ); 138 140 139 #endif // __IOSTREAM_H140 141 141 // Local Variables: // 142 142 // mode: c // -
src/libcfa/iostream.c
r1d776fd r9a1e509 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sun Jul 2 08:54:02201713 // Update Count : 3 7512 // Last Modified On : Thu Jul 6 18:14:17 2017 13 // Update Count : 396 14 14 // 15 15 … … 18 18 extern "C" { 19 19 #include <stdio.h> 20 #include <stdbool.h> // true/false 20 21 #include <string.h> // strlen 21 22 #include <float.h> // DBL_DIG, LDBL_DIG … … 24 25 25 26 forall( dtype ostype | ostream( ostype ) ) 26 ostype * ?|?( ostype * os, char c ) { 27 fmt( os, "%c", c ); 27 ostype * ?|?( ostype * os, char ch ) { 28 fmt( os, "%c", ch ); 29 if ( ch == '\n' ) setNL( os, true ); 28 30 sepOff( os ); 29 31 return os; … … 180 182 181 183 // last character IS spacing or opening punctuation => turn off separator for next item 182 unsigned int len = strlen( cp ), posn = len - 1;183 ch = cp[ posn];// must make unsigned184 size_t len = strlen( cp ); 185 ch = cp[len - 1]; // must make unsigned 184 186 if ( sepPrt( os ) && mask[ ch ] != Open && mask[ ch ] != OpenClose ) { 185 187 sepOn( os ); … … 187 189 sepOff( os ); 188 190 } // if 191 if ( ch == '\n' ) setNL( os, true ); // check *AFTER* sepPrt call above as it resets NL flag 189 192 return write( os, cp, len ); 190 193 } // ?|? … … 216 219 217 220 forall( dtype ostype | ostream( ostype ) ) 221 ostype * sep( ostype * os ) { 222 os | sepGet( os ); 223 return os; 224 } // sep 225 226 forall( dtype ostype | ostream( ostype ) ) 227 ostype * sepTuple( ostype * os ) { 228 os | sepGetTuple( os ); 229 return os; 230 } // sepTuple 231 232 forall( dtype ostype | ostream( ostype ) ) 218 233 ostype * endl( ostype * os ) { 219 if ( lastSepOn( os ) ) fmt( os, "%s", sepGetCur( os ) );220 234 os | '\n'; 235 setNL( os, true ); 221 236 flush( os ); 222 237 sepOff( os ); // prepare for next line -
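Editorial note: the iostream.c changes above record whether the last character written was a newline (setNL/getNL) so the next item can suppress its leading separator. The small plain-C analogue below shows only that bookkeeping; saw_nl and put_item are illustrative names that do not exist in libcfa.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool saw_nl = false;

static void put_item( const char * s ) {
    if ( ! saw_nl ) fputs( " ", stdout );        // separator only mid-line
    fputs( s, stdout );
    size_t len = strlen( s );
    saw_nl = ( len > 0 && s[len - 1] == '\n' );  // check *after* printing
}

int main( void ) {
    saw_nl = true;                               // start of line: no separator
    put_item( "hello" );
    put_item( "world\n" );
    put_item( "next line" );                     // no separator after the newline
    putchar( '\n' );
    return 0;
}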
src/libcfa/iterator
r1d776fd r9a1e509 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Mar 2 18:06:05 201613 // Update Count : 912 // Last Modified On : Fri Jul 7 08:37:25 2017 13 // Update Count : 10 14 14 // 15 15 16 #ifndef ITERATOR_H 17 #define ITERATOR_H 16 #pragma once 18 17 19 18 // An iterator can be used to traverse a data structure. … … 39 38 40 39 forall( otype iterator_type, otype elt_type | iterator( iterator_type, elt_type ) ) 41 void for_each( iterator_type begin, iterator_type end, void (* func)( elt_type ) );40 void for_each( iterator_type begin, iterator_type end, void (* func)( elt_type ) ); 42 41 43 42 forall( otype iterator_type, otype elt_type | iterator( iterator_type, elt_type ) ) 44 void for_each_reverse( iterator_type begin, iterator_type end, void (*func)( elt_type ) ); 45 46 #endif // ITERATOR_H 43 void for_each_reverse( iterator_type begin, iterator_type end, void (* func)( elt_type ) ); 47 44 48 45 // Local Variables: // -
src/libcfa/iterator.c
r1d776fd r9a1e509 10 10 // Created On : Wed May 27 17:56:53 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Mar 2 18:08:11 201613 // Update Count : 2 712 // Last Modified On : Fri Jul 7 08:38:23 2017 13 // Update Count : 28 14 14 // 15 15 … … 17 17 18 18 forall( otype iterator_type, otype elt_type | iterator( iterator_type, elt_type ) ) 19 void for_each( iterator_type begin, iterator_type end, void (* func)( elt_type ) ) {19 void for_each( iterator_type begin, iterator_type end, void (* func)( elt_type ) ) { 20 20 for ( iterator_type i = begin; i != end; ++i ) { 21 21 func( *i ); 22 } 23 } 22 } // for 23 } // for_each 24 24 25 25 forall( otype iterator_type, otype elt_type | iterator( iterator_type, elt_type ) ) 26 void for_each_reverse( iterator_type begin, iterator_type end, void (* func)( elt_type ) ) {26 void for_each_reverse( iterator_type begin, iterator_type end, void (* func)( elt_type ) ) { 27 27 for ( iterator_type i = end; i != begin; ) { 28 28 --i; 29 29 func( *i ); 30 } 31 } 30 } // for 31 } // for_each_reverse 32 32 33 33 // Local Variables: // -
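Editorial note: for_each/for_each_reverse above are polymorphic over any type satisfying the iterator trait. The rough monomorphic C analogue below, over a plain pointer range, shows only the traversal shape; the names are placeholders.

#include <stdio.h>

static void for_each_int( int * begin, int * end, void (* func)( int ) ) {
    for ( int * i = begin; i != end; ++i ) func( *i );
}

static void for_each_int_reverse( int * begin, int * end, void (* func)( int ) ) {
    for ( int * i = end; i != begin; ) { --i; func( *i ); }
}

static void print_int( int v ) { printf( "%d ", v ); }

int main( void ) {
    int data[] = { 1, 2, 3, 4 };
    for_each_int( data, data + 4, print_int );           // prints: 1 2 3 4
    putchar( '\n' );
    for_each_int_reverse( data, data + 4, print_int );   // prints: 4 3 2 1
    putchar( '\n' );
    return 0;
}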
src/libcfa/libhdr/libalign.h
r1d776fd r9a1e509 1 // -*- Mode: C++ -*- 1 // -*- Mode: C++ -*- 2 2 // 3 3 // Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo … … 18 18 // Free Software Foundation; either version 2.1 of the License, or (at your 19 19 // option) any later version. 20 // 20 // 21 21 // This library is distributed in the hope that it will be useful, but WITHOUT 22 22 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 23 23 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License 24 24 // for more details. 25 // 25 // 26 26 // You should have received a copy of the GNU Lesser General Public License 27 27 // along with this library. 28 // 28 // 29 29 30 30 … … 33 33 34 34 #include "assert" 35 #include <stdbool.h> 35 36 36 // Minimum size used to align memory boundaries for memory allocations. 37 // Minimum size used to align memory boundaries for memory allocations. 37 38 #define libAlign() (sizeof(double)) 38 39 -
src/libcfa/libhdr/libdebug.h
r1d776fd r9a1e509 18 18 19 19 #ifdef __CFA_DEBUG__ 20 #define LIB_DEBUG_DO(x) x 21 #define LIB_NO_DEBUG_DO(x) ((void)0) 20 #define LIB_DEBUG_DO(...) __VA_ARGS__ 21 #define LIB_NO_DEBUG_DO(...) 22 #define DEBUG_CTX __PRETTY_FUNCTION__ 23 #define DEBUG_CTX2 , __PRETTY_FUNCTION__ 24 #define DEBUG_CTX_PARAM const char * caller 25 #define DEBUG_CTX_PARAM2 , const char * caller 22 26 #else 23 #define LIB_DEBUG_DO(x) ((void)0) 24 #define LIB_NO_DEBUG_DO(x) x 27 #define LIB_DEBUG_DO(...) 28 #define LIB_NO_DEBUG_DO(...) __VA_ARGS__ 29 #define DEBUG_CTX 30 #define DEBUG_CTX2 31 #define DEBUG_CTX_PARAM 32 #define DEBUG_CTX_PARAM2 25 33 #endif 26 34 … … 51 59 52 60 #ifdef __CFA_DEBUG_PRINT__ 53 #define LIB_DEBUG_WRITE( fd, buffer, len ) __lib_debug_write( fd, buffer, len ) 54 #define LIB_DEBUG_ACQUIRE() __lib_debug_acquire() 55 #define LIB_DEBUG_RELEASE() __lib_debug_release() 56 #define LIB_DEBUG_PRINT_SAFE(...) __lib_debug_print_safe (__VA_ARGS__) 57 #define LIB_DEBUG_PRINT_NOLOCK(...) __lib_debug_print_nolock (__VA_ARGS__) 58 #define LIB_DEBUG_PRINT_BUFFER(...) __lib_debug_print_buffer (__VA_ARGS__) 61 #define LIB_DEBUG_WRITE( fd, buffer, len ) __lib_debug_write( fd, buffer, len ) 62 #define LIB_DEBUG_ACQUIRE() __lib_debug_acquire() 63 #define LIB_DEBUG_RELEASE() __lib_debug_release() 64 #define LIB_DEBUG_PRINT_SAFE(...) __lib_debug_print_safe (__VA_ARGS__) 65 #define LIB_DEBUG_PRINT_NOLOCK(...) __lib_debug_print_nolock (__VA_ARGS__) 66 #define LIB_DEBUG_PRINT_BUFFER(...) __lib_debug_print_buffer (__VA_ARGS__) 67 #define LIB_DEBUG_PRINT_BUFFER_DECL(fd, ...) char text[256]; int len = snprintf( text, 256, __VA_ARGS__ ); __lib_debug_write( fd, text, len ); 68 #define LIB_DEBUG_PRINT_BUFFER_LOCAL(fd, ...) len = snprintf( text, 256, __VA_ARGS__ ); __lib_debug_write( fd, text, len ); 59 69 #else 60 #define LIB_DEBUG_WRITE(...) ((void)0) 61 #define LIB_DEBUG_ACQUIRE() ((void)0) 62 #define LIB_DEBUG_RELEASE() ((void)0) 63 #define LIB_DEBUG_PRINT_SAFE(...) ((void)0) 64 #define LIB_DEBUG_PRINT_NOLOCK(...) ((void)0) 65 #define LIB_DEBUG_PRINT_BUFFER(...) ((void)0) 70 #define LIB_DEBUG_WRITE(...) ((void)0) 71 #define LIB_DEBUG_ACQUIRE() ((void)0) 72 #define LIB_DEBUG_RELEASE() ((void)0) 73 #define LIB_DEBUG_PRINT_SAFE(...) ((void)0) 74 #define LIB_DEBUG_PRINT_NOLOCK(...) ((void)0) 75 #define LIB_DEBUG_PRINT_BUFFER(...) ((void)0) 76 #define LIB_DEBUG_PRINT_BUFFER_DECL(...) ((void)0) 77 #define LIB_DEBUG_PRINT_BUFFER_LOCAL(...) ((void)0) 66 78 #endif 67 79 -
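Editorial note: the debug macros above become variadic, so the guarded text may contain commas, and the new DEBUG_CTX/DEBUG_CTX_PARAM helpers thread the caller's name into debug builds only. The standalone sketch below uses the same pattern with made-up names (compile with or without -DMY_DEBUG; __PRETTY_FUNCTION__ is a gcc/clang extension).

#include <stdio.h>

#ifdef MY_DEBUG
#define DEBUG_DO(...) __VA_ARGS__
#define DEBUG_CTX_PARAM , const char * caller
#define DEBUG_CTX2 , __PRETTY_FUNCTION__
#else
#define DEBUG_DO(...)
#define DEBUG_CTX_PARAM
#define DEBUG_CTX2
#endif

// In debug builds the caller's name is an extra parameter; in release builds
// the parameter, the argument, and the print all disappear.
static void lock( int * l DEBUG_CTX_PARAM ) {
    *l = 1;
    DEBUG_DO( printf( "lock taken by %s\n", caller ); )  // comma inside is fine with __VA_ARGS__
}

int main( void ) {
    int l = 0;
    lock( &l DEBUG_CTX2 );
    return 0;
}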
src/libcfa/limits
r1d776fd r9a1e509 10 10 // Created On : Wed Apr 6 18:06:52 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Apr 6 21:08:16 201613 // Update Count : 612 // Last Modified On : Fri Jul 7 09:33:57 2017 13 // Update Count : 7 14 14 // 15 15 16 #ifndef LIMITS_H 17 #define LIMITS_H 16 #pragma once 18 17 19 18 // Integral Constants … … 110 109 extern const long _Complex _1_SQRT_2; // 1 / sqrt(2) 111 110 112 #endif // LIMITS_H113 114 111 // Local Variables: // 115 112 // mode: c // -
src/libcfa/math
r1d776fd r9a1e509 10 10 // Created On : Mon Apr 18 23:37:04 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed May 24 17:40:39 2017 13 // Update Count : 60 14 // 15 16 #ifndef MATH_H 17 #define MATH_H 12 // Last Modified On : Fri Jul 7 09:34:15 2017 13 // Update Count : 61 14 // 15 16 #pragma once 18 17 19 18 extern "C" { … … 345 344 long double scalbln( long double, long int ); 346 345 347 #endif // MATH_H348 349 346 // Local Variables: // 350 347 // mode: c // -
src/libcfa/rational
r1d776fd r9a1e509 12 12 // Created On : Wed Apr 6 17:56:25 2016 13 13 // Last Modified By : Peter A. Buhr 14 // Last Modified On : Mon May 15 21:30:12201715 // Update Count : 9 014 // Last Modified On : Fri Jul 7 09:34:33 2017 15 // Update Count : 93 16 16 // 17 17 18 #ifndef RATIONAL_H 19 #define RATIONAL_H 18 #pragma once 20 19 21 20 #include "iostream" … … 47 46 // implementation 48 47 49 forall ( otype RationalImpl | arithmetic( RationalImpl ) )48 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 50 49 struct Rational { 51 50 RationalImpl numerator, denominator; // invariant: denominator > 0 … … 54 53 // constructors 55 54 56 forall ( otype RationalImpl | arithmetic( RationalImpl ) )55 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 57 56 void ?{}( Rational(RationalImpl) * r ); 58 57 59 forall ( otype RationalImpl | arithmetic( RationalImpl ) )58 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 60 59 void ?{}( Rational(RationalImpl) * r, RationalImpl n ); 61 60 62 forall ( otype RationalImpl | arithmetic( RationalImpl ) )61 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 63 62 void ?{}( Rational(RationalImpl) * r, RationalImpl n, RationalImpl d ); 64 63 65 forall ( otype RationalImpl | arithmetic( RationalImpl ) )64 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 66 65 void ?{}( Rational(RationalImpl) * r, zero_t ); 67 66 68 forall ( otype RationalImpl | arithmetic( RationalImpl ) )67 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 69 68 void ?{}( Rational(RationalImpl) * r, one_t ); 70 69 71 // getter for numerator/denominator70 // numerator/denominator getter 72 71 73 forall ( otype RationalImpl | arithmetic( RationalImpl ) )72 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 74 73 RationalImpl numerator( Rational(RationalImpl) r ); 75 74 76 forall ( otype RationalImpl | arithmetic( RationalImpl ) )75 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 77 76 RationalImpl denominator( Rational(RationalImpl) r ); 78 forall ( otype RationalImpl | arithmetic( RationalImpl ) ) 77 78 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 79 79 [ RationalImpl, RationalImpl ] ?=?( * [ RationalImpl, RationalImpl ] dest, Rational(RationalImpl) src ); 80 80 81 // setter for numerator/denominator81 // numerator/denominator setter 82 82 83 forall ( otype RationalImpl | arithmetic( RationalImpl ) )83 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 84 84 RationalImpl numerator( Rational(RationalImpl) r, RationalImpl n ); 85 85 86 forall ( otype RationalImpl | arithmetic( RationalImpl ) )86 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 87 87 RationalImpl denominator( Rational(RationalImpl) r, RationalImpl d ); 88 88 89 89 // comparison 90 90 91 forall ( otype RationalImpl | arithmetic( RationalImpl ) )91 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 92 92 int ?==?( Rational(RationalImpl) l, Rational(RationalImpl) r ); 93 93 94 forall ( otype RationalImpl | arithmetic( RationalImpl ) )94 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 95 95 int ?!=?( Rational(RationalImpl) l, Rational(RationalImpl) r ); 96 96 97 forall ( otype RationalImpl | arithmetic( RationalImpl ) )97 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 98 98 int ?<?( Rational(RationalImpl) l, Rational(RationalImpl) r ); 99 99 100 forall ( otype RationalImpl | arithmetic( RationalImpl ) )100 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 101 101 int ?<=?( Rational(RationalImpl) l, 
Rational(RationalImpl) r ); 102 102 103 forall ( otype RationalImpl | arithmetic( RationalImpl ) )103 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 104 104 int ?>?( Rational(RationalImpl) l, Rational(RationalImpl) r ); 105 105 106 forall ( otype RationalImpl | arithmetic( RationalImpl ) )106 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 107 107 int ?>=?( Rational(RationalImpl) l, Rational(RationalImpl) r ); 108 108 109 109 // arithmetic 110 110 111 forall ( otype RationalImpl | arithmetic( RationalImpl ) )111 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 112 112 Rational(RationalImpl) +?( Rational(RationalImpl) r ); 113 113 114 forall ( otype RationalImpl | arithmetic( RationalImpl ) )114 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 115 115 Rational(RationalImpl) -?( Rational(RationalImpl) r ); 116 116 117 forall ( otype RationalImpl | arithmetic( RationalImpl ) )117 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 118 118 Rational(RationalImpl) ?+?( Rational(RationalImpl) l, Rational(RationalImpl) r ); 119 119 120 forall ( otype RationalImpl | arithmetic( RationalImpl ) )120 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 121 121 Rational(RationalImpl) ?-?( Rational(RationalImpl) l, Rational(RationalImpl) r ); 122 122 123 forall ( otype RationalImpl | arithmetic( RationalImpl ) )123 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 124 124 Rational(RationalImpl) ?*?( Rational(RationalImpl) l, Rational(RationalImpl) r ); 125 125 126 forall ( otype RationalImpl | arithmetic( RationalImpl ) )126 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 127 127 Rational(RationalImpl) ?/?( Rational(RationalImpl) l, Rational(RationalImpl) r ); 128 128 129 129 // conversion 130 forall ( otype RationalImpl | arithmetic( RationalImpl ) | { double convert( RationalImpl ); } )130 forall( otype RationalImpl | arithmetic( RationalImpl ) | { double convert( RationalImpl ); } ) 131 131 double widen( Rational(RationalImpl) r ); 132 forall ( otype RationalImpl | arithmetic( RationalImpl ) | { double convert( RationalImpl ); RationalImpl convert( double );} )132 forall( otype RationalImpl | arithmetic( RationalImpl ) | { double convert( RationalImpl ); RationalImpl convert( double );} ) 133 133 Rational(RationalImpl) narrow( double f, RationalImpl md ); 134 134 135 135 // I/O 136 forall ( otype RationalImpl | arithmetic( RationalImpl ) )136 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 137 137 forall( dtype istype | istream( istype ) | { istype * ?|?( istype *, RationalImpl * ); } ) 138 138 istype * ?|?( istype *, Rational(RationalImpl) * ); 139 139 140 forall ( otype RationalImpl | arithmetic( RationalImpl ) )140 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 141 141 forall( dtype ostype | ostream( ostype ) | { ostype * ?|?( ostype *, RationalImpl ); } ) 142 142 ostype * ?|?( ostype *, Rational(RationalImpl ) ); 143 144 #endif // RATIONAL_H145 143 146 144 // Local Variables: // -
src/libcfa/rational.c
r1d776fd r9a1e509 10 10 // Created On : Wed Apr 6 17:54:28 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon May 15 21:29:23201713 // Update Count : 1 4912 // Last Modified On : Tue May 16 18:35:36 2017 13 // Update Count : 150 14 14 // 15 15 … … 22 22 // Calculate greatest common denominator of two numbers, the first of which may be negative. Used to reduce rationals. 23 23 // alternative: https://en.wikipedia.org/wiki/Binary_GCD_algorithm 24 forall ( otype RationalImpl | arithmetic( RationalImpl ) )24 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 25 25 static RationalImpl gcd( RationalImpl a, RationalImpl b ) { 26 26 for ( ;; ) { // Euclid's algorithm … … 33 33 } // gcd 34 34 35 forall ( otype RationalImpl | arithmetic( RationalImpl ) )35 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 36 36 static RationalImpl simplify( RationalImpl * n, RationalImpl * d ) { 37 37 if ( *d == (RationalImpl){0} ) { … … 46 46 // constructors 47 47 48 forall ( otype RationalImpl | arithmetic( RationalImpl ) )48 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 49 49 void ?{}( Rational(RationalImpl) * r ) { 50 50 r{ (RationalImpl){0}, (RationalImpl){1} }; 51 51 } // rational 52 52 53 forall ( otype RationalImpl | arithmetic( RationalImpl ) )53 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 54 54 void ?{}( Rational(RationalImpl) * r, RationalImpl n ) { 55 55 r{ n, (RationalImpl){1} }; 56 56 } // rational 57 57 58 forall ( otype RationalImpl | arithmetic( RationalImpl ) )58 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 59 59 void ?{}( Rational(RationalImpl) * r, RationalImpl n, RationalImpl d ) { 60 60 RationalImpl t = simplify( &n, &d ); // simplify … … 66 66 // getter for numerator/denominator 67 67 68 forall ( otype RationalImpl | arithmetic( RationalImpl ) )68 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 69 69 RationalImpl numerator( Rational(RationalImpl) r ) { 70 70 return r.numerator; 71 71 } // numerator 72 72 73 forall ( otype RationalImpl | arithmetic( RationalImpl ) )73 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 74 74 RationalImpl denominator( Rational(RationalImpl) r ) { 75 75 return r.denominator; 76 76 } // denominator 77 77 78 forall ( otype RationalImpl | arithmetic( RationalImpl ) )78 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 79 79 [ RationalImpl, RationalImpl ] ?=?( * [ RationalImpl, RationalImpl ] dest, Rational(RationalImpl) src ) { 80 80 return *dest = src.[ numerator, denominator ]; … … 83 83 // setter for numerator/denominator 84 84 85 forall ( otype RationalImpl | arithmetic( RationalImpl ) )85 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 86 86 RationalImpl numerator( Rational(RationalImpl) r, RationalImpl n ) { 87 87 RationalImpl prev = r.numerator; … … 92 92 } // numerator 93 93 94 forall ( otype RationalImpl | arithmetic( RationalImpl ) )94 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 95 95 RationalImpl denominator( Rational(RationalImpl) r, RationalImpl d ) { 96 96 RationalImpl prev = r.denominator; … … 104 104 // comparison 105 105 106 forall ( otype RationalImpl | arithmetic( RationalImpl ) )106 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 107 107 int ?==?( Rational(RationalImpl) l, Rational(RationalImpl) r ) { 108 108 return l.numerator * r.denominator == l.denominator * r.numerator; 109 109 } // ?==? 
110 110 111 forall ( otype RationalImpl | arithmetic( RationalImpl ) )111 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 112 112 int ?!=?( Rational(RationalImpl) l, Rational(RationalImpl) r ) { 113 113 return ! ( l == r ); 114 114 } // ?!=? 115 115 116 forall ( otype RationalImpl | arithmetic( RationalImpl ) )116 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 117 117 int ?<?( Rational(RationalImpl) l, Rational(RationalImpl) r ) { 118 118 return l.numerator * r.denominator < l.denominator * r.numerator; 119 119 } // ?<? 120 120 121 forall ( otype RationalImpl | arithmetic( RationalImpl ) )121 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 122 122 int ?<=?( Rational(RationalImpl) l, Rational(RationalImpl) r ) { 123 123 return l.numerator * r.denominator <= l.denominator * r.numerator; 124 124 } // ?<=? 125 125 126 forall ( otype RationalImpl | arithmetic( RationalImpl ) )126 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 127 127 int ?>?( Rational(RationalImpl) l, Rational(RationalImpl) r ) { 128 128 return ! ( l <= r ); 129 129 } // ?>? 130 130 131 forall ( otype RationalImpl | arithmetic( RationalImpl ) )131 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 132 132 int ?>=?( Rational(RationalImpl) l, Rational(RationalImpl) r ) { 133 133 return ! ( l < r ); … … 137 137 // arithmetic 138 138 139 forall ( otype RationalImpl | arithmetic( RationalImpl ) )139 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 140 140 Rational(RationalImpl) +?( Rational(RationalImpl) r ) { 141 141 Rational(RationalImpl) t = { r.numerator, r.denominator }; … … 143 143 } // +? 144 144 145 forall ( otype RationalImpl | arithmetic( RationalImpl ) )145 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 146 146 Rational(RationalImpl) -?( Rational(RationalImpl) r ) { 147 147 Rational(RationalImpl) t = { -r.numerator, r.denominator }; … … 149 149 } // -? 150 150 151 forall ( otype RationalImpl | arithmetic( RationalImpl ) )151 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 152 152 Rational(RationalImpl) ?+?( Rational(RationalImpl) l, Rational(RationalImpl) r ) { 153 153 if ( l.denominator == r.denominator ) { // special case … … 160 160 } // ?+? 161 161 162 forall ( otype RationalImpl | arithmetic( RationalImpl ) )162 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 163 163 Rational(RationalImpl) ?-?( Rational(RationalImpl) l, Rational(RationalImpl) r ) { 164 164 if ( l.denominator == r.denominator ) { // special case … … 171 171 } // ?-? 172 172 173 forall ( otype RationalImpl | arithmetic( RationalImpl ) )173 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 174 174 Rational(RationalImpl) ?*?( Rational(RationalImpl) l, Rational(RationalImpl) r ) { 175 175 Rational(RationalImpl) t = { l.numerator * r.numerator, l.denominator * r.denominator }; … … 177 177 } // ?*? 
178 178 179 forall ( otype RationalImpl | arithmetic( RationalImpl ) )179 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 180 180 Rational(RationalImpl) ?/?( Rational(RationalImpl) l, Rational(RationalImpl) r ) { 181 181 if ( r.numerator < (RationalImpl){0} ) { … … 190 190 // conversion 191 191 192 forall ( otype RationalImpl | arithmetic( RationalImpl ) | { double convert( RationalImpl ); } )192 forall( otype RationalImpl | arithmetic( RationalImpl ) | { double convert( RationalImpl ); } ) 193 193 double widen( Rational(RationalImpl) r ) { 194 194 return convert( r.numerator ) / convert( r.denominator ); … … 196 196 197 197 // http://www.ics.uci.edu/~eppstein/numth/frap.c 198 forall ( otype RationalImpl | arithmetic( RationalImpl ) | { double convert( RationalImpl ); RationalImpl convert( double ); } )198 forall( otype RationalImpl | arithmetic( RationalImpl ) | { double convert( RationalImpl ); RationalImpl convert( double ); } ) 199 199 Rational(RationalImpl) narrow( double f, RationalImpl md ) { 200 200 if ( md <= (RationalImpl){1} ) { // maximum fractional digits too small? … … 227 227 // I/O 228 228 229 forall ( otype RationalImpl | arithmetic( RationalImpl ) )229 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 230 230 forall( dtype istype | istream( istype ) | { istype * ?|?( istype *, RationalImpl * ); } ) 231 231 istype * ?|?( istype * is, Rational(RationalImpl) * r ) { … … 238 238 } // ?|? 239 239 240 forall ( otype RationalImpl | arithmetic( RationalImpl ) )240 forall( otype RationalImpl | arithmetic( RationalImpl ) ) 241 241 forall( dtype ostype | ostream( ostype ) | { ostype * ?|?( ostype *, RationalImpl ); } ) 242 242 ostype * ?|?( ostype * os, Rational(RationalImpl ) r ) { -
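Editorial note: the rational arithmetic above keeps every value reduced with Euclid's algorithm and normalises the sign onto the numerator. The minimal plain-C reduction below is for illustration only and is not the generic libcfa implementation.

#include <stdio.h>

static long gcd( long a, long b ) {
    if ( a < 0 ) a = -a;                    // gcd is defined on magnitudes
    while ( b != 0 ) {                      // Euclid: gcd(a, b) == gcd(b, a mod b)
        long r = a % b;
        a = b;
        b = r;
    }
    return a;
}

static void simplify( long * n, long * d ) {
    if ( *d < 0 ) { *n = -*n; *d = -*d; }   // invariant: denominator > 0
    long g = gcd( *n, *d );
    if ( g != 0 ) { *n /= g; *d /= g; }
}

int main( void ) {
    long n = 6, d = -8;
    simplify( &n, &d );
    printf( "%ld/%ld\n", n, d );            // prints -3/4
    return 0;
}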
src/libcfa/stdlib
r1d776fd r9a1e509 10 10 // Created On : Thu Jan 28 17:12:35 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Jun 2 15:51:03 2017 13 // Update Count : 218 14 // 15 16 #ifndef STDLIB_H 17 #define STDLIB_H 12 // Last Modified On : Fri Jul 7 09:34:49 2017 13 // Update Count : 219 14 // 15 16 #pragma once 18 17 19 18 //--------------------------------------- … … 232 231 void swap( T * t1, T * t2 ); 233 232 234 #endif // STDLIB_H235 236 233 // Local Variables: // 237 234 // mode: c //