Changeset 139775e

- Timestamp: Nov 6, 2020, 4:48:52 PM (4 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 75baaa3
- Parents: 55acc3a (diff), 836c9925 (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

Files:
- 9 added
- 1 deleted
- 46 edited
- 1 moved

Legend:
- Unmodified
- Added
- Removed
benchmark/readyQ/cycle.cfa
```diff
--- r55acc3a
+++ r139775e
 #include "rq_bench.hfa"

-thread Partner {
-	Partner * partner;
+struct Partner {
 	unsigned long long count;
+	unsigned long long blocks;
+	bench_sem self;
+	bench_sem * next;
 };

 void ?{}( Partner & this ) {
-	((thread&)this){ bench_cluster };
+	this.count = this.blocks = 0;
 }

-void main( Partner & this ) {
-	this.count = 0;
+thread BThrd {
+	Partner & partner;
+};
+
+void ?{}( BThrd & this, Partner * partner ) {
+	((thread&)this){ bench_cluster };
+	&this.partner = partner;
+}
+
+void ^?{}( BThrd & mutex this ) {}
+
+void main( BThrd & thrd ) with(thrd.partner) {
+	count = 0;
 	for() {
-		park();
-		unpark( *this.partner );
-		this.count ++;
+		blocks += wait( self );
+		post( *next );
+		count ++;
 		if( clock_mode && stop) break;
-		if(!clock_mode && this.count >= stop_count) break;
+		if(!clock_mode && count >= stop_count) break;
 	}
…
 	{
 		unsigned long long global_counter = 0;
+		unsigned long long global_blocks = 0;
 		unsigned tthreads = nthreads * ring_size;
 		Time start, end;
…
 		{
 			threads_left = tthreads;
-			Partner threads[tthreads];
+			BThrd * threads[tthreads];
+			Partner thddata[tthreads];
 			for(i; tthreads) {
 				unsigned pi = (i + nthreads) % tthreads;
-				threads[i].partner = &threads[pi];
+				thddata[i].next = &thddata[pi].self;
+			}
+			for(int i = 0; i < tthreads; i++) {
+				threads[i] = malloc();
+				(*threads[i]){ &thddata[i] };
 			}
 			printf("Starting\n");
…
 			for(i; nthreads) {
-				unpark( threads[i] );
+				post( thddata[i].self );
 			}
 			wait(start, is_tty);
…
 			for(i; tthreads) {
-				global_counter += join( threads[i] ).count;
+				Partner & partner = join( *threads[i] ).partner;
+				global_counter += partner.count;
+				global_blocks  += partner.blocks;
+				delete(threads[i]);
 			}
 		}

-		printf("Duration (ms)       : %'ld\n", (end - start)`ms);
-		printf("Number of processors: %'d\n", nprocs);
-		printf("Number of threads   : %'d\n", tthreads);
-		printf("Cycle size (# thrds): %'d\n", ring_size);
-		printf("Yields per second   : %'18.2lf\n", ((double)global_counter) / (end - start)`s);
-		printf("ns per yields       : %'18.2lf\n", ((double)(end - start)`ns) / global_counter);
-		printf("Total yields        : %'15llu\n", global_counter);
-		printf("Yields per threads  : %'15llu\n", global_counter / tthreads);
-		printf("Yields per procs    : %'15llu\n", global_counter / nprocs);
-		printf("Yields/sec/procs    : %'18.2lf\n", (((double)global_counter) / nprocs) / (end - start)`s);
-		printf("ns per yields/procs : %'18.2lf\n", ((double)(end - start)`ns) / (global_counter / nprocs));
+		printf("Duration (ms)        : %'ld\n", (end - start)`dms);
+		printf("Number of processors : %'d\n", nprocs);
+		printf("Number of threads    : %'d\n", tthreads);
+		printf("Cycle size (# thrds) : %'d\n", ring_size);
+		printf("Total Operations(ops): %'15llu\n", global_counter);
+		printf("Total blocks         : %'15llu\n", global_blocks);
+		printf("Ops per second       : %'18.2lf\n", ((double)global_counter) / (end - start)`ds);
+		printf("ns per ops           : %'18.2lf\n", (end - start)`dns / global_counter);
+		printf("Ops per threads      : %'15llu\n", global_counter / tthreads);
+		printf("Ops per procs        : %'15llu\n", global_counter / nprocs);
+		printf("Ops/sec/procs        : %'18.2lf\n", (((double)global_counter) / nprocs) / (end - start)`ds);
+		printf("ns per ops/procs     : %'18.2lf\n", (end - start)`dns / (global_counter / nprocs));
 		fflush(stdout);
 	}
```
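For readers skimming the merge: the benchmark now splits the per-thread data (Partner) from the thread object (BThrd), and the ring is driven by bench_sem wait/post instead of park/unpark on the thread itself. Below is a minimal stand-alone sketch of the same token-passing ring in plain C with POSIX threads and semaphores, purely for illustration; RING, LAPS, and all other names here are assumptions, not part of the changeset.

```c
// Illustrative sketch of the cycle benchmark's structure: each thread
// blocks on its own semaphore and posts its successor's, so one token
// circulates around the ring.
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

#define RING 4         // threads in the cycle (assumed value)
#define LAPS 100000    // wait/post pairs per thread (assumed value)

typedef struct {
	sem_t self;                 // this thread blocks here
	sem_t * next;               // and wakes its successor here
	unsigned long long count;   // operations completed
} node_t;

static node_t nodes[RING];

static void * partner(void * arg) {
	node_t * n = arg;
	for (unsigned i = 0; i < LAPS; i++) {
		sem_wait(&n->self);     // block until predecessor posts
		sem_post(n->next);      // pass the token along the ring
		n->count++;
	}
	return NULL;
}

int main(void) {
	pthread_t thrds[RING];
	for (int i = 0; i < RING; i++) {
		sem_init(&nodes[i].self, 0, 0);
		nodes[i].next = &nodes[(i + 1) % RING].self;  // single ring; the harness's
		nodes[i].count = 0;                           // (i + nthreads) % tthreads wiring
	}                                                 // builds nthreads such rings
	for (int i = 0; i < RING; i++) pthread_create(&thrds[i], NULL, partner, &nodes[i]);
	sem_post(&nodes[0].self);   // inject one token to start the cycle
	for (int i = 0; i < RING; i++) pthread_join(thrds[i], NULL);
	unsigned long long total = 0;
	for (int i = 0; i < RING; i++) total += nodes[i].count;
	printf("Total Operations(ops): %llu\n", total);
	return 0;
}
```

In the changeset the harness likewise posts one semaphore per cycle (thddata[i].self for each of the nthreads rings), so at steady state each thread alternates exactly one wait and one post per counted operation.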
benchmark/readyQ/cycle.cpp
New file added in r139775e:

```cpp
#include "rq_bench.hpp"

struct Partner {
	unsigned long long count  = 0;
	unsigned long long blocks = 0;
	bench_sem self;
	bench_sem * next;
};

void partner_main( Partner * self ) {
	self->count = 0;
	for(;;) {
		self->blocks += self->self.wait();
		self->next->post();
		self->count ++;
		if( clock_mode && stop) break;
		if(!clock_mode && self->count >= stop_count) break;
	}

	__atomic_fetch_add(&threads_left, -1, __ATOMIC_SEQ_CST);
}

int main(int argc, char * argv[]) {
	unsigned ring_size = 2;
	option_t opt[] = {
		BENCH_OPT,
		{ 'r', "ringsize", "Number of threads in a cycle", ring_size }
	};
	BENCH_OPT_PARSE("cforall cycle benchmark");

	{
		unsigned long long global_counter = 0;
		unsigned long long global_blocks  = 0;
		unsigned tthreads = nthreads * ring_size;
		uint64_t start, end;
		FibreInit(1, nprocs);
		{
			threads_left = tthreads;
			Fibre * threads[tthreads];
			Partner thddata[tthreads];
			for(int i = 0; i < tthreads; i++) {
				unsigned pi = (i + nthreads) % tthreads;
				thddata[i].next = &thddata[pi].self;
			}
			for(int i = 0; i < tthreads; i++) {
				threads[i] = new Fibre( reinterpret_cast<void (*)(void *)>(partner_main), &thddata[i] );
			}
			printf("Starting\n");

			bool is_tty = isatty(STDOUT_FILENO);
			start = getTimeNsec();

			for(int i = 0; i < nthreads; i++) {
				thddata[i].self.post();
			}
			wait(start, is_tty);

			stop = true;
			end = getTimeNsec();
			printf("\nDone\n");

			for(int i = 0; i < tthreads; i++) {
				fibre_join( threads[i], nullptr );
				global_counter += thddata[i].count;
				global_blocks  += thddata[i].blocks;
			}
		}

		printf("Duration (ms)        : %'ld\n", to_miliseconds(end - start));
		printf("Number of processors : %'d\n", nprocs);
		printf("Number of threads    : %'d\n", tthreads);
		printf("Cycle size (# thrds) : %'d\n", ring_size);
		printf("Total Operations(ops): %'15llu\n", global_counter);
		printf("Total blocks         : %'15llu\n", global_blocks);
		printf("Ops per second       : %'18.2lf\n", ((double)global_counter) / to_fseconds(end - start));
		printf("ns per ops           : %'18.2lf\n", ((double)(end - start)) / global_counter);
		printf("Ops per threads      : %'15llu\n", global_counter / tthreads);
		printf("Ops per procs        : %'15llu\n", global_counter / nprocs);
		printf("Ops/sec/procs        : %'18.2lf\n", (((double)global_counter) / nprocs) / to_fseconds(end - start));
		printf("ns per ops/procs     : %'18.2lf\n", ((double)(end - start)) / (global_counter / nprocs));
		fflush(stdout);
	}

	return 0;
}
```
benchmark/readyQ/cycle.go
```diff
--- r55acc3a
+++ r139775e

 import (
-	"bufio"
 	"flag"
 	"fmt"
-	"os"
-	"runtime"
 	"sync/atomic"
 	"time"
…
 	"golang.org/x/text/message"
 )
-
-var clock_mode bool
-var threads_left int64
-var stop int32
-var duration float64
-var stop_count uint64
-
-func fflush(f *bufio.Writer) {
-	defer f.Flush()
-	f.Write([]byte("\r"))
-}
-
-func wait(start time.Time, is_tty bool) {
-	f := bufio.NewWriter(os.Stdout)
-	tdur := time.Duration(duration)
-	for true {
-		time.Sleep(100 * time.Millisecond)
-		end := time.Now()
-		delta := end.Sub(start)
-		if is_tty {
-			fmt.Printf(" %.1f", delta.Seconds())
-			fflush(f)
-		}
-		if clock_mode && delta >= (tdur * time.Second) {
-			break
-		} else if !clock_mode && atomic.LoadInt64(&threads_left) == 0 {
-			break
-		}
-	}
-}

 func partner(result chan uint64, mine chan int, next chan int) {
…

 func main() {
-	var nprocs int
-	var nthreads int
 	var ring_size int

-	nprocsOpt := flag.Int("p", 1, "The number of processors")
-	nthreadsOpt := flag.Int("t", 1, "The number of threads")
 	ring_sizeOpt := flag.Int("r", 2, "The number of threads per cycles")
-	durationOpt := flag.Float64("d", 0, "Duration of the experiment in seconds")
-	stopOpt := flag.Uint64("i", 0, "Duration of the experiment in iterations")

-	flag.Parse()
+	bench_init()

-	nprocs = *nprocsOpt
-	nthreads = *nthreadsOpt
 	ring_size = *ring_sizeOpt
-	duration = *durationOpt
-	stop_count = *stopOpt

-	if duration > 0 && stop_count > 0 {
-		panic(fmt.Sprintf("--duration and --iterations cannot be used together\n"))
-	} else if duration > 0 {
-		clock_mode = true
-		stop_count = 0xFFFFFFFFFFFFFFFF
-		fmt.Printf("Running for %f seconds\n", duration)
-	} else if stop_count > 0 {
-		clock_mode = false
-		fmt.Printf("Running for %d iterations\n", stop_count)
-	} else {
-		duration = 5
-		clock_mode = true
-		fmt.Printf("Running for %f seconds\n", duration)
-	}
-
-	runtime.GOMAXPROCS(nprocs)
 	tthreads := nthreads * ring_size
 	threads_left = int64(tthreads)
…

 	p := message.NewPrinter(language.English)
-	p.Printf("Duration (ms)        : %f\n", delta.Seconds());
-	p.Printf("Number of processors : %d\n", nprocs);
-	p.Printf("Number of threads    : %d\n", tthreads);
-	p.Printf("Cycle size (# thrds) : %d\n", ring_size);
-	p.Printf("Yields per second    : %18.2f\n", float64(global_counter) / delta.Seconds())
-	p.Printf("ns per yields        : %18.2f\n", float64(delta.Nanoseconds()) / float64(global_counter))
-	p.Printf("Total yields         : %15d\n", global_counter)
-	p.Printf("Yields per threads   : %15d\n", global_counter / uint64(tthreads))
-	p.Printf("Yields per procs     : %15d\n", global_counter / uint64(nprocs))
-	p.Printf("Yields/sec/procs     : %18.2f\n", (float64(global_counter) / float64(nprocs)) / delta.Seconds())
-	p.Printf("ns per yields/procs  : %18.2f\n", float64(delta.Nanoseconds()) / (float64(global_counter) / float64(nprocs)))
+	p.Printf("Duration (ms)        : %f\n", delta.Seconds());
+	p.Printf("Number of processors : %d\n", nprocs);
+	p.Printf("Number of threads    : %d\n", tthreads);
+	p.Printf("Cycle size (# thrds) : %d\n", ring_size);
+	p.Printf("Total Operations(ops): %15d\n", global_counter)
+	p.Printf("Ops per second       : %18.2f\n", float64(global_counter) / delta.Seconds())
+	p.Printf("ns per ops           : %18.2f\n", float64(delta.Nanoseconds()) / float64(global_counter))
+	p.Printf("Ops per threads      : %15d\n", global_counter / uint64(tthreads))
+	p.Printf("Ops per procs        : %15d\n", global_counter / uint64(nprocs))
+	p.Printf("Ops/sec/procs        : %18.2f\n", (float64(global_counter) / float64(nprocs)) / delta.Seconds())
+	p.Printf("ns per ops/procs     : %18.2f\n", float64(delta.Nanoseconds()) / (float64(global_counter) / float64(nprocs)))

 }
```
benchmark/readyQ/rq_bench.hfa
```diff
--- r55acc3a
+++ r139775e

 void ^?{}( BenchCluster & this ) {
-	adelete( this.nprocs, this.procs );
+	adelete( this.procs );
 	^(this.cl){};
 }
…
 	}
 }
+
+struct __attribute__((aligned(128))) bench_sem {
+	struct $thread * volatile ptr;
+};
+
+static inline {
+	void ?{}(bench_sem & this) {
+		this.ptr = 0p;
+	}
+
+	void ^?{}(bench_sem & this) {}
+
+	bool wait(bench_sem & this) {
+		for() {
+			struct $thread * expected = this.ptr;
+			if(expected == 1p) {
+				if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+					return false;
+				}
+			}
+			else {
+				/* paranoid */ verify( expected == 0p );
+				if(__atomic_compare_exchange_n(&this.ptr, &expected, active_thread(), false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+					park();
+					return true;
+				}
+			}
+
+		}
+	}
+
+	bool post(bench_sem & this) {
+		for() {
+			struct $thread * expected = this.ptr;
+			if(expected == 1p) return false;
+			if(expected == 0p) {
+				if(__atomic_compare_exchange_n(&this.ptr, &expected, 1p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+					return false;
+				}
+			}
+			else {
+				if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+					unpark( expected );
+					return true;
+				}
+			}
+		}
+	}
+}
```
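The new bench_sem packs a binary semaphore into one pointer word: 0p means no token and no waiter, 1p means one banked token, and any other value is the single parked thread (the wait path verifies expected == 0p before publishing itself, so at most one waiter is supported). Below is a hedged C11 sketch of the same three-state CAS protocol; park()/unpark() are simulated here with a per-thread POSIX semaphore, where the changeset uses the Cforall runtime primitives, and every name is illustrative.

```c
// Hedged sketch, assuming a single waiter per semaphore as in the benchmark.
#include <stdatomic.h>
#include <stdbool.h>
#include <semaphore.h>

typedef struct thread_handle { sem_t parked; } thread_handle;  // sem_init(&h.parked, 0, 0) before use

#define EMPTY  ((thread_handle *)0)   // plays the role of 0p: no token, nobody waiting
#define POSTED ((thread_handle *)1)   // plays the role of 1p: one token banked

typedef struct { _Atomic(thread_handle *) ptr; } bench_sem;    // initialise ptr to EMPTY

static void park(thread_handle * self)   { sem_wait(&self->parked); }
static void unpark(thread_handle * thrd) { sem_post(&thrd->parked); }

// Returns true if the caller actually blocked (the benchmark counts these).
static bool wait_sem(bench_sem * s, thread_handle * self) {
	for (;;) {
		thread_handle * expected = atomic_load(&s->ptr);
		if (expected == POSTED) {       // token available: consume it without blocking
			if (atomic_compare_exchange_weak(&s->ptr, &expected, EMPTY))
				return false;
		} else {                        // publish ourselves, then sleep
			if (atomic_compare_exchange_weak(&s->ptr, &expected, self)) {
				park(self);
				return true;
			}
		}
	}
}

// Returns true if a blocked thread was woken.
static bool post_sem(bench_sem * s) {
	for (;;) {
		thread_handle * expected = atomic_load(&s->ptr);
		if (expected == POSTED) return false;       // token already banked
		if (expected == EMPTY) {                    // nobody waiting: bank a token
			if (atomic_compare_exchange_weak(&s->ptr, &expected, POSTED))
				return false;
		} else {                                    // somebody is parked: hand off and wake
			if (atomic_compare_exchange_weak(&s->ptr, &expected, EMPTY)) {
				unpark(expected);
				return true;
			}
		}
	}
}
```

The wait() return value is what feeds the new "Total blocks" statistic: it distinguishes a blocking wait from one satisfied by a banked token.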
benchmark/rmit.py
```diff
--- r55acc3a
+++ r139775e

 import argparse
+import datetime
 import itertools
 import os
…
 	return nopts

+def actions_eta(actions):
+	time = 0
+	for a in actions:
+		i = 0
+		while i < len(a):
+			if a[i] == '-d':
+				i += 1
+				if i != len(a):
+					time += int(a[i])
+			i += 1
+	return time
+
 if __name__ == "__main__":
 	# ================================================================================
…
 	# ================================================================================
 	# Prepare to run
+
+	# find expected time
+	time = actions_eta(actions)
+	print("Running {} trials{}".format(len(actions), "" if time == 0 else " (expecting to take {}".format(str(datetime.timedelta(seconds=int(time)))) ))
+
 	random.shuffle(actions)
-
-	print("Running {} trials".format(len(actions)))
 	result = []

…
 		d = [r[0], r[1]]
 		for k in headers[2:]:
-			d.append(r[2][k])
+			try:
+				d.append(r[2][k])
+			except:
+				d.append(0.0)

 		data.append(d)
```
configure.ac
```diff
--- r55acc3a
+++ r139775e
 # New AST toggling support
 AH_TEMPLATE([CFA_USE_NEW_AST],[Sets whether or not to use the new-ast, this is a default value and can be overrided by --old-ast and --new-ast])
+DEFAULT_NEW_AST="False"
 AC_ARG_ENABLE(new-ast,
 	[  --enable-new-ast        whether or not to use new ast as the default AST algorithm],
 	[case "${enableval}" in
-		yes) newast=true  ;;
-		no)  newast=false ;;
+		yes) newast=true ; DEFAULT_NEW_AST="True" ;;
+		no)  newast=false; DEFAULT_NEW_AST="False" ;;
 		*) AC_MSG_ERROR([bad value ${enableval} for --enable-new-ast]) ;;
 	esac],[newast=false])
 AC_DEFINE_UNQUOTED([CFA_USE_NEW_AST], $newast)
+AC_SUBST(DEFAULT_NEW_AST)

 #==============================================================================
```
libcfa/src/concurrency/alarm.cfa
```diff
--- r55acc3a
+++ r139775e
 //=============================================================================================

-void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period ) with( this ) {
+void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period) with( this ) {
 	this.thrd = thrd;
 	this.alarm = alarm;
 	this.period = period;
 	set = false;
-	kernel_alarm = false;
+	type = User;
 }
…
 	this.period = period;
 	set = false;
-	kernel_alarm = true;
+	type = Kernel;
+}
+void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period, Alarm_Callback callback ) with( this ) {
+	this.thrd = thrd;
+	this.alarm = alarm;
+	this.period = period;
+	this.callback = callback;
+	set = false;
+	type = Callback;
 }
```
libcfa/src/concurrency/alarm.hfa
```diff
--- r55acc3a
+++ r139775e
 //=============================================================================================

+enum alarm_type { Kernel = 0, User = 1, Callback = 2 };
+
+struct alarm_node_t;
+
+typedef void (*Alarm_Callback)(alarm_node_t & );
+
 struct alarm_node_t {
 	Time alarm;		// time when alarm goes off
…
 	};

+	Alarm_Callback callback;
+
 	bool set		:1;	// whether or not the alarm has be registered
-	bool kernel_alarm	:1;	// true if this is not a user defined alarm
+	enum alarm_type type;	// true if this is not a user defined alarm
 };
 DLISTED_MGD_IMPL_OUT(alarm_node_t)
…
 void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period );
 void ?{}( alarm_node_t & this, processor * proc, Time alarm, Duration period );
+void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period, Alarm_Callback callback );
 void ^?{}( alarm_node_t & this );
```
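Widening the kernel_alarm bit into the three-valued alarm_type, together with the stored Alarm_Callback pointer, lets an alarm run arbitrary code when it fires; the condition-variable timeout in locks.cfa below is the consumer this merge adds. The real CFA tick handler is not part of this changeset, so the following C sketch of dispatch on a tagged node is illustrative only.

```c
// Hedged sketch: how a tagged alarm node could be dispatched when it fires.
// struct thread / struct processor stand in for the runtime's $thread / processor.
struct thread;
struct processor;
struct alarm_node;

typedef enum { Kernel = 0, User = 1, Callback = 2 } alarm_type;
typedef void (*Alarm_Callback)(struct alarm_node *);

struct alarm_node {
	alarm_type type;            // replaces the old bool kernel_alarm
	struct thread * thrd;       // meaningful for User alarms
	struct processor * proc;    // meaningful for Kernel alarms
	Alarm_Callback callback;    // meaningful for Callback alarms
};

static void alarm_fired(struct alarm_node * node) {
	switch (node->type) {
	case Kernel:   /* force a preemption on node->proc */ break;
	case User:     /* wake node->thrd */                  break;
	case Callback: node->callback(node);                  break;  // e.g. a condition-variable timeout
	}
}
```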
libcfa/src/concurrency/coroutine.cfa
```diff
--- r55acc3a
+++ r139775e
 void ^?{}($coroutine& this) {
 	if(this.state != Halted && this.state != Start && this.state != Primed) {
-		$coroutine * src = TL_GET( this_thread )->curr_cor;
+		$coroutine * src = active_coroutine();
 		$coroutine * dst = &this;

…

 struct $coroutine * __cfactx_cor_finish(void) {
-	struct $coroutine * cor = kernelTLS.this_thread->curr_cor;
+	struct $coroutine * cor = active_coroutine();

 	if(cor->state == Primed) {
```
libcfa/src/concurrency/coroutine.hfa
```diff
--- r55acc3a
+++ r139775e
 void prime(T & cor);

-static inline struct $coroutine * active_coroutine() { return TL_GET( this_thread )->curr_cor; }
+static inline struct $coroutine * active_coroutine() { return active_thread()->curr_cor; }

 //-----------------------------------------------------------------------------
…

 	// set new coroutine that task is executing
-	TL_GET( this_thread )->curr_cor = dst;
+	active_thread()->curr_cor = dst;

 	// context switch to specified coroutine
…
 	// will also migrate which means this value will
 	// stay in syn with the TLS
-	$coroutine * src = TL_GET( this_thread )->curr_cor;
+	$coroutine * src = active_coroutine();

 	assertf( src->last != 0,
…
 	// will also migrate which means this value will
 	// stay in syn with the TLS
-	$coroutine * src = TL_GET( this_thread )->curr_cor;
+	$coroutine * src = active_coroutine();
 	$coroutine * dst = get_coroutine(cor);

 	if( unlikely(dst->context.SP == 0p) ) {
-		TL_GET( this_thread )->curr_cor = dst;
+		active_thread()->curr_cor = dst;
 		__stack_prepare(&dst->stack, 65000);
 		__cfactx_start(main, dst, cor, __cfactx_invoke_coroutine);
-		TL_GET( this_thread )->curr_cor = src;
+		active_thread()->curr_cor = src;
 	}

…
 	// will also migrate which means this value will
 	// stay in syn with the TLS
-	$coroutine * src = TL_GET( this_thread )->curr_cor;
+	$coroutine * src = active_coroutine();

 	// not resuming self ?
```
libcfa/src/concurrency/exception.cfa
```diff
--- r55acc3a
+++ r139775e
 	void * stop_param;

-	struct $thread * this_thread = TL_GET( this_thread );
+	struct $thread * this_thread = active_thread();
 	if ( &this_thread->self_cor != this_thread->curr_cor ) {
 		struct $coroutine * cor = this_thread->curr_cor;
```
libcfa/src/concurrency/io.cfa
```diff
--- r55acc3a
+++ r139775e

 static inline bool next( __leaderlock_t & this ) {
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );
 	struct $thread * nextt;
 	for() {
…
 // This is NOT thread-safe
 static [int, bool] __drain_io( & struct __io_data ring ) {
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );

 	unsigned to_submit = 0;
…
 		return;
 	}
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );
 	__STATS__( true,
 		io.submit_q.leader += 1;
…

 	#if defined(LEADER_LOCK)
-		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+		/* paranoid */ verify( ! __preemption_enabled() );
 		next(ring.submit_q.submit_lock);
 	#else
```
libcfa/src/concurrency/io/setup.cfa
```diff
--- r55acc3a
+++ r139775e
 		id.full_proc = false;
 		id.id = doregister(&id);
+		__cfaabi_tls.this_proc_id = &id;
 		__cfaabi_dbg_print_safe( "Kernel : IO poller thread starting\n" );

…
 				__cfadbg_print_safe(io_core, "Kernel I/O : Unparking io poller %p\n", io_ctx);
 				#if !defined( __CFA_NO_STATISTICS__ )
-					kernelTLS.this_stats = io_ctx->self.curr_cluster->stats;
+					__cfaabi_tls.this_stats = io_ctx->self.curr_cluster->stats;
 				#endif
-				__post( io_ctx->sem, &id );
+				post( io_ctx->sem );
 			}
 		}
…
 	if( thrd.state == Ready || thrd.preempted != __NO_PREEMPTION ) {

-		ready_schedule_lock( (struct __processor_id_t *)active_processor() );
+		ready_schedule_lock();

 		// This is the tricky case
…
 		thrd.preempted = __NO_PREEMPTION;

-		ready_schedule_unlock( (struct __processor_id_t *)active_processor() );
+		ready_schedule_unlock();

 		// Pretend like the thread was blocked all along
…
 		}
 	} else {
-		unpark( &thrd );
+		post( this.thrd.sem );
 	}

```
libcfa/src/concurrency/kernel.cfa
```diff
--- r55acc3a
+++ r139775e
 static $thread * __next_thread_slow(cluster * this);
 static void __run_thread(processor * this, $thread * dst);
-static void __wake_one( struct __processor_id_t * id, cluster * cltr);
+static void __wake_one(cluster * cltr);

 static void push  (__cluster_idles & idles, processor & proc);
…
 	// Because of a bug, we couldn't initialized the seed on construction
 	// Do it here
-	kernelTLS.rand_seed ^= rdtscl();
-	kernelTLS.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
+	__cfaabi_tls.rand_seed ^= rdtscl();
+	__cfaabi_tls.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
 	__tls_rand_advance_bck();

…
 	// and it make sense for it to be set in all other cases except here
 	// fake it
-	kernelTLS.this_thread = mainThread;
+	__cfaabi_tls.this_thread = mainThread;
 }

…
 // from the processor coroutine to the target thread
 static void __run_thread(processor * this, $thread * thrd_dst) {
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
 	/* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next );
…

 		// Update global state
-		kernelTLS.this_thread = thrd_dst;
-
-		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-		/* paranoid */ verify( kernelTLS.this_thread == thrd_dst );
+		kernelTLS().this_thread = thrd_dst;
+
+		/* paranoid */ verify( ! __preemption_enabled() );
+		/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
 		/* paranoid */ verify( thrd_dst->context.SP );
 		/* paranoid */ verify( thrd_dst->state != Halted );
…
 		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
 		/* paranoid */ verify( thrd_dst->context.SP );
-		/* paranoid */ verify( kernelTLS.this_thread == thrd_dst );
-		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+		/* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
+		/* paranoid */ verify( ! __preemption_enabled() );

 		// Reset global state
-		kernelTLS.this_thread = 0p;
+		kernelTLS().this_thread = 0p;

 		// We just finished running a thread, there are a few things that could have happened.
…
 			if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
 				// The thread was preempted, reschedule it and reset the flag
-				__schedule_thread( (__processor_id_t*)this, thrd_dst );
+				__schedule_thread( thrd_dst );
 				break RUNNING;
 			}
…
 	proc_cor->state = Active;

-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );
 }

 // KERNEL_ONLY
 void returnToKernel() {
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-	$coroutine * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
-	$thread * thrd_src = kernelTLS.this_thread;
+	/* paranoid */ verify( ! __preemption_enabled() );
+	$coroutine * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
+	$thread * thrd_src = kernelTLS().this_thread;

 	#if !defined(__CFA_NO_STATISTICS__)
-		struct processor * last_proc = kernelTLS.this_processor;
+		struct processor * last_proc = kernelTLS().this_processor;
 	#endif

…

 	#if !defined(__CFA_NO_STATISTICS__)
-		if(last_proc != kernelTLS.this_processor) {
+		if(last_proc != kernelTLS().this_processor) {
 			__tls_stats()->ready.threads.migration++;
 		}
 	#endif

-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );
 	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too large.\n", thrd_src );
…
 // Scheduler routines
 // KERNEL ONLY
-void __schedule_thread( struct __processor_id_t * id, $thread * thrd ) {
+void __schedule_thread( $thread * thrd ) {
+	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( thrd );
 	/* paranoid */ verify( thrd->state != Halted );
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( kernelTLS().this_proc_id );
 	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
 	/* paranoid */ 	if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
…
 	if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

-	ready_schedule_lock  ( id );
+	ready_schedule_lock();
 	push( thrd->curr_cluster, thrd );
-	__wake_one( id, thrd->curr_cluster);
-	ready_schedule_unlock( id );
-
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	__wake_one(thrd->curr_cluster);
+	ready_schedule_unlock();
+
+	/* paranoid */ verify( ! __preemption_enabled() );
 }

 // KERNEL ONLY
 static inline $thread * __next_thread(cluster * this) with( *this ) {
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-
-	ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify( kernelTLS().this_proc_id );
+
+	ready_schedule_lock();
 	$thread * thrd = pop( this );
-	ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
-
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	ready_schedule_unlock();
+
+	/* paranoid */ verify( kernelTLS().this_proc_id );
+	/* paranoid */ verify( ! __preemption_enabled() );
 	return thrd;
 }
…
 // KERNEL ONLY
 static inline $thread * __next_thread_slow(cluster * this) with( *this ) {
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-
-	ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify( kernelTLS().this_proc_id );
+
+	ready_schedule_lock();
 	$thread * thrd = pop_slow( this );
-	ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
-
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	ready_schedule_unlock();
+
+	/* paranoid */ verify( kernelTLS().this_proc_id );
+	/* paranoid */ verify( ! __preemption_enabled() );
 	return thrd;
 }

-// KERNEL ONLY unpark with out disabling interrupts
-void __unpark( struct __processor_id_t * id, $thread * thrd ) {
+void unpark( $thread * thrd ) {
+	if( !thrd ) return;
+
 	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
 	switch(old_ticket) {
…
 			/* paranoid */ verify( thrd->state == Blocked );

-			// Wake lost the race,
-			__schedule_thread( id, thrd );
+			{
+				/* paranoid */ verify( publicTLS_get(this_proc_id) );
+				bool full = publicTLS_get(this_proc_id)->full_proc;
+				if(full) disable_interrupts();
+
+				/* paranoid */ verify( ! __preemption_enabled() );
+
+				// Wake lost the race,
+				__schedule_thread( thrd );
+
+				/* paranoid */ verify( ! __preemption_enabled() );
+
+				if(full) enable_interrupts( __cfaabi_dbg_ctx );
+				/* paranoid */ verify( publicTLS_get(this_proc_id) );
+			}
+
 			break;
 		default:
…
 	}
 }

-void unpark( $thread * thrd ) {
-	if( !thrd ) return;
-
+void park( void ) {
+	/* paranoid */ verify( __preemption_enabled() );
 	disable_interrupts();
-	__unpark( (__processor_id_t*)kernelTLS.this_processor, thrd );
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
+
+	returnToKernel();
+
+	/* paranoid */ verify( ! __preemption_enabled() );
 	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
-void park( void ) {
-	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
-	disable_interrupts();
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-	/* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION );
-
-	returnToKernel();
-
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-	enable_interrupts( __cfaabi_dbg_ctx );
-	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( __preemption_enabled() );

 }
…
 // Should never return
 void __cfactx_thrd_leave() {
-	$thread * thrd = TL_GET( this_thread );
+	$thread * thrd = active_thread();
 	$monitor * this = &thrd->self_mon;

…

 	thrd->state = Halted;
 	if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
 	if( thrd != this->owner || this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }

 	// Leave the thread
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );
 	returnToKernel();
 	abort();
…
 // KERNEL ONLY
 bool force_yield( __Preemption_Reason reason ) {
-	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( __preemption_enabled() );
 	disable_interrupts();
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-
-	$thread * thrd = kernelTLS.this_thread;
+	/* paranoid */ verify( ! __preemption_enabled() );
+
+	$thread * thrd = kernelTLS().this_thread;
 	/* paranoid */ verify(thrd->state == Active);

…
 	}

-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );
 	enable_interrupts_noPoll();
-	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( __preemption_enabled() );

 	return preempted;
…
 //=============================================================================================
 // Wake a thread from the front if there are any
-static void __wake_one( struct __processor_id_t * id, cluster * this) {
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-	/* paranoid */ verify( ready_schedule_islocked( id ) );
+static void __wake_one(cluster * this) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify( ready_schedule_islocked() );

 	// Check if there is a sleeping processor
…
 	#endif

-	/* paranoid */ verify( ready_schedule_islocked( id ) );
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ready_schedule_islocked() );
+	/* paranoid */ verify( ! __preemption_enabled() );

 	return;
…

 	disable_interrupts();
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );
 	post( this->idle );
 	enable_interrupts( __cfaabi_dbg_ctx );
…

 static void push  (__cluster_idles & this, processor & proc) {
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );
 	lock( this );
 	this.idle++;
…
 	insert_first(this.list, proc);
 	unlock( this );
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );
 }

 static void remove(__cluster_idles & this, processor & proc) {
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );
 	lock( this );
 	this.idle--;
…
 	remove(proc);
 	unlock( this );
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );
 }

…
 	}

-	return kernelTLS.this_thread;
+	return __cfaabi_tls.this_thread;
 }
…

 int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
-	return get_coroutine(kernelTLS.this_thread) == get_coroutine(mainThread) ? 4 : 2;
+	return get_coroutine(kernelTLS().this_thread) == get_coroutine(mainThread) ? 4 : 2;
 }
…
 	if ( count < 0 ) {
 		// queue current task
-		append( waiting, kernelTLS.this_thread );
+		append( waiting, active_thread() );

 		// atomically release spin lock and block
…
 	void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]) {
 		this.prev_name = prev_name;
-		this.prev_thrd = kernelTLS.this_thread;
+		this.prev_thrd = kernelTLS().this_thread;
 	}
 }
…
 		this.print_halts = true;
 	}
+
+	void print_stats_now( cluster & this, int flags ) {
+		__print_stats( this.stats, this.print_stats, true, this.name, (void*)&this );
+	}
 #endif
 // Local Variables: //
```
libcfa/src/concurrency/kernel.hfa
```diff
--- r55acc3a
+++ r139775e
 static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }

-static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE
-static inline struct cluster   * active_cluster  () { return TL_GET( this_processor )->cltr; }
+static inline struct processor * active_processor() { return publicTLS_get( this_processor ); } // UNSAFE
+static inline struct cluster   * active_cluster  () { return publicTLS_get( this_processor )->cltr; }

 #if !defined(__CFA_NO_STATISTICS__)
+	void print_stats_now( cluster & this, int flags );
+
 	static inline void print_stats_at_exit( cluster & this, int flags ) {
 		this.print_stats |= flags;
```
libcfa/src/concurrency/kernel/fwd.hfa
```diff
--- r55acc3a
+++ r139775e
 extern "Cforall" {
 	extern __attribute__((aligned(128))) thread_local struct KernelThreadData {
-		struct $thread * volatile this_thread;
-		struct processor * volatile this_processor;
-		struct __stats_t * volatile this_stats;
+		struct $thread          * volatile this_thread;
+		struct processor        * volatile this_processor;
+		struct __processor_id_t * volatile this_proc_id;
+		struct __stats_t        * volatile this_stats;

 		struct {
…
 			uint64_t bck_seed;
 		} ready_rng;
-	} kernelTLS __attribute__ ((tls_model ( "initial-exec" )));
+	} __cfaabi_tls __attribute__ ((tls_model ( "initial-exec" )));

+	extern bool __preemption_enabled();

+	static inline KernelThreadData & kernelTLS( void ) {
+		/* paranoid */ verify( ! __preemption_enabled() );
+		return __cfaabi_tls;
+	}
+
+	extern uintptr_t __cfatls_get( unsigned long int member );
+	// #define publicTLS_get( member ) ((typeof(__cfaabi_tls.member))__cfatls_get( __builtin_offsetof(KernelThreadData, member) ))
+	#define publicTLS_get( member ) (__cfaabi_tls.member)
+	// extern forall(otype T) T __cfatls_get( T * member, T value );
+	// #define publicTLS_set( member, value ) __cfatls_set( (typeof(member)*)__builtin_offsetof(KernelThreadData, member), value );
+
 	static inline uint64_t __tls_rand() {
 		#if defined(__SIZEOF_INT128__)
-			return __lehmer64( kernelTLS.rand_seed );
+			return __lehmer64( kernelTLS().rand_seed );
 		#else
-			return __xorshift64( kernelTLS.rand_seed );
+			return __xorshift64( kernelTLS().rand_seed );
 		#endif
 	}
…
 	static inline unsigned __tls_rand_fwd() {
-		kernelTLS.ready_rng.fwd_seed = (A * kernelTLS.ready_rng.fwd_seed + C) & (M - 1);
-		return kernelTLS.ready_rng.fwd_seed >> D;
+		kernelTLS().ready_rng.fwd_seed = (A * kernelTLS().ready_rng.fwd_seed + C) & (M - 1);
+		return kernelTLS().ready_rng.fwd_seed >> D;
 	}

 	static inline unsigned __tls_rand_bck() {
-		unsigned int r = kernelTLS.ready_rng.bck_seed >> D;
-		kernelTLS.ready_rng.bck_seed = AI * (kernelTLS.ready_rng.bck_seed - C) & (M - 1);
+		unsigned int r = kernelTLS().ready_rng.bck_seed >> D;
+		kernelTLS().ready_rng.bck_seed = AI * (kernelTLS().ready_rng.bck_seed - C) & (M - 1);
 		return r;
 	}
…
 	static inline void __tls_rand_advance_bck(void) {
-		kernelTLS.ready_rng.bck_seed = kernelTLS.ready_rng.fwd_seed;
+		kernelTLS().ready_rng.bck_seed = kernelTLS().ready_rng.fwd_seed;
 	}
 }

-#if 0 // def __ARM_ARCH
-	// function prototypes are only really used by these macros on ARM
-	void disable_global_interrupts();
-	void enable_global_interrupts();
-
-	#define TL_GET( member ) ( { __typeof__( kernelTLS.member ) target; \
-		disable_global_interrupts(); \
-		target = kernelTLS.member; \
-		enable_global_interrupts(); \
-		target; } )
-	#define TL_SET( member, value ) disable_global_interrupts(); \
-		kernelTLS.member = value; \
-		enable_global_interrupts();
-#else
-	#define TL_GET( member ) kernelTLS.member
-	#define TL_SET( member, value ) kernelTLS.member = value;
-#endif

 	extern void disable_interrupts();
…
 	extern void park( void );
 	extern void unpark( struct $thread * this );
-	static inline struct $thread * active_thread () { return TL_GET( this_thread ); }
+	static inline struct $thread * active_thread () {
+		struct $thread * t = publicTLS_get( this_thread );
+		/* paranoid */ verify( t );
+		return t;
+	}

 	extern bool force_yield( enum __Preemption_Reason );
…
 	#if !defined(__CFA_NO_STATISTICS__)
 		static inline struct __stats_t * __tls_stats() {
-			/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-			/* paranoid */ verify( kernelTLS.this_stats );
-			return kernelTLS.this_stats;
+			/* paranoid */ verify( ! __preemption_enabled() );
+			/* paranoid */ verify( kernelTLS().this_stats );
+			return kernelTLS().this_stats;
 		}
```
libcfa/src/concurrency/kernel/startup.cfa
```diff
--- r55acc3a
+++ r139775e
 //-----------------------------------------------------------------------------
 // Global state
-thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) @= {
+thread_local struct KernelThreadData __cfaabi_tls __attribute__ ((tls_model ( "initial-exec" ))) @= {
 	NULL, // cannot use 0p
 	NULL,
+	NULL,
 	NULL,
…
 // Kernel boot procedures
 static void __kernel_startup(void) {
-	verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );
 	__cfadbg_print_safe(runtime_core, "Kernel : Starting\n");

…

 	//initialize the global state variables
-	kernelTLS.this_processor = mainProcessor;
-	kernelTLS.this_thread = mainThread;
+	__cfaabi_tls.this_processor = mainProcessor;
+	__cfaabi_tls.this_proc_id = (__processor_id_t*)mainProcessor;
+	__cfaabi_tls.this_thread = mainThread;

 	#if !defined( __CFA_NO_STATISTICS__ )
-		kernelTLS.this_stats = (__stats_t *)& storage_mainProcStats;
-		__init_stats( kernelTLS.this_stats );
+		__cfaabi_tls.this_stats = (__stats_t *)& storage_mainProcStats;
+		__init_stats( __cfaabi_tls.this_stats );
 	#endif

…
 	// Add the main thread to the ready queue
 	// once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
-	__schedule_thread( (__processor_id_t *)mainProcessor, mainThread);
+	__schedule_thread(mainThread);

 	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
 	// context. Hence, the main thread does not begin through __cfactx_invoke_thread, like all other threads. The trick here is that
 	// mainThread is on the ready queue when this call is made.
-	__kernel_first_resume( kernelTLS.this_processor );
+	__kernel_first_resume( __cfaabi_tls.this_processor );

…
 	__cfadbg_print_safe(runtime_core, "Kernel : Started\n--------------------------------------------------\n\n");

-	verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );
 	enable_interrupts( __cfaabi_dbg_ctx );
-	verify( TL_GET( preemption_state.enabled ) );
+	/* paranoid */ verify( __preemption_enabled() );
+
 }

…
 	mainCluster->io.ctxs = 0p;

-	/* paranoid */ verify( TL_GET( preemption_state.enabled ) );
+	/* paranoid */ verify( __preemption_enabled() );
 	disable_interrupts();
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );

 	__cfadbg_print_safe(runtime_core, "\n--------------------------------------------------\nKernel : Shutting down\n");
…
 	// which is currently here
 	__atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
-	__kernel_last_resume( kernelTLS.this_processor );
+	__kernel_last_resume( __cfaabi_tls.this_processor );
 	mainThread->self_cor.state = Halted;

…
 		__stats_t local_stats;
 		__init_stats( &local_stats );
-		kernelTLS.this_stats = &local_stats;
+		__cfaabi_tls.this_stats = &local_stats;
 	#endif

 	processor * proc = (processor *) arg;
-	kernelTLS.this_processor = proc;
-	kernelTLS.this_thread = 0p;
-	kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
+	__cfaabi_tls.this_processor = proc;
+	__cfaabi_tls.this_proc_id = (__processor_id_t*)proc;
+	__cfaabi_tls.this_thread = 0p;
+	__cfaabi_tls.preemption_state.[enabled, disable_count] = [false, 1];
 	// SKULLDUGGERY: We want to create a context for the processor coroutine
 	// which is needed for the 2-step context switch. However, there is no reason
…

 	//Set global state
-	kernelTLS.this_thread = 0p;
+	__cfaabi_tls.this_thread = 0p;

 	//We now have a proper context from which to schedule threads
…
 	$coroutine * dst = get_coroutine(this->runner);

-	verify( ! kernelTLS.preemption_state.enabled );
-
-	kernelTLS.this_thread->curr_cor = dst;
+	/* paranoid */ verify( ! __preemption_enabled() );
+
+	__cfaabi_tls.this_thread->curr_cor = dst;
 	__stack_prepare( &dst->stack, 65000 );
 	__cfactx_start(main, dst, this->runner, __cfactx_invoke_coroutine);

-	verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );

 	dst->last = &src->self_cor;
…
 	/* paranoid */ verify(src->state == Active);

-	verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! __preemption_enabled() );
 }

…
 	$coroutine * dst = get_coroutine(this->runner);

-	verify( ! kernelTLS.preemption_state.enabled );
-	verify( dst->starter == src );
-	verify( dst->context.SP );
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify( dst->starter == src );
+	/* paranoid */ verify( dst->context.SP );

 	// SKULLDUGGERY in debug the processors check that the
…

 	P( terminated );
-	verify( kernelTLS.this_processor != &this);
+	/* paranoid */ verify( active_processor() != &this);
 }

…
 #if defined(__CFA_WITH_VERIFY__)
 static bool verify_fwd_bck_rng(void) {
-	kernelTLS.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&verify_fwd_bck_rng);
+	__cfaabi_tls.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&verify_fwd_bck_rng);

 	unsigned values[10];
```
libcfa/src/concurrency/kernel_private.hfa
```diff
--- r55acc3a
+++ r139775e
 }

-void __schedule_thread( struct __processor_id_t *, $thread * )
+void __schedule_thread( $thread * )
 #if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
-	__attribute__((nonnull (2)))
+	__attribute__((nonnull (1)))
 #endif
 ;
+
+extern bool __preemption_enabled();

 //release/wake-up the following resources
…
 )

-// KERNEL ONLY unpark with out disabling interrupts
-void __unpark( struct __processor_id_t *, $thread * thrd );
-
 #define TICKET_BLOCKED (-1) // thread is blocked
 #define TICKET_RUNNING ( 0) // thread is running
 #define TICKET_UNBLOCK ( 1) // thread should ignore next block
-
-static inline bool __post(single_sem & this, struct __processor_id_t * id) {
-	for() {
-		struct $thread * expected = this.ptr;
-		if(expected == 1p) return false;
-		if(expected == 0p) {
-			if(__atomic_compare_exchange_n(&this.ptr, &expected, 1p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-				return false;
-			}
-		}
-		else {
-			if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-				__unpark( id, expected );
-				return true;
-			}
-		}
-	}
-}

 //-----------------------------------------------------------------------------
…
 // Reader side : acquire when using the ready queue to schedule but not
 //  creating/destroying queues
-static inline void ready_schedule_lock( struct __processor_id_t * proc) with(*__scheduler_lock) {
-	unsigned iproc = proc->id;
-	/*paranoid*/ verify(data[iproc].handle == proc);
+static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify( kernelTLS().this_proc_id );
+
+	unsigned iproc = kernelTLS().this_proc_id->id;
+	/*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
 	/*paranoid*/ verify(iproc < ready);

…
 }

-static inline void ready_schedule_unlock( struct __processor_id_t * proc) with(*__scheduler_lock) {
-	unsigned iproc = proc->id;
-	/*paranoid*/ verify(data[iproc].handle == proc);
+static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify( kernelTLS().this_proc_id );
+
+	unsigned iproc = kernelTLS().this_proc_id->id;
+	/*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
 	/*paranoid*/ verify(iproc < ready);
 	/*paranoid*/ verify(data[iproc].lock);
…

 #ifdef __CFA_WITH_VERIFY__
-	static inline bool ready_schedule_islocked( struct __processor_id_t * proc) {
+	static inline bool ready_schedule_islocked(void) {
+		/* paranoid */ verify( ! __preemption_enabled() );
+		/*paranoid*/ verify( kernelTLS().this_proc_id );
+		__processor_id_t * proc = kernelTLS().this_proc_id;
 		return __scheduler_lock->data[proc->id].owned;
 	}
```
libcfa/src/concurrency/locks.cfa
```diff
--- r55acc3a
+++ r139775e
 	this.t = t;
 	this.lock = 0p;
+	this.listed = false;
 }

…
 	this.info = info;
 	this.lock = 0p;
+	this.listed = false;
 }

…

 void lock( blocking_lock & this ) with( this ) {
+	$thread * thrd = active_thread();
 	lock( lock __cfaabi_dbg_ctx2 );
-	if ( owner == kernelTLS.this_thread && !multi_acquisition) {
+	if ( owner == thrd && !multi_acquisition) {
 		fprintf(stderr, "A single acquisition lock holder attempted to reacquire the lock resulting in a deadlock."); // Possibly throw instead
-
-	} else if ( owner != 0p && owner != kernelTLS.this_thread ) {
-		append( blocked_threads, kernelTLS.this_thread );
+		exit(EXIT_FAILURE);
+	} else if ( owner != 0p && owner != thrd ) {
+		append( blocked_threads, thrd );
 		wait_count++;
 		unlock( lock );
-		park( __cfaabi_dbg_ctx );
-	} else if ( owner == kernelTLS.this_thread && multi_acquisition ) {
+		park( );
+	} else if ( owner == thrd && multi_acquisition ) {
 		recursion_count++;
 		unlock( lock );
 	} else {
-		owner = kernelTLS.this_thread;
+		owner = thrd;
 		recursion_count = 1;
 		unlock( lock );
…

 bool try_lock( blocking_lock & this ) with( this ) {
+	$thread * thrd = active_thread();
 	bool ret = false;
 	lock( lock __cfaabi_dbg_ctx2 );
 	if ( owner == 0p ) {
-		owner = kernelTLS.this_thread;
+		owner = thrd;
 		if ( multi_acquisition ) recursion_count = 1;
 		ret = true;
-	} else if ( owner == kernelTLS.this_thread && multi_acquisition ) {
+	} else if ( owner == thrd && multi_acquisition ) {
 		recursion_count++;
 		ret = true;
…
 		fprintf( stderr, "There was an attempt to release a lock that isn't held" );
 		return;
-	} else if ( strict_owner && owner != kernelTLS.this_thread ) {
+	} else if ( strict_owner && active_thread() ) {
 		fprintf( stderr, "A thread other than the owner attempted to release an owner lock" );
 		return;
…
 		recursion_count = ( thrd && multi_acquisition ? 1 : 0 );
 		wait_count--;
-		unpark( thrd __cfaabi_dbg_ctx2 );
+		unpark( thrd );
 	}
 	unlock( lock );
…
 		owner = t;
 		if ( multi_acquisition ) recursion_count = 1;
-		unpark( t __cfaabi_dbg_ctx2 );
+		#if !defined( __CFA_NO_STATISTICS__ )
+			kernelTLS.this_stats = t->curr_cluster->stats;
+		#endif
+		unpark( t );
 		unlock( lock );
 	}
…
 	if ( owner == 0p ){ // no owner implies lock isn't held
 		fprintf( stderr, "A lock that is not held was passed to a synchronization lock" );
-	} else if ( strict_owner && owner != kernelTLS.this_thread ) {
+	} else if ( strict_owner && active_thread() ) {
 		fprintf( stderr, "A thread other than the owner of a lock passed it to a synchronization lock" );
 	} else {
…
 		recursion_count = ( thrd && multi_acquisition ? 1 : 0 );
 		wait_count--;
-		unpark( thrd __cfaabi_dbg_ctx2 );
+		unpark( thrd );
 	}
 	unlock( lock );
…
 ///////////////////////////////////////////////////////////////////

-// In an ideal world this may not be necessary
-// Is it possible for nominal inheritance to inherit traits??
-// If that occurs we would avoid all this extra code
+// This is temporary until an inheritance bug is fixed

 void lock( mutex_lock & this ){
…

 ///////////////////////////////////////////////////////////////////
-//// Synchronization Locks
+//// condition variable
 ///////////////////////////////////////////////////////////////////

 forall(dtype L | is_blocking_lock(L)) {
-	void ?{}( synchronization_lock(L) & this, bool reacquire_after_signal ){
+
+	void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {
+		// This condition_variable member is called from the kernel, and therefore, cannot block, but it can spin.
+		lock( cond->lock __cfaabi_dbg_ctx2 );
+		if ( (*i)->listed ) {			// is thread on queue
+			info_thread(L) * copy = *i;
+			remove( cond->blocked_threads, i );	// remove this thread O(1)
+			cond->wait_count--;
+			if( !copy->lock ) {
+				unlock( cond->lock );
+				#if !defined( __CFA_NO_STATISTICS__ )
+					#warning unprotected access to tls TODO discuss this
+					kernelTLS.this_stats = copy->t->curr_cluster->stats;
+				#endif
+				unpark( copy->t );
+			} else {
+				add_(*copy->lock, copy->t);	// call lock's add_
+			}
+		}
+		unlock( cond->lock );
+	}
+
+	void alarm_node_wrap_cast( alarm_node_t & a ) {
+		timeout_handler( (alarm_node_wrap(L) &)a );
+	}
+
+	void ?{}( condition_variable(L) & this ){
 		this.lock{};
 		this.blocked_threads{};
 		this.count = 0;
-		this.reacquire_after_signal = reacquire_after_signal;
-	}
-
-	void ^?{}( synchronization_lock(L) & this ){
-		// default
-	}
-
-	void ?{}( condition_variable(L) & this ){
-		((synchronization_lock(L) &)this){ true };
 	}

…
 	}

-	void ?{}( thread_queue(L) & this ){
-		((synchronization_lock(L) &)this){ false };
-	}
-
-	void ^?{}( thread_queue(L) & this ){
+	void ?{}( alarm_node_wrap(L) & this, $thread * thrd, Time alarm, Duration period, Alarm_Callback callback ) {
+		this.alarm_node{ thrd, alarm, period, callback };
+	}
+
+	void ^?{}( alarm_node_wrap(L) & this ) {
 		// default
 	}

-	bool notify_one( synchronization_lock(L) & this ) with( this ) {
+	bool notify_one( condition_variable(L) & this ) with( this ) {
 		lock( lock __cfaabi_dbg_ctx2 );
 		bool ret = !!blocked_threads;
 		info_thread(L) * popped = pop_head( blocked_threads );
+		popped->listed = false;
 		if(popped != 0p) {
-			if( reacquire_after_signal ){
+			count--;
+			if (popped->lock) {
 				add_(*popped->lock, popped->t);
 			} else {
-				unpark(
-					popped->t __cfaabi_dbg_ctx2
-				);
+				unpark(popped->t);
 			}
 		}
…
 	}

-	bool notify_all( synchronization_lock(L) & this ) with(this) {
+	bool notify_all( condition_variable(L) & this ) with(this) {
 		lock( lock __cfaabi_dbg_ctx2 );
 		bool ret = blocked_threads ? true : false;
 		while( blocked_threads ) {
 			info_thread(L) * popped = pop_head( blocked_threads );
+			popped->listed = false;
 			if(popped != 0p){
-				if( reacquire_after_signal ){
+				count--;
+				if (popped->lock) {
 					add_(*popped->lock, popped->t);
 				} else {
-					unpark(
-						popped->t __cfaabi_dbg_ctx2
-					);
+					unpark(popped->t);
 				}
 			}
…
 	}

-	uintptr_t front( synchronization_lock(L) & this ) with(this) {
-		return (*peek(blocked_threads)).info;
-	}
-
-	bool empty( synchronization_lock(L) & this ) with(this) {
+	uintptr_t front( condition_variable(L) & this ) with(this) {
+		if(!blocked_threads) return NULL;
+		return peek(blocked_threads)->info;
+	}
+
+	bool empty( condition_variable(L) & this ) with(this) {
 		return blocked_threads ? false : true;
 	}

-	int counter( synchronization_lock(L) & this ) with(this) {
+	int counter( condition_variable(L) & this ) with(this) {
 		return count;
 	}

-	void queue_info_thread( synchronization_lock(L) & this, info_thread(L) & i ) with(this) {
-		lock( lock __cfaabi_dbg_ctx2 );
-		append( blocked_threads, &i );
-		count++;
-		unlock( lock );
-		park( __cfaabi_dbg_ctx );
-	}
-
-	void wait( synchronization_lock(L) & this ) with(this) {
-		info_thread( L ) i = { kernelTLS.this_thread };
-		queue_info_thread( this, i );
-	}
-
-	void wait( synchronization_lock(L) & this, uintptr_t info ) with(this) {
-		info_thread( L ) i = { kernelTLS.this_thread, info };
-		queue_info_thread( this, i );
-	}
-	// I still need to implement the time delay wait routines
-	bool wait( synchronization_lock(L) & this, Duration duration ) with(this) {
-		timeval tv = { time(0) };
-		Time t = { tv };
-		return wait( this, t + duration );
-	}
-
-	bool wait( synchronization_lock(L) & this, uintptr_t info, Duration duration ) with(this) {
-		// TODO: ADD INFO
-		return wait( this, duration );
-	}
-
-	bool wait( synchronization_lock(L) & this, Time time ) with(this) {
-		return false; //default
-	}
-
-	bool wait( synchronization_lock(L) & this, uintptr_t info, Time time ) with(this) {
-		// TODO: ADD INFO
-		return wait( this, time );
-	}
-
-	void queue_info_thread_unlock( synchronization_lock(L) & this, L & l, info_thread(L) & i ) with(this) {
+	// helper for wait()'s' without a timeout
+	void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
 		lock( lock __cfaabi_dbg_ctx2 );
 		append( this.blocked_threads, &i );
 		count++;
-		i.lock = &l;
-		size_t recursion_count = get_recursion_count(l);
-		remove_( l );
-		unlock( lock );
-		park( __cfaabi_dbg_ctx ); // blocks here
-
-		set_recursion_count(l, recursion_count); // resets recursion count here after waking
-	}
-
-	void wait( synchronization_lock(L) & this, L & l ) with(this) {
-		info_thread(L) i = { kernelTLS.this_thread };
-		queue_info_thread_unlock( this, l, i );
-	}
-
-	void wait( synchronization_lock(L) & this, L & l, uintptr_t info ) with(this) {
-		info_thread(L) i = { kernelTLS.this_thread, info };
-		queue_info_thread_unlock( this, l, i );
-	}
-
-	bool wait( synchronization_lock(L) & this, L & l, Duration duration ) with(this) {
-		timeval tv = { time(0) };
-		Time t = { tv };
-		return wait( this, l, t + duration );
-	}
-
-	bool wait( synchronization_lock(L) & this, L & l, uintptr_t info, Duration duration ) with(this) {
-		// TODO: ADD INFO
-		return wait( this, l, duration );
-	}
-
-	bool wait( synchronization_lock(L) & this, L & l, Time time ) with(this) {
-		return false; //default
-	}
-
-	bool wait( synchronization_lock(L) & this, L & l, uintptr_t info, Time time ) with(this) {
-		// TODO: ADD INFO
-		return wait( this, l, time );
-	}
-}
-
-///////////////////////////////////////////////////////////////////
-//// condition lock alternative approach
-///////////////////////////////////////////////////////////////////
-
-// the solution below is less efficient but does not require the lock to have a specific add/remove routine
-
-///////////////////////////////////////////////////////////////////
-//// is_simple_lock
-///////////////////////////////////////////////////////////////////
-
-forall(dtype L | is_simple_lock(L)) {
-	void ?{}( condition_lock(L) & this ){
-		// default
-	}
-
-	void ^?{}( condition_lock(L) & this ){
-		// default
-	}
-
-	bool notify_one( condition_lock(L) & this ) with(this) {
-		return notify_one( c_var );
-	}
-
-	bool notify_all( condition_lock(L) & this ) with(this) {
-		return notify_all( c_var );
-	}
-
-	void wait( condition_lock(L) & this, L & l ) with(this) {
-		lock( m_lock );
-		size_t recursion = get_recursion_count( l );
-		unlock( l );
-		wait( c_var, m_lock );
-		lock( l );
-		set_recursion_count( l , recursion );
-		unlock( m_lock );
-	}
-}
+		i.listed = true;
+		size_t recursion_count;
+		if (i.lock) {
+			recursion_count = get_recursion_count(*i.lock);
+			remove_( *i.lock );
+		}
+
+		unlock( lock );
+		park( ); // blocks here
+
+		if (i.lock) set_recursion_count(*i.lock, recursion_count); // resets recursion count here after waking
+	}
+
+	// helper for wait()'s' with a timeout
+	void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Time t ) with(this) {
+		lock( lock __cfaabi_dbg_ctx2 );
+
+		info_thread(L) * queue_ptr = &info;
+
+		alarm_node_wrap(L) node_wrap = { info.t, t, 0`s, alarm_node_wrap_cast };
+		node_wrap.cond = &this;
+		node_wrap.i = &queue_ptr;
+
+		register_self( &node_wrap.alarm_node );
+
+		append( blocked_threads, queue_ptr );
+		info.listed = true;
+		count++;
+
+		size_t recursion_count;
+		if (info.lock) {
+			recursion_count = get_recursion_count(*info.lock);
+			remove_( *info.lock );
+		}
+
+		unlock( lock );
+		park();
+
+		if (info.lock) set_recursion_count(*info.lock, recursion_count);
+	}
+
+	void wait( condition_variable(L) & this ) with(this) {
+		info_thread( L ) i = { active_thread() };
+		queue_info_thread( this, i );
+	}
+
+	void wait( condition_variable(L) & this, uintptr_t info ) with(this) {
+		info_thread( L ) i = { active_thread(), info };
+		queue_info_thread( this, i );
+	}
+
+	void wait( condition_variable(L) & this, Duration duration ) with(this) {
+		info_thread( L ) i = { active_thread() };
+		queue_info_thread_timeout(this, i, __kernel_get_time() + duration );
+	}
+
+	void wait( condition_variable(L) & this, uintptr_t info, Duration duration ) with(this) {
+		info_thread( L ) i = { active_thread(), info };
+		queue_info_thread_timeout(this, i, __kernel_get_time() + duration );
+	}
+
+	void wait( condition_variable(L) & this, Time time ) with(this) {
+		info_thread( L ) i = { active_thread() };
+		queue_info_thread_timeout(this, i, time);
+	}
+
+	void wait( condition_variable(L) & this, uintptr_t info, Time time )
```
with(this) { 404 info_thread( L ) i = { active_thread(), info }; 405 queue_info_thread_timeout(this, i, time); 406 } 407 408 void wait( condition_variable(L) & this, L & l ) with(this) { 409 info_thread(L) i = { active_thread() }; 410 i.lock = &l; 411 queue_info_thread( this, i ); 412 } 413 414 void wait( condition_variable(L) & this, L & l, uintptr_t info ) with(this) { 415 info_thread(L) i = { active_thread(), info }; 416 i.lock = &l; 417 queue_info_thread( this, i ); 418 } 419 420 void wait( condition_variable(L) & this, L & l, Duration duration ) with(this) { 421 info_thread(L) i = { active_thread() }; 422 i.lock = &l; 423 queue_info_thread_timeout(this, i, __kernel_get_time() + duration ); 424 } 425 426 void wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration ) with(this) { 427 info_thread(L) i = { active_thread(), info }; 428 i.lock = &l; 429 queue_info_thread_timeout(this, i, __kernel_get_time() + duration ); 430 } 431 432 void wait( condition_variable(L) & this, L & l, Time time ) with(this) { 433 info_thread(L) i = { active_thread() }; 434 i.lock = &l; 435 queue_info_thread_timeout(this, i, time ); 436 } 437 438 void wait( condition_variable(L) & this, L & l, uintptr_t info, Time time ) with(this) { 439 info_thread(L) i = { active_thread(), info }; 440 i.lock = &l; 441 queue_info_thread_timeout(this, i, time ); 442 } 443 } -
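The new interface behaves like a classic monitor condition: a waiter that passes a lock to `wait` releases it while blocked and gets it back before returning, because `notify_one`/`notify_all` hand the woken thread to the lock's `add_` hook instead of unparking it directly. A minimal usage sketch (illustrative names; it assumes `mutex_lock` satisfies `is_blocking_lock`, as the deleted `condition_lock` code above suggests):

    #include "locks.hfa"

    mutex_lock l;
    condition_variable( mutex_lock ) cv;
    int ready = 0;

    void consume() {
        lock( l );
        while ( ready == 0 ) {
            wait( cv, l );          // l is released while blocked, reacquired before returning
        }
        ready -= 1;
        unlock( l );
    }

    void produce() {
        lock( l );
        ready += 1;
        notify_one( cv );           // waiter is re-queued on l via the lock's add_ hook
        unlock( l );
    }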
libcfa/src/concurrency/locks.hfa
r55acc3a r139775e 1 #pragma once 2 1 3 #include <stdbool.h> 2 4 … … 10 12 #include "time.hfa" 11 13 #include <sys/time.h> 14 #include "alarm.hfa" 12 15 13 16 /////////////////////////////////////////////////////////////////// … … 32 35 info_thread(L) * next; 33 36 L * lock; 37 bool listed; // true if info_thread is on queue, false otherwise; 34 38 }; 35 39 … … 119 123 /////////////////////////////////////////////////////////////////// 120 124 forall(dtype L | is_blocking_lock(L)) { 121 struct synchronization_lock{125 struct condition_variable { 122 126 // Spin lock used for mutual exclusion 123 127 __spinlock_t lock; … … 128 132 // Count of current blocked threads 129 133 int count; 130 131 // If true threads will reacquire the lock they block on upon waking132 bool reacquire_after_signal;133 134 }; 134 135 struct condition_variable {136 inline synchronization_lock(L);137 };138 139 struct thread_queue {140 inline synchronization_lock(L);141 };142 143 144 void ?{}( synchronization_lock(L) & this, bool multi_acquisition, bool strict_owner );145 void ^?{}( synchronization_lock(L) & this );146 135 147 136 void ?{}( condition_variable(L) & this ); 148 137 void ^?{}( condition_variable(L) & this ); 149 138 150 void ?{}( thread_queue(L) & this );151 void ^?{}( thread_queue(L) & this );139 struct alarm_node_wrap { 140 alarm_node_t alarm_node; 152 141 153 bool notify_one( synchronization_lock(L) & this ); 154 bool notify_all( synchronization_lock(L) & this ); 142 condition_variable(L) * cond; 155 143 156 uintptr_t front( synchronization_lock(L) & this ); 157 158 bool empty( synchronization_lock(L) & this ); 159 int counter( synchronization_lock(L) & this ); 160 161 // wait functions that are not passed a mutex lock 162 void wait( synchronization_lock(L) & this ); 163 void wait( synchronization_lock(L) & this, uintptr_t info ); 164 bool wait( synchronization_lock(L) & this, Duration duration ); 165 bool wait( synchronization_lock(L) & this, uintptr_t info, Duration duration ); 166 bool wait( synchronization_lock(L) & this, Time time ); 167 bool wait( synchronization_lock(L) & this, uintptr_t info, Time time ); 168 169 // wait functions that are passed a lock 170 bool notify_one( synchronization_lock(L) & this, L & l ); 171 bool notify_all( synchronization_lock(L) & this, L & l ); 172 173 void wait( synchronization_lock(L) & this, L & l ); 174 void wait( synchronization_lock(L) & this, L & l, uintptr_t info ); 175 bool wait( synchronization_lock(L) & this, L & l, Duration duration ); 176 bool wait( synchronization_lock(L) & this, L & l, uintptr_t info, Duration duration ); 177 bool wait( synchronization_lock(L) & this, L & l, Time time ); 178 bool wait( synchronization_lock(L) & this, L & l, uintptr_t info, Time time ); 179 } 180 181 /////////////////////////////////////////////////////////////////// 182 //// condition lock alternative approach 183 /////////////////////////////////////////////////////////////////// 184 185 186 /////////////////////////////////////////////////////////////////// 187 //// is_simple_lock 188 /////////////////////////////////////////////////////////////////// 189 190 trait is_simple_lock(dtype L | sized(L)) { 191 void lock( L & ); // For synchronization locks to use when acquiring 192 void unlock( L & ); // For synchronization locks to use when releasing 193 size_t get_recursion_count( L & ); // to get recursion count for cond lock to reset after waking 194 void set_recursion_count( L &, size_t recursion ); // to set recursion count after getting signalled; 195 }; 196 197 
forall(dtype L | is_simple_lock(L)) {
198 struct condition_lock {
199 // Spin lock used for mutual exclusion
200 mutex_lock m_lock;
201
202 condition_variable( mutex_lock ) c_var;
144 info_thread(L) ** i;
203 145 };
204 146
205 void ?{}( condition_lock(L) & this );
206 void ^?{}( condition_lock(L) & this );
147 void ?{}( alarm_node_wrap(L) & this, $thread * thrd, Time alarm, Duration period, Alarm_Callback callback );
148 void ^?{}( alarm_node_wrap(L) & this );
207 149
208 bool notify_one( condition_lock(L) & this );
209 bool notify_all( condition_lock(L) & this );
210 void wait( condition_lock(L) & this, L & l );
150 void alarm_node_callback( alarm_node_wrap(L) & this );
151
152 void alarm_node_wrap_cast( alarm_node_t & a );
153
154 bool notify_one( condition_variable(L) & this );
155 bool notify_all( condition_variable(L) & this );
156
157 uintptr_t front( condition_variable(L) & this );
158
159 bool empty( condition_variable(L) & this );
160 int counter( condition_variable(L) & this );
161
162 // TODO: look into changing timeout routines to return bool showing if signalled or woken by kernel
163 void wait( condition_variable(L) & this );
164 void wait( condition_variable(L) & this, uintptr_t info );
165 void wait( condition_variable(L) & this, Duration duration );
166 void wait( condition_variable(L) & this, uintptr_t info, Duration duration );
167 void wait( condition_variable(L) & this, Time time );
168 void wait( condition_variable(L) & this, uintptr_t info, Time time );
169
170 void wait( condition_variable(L) & this, L & l );
171 void wait( condition_variable(L) & this, L & l, uintptr_t info );
172 void wait( condition_variable(L) & this, L & l, Duration duration );
173 void wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
174 void wait( condition_variable(L) & this, L & l, Time time );
175 void wait( condition_variable(L) & this, L & l, uintptr_t info, Time time );
211 176 }
-
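For the timed variants declared above, `wait` registers an `alarm_node_wrap` with the alarm subsystem, so the thread wakes on whichever comes first: a `notify_*` or the alarm firing. Continuing the sketch from locks.cfa above (and noting the TODO: the routines currently return `void`, so the caller must re-test its predicate to tell a signal from a timeout):

    lock( l );
    while ( ready == 0 ) {
        wait( cv, l, 1`s );         // wakes after at most ~1 second even if never signalled
        if ( ready == 0 ) break;    // still not ready: treat as timeout and give up
    }
    unlock( l );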
libcfa/src/concurrency/monitor.cfa
r55acc3a r139775e 82 82 // Enter single monitor 83 83 static void __enter( $monitor * this, const __monitor_group_t & group ) { 84 $thread * thrd = active_thread(); 85 84 86 // Lock the monitor spinlock 85 87 lock( this->lock __cfaabi_dbg_ctx2 ); 86 // Interrupts disable inside critical section87 $thread * thrd = kernelTLS.this_thread;88 88 89 89 __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner); … … 126 126 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 127 127 128 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );128 /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 129 129 return; 130 130 } … … 132 132 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 133 133 134 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );134 /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 135 135 /* paranoid */ verify( this->lock.lock ); 136 136 … … 141 141 142 142 static void __dtor_enter( $monitor * this, fptr_t func, bool join ) { 143 $thread * thrd = active_thread(); 144 143 145 // Lock the monitor spinlock 144 146 lock( this->lock __cfaabi_dbg_ctx2 ); 145 // Interrupts disable inside critical section146 $thread * thrd = kernelTLS.this_thread;147 147 148 148 __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner); … … 155 155 __set_owner( this, thrd ); 156 156 157 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );157 verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 158 158 159 159 unlock( this->lock ); … … 174 174 this->owner = thrd; 175 175 176 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );176 verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 177 177 178 178 unlock( this->lock ); … … 200 200 201 201 // Release the next thread 202 /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );202 /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 203 203 unpark( urgent->owner->waiting_thread ); 204 204 … … 207 207 208 208 // Some one was waiting for us, enter 209 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );209 /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 210 210 } 211 211 else { … … 224 224 park(); 225 225 226 /* paranoid */ verifyf( 
kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );226 /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 227 227 return; 228 228 } … … 237 237 lock( this->lock __cfaabi_dbg_ctx2 ); 238 238 239 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);240 241 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );239 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", active_thread(), this, this->owner); 240 241 /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 242 242 243 243 // Leaving a recursion level, decrement the counter … … 270 270 void __dtor_leave( $monitor * this, bool join ) { 271 271 __cfaabi_dbg_debug_do( 272 if( TL_GET( this_thread) != this->owner ) {273 abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread), this->owner);272 if( active_thread() != this->owner ) { 273 abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, active_thread(), this->owner); 274 274 } 275 275 if( this->recursion != 1 && !join ) { … … 287 287 /* paranoid */ verify( this->lock.lock ); 288 288 /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this ); 289 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled);289 /* paranoid */ verify( ! __preemption_enabled() ); 290 290 /* paranoid */ verify( thrd->state == Halted ); 291 291 /* paranoid */ verify( this->recursion == 1 ); … … 303 303 // Unpark the next owner if needed 304 304 /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this ); 305 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled);305 /* paranoid */ verify( ! 
__preemption_enabled() ); 306 306 /* paranoid */ verify( thrd->state == Halted ); 307 307 unpark( new_owner ); … … 327 327 // Sorts monitors before entering 328 328 void ?{}( monitor_guard_t & this, $monitor * m [], __lock_size_t count, fptr_t func ) { 329 $thread * thrd = TL_GET( this_thread);329 $thread * thrd = active_thread(); 330 330 331 331 // Store current array … … 362 362 363 363 // Restore thread context 364 TL_GET( this_thread)->monitors = this.prev;364 active_thread()->monitors = this.prev; 365 365 } 366 366 … … 369 369 void ?{}( monitor_dtor_guard_t & this, $monitor * m [], fptr_t func, bool join ) { 370 370 // optimization 371 $thread * thrd = TL_GET( this_thread);371 $thread * thrd = active_thread(); 372 372 373 373 // Store current array … … 392 392 393 393 // Restore thread context 394 TL_GET( this_thread)->monitors = this.prev;394 active_thread()->monitors = this.prev; 395 395 } 396 396 … … 432 432 433 433 // Create the node specific to this wait operation 434 wait_ctx( TL_GET( this_thread), user_info );434 wait_ctx( active_thread(), user_info ); 435 435 436 436 // Append the current wait operation to the ones already queued on the condition … … 483 483 //Some more checking in debug 484 484 __cfaabi_dbg_debug_do( 485 $thread * this_thrd = TL_GET( this_thread);485 $thread * this_thrd = active_thread(); 486 486 if ( this.monitor_count != this_thrd->monitors.size ) { 487 487 abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size ); … … 531 531 532 532 // Create the node specific to this wait operation 533 wait_ctx_primed( kernelTLS.this_thread, 0 )533 wait_ctx_primed( active_thread(), 0 ) 534 534 535 535 //save contexts … … 630 630 631 631 // Create the node specific to this wait operation 632 wait_ctx_primed( kernelTLS.this_thread, 0 );632 wait_ctx_primed( active_thread(), 0 ); 633 633 634 634 // Save monitor states … … 682 682 683 683 // Create the node specific to this wait operation 684 wait_ctx_primed( kernelTLS.this_thread, 0 );684 wait_ctx_primed( active_thread(), 0 ); 685 685 686 686 monitor_save; … … 688 688 689 689 for( __lock_size_t i = 0; i < count; i++) { 690 verify( monitors[i]->owner == kernelTLS.this_thread);690 verify( monitors[i]->owner == active_thread() ); 691 691 } 692 692 … … 724 724 static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread * owner ) { 725 725 /* paranoid */ verify ( monitors[0]->lock.lock ); 726 /* paranoid */ verifyf( monitors[0]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[0]->owner, monitors[0]->recursion, monitors[0] );726 /* paranoid */ verifyf( monitors[0]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[0]->owner, monitors[0]->recursion, monitors[0] ); 727 727 monitors[0]->owner = owner; 728 728 monitors[0]->recursion = 1; 729 729 for( __lock_size_t i = 1; i < count; i++ ) { 730 730 /* paranoid */ verify ( monitors[i]->lock.lock ); 731 /* paranoid */ verifyf( monitors[i]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[i]->owner, monitors[i]->recursion, monitors[i] );731 /* paranoid */ verifyf( monitors[i]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[i]->owner, monitors[i]->recursion, monitors[i] ); 732 732 monitors[i]->owner = owner; 733 733 monitors[i]->recursion = 
0; … … 755 755 //regardless of if we are ready to baton pass, 756 756 //we need to set the monitor as in use 757 /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );757 /* paranoid */ verifyf( !this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 758 758 __set_owner( this, urgent->owner->waiting_thread ); 759 759 … … 764 764 // Get the next thread in the entry_queue 765 765 $thread * new_owner = pop_head( this->entry_queue ); 766 /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );766 /* paranoid */ verifyf( !this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 767 767 /* paranoid */ verify( !new_owner || new_owner->link.next == 0p ); 768 768 __set_owner( this, new_owner ); … … 892 892 893 893 static inline void brand_condition( condition & this ) { 894 $thread * thrd = TL_GET( this_thread);894 $thread * thrd = active_thread(); 895 895 if( !this.monitors ) { 896 896 // __cfaabi_dbg_print_safe( "Branding\n" ); -
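The recurring substitution in this file, `kernelTLS.this_thread` (or `TL_GET( this_thread )`) becoming `active_thread()`, funnels every read of the current thread through a single routine rather than an open-coded TLS access. Presumably the accessor is little more than the following (a sketch, not the actual definition):

    $thread * active_thread() {
        return kernelTLS().this_thread;   // one known code address for the raw TLS read
    }

This matters on architectures where a TLS access takes several instructions; the preemption changes below bracket such reads so the signal handler can recognize them and avoid interrupting in the middle.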
libcfa/src/concurrency/mutex.cfa
r55acc3a r139775e 40 40 lock( lock __cfaabi_dbg_ctx2 ); 41 41 if( is_locked ) { 42 append( blocked_threads, kernelTLS.this_thread);42 append( blocked_threads, active_thread() ); 43 43 unlock( lock ); 44 44 park(); … … 86 86 lock( lock __cfaabi_dbg_ctx2 ); 87 87 if( owner == 0p ) { 88 owner = kernelTLS.this_thread;88 owner = active_thread(); 89 89 recursion_count = 1; 90 90 unlock( lock ); 91 91 } 92 else if( owner == kernelTLS.this_thread) {92 else if( owner == active_thread() ) { 93 93 recursion_count++; 94 94 unlock( lock ); 95 95 } 96 96 else { 97 append( blocked_threads, kernelTLS.this_thread);97 append( blocked_threads, active_thread() ); 98 98 unlock( lock ); 99 99 park(); … … 105 105 lock( lock __cfaabi_dbg_ctx2 ); 106 106 if( owner == 0p ) { 107 owner = kernelTLS.this_thread;107 owner = active_thread(); 108 108 recursion_count = 1; 109 109 ret = true; 110 110 } 111 else if( owner == kernelTLS.this_thread) {111 else if( owner == active_thread() ) { 112 112 recursion_count++; 113 113 ret = true; … … 159 159 void wait(condition_variable & this) { 160 160 lock( this.lock __cfaabi_dbg_ctx2 ); 161 append( this.blocked_threads, kernelTLS.this_thread);161 append( this.blocked_threads, active_thread() ); 162 162 unlock( this.lock ); 163 163 park(); … … 167 167 void wait(condition_variable & this, L & l) { 168 168 lock( this.lock __cfaabi_dbg_ctx2 ); 169 append( this.blocked_threads, kernelTLS.this_thread);169 append( this.blocked_threads, active_thread() ); 170 170 unlock(l); 171 171 unlock(this.lock); -
libcfa/src/concurrency/preemption.cfa
r55acc3a r139775e 10 10 // Created On : Mon Jun 5 14:20:42 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed Aug 26 16:46:03 202013 // Update Count : 5 312 // Last Modified On : Fri Nov 6 07:42:13 2020 13 // Update Count : 54 14 14 // 15 15 … … 38 38 // FwdDeclarations : timeout handlers 39 39 static void preempt( processor * this ); 40 static void timeout( struct __processor_id_t * id,$thread * this );40 static void timeout( $thread * this ); 41 41 42 42 // FwdDeclarations : Signal handlers … … 91 91 92 92 // Tick one frame of the Discrete Event Simulation for alarms 93 static void tick_preemption( struct __processor_id_t * id) {93 static void tick_preemption(void) { 94 94 alarm_node_t * node = 0p; // Used in the while loop but cannot be declared in the while condition 95 95 alarm_list_t * alarms = &event_kernel->alarms; // Local copy for ease of reading … … 105 105 106 106 // Check if this is a kernel 107 if( node-> kernel_alarm) {107 if( node->type == Kernel ) { 108 108 preempt( node->proc ); 109 109 } 110 else if( node->type == User ) { 111 timeout( node->thrd ); 112 } 110 113 else { 111 timeout( id, node->thrd);114 node->callback(*node); 112 115 } 113 116 … … 160 163 // Kernel Signal Tools 161 164 //============================================================================================= 162 163 __cfaabi_dbg_debug_do( static thread_local void * last_interrupt = 0; ) 165 // In a user-level threading system, there are handful of thread-local variables where this problem occurs on the ARM. 166 // 167 // For each kernel thread running user-level threads, there is a flag variable to indicate if interrupts are 168 // enabled/disabled for that kernel thread. Therefore, this variable is made thread local. 169 // 170 // For example, this code fragment sets the state of the "interrupt" variable in thread-local memory. 171 // 172 // _Thread_local volatile int interrupts; 173 // int main() { 174 // interrupts = 0; // disable interrupts } 175 // 176 // which generates the following code on the ARM 177 // 178 // (gdb) disassemble main 179 // Dump of assembler code for function main: 180 // 0x0000000000000610 <+0>: mrs x1, tpidr_el0 181 // 0x0000000000000614 <+4>: mov w0, #0x0 // #0 182 // 0x0000000000000618 <+8>: add x1, x1, #0x0, lsl #12 183 // 0x000000000000061c <+12>: add x1, x1, #0x10 184 // 0x0000000000000620 <+16>: str wzr, [x1] 185 // 0x0000000000000624 <+20>: ret 186 // 187 // The mrs moves a pointer from coprocessor register tpidr_el0 into register x1. Register w0 is set to 0. The two adds 188 // increase the TLS pointer with the displacement (offset) 0x10, which is the location in the TSL of variable 189 // "interrupts". Finally, 0 is stored into "interrupts" through the pointer in register x1 that points into the 190 // TSL. Now once x1 has the pointer to the location of the TSL for kernel thread N, it can be be preempted at a 191 // user-level and the user thread is put on the user-level ready-queue. When the preempted thread gets to the front of 192 // the user-level ready-queue it is run on kernel thread M. It now stores 0 into "interrupts" back on kernel thread N, 193 // turning off interrupt on the wrong kernel thread. 194 // 195 // On the x86, the following code is generated for the same code fragment. 
196 // 197 // (gdb) disassemble main 198 // Dump of assembler code for function main: 199 // 0x0000000000400420 <+0>: movl $0x0,%fs:0xfffffffffffffffc 200 // 0x000000000040042c <+12>: xor %eax,%eax 201 // 0x000000000040042e <+14>: retq 202 // 203 // and there is base-displacement addressing used to atomically reset variable "interrupts" off of the TSL pointer in 204 // register "fs". 205 // 206 // Hence, the ARM has base-displacement address for the general purpose registers, BUT not to the coprocessor 207 // registers. As a result, generating the address for the write into variable "interrupts" is no longer atomic. 208 // 209 // Note this problem does NOT occur when just using multiple kernel threads because the preemption ALWAYS restarts the 210 // thread on the same kernel thread. 211 // 212 // The obvious question is why does ARM use a coprocessor register to store the TSL pointer given that coprocessor 213 // registers are second-class registers with respect to the instruction set. One possible answer is that they did not 214 // want to dedicate one of the general registers to hold the TLS pointer and there was a free coprocessor register 215 // available. 216 217 //---------- 218 // special case for preemption since used often 219 bool __preemption_enabled() { 220 // create a assembler label before 221 // marked as clobber all to avoid movement 222 asm volatile("__cfaasm_check_before:":::"memory"); 223 224 // access tls as normal 225 bool enabled = __cfaabi_tls.preemption_state.enabled; 226 227 // create a assembler label after 228 // marked as clobber all to avoid movement 229 asm volatile("__cfaasm_check_after:":::"memory"); 230 return enabled; 231 } 232 233 //---------- 234 // Get data from the TLS block 235 uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__)); //no inline to avoid problems 236 uintptr_t __cfatls_get( unsigned long int offset ) { 237 // create a assembler label before 238 // marked as clobber all to avoid movement 239 asm volatile("__cfaasm_get_before:":::"memory"); 240 241 // access tls as normal (except for pointer arithmetic) 242 uintptr_t val = *(uintptr_t*)((uintptr_t)&__cfaabi_tls + offset); 243 244 // create a assembler label after 245 // marked as clobber all to avoid movement 246 asm volatile("__cfaasm_get_after:":::"memory"); 247 return val; 248 } 164 249 165 250 extern "C" { 166 251 // Disable interrupts by incrementing the counter 167 252 void disable_interrupts() { 168 with( kernelTLS.preemption_state ) { 253 // create a assembler label before 254 // marked as clobber all to avoid movement 255 asm volatile("__cfaasm_disable_before:":::"memory"); 256 257 with( __cfaabi_tls.preemption_state ) { 169 258 #if GCC_VERSION > 50000 170 259 static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free"); … … 183 272 verify( new_val < 65_000u ); // If this triggers someone is disabling interrupts without enabling them 184 273 } 274 275 // create a assembler label after 276 // marked as clobber all to avoid movement 277 asm volatile("__cfaasm_disable_after:":::"memory"); 185 278 } 186 279 … … 188 281 // If counter reaches 0, execute any pending __cfactx_switch 189 282 void enable_interrupts( __cfaabi_dbg_ctx_param ) { 190 processor * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store 283 // create a assembler label before 284 // marked as clobber all to avoid movement 285 asm volatile("__cfaasm_enable_before:":::"memory"); 286 287 processor * 
proc = __cfaabi_tls.this_processor; // Cache the processor now since interrupts can start happening after the atomic store 191 288 /* paranoid */ verify( proc ); 192 289 193 with( kernelTLS.preemption_state ){290 with( __cfaabi_tls.preemption_state ){ 194 291 unsigned short prev = disable_count; 195 292 disable_count -= 1; … … 218 315 // For debugging purposes : keep track of the last person to enable the interrupts 219 316 __cfaabi_dbg_debug_do( proc->last_enable = caller; ) 317 318 // create a assembler label after 319 // marked as clobber all to avoid movement 320 asm volatile("__cfaasm_enable_after:":::"memory"); 220 321 } 221 322 … … 223 324 // Don't execute any pending __cfactx_switch even if counter reaches 0 224 325 void enable_interrupts_noPoll() { 225 unsigned short prev = kernelTLS.preemption_state.disable_count; 226 kernelTLS.preemption_state.disable_count -= 1; 326 // create a assembler label before 327 // marked as clobber all to avoid movement 328 asm volatile("__cfaasm_nopoll_before:":::"memory"); 329 330 unsigned short prev = __cfaabi_tls.preemption_state.disable_count; 331 __cfaabi_tls.preemption_state.disable_count -= 1; 227 332 verifyf( prev != 0u, "Incremented from %u\n", prev ); // If this triggers someone is enabled already enabled interrupts 228 333 if( prev == 1 ) { 229 334 #if GCC_VERSION > 50000 230 static_assert(__atomic_always_lock_free(sizeof( kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free");335 static_assert(__atomic_always_lock_free(sizeof(__cfaabi_tls.preemption_state.enabled), &__cfaabi_tls.preemption_state.enabled), "Must be lock-free"); 231 336 #endif 232 337 // Set enabled flag to true 233 338 // should be atomic to avoid preemption in the middle of the operation. 234 339 // use memory order RELAXED since there is no inter-thread on this variable requirements 235 __atomic_store_n(& kernelTLS.preemption_state.enabled, true, __ATOMIC_RELAXED);340 __atomic_store_n(&__cfaabi_tls.preemption_state.enabled, true, __ATOMIC_RELAXED); 236 341 237 342 // Signal the compiler that a fence is needed but only for signal handlers 238 343 __atomic_signal_fence(__ATOMIC_RELEASE); 239 344 } 345 346 // create a assembler label after 347 // marked as clobber all to avoid movement 348 asm volatile("__cfaasm_nopoll_after:":::"memory"); 240 349 } 241 350 } … … 270 379 271 380 // reserved for future use 272 static void timeout( struct __processor_id_t * id,$thread * this ) {381 static void timeout( $thread * this ) { 273 382 #if !defined( __CFA_NO_STATISTICS__ ) 274 kernelTLS .this_stats = this->curr_cluster->stats;383 kernelTLS().this_stats = this->curr_cluster->stats; 275 384 #endif 276 __unpark( id,this );385 unpark( this ); 277 386 } 278 387 … … 283 392 static inline bool preemption_ready() { 284 393 // Check if preemption is safe 285 bool ready = kernelTLS.preemption_state.enabled && ! kernelTLS.preemption_state.in_progress;394 bool ready = __cfaabi_tls.preemption_state.enabled && ! 
__cfaabi_tls.preemption_state.in_progress; 286 395 287 396 // Adjust the pending flag accordingly 288 kernelTLS.this_processor->pending_preemption = !ready;397 __cfaabi_tls.this_processor->pending_preemption = !ready; 289 398 return ready; 290 399 } … … 300 409 301 410 // Start with preemption disabled until ready 302 kernelTLS.preemption_state.enabled = false;303 kernelTLS.preemption_state.disable_count = 1;411 __cfaabi_tls.preemption_state.enabled = false; 412 __cfaabi_tls.preemption_state.disable_count = 1; 304 413 305 414 // Initialize the event kernel … … 359 468 // Kernel Signal Handlers 360 469 //============================================================================================= 470 struct asm_region { 471 void * before; 472 void * after; 473 }; 474 475 //----------------------------------------------------------------------------- 476 // Some assembly required 477 #if defined( __i386 ) 478 #define __cfaasm_label( label ) \ 479 ({ \ 480 struct asm_region region; \ 481 asm( \ 482 "movl $__cfaasm_" #label "_before, %[vb]\n\t" \ 483 "movl $__cfaasm_" #label "_after , %[va]\n\t" \ 484 : [vb]"=r"(region.before), [vb]"=r"(region.before) \ 485 ); \ 486 region; \ 487 }); 488 #elif defined( __x86_64 ) 489 #ifdef __PIC__ 490 #define PLT "@PLT" 491 #else 492 #define PLT "" 493 #endif 494 #define __cfaasm_label( label ) \ 495 ({ \ 496 struct asm_region region; \ 497 asm( \ 498 "movq $__cfaasm_" #label "_before" PLT ", %[vb]\n\t" \ 499 "movq $__cfaasm_" #label "_after" PLT ", %[va]\n\t" \ 500 : [vb]"=r"(region.before), [va]"=r"(region.after) \ 501 ); \ 502 region; \ 503 }); 504 #elif defined( __aarch64__ ) 505 #error __cfaasm_label undefined for arm 506 #else 507 #error unknown hardware architecture 508 #endif 509 510 __cfaabi_dbg_debug_do( static thread_local void * last_interrupt = 0; ) 361 511 362 512 // Context switch signal handler 363 513 // Receives SIGUSR1 signal and causes the current thread to yield 364 514 static void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) { 365 __cfaabi_dbg_debug_do( last_interrupt = (void *)(cxt->uc_mcontext.CFA_REG_IP); ) 515 void * ip = (void *)(cxt->uc_mcontext.CFA_REG_IP); 516 __cfaabi_dbg_debug_do( last_interrupt = ip; ) 366 517 367 518 // SKULLDUGGERY: if a thread creates a processor and the immediately deletes it, … … 369 520 // before the kernel thread has even started running. When that happens, an interrupt 370 521 // with a null 'this_processor' will be caught, just ignore it. 371 if(! kernelTLS.this_processor ) return;522 if(! 
__cfaabi_tls.this_processor ) return; 372 523 373 524 choose(sfp->si_value.sival_int) { 374 525 case PREEMPT_NORMAL : ;// Normal case, nothing to do here 375 case PREEMPT_TERMINATE: verify( __atomic_load_n( & kernelTLS.this_processor->do_terminate, __ATOMIC_SEQ_CST ) );526 case PREEMPT_TERMINATE: verify( __atomic_load_n( &__cfaabi_tls.this_processor->do_terminate, __ATOMIC_SEQ_CST ) ); 376 527 default: 377 528 abort( "internal error, signal value is %d", sfp->si_value.sival_int ); … … 381 532 if( !preemption_ready() ) { return; } 382 533 383 __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", kernelTLS.this_processor, kernelTLS.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) ); 534 struct asm_region region; 535 region = __cfaasm_label( get ); if( ip >= region.before && ip <= region.after ) return; 536 region = __cfaasm_label( check ); if( ip >= region.before && ip <= region.after ) return; 537 region = __cfaasm_label( disable ); if( ip >= region.before && ip <= region.after ) return; 538 region = __cfaasm_label( enable ); if( ip >= region.before && ip <= region.after ) return; 539 region = __cfaasm_label( nopoll ); if( ip >= region.before && ip <= region.after ) return; 540 541 __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", __cfaabi_tls.this_processor, __cfaabi_tls.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) ); 384 542 385 543 // Sync flag : prevent recursive calls to the signal handler 386 kernelTLS.preemption_state.in_progress = true;544 __cfaabi_tls.preemption_state.in_progress = true; 387 545 388 546 // Clear sighandler mask before context switching. … … 394 552 } 395 553 396 // TODO: this should go in finish action397 554 // Clear the in progress flag 398 kernelTLS.preemption_state.in_progress = false;555 __cfaabi_tls.preemption_state.in_progress = false; 399 556 400 557 // Preemption can occur here … … 413 570 id.full_proc = false; 414 571 id.id = doregister(&id); 572 __cfaabi_tls.this_proc_id = &id; 415 573 416 574 // Block sigalrms to control when they arrive … … 458 616 // __cfaabi_dbg_print_safe( "Kernel : Preemption thread tick\n" ); 459 617 lock( event_kernel->lock __cfaabi_dbg_ctx2 ); 460 tick_preemption( &id);618 tick_preemption(); 461 619 unlock( event_kernel->lock ); 462 620 break; … … 480 638 481 639 void __cfaabi_check_preemption() { 482 bool ready = kernelTLS.preemption_state.enabled;640 bool ready = __preemption_enabled(); 483 641 if(!ready) { abort("Preemption should be ready"); } 484 642 … … 503 661 #ifdef __CFA_WITH_VERIFY__ 504 662 bool __cfaabi_dbg_in_kernel() { 505 return ! kernelTLS.preemption_state.enabled;663 return !__preemption_enabled(); 506 664 } 507 665 #endif -
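The mechanism above — bracketing each unsafe TLS access with named assembly labels and having the signal handler compare the interrupted instruction pointer against those label pairs — can be distilled into a standalone sketch (assumed example with GCC inline asm; label and variable names are hypothetical, not from the changeset):

    static _Thread_local volatile int my_flag;          // TLS variable whose access must not be split

    __attribute__((noinline)) int read_my_flag(void) {
        asm volatile("my_region_before:":::"memory");   // memory clobber keeps the access between the labels
        int v = my_flag;                                // the multi-instruction TLS access being protected
        asm volatile("my_region_after:":::"memory");
        return v;
    }

    // signal handler: if ip lies within [my_region_before, my_region_after], return without preempting

Each bracketed routine needs its own unique label pair and must not be inlined or cloned, otherwise the labels would be emitted more than once and linking would fail — hence the distinct `__cfaasm_<name>_before`/`_after` pair per routine in the real code.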
libcfa/src/concurrency/ready_queue.cfa
r55acc3a r139775e
150 150 // queues or removing them.
151 151 uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
152 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled);
152 /* paranoid */ verify( ! __preemption_enabled() );
153 153
154 154 // Step 1 : lock global lock
… …
166 166 }
167 167
168 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled);
168 /* paranoid */ verify( ! __preemption_enabled() );
169 169 return s;
170 170 }
171 171
172 172 void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
173 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled);
173 /* paranoid */ verify( ! __preemption_enabled() );
174 174
175 175 // Step 1 : release local locks
… …
188 188 __atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);
189 189
190 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled);
190 /* paranoid */ verify( ! __preemption_enabled() );
191 191 }
192 192
… …
252 252 preferred =
253 253 //*
254 kernelTLS.this_processor ? kernelTLS.this_processor->id * 4 : -1;
254 kernelTLS().this_processor ? kernelTLS().this_processor->id * 4 : -1;
255 255 /*/
256 256 thrd->link.preferred * 4;
… …
331 331 // Don't bother trying locally too much
332 332 int local_tries = 8;
333 preferred = kernelTLS.this_processor->id * 4;
333 preferred = kernelTLS().this_processor->id * 4;
334 334
-
libcfa/src/concurrency/thread.cfa
r55acc3a r139775e
127 127 verify( this_thrd->context.SP );
128 128
129 __schedule_thread( (__processor_id_t *)kernelTLS.this_processor, this_thrd);
129 __schedule_thread( this_thrd );
130 130 enable_interrupts( __cfaabi_dbg_ctx );
131 131 }
-
src/AST/Convert.cpp
r55acc3a r139775e 25 25 #include "AST/Init.hpp" 26 26 #include "AST/Stmt.hpp" 27 #include "AST/TranslationUnit.hpp" 27 28 #include "AST/TypeSubstitution.hpp" 28 29 … … 1404 1405 }; 1405 1406 1406 std::list< Declaration * > convert( const std::list< ast::ptr< ast::Decl > >&& translationUnit ) {1407 std::list< Declaration * > convert( const ast::TranslationUnit && translationUnit ) { 1407 1408 ConverterNewToOld c; 1408 1409 std::list< Declaration * > decls; 1409 for(auto d : translationUnit ) {1410 for(auto d : translationUnit.decls) { 1410 1411 decls.emplace_back( c.decl( d ) ); 1411 1412 } … … 2803 2804 #undef GET_ACCEPT_1 2804 2805 2805 std::list< ast::ptr< ast::Decl > >convert( const std::list< Declaration * > && translationUnit ) {2806 ast::TranslationUnit convert( const std::list< Declaration * > && translationUnit ) { 2806 2807 ConverterOldToNew c; 2807 std::list< ast::ptr< ast::Decl > > decls;2808 ast::TranslationUnit unit; 2808 2809 for(auto d : translationUnit) { 2809 2810 d->accept( c ); 2810 decls.emplace_back( c.decl() );2811 unit.decls.emplace_back( c.decl() ); 2811 2812 } 2812 2813 deleteAll(translationUnit); 2813 return decls;2814 return unit; 2814 2815 } -
src/AST/Convert.hpp
r55acc3a r139775e
18 18 #include <list>
19 19
20 #include "AST/Node.hpp"
21
22 20 class Declaration;
23 21 namespace ast {
24 class Decl;
22 class TranslationUnit;
25 23 };
26 24
27 std::list< Declaration * > convert( const std::list< ast::ptr< ast::Decl > >&& translationUnit );
28 std::list< ast::ptr< ast::Decl > >convert( const std::list< Declaration * > && translationUnit );
25 std::list< Declaration * > convert( const ast::TranslationUnit && translationUnit );
26 ast::TranslationUnit convert( const std::list< Declaration * > && translationUnit );
-
src/AST/Fwd.hpp
r55acc3a r139775e
137 137 typedef unsigned int UniqueId;
138 138
139 class TranslationUnit;
140 // TODO: Get from the TranslationUnit:
139 141 extern Type * sizeType;
140 142 extern FunctionDecl * dereferenceOperator;
-
src/AST/Pass.hpp
r55acc3a r139775e 103 103 /// Construct and run a pass on a translation unit. 104 104 template< typename... Args > 105 static void run( std::list< ptr<Decl> >& decls, Args &&... args ) {105 static void run( TranslationUnit & decls, Args &&... args ) { 106 106 Pass<core_t> visitor( std::forward<Args>( args )... ); 107 107 accept_all( decls, visitor ); … … 119 119 // Versions of the above for older compilers. 120 120 template< typename... Args > 121 static void run( std::list< ptr<Decl> >& decls ) {121 static void run( TranslationUnit & decls ) { 122 122 Pass<core_t> visitor; 123 123 accept_all( decls, visitor ); … … 303 303 void accept_all( std::list< ast::ptr<ast::Decl> > &, ast::Pass<core_t> & visitor ); 304 304 305 template<typename core_t> 306 void accept_all( ast::TranslationUnit &, ast::Pass<core_t> & visitor ); 307 305 308 //------------------------------------------------------------------------------------------------- 306 309 // PASS ACCESSORIES -
src/AST/Pass.impl.hpp
r55acc3a r139775e 20 20 #include <unordered_map> 21 21 22 #include "AST/TranslationUnit.hpp" 22 23 #include "AST/TypeSubstitution.hpp" 23 24 … … 430 431 pass_visitor_stats.depth--; 431 432 if ( !errors.isEmpty() ) { throw errors; } 433 } 434 435 template< typename core_t > 436 inline void ast::accept_all( ast::TranslationUnit & unit, ast::Pass< core_t > & visitor ) { 437 return ast::accept_all( unit.decls, visitor ); 432 438 } 433 439 … … 674 680 const ast::CompoundStmt * ast::Pass< core_t >::visit( const ast::CompoundStmt * node ) { 675 681 VISIT_START( node ); 676 VISIT( {682 VISIT( 677 683 // Do not enter (or leave) a new scope if atFunctionTop. Remember to save the result. 678 684 auto guard1 = makeFuncGuard( [this, enterScope = !this->atFunctionTop]() { … … 681 687 if ( leaveScope ) __pass::symtab::leave(core, 0); 682 688 }); 683 ValueGuard< bool > guard2( inFunction ); 689 ValueGuard< bool > guard2( atFunctionTop ); 690 atFunctionTop = false; 684 691 guard_scope guard3 { *this }; 685 inFunction = false;686 692 maybe_accept( node, &CompoundStmt::kids ); 687 })693 ) 688 694 VISIT_END( CompoundStmt, node ); 689 695 } -
src/AST/Pass.proto.hpp
r55acc3a r139775e
22 22 template<typename core_t>
23 23 class Pass;
24
25 class TranslationUnit;
24 26
25 27 struct PureVisitor;
-
src/InitTweak/FixGlobalInit.cc
r55acc3a r139775e 109 109 } 110 110 111 void fixGlobalInit( std::list<ast::ptr<ast::Decl>>& translationUnit, bool inLibrary) {111 void fixGlobalInit(ast::TranslationUnit & translationUnit, bool inLibrary) { 112 112 ast::Pass<GlobalFixer_new> fixer; 113 113 accept_all(translationUnit, fixer); … … 119 119 ast::Storage::Static, ast::Linkage::C, {new ast::Attribute("constructor", std::move(ctorParams))}); 120 120 121 translationUnit. emplace_back( initFunction );121 translationUnit.decls.emplace_back( initFunction ); 122 122 } // if 123 123 … … 128 128 ast::Storage::Static, ast::Linkage::C, {new ast::Attribute("destructor", std::move(dtorParams))}); 129 129 130 translationUnit. emplace_back(destroyFunction);130 translationUnit.decls.emplace_back(destroyFunction); 131 131 } // if 132 132 } … … 183 183 } // if 184 184 if ( const ast::Stmt * ctor = ctorInit->ctor ) { 185 addDataSectionAttribute(mutDecl); 185 186 initStmts.push_back( ctor ); 186 187 mutDecl->init = nullptr; -
src/InitTweak/FixGlobalInit.h
r55acc3a r139775e
29 29 /// function is for library code.
30 30 void fixGlobalInit( std::list< Declaration * > & translationUnit, bool inLibrary );
31 void fixGlobalInit( std::list< ast::ptr<ast::Decl> >& translationUnit, bool inLibrary );
31 void fixGlobalInit( ast::TranslationUnit & translationUnit, bool inLibrary );
32 32
33 33
-
src/InitTweak/FixInit.h
r55acc3a r139775e 19 19 #include <string> // for string 20 20 21 #include <AST/Fwd.hpp>22 23 21 class Declaration; 22 namespace ast { 23 class TranslationUnit; 24 } 24 25 25 26 namespace InitTweak { … … 27 28 void fix( std::list< Declaration * > & translationUnit, bool inLibrary ); 28 29 29 void fix( std::list<ast::ptr<ast::Decl>>& translationUnit, bool inLibrary);30 void fix( ast::TranslationUnit & translationUnit, bool inLibrary); 30 31 } // namespace 31 32 -
src/InitTweak/FixInitNew.cpp
r55acc3a r139775e 179 179 /// expand each object declaration to use its constructor after it is declared. 180 180 struct FixInit : public ast::WithStmtsToAdd<> { 181 static void fixInitializers( std::list< ast::ptr<ast::Decl> >&translationUnit );181 static void fixInitializers( ast::TranslationUnit &translationUnit ); 182 182 183 183 const ast::DeclWithType * postvisit( const ast::ObjectDecl *objDecl ); … … 225 225 } // namespace 226 226 227 void fix( std::list< ast::ptr<ast::Decl> >& translationUnit, bool inLibrary ) {227 void fix( ast::TranslationUnit & translationUnit, bool inLibrary ) { 228 228 ast::Pass<SelfAssignChecker>::run( translationUnit ); 229 229 … … 308 308 } 309 309 310 void FixInit::fixInitializers( std::list< ast::ptr<ast::Decl> >& translationUnit ) {310 void FixInit::fixInitializers( ast::TranslationUnit & translationUnit ) { 311 311 ast::Pass<FixInit> fixer; 312 312 … … 314 314 // can't use DeclMutator, because sometimes need to insert IfStmt, etc. 315 315 SemanticErrorException errors; 316 for ( auto i = translationUnit. begin(); i != translationUnit.end(); ++i ) {316 for ( auto i = translationUnit.decls.begin(); i != translationUnit.decls.end(); ++i ) { 317 317 try { 318 318 // maybeAccept( *i, fixer ); translationUnit should never contain null 319 319 *i = (*i)->accept(fixer); 320 translationUnit. splice( i, fixer.core.staticDtorDecls );320 translationUnit.decls.splice( i, fixer.core.staticDtorDecls ); 321 321 } catch( SemanticErrorException &e ) { 322 322 errors.append( e ); … … 864 864 if ( const ast::Stmt * ctor = ctorInit->ctor ) { 865 865 if ( objDecl->storage.is_static ) { 866 addDataSectionAttribute(objDecl); 866 867 // originally wanted to take advantage of gcc nested functions, but 867 868 // we get memory errors with this approach. To remedy this, the static … … 947 948 objDecl->name = objDecl->name + staticNamer.newName(); 948 949 objDecl->mangleName = Mangle::mangle( objDecl ); 950 objDecl->init = nullptr; 949 951 950 952 // xxx - temporary hack: need to return a declaration, but want to hoist the current object out of this scope -
src/InitTweak/InitTweak.cc
r55acc3a r139775e
1113 1113 }
1114 1114
1115 void addDataSectionAttribute( ast::ObjectDecl * objDecl ) {
1116 auto strLitT = new ast::PointerType(new ast::BasicType(ast::BasicType::Char));
1117 objDecl->attributes.push_back(new ast::Attribute("section", {new ast::ConstantExpr(objDecl->location, strLitT, "\".data#\"", std::nullopt)}));
1118 }
1119
1115 1120 }
-
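With this helper, a statically-initialized object that is switched to runtime construction is annotated so that the generated C ends up roughly as (a sketch; `x` is illustrative):

    __attribute__(( section(".data#") )) int x;

That pins the object in the writable `.data` section even after its C initializer is removed (see `mutDecl->init = nullptr` / `objDecl->init = nullptr` in the callers above), so the emitted constructor can store into it; the trailing `#` presumably begins an assembler comment that suppresses the extra section arguments the compiler would otherwise append.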
src/InitTweak/InitTweak.h
r55acc3a r139775e
119 119 void addDataSectonAttribute( ObjectDecl * objDecl );
120 120
121 void addDataSectionAttribute( ast::ObjectDecl * objDecl );
122
121 123 class InitExpander_old {
122 124 public:
-
src/ResolvExpr/Resolver.cc
r55acc3a r139775e
1274 1274 // size_t Resolver_new::traceId = Stats::Heap::new_stacktrace_id("Resolver");
1275 1275
1276 void resolve( std::list< ast::ptr< ast::Decl > >& translationUnit ) {
1276 void resolve( ast::TranslationUnit& translationUnit ) {
1277 1277 ast::Pass< Resolver_new >::run( translationUnit );
1278 1278 }
-
src/ResolvExpr/Resolver.h
r55acc3a r139775e
35 35 class StmtExpr;
36 36 class SymbolTable;
37 class TranslationUnit;
37 38 class Type;
38 39 class TypeEnvironment;
… …
55 56
56 57 /// Checks types and binds syntactic constructs to typed representations
57 void resolve( std::list< ast::ptr<ast::Decl> >& translationUnit );
58 void resolve( ast::TranslationUnit& translationUnit );
58 59 /// Searches expr and returns the first DeletedExpr found, otherwise nullptr
59 60 const ast::DeletedExpr * findDeletedExpr( const ast::Expr * expr );
… …
69 70 const ast::Expr * untyped, const ast::SymbolTable & symtab);
70 71 /// Resolves a constructor init expression
71 ast::ptr< ast::Init > resolveCtorInit(
72 ast::ptr< ast::Init > resolveCtorInit(
72 73 const ast::ConstructorInit * ctorInit, const ast::SymbolTable & symtab );
73 74 /// Resolves a statement expression
74 ast::ptr< ast::Expr > resolveStmtExpr(
75 ast::ptr< ast::Expr > resolveStmtExpr(
75 76 const ast::StmtExpr * stmtExpr, const ast::SymbolTable & symtab );
76 77 } // namespace ResolvExpr
-
tests/Makefile.am
r55acc3a r139775e
53 53
54 54 # adjust CC to current flags
55 CC = LC_ALL=C $(if $(DISTCC_CFA_PATH),distcc $(DISTCC_CFA_PATH) ${ARCH_FLAGS} ,$(TARGET_CFA) ${DEBUG_FLAGS} ${ARCH_FLAGS})
55 CC = LC_ALL=C $(if $(DISTCC_CFA_PATH),distcc $(DISTCC_CFA_PATH) ${ARCH_FLAGS} ${AST_FLAGS},$(TARGET_CFA) ${DEBUG_FLAGS} ${ARCH_FLAGS} ${AST_FLAGS})
56 56 CFACC = $(CC)
57 57
… …
60 60
61 61 # adjusted CC but without the actual distcc call
62 CFACCLOCAL = $(if $(DISTCC_CFA_PATH),$(DISTCC_CFA_PATH) ${ARCH_FLAGS} ,$(TARGET_CFA) ${DEBUG_FLAGS} ${ARCH_FLAGS})
62 CFACCLOCAL = $(if $(DISTCC_CFA_PATH),$(DISTCC_CFA_PATH) ${ARCH_FLAGS} ${AST_FLAGS},$(TARGET_CFA) ${DEBUG_FLAGS} ${ARCH_FLAGS} ${AST_FLAGS})
63 63 CFACCLINK = $(CFACCLOCAL) -quiet $(if $(test), 2> $(test), ) $($(shell echo "${@}_FLAGSLD" | sed 's/-\|\//_/g'))
64 64
-
tests/config.py.in
r55acc3a r139775e
9 9 HOSTARCH = "@host_cpu@"
10 10 DISTRIBUTE = @HAS_DISTCC@
11 NEWAST = @DEFAULT_NEW_AST@
-
tests/pybin/settings.py
r55acc3a r139775e 85 85 def filter(self, tests): 86 86 return [test for test in tests if not test.arch or self.target == test.arch] 87 return True if not arch else self.target == arch88 87 89 88 @staticmethod … … 98 97 self.path = "debug" if value else "nodebug" 99 98 99 class AST: 100 def __init__(self, ast): 101 if ast == "new": 102 self.target = ast 103 self.string = "New AST" 104 self.flags = """AST_FLAGS=-XCFA,--new-ast""" 105 elif ast == "old": 106 self.target = ast 107 self.string = "Old AST" 108 self.flags = """AST_FLAGS=-XCFA,--old-ast""" 109 elif ast == None: 110 self.target = "new" if config.NEWAST else "old" 111 self.string = "Default AST (%s)" % self.target 112 self.flags = """AST_FLAGS=""" 113 else: 114 print("""ERROR: Invalid ast configuration, must be "old", "new" or left unspecified, was %s""" % (value), file=sys.stderr) 115 sys.exit(1) 116 117 def filter(self, tests): 118 119 return [test for test in tests if not test.astv or self.target == test.astv] 120 100 121 class Install: 101 122 def __init__(self, value): … … 120 141 121 142 def init( options ): 143 global all_ast 122 144 global all_arch 123 145 global all_debug 124 146 global all_install 147 global ast 125 148 global arch 149 global debug 126 150 global archive 151 global install 152 127 153 global continue_ 128 global debug129 154 global dry_run 130 155 global generating 131 global install132 156 global make 133 157 global output_width … … 135 159 global timeout2gdb 136 160 161 all_ast = [AST(o) for o in list(dict.fromkeys(options.ast ))] if options.ast else [AST(None)] 137 162 all_arch = [Architecture(o) for o in list(dict.fromkeys(options.arch ))] if options.arch else [Architecture(None)] 138 163 all_debug = [Debug(o) for o in list(dict.fromkeys(options.debug ))] -
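The new knob mirrors the existing architecture handling: passing, say, `--ast new,old` to the test driver runs every test under both configurations in turn, exporting `AST_FLAGS=-XCFA,--new-ast` or `AST_FLAGS=-XCFA,--old-ast` to the build, while leaving `--ast` unset falls back to the `NEWAST` default baked in by configure (the invocation shown is an illustrative example).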
tests/pybin/test_run.py
r55acc3a r139775e 11 11 self.path = '' 12 12 self.arch = '' 13 self.astv = '' 13 14 14 15 def toString(self): 15 return "{:25s} ({:5s} {:s})".format( self.name, self.arch if self.archelse "Any", self.target() )16 return "{:25s} ({:5s} arch, {:s} ast: {:s})".format( self.name, self.arch if self.arch else "Any", self.astv if self.astv else "Any", self.target() ) 16 17 17 18 def prepare(self): … … 20 21 21 22 def expect(self): 22 return os.path.normpath( os.path.join(settings.SRCDIR , self.path, ".expect", "%s%s.txt" % (self.name,'' if not self.arch else ".%s" % self.arch)) ) 23 arch = '' if not self.arch else ".%s" % self.arch 24 astv = '' if not self.astv else ".nast" if self.astv == "new" else ".oast" 25 return os.path.normpath( os.path.join(settings.SRCDIR , self.path, ".expect", "%s%s%s.txt" % (self.name,astv,arch)) ) 23 26 24 27 def error_log(self): … … 45 48 46 49 @staticmethod 47 def new_target(target, arch ):50 def new_target(target, arch, astv): 48 51 test = Test() 49 52 test.name = os.path.basename(target) 50 53 test.path = os.path.relpath (os.path.dirname(target), settings.SRCDIR) 51 54 test.arch = arch.target if arch else '' 55 test.astv = astv.target if astv else '' 52 56 return test 53 57 -
tests/pybin/tools.py
r55acc3a r139775e
181 181 '-s' if silent else None,
182 182 test_param,
183 settings.ast.flags,
183 184 settings.arch.flags,
184 185 settings.debug.flags,
-
tests/test.py
r55acc3a r139775e 24 24 25 25 def match_test(path): 26 match = re.search("^%s\/([\w\/\-_]*).expect\/([\w\-_]+)(\. [\w\-_]+)?\.txt$" % settings.SRCDIR, path)26 match = re.search("^%s\/([\w\/\-_]*).expect\/([\w\-_]+)(\.nast|\.oast)?(\.[\w\-_]+)?\.txt$" % settings.SRCDIR, path) 27 27 if match : 28 28 test = Test() 29 29 test.name = match.group(2) 30 30 test.path = match.group(1) 31 test.arch = match.group(3)[1:] if match.group(3) else None 31 test.arch = match.group(4)[1:] if match.group(4) else None 32 33 astv = match.group(3)[1:] if match.group(3) else None 34 if astv == 'oast': 35 test.astv = 'old' 36 elif astv == 'nast': 37 test.astv = 'new' 38 elif astv: 39 print('ERROR: "%s", expect file has astv but it is not "nast" or "oast"' % testname, file=sys.stderr) 40 sys.exit(1) 41 32 42 expected.append(test) 33 43 … … 66 76 if options.regenerate_expected : 67 77 for testname in options.tests : 68 testname = canonical_path( testname ) 78 testname = os.path.normpath( os.path.join(settings.SRCDIR, testname) ) 79 69 80 # first check if this is a valid name to regenerate 70 81 if Test.valid_name(testname): 71 82 # this is a valid name, let's check if it already exists 72 83 found = [test for test in all_tests if canonical_path( test.target() ) == testname] 84 setup = itertools.product(settings.all_arch if options.arch else [None], settings.all_ast if options.ast else [None]) 73 85 if not found: 74 # it's a new name, create it according to the name and specified architecture 75 if options.arch: 76 # user specified one or multiple architectures, assume the tests will have architecture specific results 77 tests.extend( [Test.new_target(testname, arch) for arch in settings.all_arch] ) 78 else: 79 # user didn't specify an architecture, just create a cross platform test 80 tests.append( Test.new_target( testname, None ) ) 86 # it's a new name, create it according to the name and specified architecture/ast version 87 tests.extend( [Test.new_target(testname, arch, ast) for arch, ast in setup] ) 81 88 elif len(found) == 1 and not found[0].arch: 82 89 # we found a single test, the user better be wanting to create a cross platform test 83 90 if options.arch: 84 91 print('ERROR: "%s", test has no specified architecture but --arch was specified, ignoring it' % testname, file=sys.stderr) 92 elif options.ast: 93 print('ERROR: "%s", test has no specified ast version but --ast was specified, ignoring it' % testname, file=sys.stderr) 85 94 else: 86 95 tests.append( found[0] ) 87 96 else: 88 97 # this test is already cross platform, just add a test for each platform the user asked 89 tests.extend( [Test.new_target(testname, arch ) for arch in settings.all_arch] )98 tests.extend( [Test.new_target(testname, arch, ast) for arch, ast in setup] ) 90 99 91 100 # print a warning if it users didn't ask for a specific architecture 92 101 if not options.arch: 93 102 print('WARNING: "%s", test has architecture specific expected files but --arch was not specified, regenerating only for current host' % testname, file=sys.stderr) 103 104 105 # print a warning if it users didn't ask for a specific ast version 106 if not options.ast: 107 print('WARNING: "%s", test has ast version specific expected files but --ast was not specified, regenerating only for current ast' % testname, file=sys.stderr) 94 108 95 109 else : … … 112 126 # create a parser with the arguments for the tests script 113 127 parser = argparse.ArgumentParser(description='Script which runs cforall tests') 128 parser.add_argument('--ast', help='Test for specific ast', 
type=comma_separated(str), default=None) 129 parser.add_argument('--arch', help='Test for specific architecture', type=comma_separated(str), default=None) 114 130 parser.add_argument('--debug', help='Run all tests in debug or release', type=comma_separated(yes_no), default='yes') 115 131 parser.add_argument('--install', help='Run all tests based on installed binaries or tree binaries', type=comma_separated(yes_no), default='no') 116 parser.add_argument('--arch', help='Test for specific architecture', type=comma_separated(str), default=None)117 132 parser.add_argument('--continue', help='When multiple specifications are passed (debug/install/arch), sets whether or not to continue if the last specification failed', type=yes_no, default='yes', dest='continue_') 118 133 parser.add_argument('--timeout', help='Maximum duration in seconds after a single test is considered to have timed out', type=int, default=120) … … 251 266 except KeyboardInterrupt: 252 267 return False, "" 253 except Exception as ex:254 print("Unexpected error in worker thread running {}: {}".format(t.target(), ex), file=sys.stderr)255 sys.stderr.flush()256 return False, ""268 # except Exception as ex: 269 # print("Unexpected error in worker thread running {}: {}".format(t.target(), ex), file=sys.stderr) 270 # sys.stderr.flush() 271 # return False, "" 257 272 258 273 … … 362 377 # for each build configurations, run the test 363 378 with Timed() as total_dur: 364 for arch, debug, install in itertools.product(settings.all_arch, settings.all_debug, settings.all_install): 379 for ast, arch, debug, install in itertools.product(settings.all_ast, settings.all_arch, settings.all_debug, settings.all_install): 380 settings.ast = ast 365 381 settings.arch = arch 366 382 settings.debug = debug … … 369 385 # filter out the tests for a different architecture 370 386 # tests are the same across debug/install 371 local_tests = settings.arch.filter( tests ) 387 local_tests = settings.ast.filter( tests ) 388 local_tests = settings.arch.filter( local_tests ) 372 389 options.jobs, forceJobs = job_count( options, local_tests ) 373 390 settings.update_make_cmd(forceJobs, options.jobs) … … 377 394 378 395 # print configuration 379 print('%s %i tests on %i cores (%s:%s )' % (396 print('%s %i tests on %i cores (%s:%s - %s)' % ( 380 397 'Regenerating' if settings.generating else 'Running', 381 398 len(local_tests), 382 399 options.jobs, 400 settings.ast.string, 383 401 settings.arch.string, 384 402 settings.debug.string 385 403 )) 404 if not local_tests : 405 print('WARNING: No tests for this configuration') 406 continue 386 407 387 408 # otherwise run all tests and make sure to return the correct error code -
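Concretely, under the extended expect-file naming a test `foo` whose output differs only under the new AST on x64 keeps that output in `.expect/foo.nast.x64.txt`, an old-AST-specific baseline in `.expect/foo.oast.txt`, and plain `.expect/foo.txt` remains the any-AST, any-architecture fallback (file names are illustrative instances of the regex above).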
tools/stat.py
r55acc3a r139775e
1 #!/usr/bin/python
1 #!/usr/bin/python3
2 2
3 3 import sys
… …
17 17 avg = numpy.mean (content)
18 18 std = numpy.std (content)
19 print "median {0:.1f} avg {1:.1f} stddev {2:.1f}".format( med, avg, std)
19 print("median {0:.1f} avg {1:.1f} stddev {2:.1f}".format( med, avg, std ))
20 20
21 21