- Timestamp: Jun 29, 2021, 5:35:19 PM
- Branches: ADT, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: dcad80a
- Parents: 5a46e09 (diff), d02e547 (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Location: libcfa
- Files: 4 added, 23 edited
libcfa/configure.ac
r5a46e09 r660665f 131 131 #io_uring 5.5 uses enum values 132 132 #io_uring 5.6 and later uses probes 133 134 AH_TEMPLATE([CFA_HAVE_LINUX_RSEQ_H],[Defined if rseq support is present when compiling libcfathread.]) 135 AC_CHECK_HEADERS([linux/rseq.h], [AC_DEFINE(CFA_HAVE_LINUX_RSEQ_H)]) 136 137 AH_TEMPLATE([CFA_HAVE_LINUX_LIBRSEQ],[Defined if librseq support is present when compiling libcfathread.]) 138 AC_CHECK_LIB([rseq], [rseq_available], [AC_DEFINE(CFA_HAVE_LINUX_RSEQ_H)], []) 133 139 134 140 AH_TEMPLATE([CFA_HAVE_LINUX_IO_URING_H],[Defined if io_uring support is present when compiling libcfathread.]) -
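The configure.ac hunk probes for restartable-sequence (rseq) support in two forms, the <linux/rseq.h> kernel header and the userspace librseq library, and records the results as CFA_HAVE_LINUX_RSEQ_H / CFA_HAVE_LINUX_LIBRSEQ for libcfathread. A minimal C sketch of how such configure-time feature macros are consumed; the include ladder mirrors the one added to kernel_private.hfa below, everything else is illustrative:

    /* Pick a "current CPU" strategy based on what configure detected. */
    #if defined(CFA_HAVE_LINUX_LIBRSEQ)
        #include <rseq/rseq.h>      /* userspace librseq */
    #elif defined(CFA_HAVE_LINUX_RSEQ_H)
        #include <linux/rseq.h>     /* raw kernel interface */
    #else
        #include <sched.h>          /* portable fallback: sched_getcpu() */
    #endif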
libcfa/prelude/defines.hfa.in
r5a46e09 r660665f 171 171 #undef CFA_HAVE_LINUX_IO_URING_H 172 172 173 /* Defined if librseq support is present when compiling libcfathread. */ 174 #undef CFA_HAVE_LINUX_LIBRSEQ 175 176 /* Defined if rseq support is present when compiling libcfathread. */ 177 #undef CFA_HAVE_LINUX_RSEQ_H 178 173 179 /* Defined if openat2 support is present when compiling libcfathread. */ 174 180 #undef CFA_HAVE_OPENAT2 … … 205 211 #undef HAVE_LINUX_IO_URING_H 206 212 213 /* Define to 1 if you have the <linux/rseq.h> header file. */ 214 #undef HAVE_LINUX_RSEQ_H 215 207 216 /* Define to 1 if you have the <memory.h> header file. */ 208 217 #undef HAVE_MEMORY_H -
libcfa/src/Makefile.am
r5a46e09 r660665f 61 61 containers/queueLockFree.hfa \ 62 62 containers/stackLockFree.hfa \ 63 containers/vector2.hfa \ 63 64 vec/vec.hfa \ 64 65 vec/vec2.hfa \ … … 69 70 common.hfa \ 70 71 fstream.hfa \ 71 strstream.hfa \72 72 heap.hfa \ 73 73 iostream.hfa \ … … 78 78 rational.hfa \ 79 79 stdlib.hfa \ 80 strstream.hfa \ 80 81 time.hfa \ 81 82 bits/weakso_locks.hfa \ … … 83 84 containers/pair.hfa \ 84 85 containers/result.hfa \ 85 containers/vector.hfa 86 containers/vector.hfa \ 87 device/cpu.hfa 86 88 87 89 libsrc = ${inst_headers_src} ${inst_headers_src:.hfa=.cfa} \ -
libcfa/src/bits/signal.hfa
r5a46e09 r660665f 20 20 21 21 #include <errno.h> 22 #define __USE_GNU23 22 #include <signal.h> 24 #undef __USE_GNU25 23 #include <stdlib.h> 26 24 #include <string.h> -
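This hunk drops the local #define __USE_GNU / #undef __USE_GNU bracket around <signal.h>; the translation units that need GNU extensions now define _GNU_SOURCE before their first include instead (see the coroutine.cfa, io.cfa, kernel.cfa and later hunks). A minimal C illustration of the intended pattern; sighandler_t is just one example of a symbol gated behind _GNU_SOURCE:

    /* Request GNU extensions for the whole translation unit, before any
       system header is included; never toggle __USE_GNU by hand. */
    #define _GNU_SOURCE
    #include <signal.h>

    static sighandler_t previous;   /* sighandler_t is exposed by _GNU_SOURCE */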
libcfa/src/concurrency/coroutine.cfa
r5a46e09 r660665f 15 15 16 16 #define __cforall_thread__ 17 #define _GNU_SOURCE 17 18 18 19 #include "coroutine.hfa" -
libcfa/src/concurrency/io.cfa
r5a46e09 r660665f 15 15 16 16 #define __cforall_thread__ 17 #define _GNU_SOURCE 17 18 18 19 #if defined(__CFA_DEBUG__) … … 23 24 24 25 #if defined(CFA_HAVE_LINUX_IO_URING_H) 25 #define _GNU_SOURCE /* See feature_test_macros(7) */26 26 #include <errno.h> 27 27 #include <signal.h> -
libcfa/src/concurrency/io/setup.cfa
r5a46e09 r660665f 15 15 16 16 #define __cforall_thread__ 17 #define _GNU_SOURCE /* See feature_test_macros(7) */17 #define _GNU_SOURCE 18 18 19 19 #if defined(__CFA_DEBUG__) -
libcfa/src/concurrency/kernel.cfa
r5a46e09 r660665f 15 15 16 16 #define __cforall_thread__ 17 #define _GNU_SOURCE 18 17 19 // #define __CFA_DEBUG_PRINT_RUNTIME_CORE__ 18 20 … … 278 280 279 281 // Spin a little on I/O, just in case 280 282 for(5) { 281 283 __maybe_io_drain( this ); 282 284 readyThread = pop_fast( this->cltr ); … … 285 287 286 288 // no luck, try stealing a few times 287 289 for(5) { 288 290 if( __maybe_io_drain( this ) ) { 289 291 readyThread = pop_fast( this->cltr ); … … 422 424 __cfactx_switch( &proc_cor->context, &thrd_dst->context ); 423 425 // when __cfactx_switch returns we are back in the processor coroutine 426 427 424 428 425 429 /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary ); … … 522 526 523 527 /* paranoid */ verify( ! __preemption_enabled() ); 524 /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) , "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );525 /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) , "ERROR : Returning $thread %p has been corrupted.\n StackPointer too large.\n", thrd_src );528 /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src ); 529 /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning $thread %p has been corrupted.\n StackPointer too large.\n", thrd_src ); 526 530 } 527 531 -
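Besides defining _GNU_SOURCE, this hunk bounds the idle processor's spin: the Cforall for(5) loop drains pending I/O and retries the fast pop a fixed five times before moving on to stealing, and the stack-pointer sanity checks are relaxed for threads flagged as running in a coroutine context (corctx_flag). A plain-C sketch of the retry shape; the early-exit condition is an assumption, only __maybe_io_drain and pop_fast are names from the hunk, and readyThread is declared earlier as in the surrounding code:

    /* Spin a little on I/O: drain completions, retry the local pop a bounded
       number of times, then fall through to the stealing path. */
    for (int attempt = 0; attempt < 5; attempt++) {
        __maybe_io_drain(this);
        readyThread = pop_fast(this->cltr);
        if (readyThread) break;   /* assumed: stop as soon as work is found */
    }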
libcfa/src/concurrency/kernel.hfa
r5a46e09 r660665f 66 66 unsigned id; 67 67 unsigned target; 68 unsigned last; 68 69 unsigned long long int cutoff; 69 70 } rdq; -
libcfa/src/concurrency/kernel/startup.cfa
r5a46e09 r660665f 15 15 16 16 #define __cforall_thread__ 17 #define _GNU_SOURCE 17 18 18 19 // C Includes 19 20 #include <errno.h> // errno 21 #include <signal.h> 20 22 #include <string.h> // strerror 21 23 #include <unistd.h> // sysconf 24 22 25 extern "C" { 23 26 #include <limits.h> // PTHREAD_STACK_MIN 27 #include <unistd.h> // syscall 24 28 #include <sys/eventfd.h> // eventfd 25 29 #include <sys/mman.h> // mprotect … … 136 140 }; 137 141 142 #if defined(CFA_HAVE_LINUX_LIBRSEQ) 143 // No data needed 144 #elif defined(CFA_HAVE_LINUX_RSEQ_H) 145 extern "Cforall" { 146 __attribute__((aligned(128))) thread_local volatile struct rseq __cfaabi_rseq @= { 147 .cpu_id : RSEQ_CPU_ID_UNINITIALIZED, 148 }; 149 } 150 #else 151 // No data needed 152 #endif 153 138 154 //----------------------------------------------------------------------------- 139 155 // Struct to steal stack … … 468 484 self_mon_p = &self_mon; 469 485 link.next = 0p; 470 link.ts = 0;486 link.ts = -1llu; 471 487 preferred = -1u; 472 488 last_proc = 0p; … … 497 513 this.rdq.id = -1u; 498 514 this.rdq.target = -1u; 515 this.rdq.last = -1u; 499 516 this.rdq.cutoff = 0ull; 500 517 do_terminate = false; -
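When only the raw <linux/rseq.h> interface is available, startup now reserves one rseq registration area per kernel thread; the kernel keeps its cpu_id field current once the thread registers (see the ready_queue.cfa hunk below for the registration syscall). The hunk also switches the idle timestamp sentinel from 0 to -1llu. A plain C equivalent of the thread-local declaration; the Cforall @= initialization in the hunk corresponds to an ordinary C designated initializer:

    #include <linux/rseq.h>

    /* One rseq area per kernel thread.  The 128-byte alignment matches the hunk
       and keeps the area from sharing cache lines with unrelated data. */
    __attribute__((aligned(128))) _Thread_local volatile struct rseq __cfaabi_rseq = {
        .cpu_id = RSEQ_CPU_ID_UNINITIALIZED,
    };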
libcfa/src/concurrency/kernel_private.hfa
r5a46e09 r660665f 16 16 #pragma once 17 17 18 #if !defined(__cforall_thread__) 19 #error kernel_private.hfa should only be included in libcfathread source 20 #endif 21 18 22 #include "kernel.hfa" 19 23 #include "thread.hfa" … … 22 26 #include "stats.hfa" 23 27 28 extern "C" { 29 #if defined(CFA_HAVE_LINUX_LIBRSEQ) 30 #include <rseq/rseq.h> 31 #elif defined(CFA_HAVE_LINUX_RSEQ_H) 32 #include <linux/rseq.h> 33 #else 34 #ifndef _GNU_SOURCE 35 #error kernel_private requires gnu_source 36 #endif 37 #include <sched.h> 38 #endif 39 } 40 24 41 //----------------------------------------------------------------------------- 25 42 // Scheduler 26 27 28 43 extern "C" { 29 44 void disable_interrupts() OPTIONAL_THREAD; … … 39 54 40 55 //----------------------------------------------------------------------------- 56 // Hardware 57 58 #if defined(CFA_HAVE_LINUX_LIBRSEQ) 59 // No data needed 60 #elif defined(CFA_HAVE_LINUX_RSEQ_H) 61 extern "Cforall" { 62 extern __attribute__((aligned(128))) thread_local volatile struct rseq __cfaabi_rseq; 63 } 64 #else 65 // No data needed 66 #endif 67 68 static inline int __kernel_getcpu() { 69 /* paranoid */ verify( ! __preemption_enabled() ); 70 #if defined(CFA_HAVE_LINUX_LIBRSEQ) 71 return rseq_current_cpu(); 72 #elif defined(CFA_HAVE_LINUX_RSEQ_H) 73 int r = __cfaabi_rseq.cpu_id; 74 /* paranoid */ verify( r >= 0 ); 75 return r; 76 #else 77 return sched_getcpu(); 78 #endif 79 } 80 81 //----------------------------------------------------------------------------- 41 82 // Processor 42 83 void main(processorCtx_t *); … … 44 85 void * __create_pthread( pthread_t *, void * (*)(void *), void * ); 45 86 void __destroy_pthread( pthread_t pthread, void * stack, void ** retval ); 46 47 48 87 49 88 extern cluster * mainCluster; -
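__kernel_getcpu() gives the scheduler a cheap answer to "which CPU am I on": with librseq or the raw rseq area it is a single memory read, otherwise it falls back to the sched_getcpu() wrapper (hence the _GNU_SOURCE requirement). A hedged usage sketch; the shard arithmetic is illustrative and only __kernel_getcpu, READYQ_SHARD_FACTOR and the interrupt guards come from the patch:

    /* The answer is only meaningful while the thread cannot migrate, so callers
       query it with preemption disabled and use it to pick a queue shard. */
    disable_interrupts();
    int cpu = __kernel_getcpu();                    /* rseq read or sched_getcpu() */
    unsigned shard = cpu * READYQ_SHARD_FACTOR;     /* first shard owned by this CPU */
    /* ... operate on lanes.data[shard .. shard + READYQ_SHARD_FACTOR - 1] ... */
    enable_interrupts();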
libcfa/src/concurrency/locks.cfa
r5a46e09 r660665f 16 16 17 17 #define __cforall_thread__ 18 #define _GNU_SOURCE 18 19 19 20 #include "locks.hfa" -
libcfa/src/concurrency/locks.hfa
r5a46e09 r660665f 24 24 #include "containers/list.hfa" 25 25 26 #include "limits.hfa" 26 27 #include "thread.hfa" 27 28 … … 87 88 bool tryP(BinaryBenaphore & this) { 88 89 ssize_t c = this.counter; 90 /* paranoid */ verify( c > MIN ); 89 91 return (c >= 1) && __atomic_compare_exchange_n(&this.counter, &c, c-1, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED); 90 92 } … … 94 96 ssize_t c = 0; 95 97 for () { 98 /* paranoid */ verify( this.counter < MAX ); 96 99 if (__atomic_compare_exchange_n(&this.counter, &c, c+1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { 97 100 if (c == 0) return true; … … 173 176 ThreadBenaphore sem; 174 177 }; 178 179 static inline void ?{}(fast_lock & this) { this.owner = 0p; } 175 180 176 181 static inline bool $try_lock(fast_lock & this, $thread * thrd) { -
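The locks.hfa hunk pulls in limits.hfa so the new paranoid checks can assert the benaphore counter never reaches the representable extremes, where the compare-and-swap based tryP/tryV would silently wrap; it also gives fast_lock a constructor that zeroes the owner. A plain-C sketch of the guarded try-acquire; the counter type and bound are stand-ins for the library's ssize_t and the MIN constant from limits.hfa:

    #include <limits.h>
    #include <stdbool.h>
    #include <assert.h>

    /* Try-acquire on a counting benaphore: succeed only when the counter is
       positive, decrementing it with a single CAS. */
    bool tryP(volatile long *counter) {
        long c = *counter;
        assert(c > LONG_MIN);   /* bound check added by the patch */
        return (c >= 1) && __atomic_compare_exchange_n(counter, &c, c - 1,
                               false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
    }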
libcfa/src/concurrency/monitor.cfa
r5a46e09 r660665f 15 15 16 16 #define __cforall_thread__ 17 #define _GNU_SOURCE 17 18 18 19 #include "monitor.hfa" -
libcfa/src/concurrency/mutex.cfa
r5a46e09 r660665f 17 17 18 18 #define __cforall_thread__ 19 #define _GNU_SOURCE 19 20 20 21 #include "mutex.hfa" -
libcfa/src/concurrency/preemption.cfa
r5a46e09 r660665f 15 15 16 16 #define __cforall_thread__ 17 #define _GNU_SOURCE 18 17 19 // #define __CFA_DEBUG_PRINT_PREEMPTION__ 18 20 -
libcfa/src/concurrency/ready_queue.cfa
r5a46e09 r660665f 15 15 16 16 #define __cforall_thread__ 17 #define _GNU_SOURCE 18 17 19 // #define __CFA_DEBUG_PRINT_READY_QUEUE__ 18 20 … … 20 22 #define USE_RELAXED_FIFO 21 23 // #define USE_WORK_STEALING 24 // #define USE_CPU_WORK_STEALING 22 25 23 26 #include "bits/defs.hfa" 27 #include "device/cpu.hfa" 24 28 #include "kernel_private.hfa" 25 29 26 #define _GNU_SOURCE27 30 #include "stdlib.hfa" 28 31 #include "math.hfa" 29 32 33 #include <errno.h> 30 34 #include <unistd.h> 35 36 extern "C" { 37 #include <sys/syscall.h> // __NR_xxx 38 } 31 39 32 40 #include "ready_subqueue.hfa" … … 46 54 #endif 47 55 48 #if defined(USE_RELAXED_FIFO) 56 #if defined(USE_CPU_WORK_STEALING) 57 #define READYQ_SHARD_FACTOR 2 58 #elif defined(USE_RELAXED_FIFO) 49 59 #define BIAS 4 50 60 #define READYQ_SHARD_FACTOR 4 … … 85 95 } 86 96 97 #if defined(CFA_HAVE_LINUX_LIBRSEQ) 98 // No forward declaration needed 99 #define __kernel_rseq_register rseq_register_current_thread 100 #define __kernel_rseq_unregister rseq_unregister_current_thread 101 #elif defined(CFA_HAVE_LINUX_RSEQ_H) 102 void __kernel_raw_rseq_register (void); 103 void __kernel_raw_rseq_unregister(void); 104 105 #define __kernel_rseq_register __kernel_raw_rseq_register 106 #define __kernel_rseq_unregister __kernel_raw_rseq_unregister 107 #else 108 // No forward declaration needed 109 // No initialization needed 110 static inline void noop(void) {} 111 112 #define __kernel_rseq_register noop 113 #define __kernel_rseq_unregister noop 114 #endif 115 87 116 //======================================================================= 88 117 // Cluster wide reader-writer lock … … 107 136 // Lock-Free registering/unregistering of threads 108 137 unsigned register_proc_id( void ) with(*__scheduler_lock) { 138 __kernel_rseq_register(); 139 109 140 __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc); 110 141 bool * handle = (bool *)&kernelTLS().sched_lock; … … 161 192 162 193 __cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc); 194 195 __kernel_rseq_unregister(); 163 196 } 164 197 … … 214 247 //======================================================================= 215 248 void ?{}(__ready_queue_t & this) with (this) { 216 lanes.data = 0p; 217 lanes.tscs = 0p; 218 lanes.count = 0; 249 #if defined(USE_CPU_WORK_STEALING) 250 lanes.count = cpu_info.hthrd_count * READYQ_SHARD_FACTOR; 251 lanes.data = alloc( lanes.count ); 252 lanes.tscs = alloc( lanes.count ); 253 254 for( idx; (size_t)lanes.count ) { 255 (lanes.data[idx]){}; 256 lanes.tscs[idx].tv = rdtscl(); 257 } 258 #else 259 lanes.data = 0p; 260 lanes.tscs = 0p; 261 lanes.count = 0; 262 #endif 219 263 } 220 264 221 265 void ^?{}(__ready_queue_t & this) with (this) { 222 verify( SEQUENTIAL_SHARD == lanes.count ); 266 #if !defined(USE_CPU_WORK_STEALING) 267 verify( SEQUENTIAL_SHARD == lanes.count ); 268 #endif 269 223 270 free(lanes.data); 224 271 free(lanes.tscs); … … 226 273 227 274 //----------------------------------------------------------------------- 275 #if defined(USE_CPU_WORK_STEALING) 276 __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd, bool push_local) with (cltr->ready_queue) { 277 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr); 278 279 processor * const proc = kernelTLS().this_processor; 280 const bool external = !push_local || (!proc) || (cltr != proc->cltr); 281 282 const int cpu = __kernel_getcpu(); 283 /* paranoid */ verify(cpu >= 0); 284 /* paranoid */ verify(cpu < 
cpu_info.hthrd_count); 285 /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count); 286 287 const cpu_map_entry_t & map = cpu_info.llc_map[cpu]; 288 /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count); 289 /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count); 290 /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR); 291 292 const int start = map.self * READYQ_SHARD_FACTOR; 293 unsigned i; 294 do { 295 unsigned r; 296 if(unlikely(external)) { r = __tls_rand(); } 297 else { r = proc->rdq.its++; } 298 i = start + (r % READYQ_SHARD_FACTOR); 299 // If we can't lock it retry 300 } while( !__atomic_try_acquire( &lanes.data[i].lock ) ); 301 302 // Actually push it 303 push(lanes.data[i], thrd); 304 305 // Unlock and return 306 __atomic_unlock( &lanes.data[i].lock ); 307 308 #if !defined(__CFA_NO_STATISTICS__) 309 if(unlikely(external)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED); 310 else __tls_stats()->ready.push.local.success++; 311 #endif 312 313 __cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first); 314 315 } 316 317 // Pop from the ready queue from a given cluster 318 __attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) { 319 /* paranoid */ verify( lanes.count > 0 ); 320 /* paranoid */ verify( kernelTLS().this_processor ); 321 322 const int cpu = __kernel_getcpu(); 323 /* paranoid */ verify(cpu >= 0); 324 /* paranoid */ verify(cpu < cpu_info.hthrd_count); 325 /* paranoid */ verify(cpu * READYQ_SHARD_FACTOR < lanes.count); 326 327 const cpu_map_entry_t & map = cpu_info.llc_map[cpu]; 328 /* paranoid */ verify(map.start * READYQ_SHARD_FACTOR < lanes.count); 329 /* paranoid */ verify(map.self * READYQ_SHARD_FACTOR < lanes.count); 330 /* paranoid */ verifyf((map.start + map.count) * READYQ_SHARD_FACTOR <= lanes.count, "have %zu lanes but map can go up to %u", lanes.count, (map.start + map.count) * READYQ_SHARD_FACTOR); 331 332 processor * const proc = kernelTLS().this_processor; 333 const int start = map.self * READYQ_SHARD_FACTOR; 334 335 // Did we already have a help target 336 if(proc->rdq.target == -1u) { 337 // if We don't have a 338 unsigned long long min = ts(lanes.data[start]); 339 for(i; READYQ_SHARD_FACTOR) { 340 unsigned long long tsc = ts(lanes.data[start + i]); 341 if(tsc < min) min = tsc; 342 } 343 proc->rdq.cutoff = min; 344 345 /* paranoid */ verify(lanes.count < 65536); // The following code assumes max 65536 cores. 346 /* paranoid */ verify(map.count < 65536); // The following code assumes max 65536 cores. 
347 uint64_t chaos = __tls_rand(); 348 uint64_t high_chaos = (chaos >> 32); 349 uint64_t mid_chaos = (chaos >> 16) & 0xffff; 350 uint64_t low_chaos = chaos & 0xffff; 351 352 unsigned me = map.self; 353 unsigned cpu_chaos = map.start + (mid_chaos % map.count); 354 bool global = cpu_chaos == me; 355 356 if(global) { 357 proc->rdq.target = high_chaos % lanes.count; 358 } else { 359 proc->rdq.target = (cpu_chaos * READYQ_SHARD_FACTOR) + (low_chaos % READYQ_SHARD_FACTOR); 360 /* paranoid */ verify(proc->rdq.target >= (map.start * READYQ_SHARD_FACTOR)); 361 /* paranoid */ verify(proc->rdq.target < ((map.start + map.count) * READYQ_SHARD_FACTOR)); 362 } 363 364 /* paranoid */ verify(proc->rdq.target != -1u); 365 } 366 else { 367 const unsigned long long bias = 0; //2_500_000_000; 368 const unsigned long long cutoff = proc->rdq.cutoff > bias ? proc->rdq.cutoff - bias : proc->rdq.cutoff; 369 { 370 unsigned target = proc->rdq.target; 371 proc->rdq.target = -1u; 372 if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) { 373 $thread * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help)); 374 proc->rdq.last = target; 375 if(t) return t; 376 } 377 } 378 379 unsigned last = proc->rdq.last; 380 if(last != -1u && lanes.tscs[last].tv < cutoff && ts(lanes.data[last]) < cutoff) { 381 $thread * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help)); 382 if(t) return t; 383 } 384 else { 385 proc->rdq.last = -1u; 386 } 387 } 388 389 for(READYQ_SHARD_FACTOR) { 390 unsigned i = start + (proc->rdq.itr++ % READYQ_SHARD_FACTOR); 391 if($thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t; 392 } 393 394 // All lanes where empty return 0p 395 return 0p; 396 } 397 398 __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) { 399 processor * const proc = kernelTLS().this_processor; 400 unsigned last = proc->rdq.last; 401 if(last != -1u) { 402 struct $thread * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.steal)); 403 if(t) return t; 404 proc->rdq.last = -1u; 405 } 406 407 unsigned i = __tls_rand() % lanes.count; 408 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal)); 409 } 410 __attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) { 411 return search(cltr); 412 } 413 #endif 228 414 #if defined(USE_RELAXED_FIFO) 229 415 //----------------------------------------------------------------------- … … 519 705 if(is_empty(sl)) { 520 706 assert( sl.anchor.next == 0p ); 521 assert( sl.anchor.ts == 0);707 assert( sl.anchor.ts == -1llu ); 522 708 assert( mock_head(sl) == sl.prev ); 523 709 } else { 524 710 assert( sl.anchor.next != 0p ); 525 assert( sl.anchor.ts != 0);711 assert( sl.anchor.ts != -1llu ); 526 712 assert( mock_head(sl) != sl.prev ); 527 713 } … … 573 759 lanes.tscs = alloc(lanes.count, lanes.tscs`realloc); 574 760 for(i; lanes.count) { 575 unsigned long long tsc = ts(lanes.data[i]); 576 lanes.tscs[i].tv = tsc != 0 ? 
tsc : rdtscl(); 761 unsigned long long tsc1 = ts(lanes.data[i]); 762 unsigned long long tsc2 = rdtscl(); 763 lanes.tscs[i].tv = min(tsc1, tsc2); 577 764 } 578 765 #endif 579 766 } 580 767 581 // Grow the ready queue 582 void ready_queue_grow(struct cluster * cltr) { 583 size_t ncount; 584 int target = cltr->procs.total; 585 586 /* paranoid */ verify( ready_mutate_islocked() ); 587 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n"); 588 589 // Make sure that everything is consistent 590 /* paranoid */ check( cltr->ready_queue ); 591 592 // grow the ready queue 593 with( cltr->ready_queue ) { 594 // Find new count 595 // Make sure we always have atleast 1 list 596 if(target >= 2) { 597 ncount = target * READYQ_SHARD_FACTOR; 598 } else { 599 ncount = SEQUENTIAL_SHARD; 600 } 601 602 // Allocate new array (uses realloc and memcpies the data) 603 lanes.data = alloc( ncount, lanes.data`realloc ); 604 605 // Fix the moved data 606 for( idx; (size_t)lanes.count ) { 607 fix(lanes.data[idx]); 608 } 609 610 // Construct new data 611 for( idx; (size_t)lanes.count ~ ncount) { 612 (lanes.data[idx]){}; 613 } 614 615 // Update original 616 lanes.count = ncount; 617 } 618 619 fix_times(cltr); 620 621 reassign_cltr_id(cltr); 622 623 // Make sure that everything is consistent 624 /* paranoid */ check( cltr->ready_queue ); 625 626 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n"); 627 628 /* paranoid */ verify( ready_mutate_islocked() ); 629 } 630 631 // Shrink the ready queue 632 void ready_queue_shrink(struct cluster * cltr) { 633 /* paranoid */ verify( ready_mutate_islocked() ); 634 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n"); 635 636 // Make sure that everything is consistent 637 /* paranoid */ check( cltr->ready_queue ); 638 639 int target = cltr->procs.total; 640 641 with( cltr->ready_queue ) { 642 // Remember old count 643 size_t ocount = lanes.count; 644 645 // Find new count 646 // Make sure we always have atleast 1 list 647 lanes.count = target >= 2 ? 
target * READYQ_SHARD_FACTOR: SEQUENTIAL_SHARD; 648 /* paranoid */ verify( ocount >= lanes.count ); 649 /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 ); 650 651 // for printing count the number of displaced threads 652 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__) 653 __attribute__((unused)) size_t displaced = 0; 654 #endif 655 656 // redistribute old data 657 for( idx; (size_t)lanes.count ~ ocount) { 658 // Lock is not strictly needed but makes checking invariants much easier 659 __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock); 660 verify(locked); 661 662 // As long as we can pop from this lane to push the threads somewhere else in the queue 663 while(!is_empty(lanes.data[idx])) { 664 struct $thread * thrd; 665 unsigned long long _; 666 [thrd, _] = pop(lanes.data[idx]); 667 668 push(cltr, thrd, true); 669 670 // for printing count the number of displaced threads 671 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__) 672 displaced++; 673 #endif 674 } 675 676 // Unlock the lane 677 __atomic_unlock(&lanes.data[idx].lock); 678 679 // TODO print the queue statistics here 680 681 ^(lanes.data[idx]){}; 682 } 683 684 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced); 685 686 // Allocate new array (uses realloc and memcpies the data) 687 lanes.data = alloc( lanes.count, lanes.data`realloc ); 688 689 // Fix the moved data 690 for( idx; (size_t)lanes.count ) { 691 fix(lanes.data[idx]); 692 } 693 } 694 695 fix_times(cltr); 696 697 reassign_cltr_id(cltr); 698 699 // Make sure that everything is consistent 700 /* paranoid */ check( cltr->ready_queue ); 701 702 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n"); 703 /* paranoid */ verify( ready_mutate_islocked() ); 704 } 768 #if defined(USE_CPU_WORK_STEALING) 769 // ready_queue size is fixed in this case 770 void ready_queue_grow(struct cluster * cltr) {} 771 void ready_queue_shrink(struct cluster * cltr) {} 772 #else 773 // Grow the ready queue 774 void ready_queue_grow(struct cluster * cltr) { 775 size_t ncount; 776 int target = cltr->procs.total; 777 778 /* paranoid */ verify( ready_mutate_islocked() ); 779 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n"); 780 781 // Make sure that everything is consistent 782 /* paranoid */ check( cltr->ready_queue ); 783 784 // grow the ready queue 785 with( cltr->ready_queue ) { 786 // Find new count 787 // Make sure we always have atleast 1 list 788 if(target >= 2) { 789 ncount = target * READYQ_SHARD_FACTOR; 790 } else { 791 ncount = SEQUENTIAL_SHARD; 792 } 793 794 // Allocate new array (uses realloc and memcpies the data) 795 lanes.data = alloc( ncount, lanes.data`realloc ); 796 797 // Fix the moved data 798 for( idx; (size_t)lanes.count ) { 799 fix(lanes.data[idx]); 800 } 801 802 // Construct new data 803 for( idx; (size_t)lanes.count ~ ncount) { 804 (lanes.data[idx]){}; 805 } 806 807 // Update original 808 lanes.count = ncount; 809 } 810 811 fix_times(cltr); 812 813 reassign_cltr_id(cltr); 814 815 // Make sure that everything is consistent 816 /* paranoid */ check( cltr->ready_queue ); 817 818 __cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n"); 819 820 /* paranoid */ verify( ready_mutate_islocked() ); 821 } 822 823 // Shrink the ready queue 824 void ready_queue_shrink(struct cluster * cltr) { 825 /* paranoid */ verify( ready_mutate_islocked() ); 826 
__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n"); 827 828 // Make sure that everything is consistent 829 /* paranoid */ check( cltr->ready_queue ); 830 831 int target = cltr->procs.total; 832 833 with( cltr->ready_queue ) { 834 // Remember old count 835 size_t ocount = lanes.count; 836 837 // Find new count 838 // Make sure we always have atleast 1 list 839 lanes.count = target >= 2 ? target * READYQ_SHARD_FACTOR: SEQUENTIAL_SHARD; 840 /* paranoid */ verify( ocount >= lanes.count ); 841 /* paranoid */ verify( lanes.count == target * READYQ_SHARD_FACTOR || target < 2 ); 842 843 // for printing count the number of displaced threads 844 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__) 845 __attribute__((unused)) size_t displaced = 0; 846 #endif 847 848 // redistribute old data 849 for( idx; (size_t)lanes.count ~ ocount) { 850 // Lock is not strictly needed but makes checking invariants much easier 851 __attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock); 852 verify(locked); 853 854 // As long as we can pop from this lane to push the threads somewhere else in the queue 855 while(!is_empty(lanes.data[idx])) { 856 struct $thread * thrd; 857 unsigned long long _; 858 [thrd, _] = pop(lanes.data[idx]); 859 860 push(cltr, thrd, true); 861 862 // for printing count the number of displaced threads 863 #if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__) 864 displaced++; 865 #endif 866 } 867 868 // Unlock the lane 869 __atomic_unlock(&lanes.data[idx].lock); 870 871 // TODO print the queue statistics here 872 873 ^(lanes.data[idx]){}; 874 } 875 876 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced); 877 878 // Allocate new array (uses realloc and memcpies the data) 879 lanes.data = alloc( lanes.count, lanes.data`realloc ); 880 881 // Fix the moved data 882 for( idx; (size_t)lanes.count ) { 883 fix(lanes.data[idx]); 884 } 885 } 886 887 fix_times(cltr); 888 889 reassign_cltr_id(cltr); 890 891 // Make sure that everything is consistent 892 /* paranoid */ check( cltr->ready_queue ); 893 894 __cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n"); 895 /* paranoid */ verify( ready_mutate_islocked() ); 896 } 897 #endif 705 898 706 899 #if !defined(__CFA_NO_STATISTICS__) … … 710 903 } 711 904 #endif 905 906 907 #if defined(CFA_HAVE_LINUX_LIBRSEQ) 908 // No definition needed 909 #elif defined(CFA_HAVE_LINUX_RSEQ_H) 910 911 #if defined( __x86_64 ) || defined( __i386 ) 912 #define RSEQ_SIG 0x53053053 913 #elif defined( __ARM_ARCH ) 914 #ifdef __ARMEB__ 915 #define RSEQ_SIG 0xf3def5e7 /* udf #24035 ; 0x5de3 (ARMv6+) */ 916 #else 917 #define RSEQ_SIG 0xe7f5def3 /* udf #24035 ; 0x5de3 */ 918 #endif 919 #endif 920 921 extern void __disable_interrupts_hard(); 922 extern void __enable_interrupts_hard(); 923 924 void __kernel_raw_rseq_register (void) { 925 /* paranoid */ verify( __cfaabi_rseq.cpu_id == RSEQ_CPU_ID_UNINITIALIZED ); 926 927 // int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, (sigset_t *)0p, _NSIG / 8); 928 int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), 0, RSEQ_SIG); 929 if(ret != 0) { 930 int e = errno; 931 switch(e) { 932 case EINVAL: abort("KERNEL ERROR: rseq register invalid argument"); 933 case ENOSYS: abort("KERNEL ERROR: rseq register no supported"); 934 case EFAULT: abort("KERNEL ERROR: rseq register with invalid argument"); 935 case EBUSY : abort("KERNEL ERROR: rseq register already 
registered"); 936 case EPERM : abort("KERNEL ERROR: rseq register sig argument on unregistration does not match the signature received on registration"); 937 default: abort("KERNEL ERROR: rseq register unexpected return %d", e); 938 } 939 } 940 } 941 942 void __kernel_raw_rseq_unregister(void) { 943 /* paranoid */ verify( __cfaabi_rseq.cpu_id >= 0 ); 944 945 // int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, (sigset_t *)0p, _NSIG / 8); 946 int ret = syscall(__NR_rseq, &__cfaabi_rseq, sizeof(struct rseq), RSEQ_FLAG_UNREGISTER, RSEQ_SIG); 947 if(ret != 0) { 948 int e = errno; 949 switch(e) { 950 case EINVAL: abort("KERNEL ERROR: rseq unregister invalid argument"); 951 case ENOSYS: abort("KERNEL ERROR: rseq unregister no supported"); 952 case EFAULT: abort("KERNEL ERROR: rseq unregister with invalid argument"); 953 case EBUSY : abort("KERNEL ERROR: rseq unregister already registered"); 954 case EPERM : abort("KERNEL ERROR: rseq unregister sig argument on unregistration does not match the signature received on registration"); 955 default: abort("KERNEL ERROR: rseq unregisteunexpected return %d", e); 956 } 957 } 958 } 959 #else 960 // No definition needed 961 #endif -
libcfa/src/concurrency/ready_subqueue.hfa
r5a46e09 r660665f 32 32 this.prev = mock_head(this); 33 33 this.anchor.next = 0p; 34 this.anchor.ts = 0;34 this.anchor.ts = -1llu; 35 35 #if !defined(__CFA_NO_STATISTICS__) 36 36 this.cnt = 0; … … 44 44 /* paranoid */ verify( &mock_head(this)->link.ts == &this.anchor.ts ); 45 45 /* paranoid */ verify( mock_head(this)->link.next == 0p ); 46 /* paranoid */ verify( mock_head(this)->link.ts == 0);46 /* paranoid */ verify( mock_head(this)->link.ts == -1llu ); 47 47 /* paranoid */ verify( mock_head(this) == this.prev ); 48 48 /* paranoid */ verify( __alignof__(__intrusive_lane_t) == 128 ); … … 55 55 // Make sure the list is empty 56 56 /* paranoid */ verify( this.anchor.next == 0p ); 57 /* paranoid */ verify( this.anchor.ts == 0);57 /* paranoid */ verify( this.anchor.ts == -1llu ); 58 58 /* paranoid */ verify( mock_head(this) == this.prev ); 59 59 } … … 64 64 /* paranoid */ verify( this.lock ); 65 65 /* paranoid */ verify( node->link.next == 0p ); 66 /* paranoid */ verify( node->link.ts == 0);66 /* paranoid */ verify( node->link.ts == -1llu ); 67 67 /* paranoid */ verify( this.prev->link.next == 0p ); 68 /* paranoid */ verify( this.prev->link.ts == 0);68 /* paranoid */ verify( this.prev->link.ts == -1llu ); 69 69 if( this.anchor.next == 0p ) { 70 70 /* paranoid */ verify( this.anchor.next == 0p ); 71 /* paranoid */ verify( this.anchor.ts == 0 ); 71 /* paranoid */ verify( this.anchor.ts == -1llu ); 72 /* paranoid */ verify( this.anchor.ts != 0 ); 72 73 /* paranoid */ verify( this.prev == mock_head( this ) ); 73 74 } else { 74 75 /* paranoid */ verify( this.anchor.next != 0p ); 76 /* paranoid */ verify( this.anchor.ts != -1llu ); 75 77 /* paranoid */ verify( this.anchor.ts != 0 ); 76 78 /* paranoid */ verify( this.prev != mock_head( this ) ); … … 92 94 /* paranoid */ verify( this.lock ); 93 95 /* paranoid */ verify( this.anchor.next != 0p ); 96 /* paranoid */ verify( this.anchor.ts != -1llu ); 94 97 /* paranoid */ verify( this.anchor.ts != 0 ); 95 98 … … 99 102 this.anchor.next = node->link.next; 100 103 this.anchor.ts = node->link.ts; 101 bool is_empty = this.anchor. ts == 0;104 bool is_empty = this.anchor.next == 0p; 102 105 node->link.next = 0p; 103 node->link.ts = 0;106 node->link.ts = -1llu; 104 107 #if !defined(__CFA_NO_STATISTICS__) 105 108 this.cnt--; … … 110 113 111 114 /* paranoid */ verify( node->link.next == 0p ); 112 /* paranoid */ verify( node->link.ts == 0 ); 115 /* paranoid */ verify( node->link.ts == -1llu ); 116 /* paranoid */ verify( node->link.ts != 0 ); 117 /* paranoid */ verify( this.anchor.ts != 0 ); 113 118 return [node, ts]; 114 119 } … … 116 121 // Check whether or not list is empty 117 122 static inline bool is_empty(__intrusive_lane_t & this) { 118 return this.anchor. ts == 0;123 return this.anchor.next == 0p; 119 124 } 120 125 … … 122 127 static inline unsigned long long ts(__intrusive_lane_t & this) { 123 128 // Cannot verify here since it may not be locked 129 /* paranoid */ verify(this.anchor.ts != 0); 124 130 return this.anchor.ts; 125 131 } -
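Here the empty-lane sentinel changes: a timestamp of 0 used to double as the "no work" marker, which the timestamp-comparing steal heuristics could confuse with very old work; an idle lane now reports -1llu (the maximum 64-bit value) and emptiness is decided by the anchor's next link instead. A small C sketch of the convention; the field names follow the hunk, the struct layout is illustrative:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct lane_anchor {
        struct thread_node *next;   /* first queued thread, or NULL                */
        uint64_t            ts;     /* its enqueue timestamp, UINT64_MAX when idle */
    };

    /* Empty is determined by the link, not by a magic timestamp value ... */
    static bool lane_is_empty(const struct lane_anchor *a) { return a->next == NULL; }

    /* ... so an idle lane looks maximally "fresh" and is never picked as the
       oldest candidate by the stealing heuristic. */
    static uint64_t lane_ts(const struct lane_anchor *a) { return a->ts; }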
libcfa/src/concurrency/thread.cfa
r5a46e09 r660665f 15 15 16 16 #define __cforall_thread__ 17 #define _GNU_SOURCE 17 18 18 19 #include "thread.hfa" … … 39 40 curr_cluster = &cl; 40 41 link.next = 0p; 41 link.ts = 0;42 link.ts = -1llu; 42 43 preferred = -1u; 43 44 last_proc = 0p; -
libcfa/src/containers/array.hfa
r5a46e09 r660665f 1 1 2 2 3 // a type whose size is n 4 #define Z(n) char[n] 5 6 // the inverse of Z(-) 7 #define z(N) sizeof(N) 8 9 forall( T & ) struct tag {}; 3 forall( __CFA_tysys_id_only_X & ) struct tag {}; 10 4 #define ttag(T) ((tag(T)){}) 11 #define ztag(n) ttag( Z(n))5 #define ztag(n) ttag(n) 12 6 13 7 … … 18 12 forall( [N], S & | sized(S), Timmed &, Tbase & ) { 19 13 struct arpk { 20 S strides[ z(N)];14 S strides[N]; 21 15 }; 22 16 … … 56 50 57 51 static inline size_t ?`len( arpk(N, S, Timmed, Tbase) & a ) { 58 return z(N);52 return N; 59 53 } 60 54 61 55 // workaround #226 (and array relevance thereof demonstrated in mike102/otype-slow-ndims.cfa) 62 56 static inline void ?{}( arpk(N, S, Timmed, Tbase) & this ) { 63 void ?{}( S (&inner)[ z(N)] ) {}57 void ?{}( S (&inner)[N] ) {} 64 58 ?{}(this.strides); 65 59 } 66 60 static inline void ^?{}( arpk(N, S, Timmed, Tbase) & this ) { 67 void ^?{}( S (&inner)[ z(N)] ) {}61 void ^?{}( S (&inner)[N] ) {} 68 62 ^?{}(this.strides); 69 63 } … … 143 137 // Base 144 138 forall( [Nq], Sq & | sized(Sq), Tbase & ) 145 static inline tag(arpk(Nq, Sq, Tbase, Tbase)) enq_( tag(Tbase), tag(Nq), tag(Sq), tag(Tbase) ) {} 139 static inline tag(arpk(Nq, Sq, Tbase, Tbase)) enq_( tag(Tbase), tag(Nq), tag(Sq), tag(Tbase) ) { 140 tag(arpk(Nq, Sq, Tbase, Tbase)) ret; 141 return ret; 142 } 146 143 147 144 // Rec 148 145 forall( [Nq], Sq & | sized(Sq), [N], S & | sized(S), recq &, recr &, Tbase & | { tag(recr) enq_( tag(Tbase), tag(Nq), tag(Sq), tag(recq) ); } ) 149 static inline tag(arpk(N, S, recr, Tbase)) enq_( tag(Tbase), tag(Nq), tag(Sq), tag(arpk(N, S, recq, Tbase)) ) {} 146 static inline tag(arpk(N, S, recr, Tbase)) enq_( tag(Tbase), tag(Nq), tag(Sq), tag(arpk(N, S, recq, Tbase)) ) { 147 tag(arpk(N, S, recr, Tbase)) ret; 148 return ret; 149 } 150 150 151 151 // Wrapper -
libcfa/src/exception.c
r5a46e09 r660665f 27 27 #include "stdhdr/assert.h" 28 28 #include "virtual.h" 29 30 #if defined( __ARM_ARCH )31 #warning FIX ME: temporary hack to keep ARM build working32 #ifndef _URC_FATAL_PHASE1_ERROR33 #define _URC_FATAL_PHASE1_ERROR 334 #endif // ! _URC_FATAL_PHASE1_ERROR35 #ifndef _URC_FATAL_PHASE2_ERROR36 #define _URC_FATAL_PHASE2_ERROR 237 #endif // ! _URC_FATAL_PHASE2_ERROR38 #endif // __ARM_ARCH39 40 29 #include "lsda.h" 41 30 … … 267 256 // the whole stack. 268 257 258 #if defined( __x86_64 ) || defined( __i386 ) 269 259 // We did not simply reach the end of the stack without finding a handler. This is an error. 270 260 if ( ret != _URC_END_OF_STACK ) { 261 #else // defined( __ARM_ARCH ) 262 // The return code from _Unwind_RaiseException seems to be corrupt on ARM at end of stack. 263 // This workaround tries to keep default exception handling working. 264 if ( ret == _URC_FATAL_PHASE1_ERROR || ret == _URC_FATAL_PHASE2_ERROR ) { 265 #endif 271 266 printf("UNWIND ERROR %d after raise exception\n", ret); 272 267 abort(); … … 301 296 } 302 297 303 #if defined( __x86_64 ) || defined( __i386 ) 298 #if defined( __x86_64 ) || defined( __i386 ) || defined( __ARM_ARCH ) 304 299 // This is our personality routine. For every stack frame annotated with 305 300 // ".cfi_personality 0x3,__gcfa_personality_v0" this function will be called twice when unwinding. … … 419 414 _Unwind_GetCFA(unwind_context) + 24; 420 415 # elif defined( __ARM_ARCH ) 421 # warning FIX ME: check if anything needed for ARM 422 42; 416 _Unwind_GetCFA(unwind_context) + 40; 423 417 # endif 424 418 int (*matcher)(exception_t *) = *(int(**)(exception_t *))match_pos; … … 537 531 // HEADER 538 532 ".LFECFA1:\n" 533 #if defined( __x86_64 ) || defined( __i386 ) 539 534 " .globl __gcfa_personality_v0\n" 535 #else // defined( __ARM_ARCH ) 536 " .global __gcfa_personality_v0\n" 537 #endif 540 538 " .section .gcc_except_table,\"a\",@progbits\n" 541 539 // TABLE HEADER (important field is the BODY length at the end) … … 569 567 // No clue what this does specifically 570 568 " .section .data.rel.local.CFA.ref.__gcfa_personality_v0,\"awG\",@progbits,CFA.ref.__gcfa_personality_v0,comdat\n" 569 #if defined( __x86_64 ) || defined( __i386 ) 571 570 " .align 8\n" 571 #else // defined( __ARM_ARCH ) 572 " .align 3\n" 573 #endif 572 574 " .type CFA.ref.__gcfa_personality_v0, @object\n" 573 575 " .size CFA.ref.__gcfa_personality_v0, 8\n" … … 575 577 #if defined( __x86_64 ) 576 578 " .quad __gcfa_personality_v0\n" 577 #el se // then __i386579 #elif defined( __i386 ) 578 580 " .long __gcfa_personality_v0\n" 581 #else // defined( __ARM_ARCH ) 582 " .xword __gcfa_personality_v0\n" 579 583 #endif 580 584 ); … … 583 587 // HEADER 584 588 ".LFECFA1:\n" 589 #if defined( __x86_64 ) || defined( __i386 ) 585 590 " .globl __gcfa_personality_v0\n" 591 #else // defined( __ARM_ARCH ) 592 " .global __gcfa_personality_v0\n" 593 #endif 586 594 " .section .gcc_except_table,\"a\",@progbits\n" 587 595 // TABLE HEADER (important field is the BODY length at the end) … … 612 620 #pragma GCC pop_options 613 621 614 #elif defined( __ARM_ARCH )615 _Unwind_Reason_Code __gcfa_personality_v0(616 int version,617 _Unwind_Action actions,618 unsigned long long exception_class,619 struct _Unwind_Exception * unwind_exception,620 struct _Unwind_Context * unwind_context) {621 return _URC_CONTINUE_UNWIND;622 }623 624 __attribute__((noinline))625 void __cfaehm_try_terminate(void (*try_block)(),626 void (*catch_block)(int index, exception_t * except),627 __attribute__((unused)) int 
(*match_block)(exception_t * except)) {628 }629 622 #else 630 623 #error unsupported hardware architecture 631 #endif // __x86_64 || __i386 624 #endif // __x86_64 || __i386 || __ARM_ARCH -
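The exception-handling changes extend the Cforall personality routine and its hand-written LSDA tables to AArch64: the temporary ARM stubs are deleted, the saved-exception slot is found at a different offset from the CFA, the personality reference is emitted with .xword / .align 3 instead of .quad / .align 8, and the end-of-unwind check is adapted because the return code of _Unwind_RaiseException has been observed to be unreliable at end of stack on ARM. A hedged C sketch of that last check only; whether the fatal-phase codes are available depends on the platform's <unwind.h>:

    #include <unwind.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void check_raise_result(_Unwind_Reason_Code ret) {
    #if defined(__x86_64__) || defined(__i386__)
        /* Reaching the end of the stack without a handler is the only benign outcome. */
        if (ret != _URC_END_OF_STACK) {
    #else
        /* ARM workaround: only the explicitly fatal phase errors abort. */
        if (ret == _URC_FATAL_PHASE1_ERROR || ret == _URC_FATAL_PHASE2_ERROR) {
    #endif
            printf("UNWIND ERROR %d after raise exception\n", (int)ret);
            abort();
        }
    }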
libcfa/src/interpose.cfa
r5a46e09 r660665f 95 95 96 96 extern "C" { 97 void __cfaabi_interpose_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_CORE ) ));98 97 void __cfaabi_interpose_startup( void ) { 99 98 const char *version = 0p; -
libcfa/src/startup.cfa
r5a46e09 r660665f 20 20 21 21 extern "C" { 22 23 22 void __cfaabi_appready_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_APPREADY ) )); 23 void __cfaabi_appready_startup( void ) { 24 24 tzset(); // initialize time global variables 25 25 setlocale( LC_NUMERIC, getenv("LANG") ); … … 28 28 heapAppStart(); 29 29 #endif // __CFA_DEBUG__ 30 30 } // __cfaabi_appready_startup 31 31 32 33 32 void __cfaabi_appready_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_APPREADY ) )); 33 void __cfaabi_appready_shutdown( void ) { 34 34 #ifdef __CFA_DEBUG__ 35 35 extern void heapAppStop(); 36 36 heapAppStop(); 37 37 #endif // __CFA_DEBUG__ 38 38 } // __cfaabi_appready_shutdown 39 39 40 void disable_interrupts() __attribute__(( weak )) {} 41 void enable_interrupts() __attribute__(( weak )) {} 40 void disable_interrupts() __attribute__(( weak )) {} 41 void enable_interrupts() __attribute__(( weak )) {} 42 43 44 extern void __cfaabi_interpose_startup( void ); 45 extern void __cfaabi_device_startup ( void ); 46 extern void __cfaabi_device_shutdown ( void ); 47 48 void __cfaabi_core_startup( void ) __attribute__(( constructor( STARTUP_PRIORITY_CORE ) )); 49 void __cfaabi_core_startup( void ) { 50 __cfaabi_interpose_startup(); 51 __cfaabi_device_startup(); 52 } // __cfaabi_core_startup 53 54 void __cfaabi_core_shutdown( void ) __attribute__(( destructor( STARTUP_PRIORITY_CORE ) )); 55 void __cfaabi_core_shutdown( void ) { 56 __cfaabi_device_shutdown(); 57 } // __cfaabi_core_shutdown 42 58 } // extern "C" 43 59
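startup.cfa now owns a single core-priority constructor/destructor pair: __cfaabi_core_startup() calls the interposition and device-discovery startups in a fixed order (which is why interpose.cfa loses its own constructor attribute above), and weak no-op definitions of disable/enable_interrupts let the sequential runtime link without the thread library. GCC runs constructors with smaller priority numbers first; a minimal C illustration with placeholder priorities, not libcfa's actual STARTUP_PRIORITY values:

    #include <stdio.h>

    /* Priorities 0..100 are reserved; among the rest, lower numbers run first. */
    __attribute__((constructor(200))) static void core_startup(void) { puts("core up"); }
    __attribute__((constructor(300))) static void app_startup(void)  { puts("app up");  }

    /* The opposite order holds for destructors at exit: smaller priority runs later. */
    __attribute__((destructor(200)))  static void core_shutdown(void) { puts("core down"); }

    int main(void) { puts("main"); return 0; }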