Changeset 74ec742 for libcfa/src
- Timestamp: May 20, 2022, 10:36:45 AM (3 years ago)
- Branches: ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
- Children: 25fa20a
- Parents: 29d8c02 (diff), 7831e8fb (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Location: libcfa/src
- Files: 51 edited
libcfa/src/Makefile.am
r29d8c02 r74ec742 33 33 # The built sources must not depend on the installed inst_headers_src 34 34 AM_CFAFLAGS = -quiet -cfalib -I$(srcdir)/stdhdr -I$(srcdir)/concurrency $(if $(findstring ${gdbwaittarget}, ${@}), -XCFA --gdb) @CONFIG_CFAFLAGS@ 35 AM_CFLAGS = -g -Wall -Werror=return-type -Wno-unused-function -fPIC -fexceptions - pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@35 AM_CFLAGS = -g -Wall -Werror=return-type -Wno-unused-function -fPIC -fexceptions -fvisibility=hidden -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@ 36 36 AM_CCASFLAGS = -g -Wall -Werror=return-type -Wno-unused-function @ARCH_FLAGS@ @CONFIG_CFLAGS@ 37 37 CFACC = @CFACC@ … … 194 194 195 195 prelude.o : prelude.cfa extras.cf gcc-builtins.cf builtins.cf @LOCAL_CFACC@ @CFACPP@ 196 ${AM_V_GEN}$(CFACOMPILE) -quiet -XCFA,-l ${<} -c - o ${@}196 ${AM_V_GEN}$(CFACOMPILE) -quiet -XCFA,-l ${<} -c -fvisibility=default -o ${@} 197 197 198 198 prelude.lo: prelude.cfa extras.cf gcc-builtins.cf builtins.cf @LOCAL_CFACC@ @CFACPP@ 199 199 ${AM_V_GEN}$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile \ 200 $(CFACOMPILE) -quiet -XCFA,-l ${<} -c - o ${@}200 $(CFACOMPILE) -quiet -XCFA,-l ${<} -c -fvisibility=default -o ${@} 201 201 202 202 concurrency/io/call.cfa: $(srcdir)/concurrency/io/call.cfa.in -
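Note: the key build change here is -fvisibility=hidden, which makes every symbol in libcfa hidden by default so that only declarations explicitly marked with visibility("default") (the libcfa_public macro added in bits/defs.hfa below) are exported; the prelude alone is compiled with -fvisibility=default so its symbols stay visible to user code. A minimal sketch of the effect, using a hypothetical library not part of this changeset:

    /* vis.c -- build: gcc -shared -fPIC -fvisibility=hidden vis.c -o libvis.so
       Only symbols explicitly marked "default" appear in the dynamic symbol
       table of libvis.so (inspect with: nm -D libvis.so). */

    __attribute__((visibility("default")))
    int exported_entry( void ) {            /* exported: callable by library users */
        return 42;
    }

    int internal_helper( void ) {           /* hidden: internal to the library */
        return 7;
    }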
libcfa/src/algorithms/range_iterator.cfa
r29d8c02 r74ec742 20 20 #include <fstream.hfa> 21 21 22 void main(RangeIter & this) { 22 #include "bits/defs.hfa" 23 24 void main(RangeIter & this) libcfa_public { 23 25 for() { 24 26 this._start = -1; -
libcfa/src/assert.cfa
r29d8c02 r74ec742 19 19 #include <unistd.h> // STDERR_FILENO 20 20 #include "bits/debug.hfa" 21 #include "bits/defs.hfa" 21 22 22 23 extern "C" { … … 26 27 27 28 // called by macro assert in assert.h 28 void __assert_fail( const char assertion[], const char file[], unsigned int line, const char function[] ) { 29 // would be cool to remove libcfa_public but it's needed for libcfathread 30 void __assert_fail( const char assertion[], const char file[], unsigned int line, const char function[] ) libcfa_public { 29 31 __cfaabi_bits_print_safe( STDERR_FILENO, CFA_ASSERT_FMT ".\n", assertion, __progname, function, line, file ); 30 32 abort(); … … 32 34 33 35 // called by macro assertf 34 void __assert_fail_f( const char assertion[], const char file[], unsigned int line, const char function[], const char fmt[], ... ) { 36 // would be cool to remove libcfa_public but it's needed for libcfathread 37 void __assert_fail_f( const char assertion[], const char file[], unsigned int line, const char function[], const char fmt[], ... ) libcfa_public { 35 38 __cfaabi_bits_acquire(); 36 39 __cfaabi_bits_print_nolock( STDERR_FILENO, CFA_ASSERT_FMT ": ", assertion, __progname, function, line, file ); -
libcfa/src/bits/align.hfa
r29d8c02 r74ec742 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Nov 16 18:58:22 201913 // Update Count : 312 // Last Modified On : Fri Apr 29 19:14:43 2022 13 // Update Count : 4 14 14 // 15 15 // This library is free software; you can redistribute it and/or modify it … … 35 35 //#define libAlign() (sizeof(double)) 36 36 // gcc-7 uses xmms instructions, which require 16 byte alignment. 37 #define libAlign() ( 16)37 #define libAlign() (__BIGGEST_ALIGNMENT__) 38 38 39 39 // Check for power of 2 -
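Note: libAlign() drops the hardcoded 16 in favour of __BIGGEST_ALIGNMENT__, GCC's per-target constant for the largest alignment any fundamental type requires (16 on x86-64 because of SSE, but not universally). A quick way to see the value on a given target (hypothetical standalone program):

    #include <stdio.h>

    int main( void ) {
        /* predefined by gcc/clang; varies by architecture */
        printf( "__BIGGEST_ALIGNMENT__ = %d\n", __BIGGEST_ALIGNMENT__ );
        return 0;
    }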
libcfa/src/bits/debug.cfa
r29d8c02 r74ec742 21 21 #include <unistd.h> 22 22 23 #include "bits/defs.hfa" 24 23 25 enum { buffer_size = 4096 }; 24 26 static char buffer[ buffer_size ]; 25 27 26 28 extern "C" { 27 void __cfaabi_bits_write( int fd, const char in_buffer[], int len ) { 29 // would be cool to remove libcfa_public but it's needed for libcfathread 30 void __cfaabi_bits_write( int fd, const char in_buffer[], int len ) libcfa_public { 28 31 // ensure all data is written 29 32 for ( int count = 0, retcode; count < len; count += retcode ) { … … 44 47 void __cfaabi_bits_release() __attribute__((__weak__)) {} 45 48 46 int __cfaabi_bits_print_safe ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) { 49 // would be cool to remove libcfa_public but it's needed for libcfathread 50 int __cfaabi_bits_print_safe ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) libcfa_public { 47 51 va_list args; 48 52 -
libcfa/src/bits/defs.hfa
r29d8c02 r74ec742 36 36 #define __cfa_dlink(x) struct { struct x * next; struct x * back; } __dlink_substitute 37 37 #endif 38 39 #define libcfa_public __attribute__((visibility("default"))) 38 40 39 41 #ifdef __cforall -
libcfa/src/bits/weakso_locks.cfa
r29d8c02 r74ec742 18 18 #include "bits/weakso_locks.hfa" 19 19 20 #pragma GCC visibility push(default) 21 20 22 void ?{}( blocking_lock &, bool, bool ) {} 21 23 void ^?{}( blocking_lock & ) {} -
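Note: instead of tagging each function with libcfa_public, whole translation units like this one switch every following definition to exported visibility with a single pragma, typically without a matching pop because the remainder of the file is public API. Sketch of the mechanism, with hypothetical functions:

    #pragma GCC visibility push(default)
    int exported_a( void ) { return 1; }    /* exported despite -fvisibility=hidden */
    int exported_b( void ) { return 2; }
    #pragma GCC visibility pop              /* restore the command-line default */

    int hidden_c( void ) { return 3; }      /* hidden again under -fvisibility=hidden */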
libcfa/src/common.cfa
r29d8c02 r74ec742 18 18 #include <stdlib.h> // div_t, *div 19 19 20 #pragma GCC visibility push(default) 21 20 22 //--------------------------------------- 21 23 -
libcfa/src/concurrency/alarm.cfa
r29d8c02 r74ec742 141 141 //============================================================================================= 142 142 143 void sleep( Duration duration ) {143 void sleep( Duration duration ) libcfa_public { 144 144 alarm_node_t node = { active_thread(), duration, 0`s }; 145 145 -
libcfa/src/concurrency/clib/cfathread.cfa
r29d8c02 r74ec742 326 326 } 327 327 328 #pragma GCC visibility push(default) 329 328 330 //================================================================================ 329 331 // Main Api 330 332 extern "C" { 331 int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) {333 int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) libcfa_public { 332 334 *cl = new(); 333 335 return 0; 334 336 } 335 337 336 cfathread_cluster_t cfathread_cluster_self(void) {338 cfathread_cluster_t cfathread_cluster_self(void) libcfa_public { 337 339 return active_cluster(); 338 340 } 339 341 340 int cfathread_cluster_print_stats( cfathread_cluster_t cl ) {342 int cfathread_cluster_print_stats( cfathread_cluster_t cl ) libcfa_public { 341 343 #if !defined(__CFA_NO_STATISTICS__) 342 344 print_stats_at_exit( *cl, CFA_STATS_READY_Q | CFA_STATS_IO ); -
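Note: cfathread.cfa is the C-callable wrapper around the runtime, so its entry points must remain exported (hence the pragma plus the per-function libcfa_public). Based only on the signatures visible in this hunk, client code would look roughly like the following; the header name and error handling are assumptions:

    #include <cfathread.h>                      /* assumed header for the clib API */

    int main( void ) {
        cfathread_cluster_t cl;
        if ( cfathread_cluster_create( &cl ) != 0 ) return 1;  /* new cluster */

        cfathread_cluster_t self = cfathread_cluster_self();   /* current cluster */
        (void)self;

        cfathread_cluster_print_stats( cl );    /* no-op if stats were compiled out */
        return 0;
    }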
libcfa/src/concurrency/coroutine.cfa
r29d8c02 r74ec742 48 48 //----------------------------------------------------------------------------- 49 49 forall(T &) 50 void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) {50 void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) libcfa_public { 51 51 dst->virtual_table = src->virtual_table; 52 52 dst->the_coroutine = src->the_coroutine; … … 55 55 56 56 forall(T &) 57 const char * msg(CoroutineCancelled(T) *) {57 const char * msg(CoroutineCancelled(T) *) libcfa_public { 58 58 return "CoroutineCancelled(...)"; 59 59 } … … 62 62 forall(T & | is_coroutine(T)) 63 63 void __cfaehm_cancelled_coroutine( 64 T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) {64 T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) libcfa_public { 65 65 verify( desc->cancellation ); 66 66 desc->state = Cancelled; … … 89 89 90 90 void __stack_prepare( __stack_info_t * this, size_t create_size ); 91 void __stack_clean ( __stack_info_t * this );91 static void __stack_clean ( __stack_info_t * this ); 92 92 93 93 //----------------------------------------------------------------------------- … … 114 114 } 115 115 116 void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) with( this ) {116 void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) libcfa_public with( this ) { 117 117 (this.context){0p, 0p}; 118 118 (this.stack){storage, storageSize}; … … 124 124 } 125 125 126 void ^?{}(coroutine$& this) {126 void ^?{}(coroutine$& this) libcfa_public { 127 127 if(this.state != Halted && this.state != Start && this.state != Primed) { 128 128 coroutine$ * src = active_coroutine(); … … 147 147 // Not inline since only ever called once per coroutine 148 148 forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); }) 149 void prime(T& cor) {149 void prime(T& cor) libcfa_public { 150 150 coroutine$* this = get_coroutine(cor); 151 151 assert(this->state == Start); … … 155 155 } 156 156 157 [void *, size_t] __stack_alloc( size_t storageSize ) {157 static [void *, size_t] __stack_alloc( size_t storageSize ) { 158 158 const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment 159 159 assert(__page_size != 0l); … … 193 193 } 194 194 195 void __stack_clean ( __stack_info_t * this ) {195 static void __stack_clean ( __stack_info_t * this ) { 196 196 void * storage = this->storage->limit; 197 197 … … 215 215 } 216 216 217 void __stack_prepare( __stack_info_t * this, size_t create_size ) {217 void __stack_prepare( __stack_info_t * this, size_t create_size ) libcfa_public { 218 218 const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment 219 219 bool userStack; -
libcfa/src/concurrency/coroutine.hfa
r29d8c02 r74ec742 113 113 114 114 extern void __stack_prepare( __stack_info_t * this, size_t size /* ignored if storage already allocated */); 115 extern void __stack_clean ( __stack_info_t * this );116 117 115 118 116 // Suspend implementation inlined for performance -
libcfa/src/concurrency/exception.cfa
r29d8c02 r74ec742 64 64 extern "C" { 65 65 66 struct exception_context_t * this_exception_context(void) {66 struct exception_context_t * this_exception_context(void) libcfa_public { 67 67 return &__get_stack( active_coroutine() )->exception_context; 68 68 } 69 69 70 _Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) {70 _Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) libcfa_public { 71 71 _Unwind_Stop_Fn stop_func; 72 72 void * stop_param; -
libcfa/src/concurrency/invoke.c
r29d8c02 r74ec742 36 36 extern void enable_interrupts( _Bool poll ); 37 37 38 void __cfactx_invoke_coroutine(38 libcfa_public void __cfactx_invoke_coroutine( 39 39 void (*main)(void *), 40 40 void *this … … 70 70 } 71 71 72 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__));72 libcfa_public void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__)); 73 73 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) { 74 74 _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor ); … … 77 77 } 78 78 79 void __cfactx_invoke_thread(79 libcfa_public void __cfactx_invoke_thread( 80 80 void (*main)(void *), 81 81 void *this … … 98 98 } 99 99 100 void __cfactx_start(100 libcfa_public void __cfactx_start( 101 101 void (*main)(void *), 102 102 struct coroutine$ * cor, -
libcfa/src/concurrency/io.cfa
r29d8c02 r74ec742 244 244 245 245 remote = true; 246 __STATS__( false, io.calls.helped++; )246 __STATS__( true, io.calls.helped++; ) 247 247 } 248 248 proc->io.target = MAX; … … 340 340 // for convenience, return both the index and the pointer to the sqe 341 341 // sqe == &sqes[idx] 342 struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) {342 struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) libcfa_public { 343 343 // __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want); 344 344 … … 419 419 } 420 420 421 void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) {421 void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) libcfa_public { 422 422 // __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager"); 423 423 -
libcfa/src/concurrency/io/call.cfa.in
r29d8c02 r74ec742 139 139 // I/O Interface 140 140 //============================================================================================= 141 #pragma GCC visibility push(default) 141 142 """ 142 143 -
libcfa/src/concurrency/io/setup.cfa
r29d8c02 r74ec742 26 26 27 27 #if !defined(CFA_HAVE_LINUX_IO_URING_H) 28 void ?{}(io_context_params & this) {}28 void ?{}(io_context_params & this) libcfa_public {} 29 29 30 30 void ?{}($io_context & this, struct cluster & cl) {} … … 66 66 #pragma GCC diagnostic pop 67 67 68 void ?{}(io_context_params & this) {68 void ?{}(io_context_params & this) libcfa_public { 69 69 this.num_entries = 256; 70 70 } -
libcfa/src/concurrency/kernel.cfa
r29d8c02 r74ec742 389 389 390 390 // KERNEL_ONLY 391 void returnToKernel() {391 static void returnToKernel() { 392 392 /* paranoid */ verify( ! __preemption_enabled() ); 393 393 coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner); … … 547 547 } 548 548 549 void unpark( thread$ * thrd, unpark_hint hint ) {549 void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public { 550 550 if( !thrd ) return; 551 551 … … 558 558 } 559 559 560 void park( void ) {560 void park( void ) libcfa_public { 561 561 __disable_interrupts_checked(); 562 562 /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION ); … … 601 601 602 602 // KERNEL ONLY 603 bool force_yield( __Preemption_Reason reason ) {603 bool force_yield( __Preemption_Reason reason ) libcfa_public { 604 604 __disable_interrupts_checked(); 605 605 thread$ * thrd = kernelTLS().this_thread; … … 849 849 //----------------------------------------------------------------------------- 850 850 // Debug 851 bool threading_enabled(void) __attribute__((const)) {851 bool threading_enabled(void) __attribute__((const)) libcfa_public { 852 852 return true; 853 853 } … … 856 856 // Statistics 857 857 #if !defined(__CFA_NO_STATISTICS__) 858 void print_halts( processor & this ) {858 void print_halts( processor & this ) libcfa_public { 859 859 this.print_halts = true; 860 860 } … … 873 873 } 874 874 875 void crawl_cluster_stats( cluster & this ) {875 static void crawl_cluster_stats( cluster & this ) { 876 876 // Stop the world, otherwise stats could get really messed-up 877 877 // this doesn't solve all problems but does solve many … … 889 889 890 890 891 void print_stats_now( cluster & this, int flags ) {891 void print_stats_now( cluster & this, int flags ) libcfa_public { 892 892 crawl_cluster_stats( this ); 893 893 __print_stats( this.stats, flags, "Cluster", this.name, (void*)&this ); -
libcfa/src/concurrency/kernel.hfa
r29d8c02 r74ec742 49 49 50 50 // Coroutine used py processors for the 2-step context switch 51 coroutine processorCtx_t { 51 52 struct processorCtx_t { 53 struct coroutine$ self; 52 54 struct processor * proc; 53 55 }; -
libcfa/src/concurrency/kernel/cluster.cfa
r29d8c02 r74ec742 49 49 50 50 // returns the maximum number of processors the RWLock support 51 __attribute__((weak)) unsigned __max_processors() {51 __attribute__((weak)) unsigned __max_processors() libcfa_public { 52 52 const char * max_cores_s = getenv("CFA_MAX_PROCESSORS"); 53 53 if(!max_cores_s) { -
libcfa/src/concurrency/kernel/private.hfa
r29d8c02 r74ec742 109 109 //----------------------------------------------------------------------------- 110 110 // Processor 111 void main(processorCtx_t *); 111 void main(processorCtx_t &); 112 static inline coroutine$* get_coroutine(processorCtx_t & this) { return &this.self; } 112 113 113 114 void * __create_pthread( pthread_t *, void * (*)(void *), void * ); -
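Note: together with the kernel.hfa hunk above, this replaces the generated coroutine type: processorCtx_t is now a plain struct that embeds coroutine$ directly and supplies the get_coroutine accessor by hand; with the main routine declared beside it, that satisfies the runtime's coroutine interface. The shape of the pattern, with a toy stand-in for coroutine$:

    struct coroutine_desc { int state; };       /* toy stand-in for CFA's coroutine$ */

    struct processorCtx_t {
        struct coroutine_desc self;             /* embedded descriptor, previously
                                                   generated by the 'coroutine' keyword */
        struct processor * proc;
    };

    static inline struct coroutine_desc * get_coroutine( struct processorCtx_t * this ) {
        return &this->self;                     /* the hook the coroutine machinery calls */
    }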
libcfa/src/concurrency/kernel/startup.cfa
r29d8c02 r74ec742 120 120 #endif 121 121 122 cluster * mainCluster ;122 cluster * mainCluster libcfa_public; 123 123 processor * mainProcessor; 124 124 thread$ * mainThread; … … 169 169 }; 170 170 171 void ?{}( current_stack_info_t & this ) {171 static void ?{}( current_stack_info_t & this ) { 172 172 __stack_context_t ctx; 173 173 CtxGet( ctx ); … … 209 209 // Construct the processor context of the main processor 210 210 void ?{}(processorCtx_t & this, processor * proc) { 211 (this. __cor){ "Processor" };212 this. __cor.starter = 0p;211 (this.self){ "Processor" }; 212 this.self.starter = 0p; 213 213 this.proc = proc; 214 214 } … … 526 526 // Construct the processor context of non-main processors 527 527 static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) { 528 (this. __cor){ info };528 (this.self){ info }; 529 529 this.proc = proc; 530 530 } … … 578 578 } 579 579 580 void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) {580 void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) libcfa_public { 581 581 ( this.terminated ){}; 582 582 ( this.runner ){}; … … 591 591 } 592 592 593 void ?{}(processor & this, const char name[], cluster & _cltr) {593 void ?{}(processor & this, const char name[], cluster & _cltr) libcfa_public { 594 594 (this){name, _cltr, 0p}; 595 595 } 596 596 597 597 extern size_t __page_size; 598 void ^?{}(processor & this) with( this ){598 void ^?{}(processor & this) libcfa_public with( this ) { 599 599 /* paranoid */ verify( !__atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ); 600 600 __cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this); … … 623 623 } 624 624 625 void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) with( this ) {625 void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) libcfa_public with( this ) { 626 626 this.name = name; 627 627 this.preemption_rate = preemption_rate; … … 658 658 } 659 659 660 void ^?{}(cluster & this) {660 void ^?{}(cluster & this) libcfa_public { 661 661 destroy(this.io.arbiter); 662 662 -
libcfa/src/concurrency/locks.cfa
r29d8c02 r74ec742 24 24 #include <stdlib.hfa> 25 25 26 #pragma GCC visibility push(default) 27 26 28 //----------------------------------------------------------------------------- 27 29 // info_thread … … 116 118 } 117 119 118 void pop_and_set_new_owner( blocking_lock & this ) with( this ) {120 static void pop_and_set_new_owner( blocking_lock & this ) with( this ) { 119 121 thread$ * t = &try_pop_front( blocked_threads ); 120 122 owner = t; … … 192 194 void ^?{}( alarm_node_wrap(L) & this ) { } 193 195 194 void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {196 static void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) { 195 197 // This condition_variable member is called from the kernel, and therefore, cannot block, but it can spin. 196 198 lock( cond->lock __cfaabi_dbg_ctx2 ); … … 216 218 217 219 // this casts the alarm node to our wrapped type since we used type erasure 218 void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }220 static void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); } 219 221 } 220 222 221 223 //----------------------------------------------------------------------------- 222 // condition variable224 // Synchronization Locks 223 225 forall(L & | is_blocking_lock(L)) { 224 226 227 //----------------------------------------------------------------------------- 228 // condition variable 225 229 void ?{}( condition_variable(L) & this ){ 226 230 this.lock{}; … … 231 235 void ^?{}( condition_variable(L) & this ){ } 232 236 233 void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {237 static void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) { 234 238 if(&popped != 0p) { 235 239 popped.signalled = true; … … 276 280 int counter( condition_variable(L) & this ) with(this) { return count; } 277 281 278 s ize_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {282 static size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) { 279 283 // add info_thread to waiting queue 280 284 insert_last( blocked_threads, *i ); … … 289 293 290 294 // helper for wait()'s' with no timeout 291 void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {295 static void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) { 292 296 lock( lock __cfaabi_dbg_ctx2 ); 293 297 size_t recursion_count = queue_and_get_recursion(this, &i); … … 306 310 307 311 // helper for wait()'s' with a timeout 308 void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {312 static void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) { 309 313 lock( lock __cfaabi_dbg_ctx2 ); 310 314 size_t recursion_count = queue_and_get_recursion(this, &info); … … 337 341 bool wait( condition_variable(L) & this, L & l, Duration duration ) with(this) { WAIT_TIME( 0 , &l , duration ) } 338 342 bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration ) with(this) { WAIT_TIME( info, &l , duration ) } 343 344 //----------------------------------------------------------------------------- 345 // fast_cond_var 346 void ?{}( fast_cond_var(L) & this ){ 347 this.blocked_threads{}; 348 #ifdef __CFA_DEBUG__ 349 this.lock_used = 0p; 350 #endif 351 } 352 void 
^?{}( fast_cond_var(L) & this ){ } 353 354 bool notify_one( fast_cond_var(L) & this ) with(this) { 355 bool ret = ! blocked_threads`isEmpty; 356 if ( ret ) { 357 info_thread(L) & popped = try_pop_front( blocked_threads ); 358 on_notify(*popped.lock, popped.t); 359 } 360 return ret; 361 } 362 bool notify_all( fast_cond_var(L) & this ) with(this) { 363 bool ret = ! blocked_threads`isEmpty; 364 while( ! blocked_threads`isEmpty ) { 365 info_thread(L) & popped = try_pop_front( blocked_threads ); 366 on_notify(*popped.lock, popped.t); 367 } 368 return ret; 369 } 370 371 uintptr_t front( fast_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty ? NULL : blocked_threads`first.info; } 372 bool empty ( fast_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty; } 373 374 void wait( fast_cond_var(L) & this, L & l ) { 375 wait( this, l, 0 ); 376 } 377 378 void wait( fast_cond_var(L) & this, L & l, uintptr_t info ) with(this) { 379 // brand cond lock with lock 380 #ifdef __CFA_DEBUG__ 381 if ( lock_used == 0p ) lock_used = &l; 382 else { assert(lock_used == &l); } 383 #endif 384 info_thread( L ) i = { active_thread(), info, &l }; 385 insert_last( blocked_threads, i ); 386 size_t recursion_count = on_wait( *i.lock ); 387 park( ); 388 on_wakeup(*i.lock, recursion_count); 389 } 339 390 } 340 391 -
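Note: fast_cond_var strips the condition variable to a bare list of waiters: no internal lock (the user's branded lock provides mutual exclusion), no timeouts, and exactly one lock per condition (checked in debug builds via lock_used). wait() enqueues the caller, releases the lock through on_wait, and parks; notify pops a waiter and hands it to on_notify. An illustrative producer/consumer, assuming the header path and pairing it with the fast_block_lock added in locks.hfa below (whose on_wakeup does not reacquire, so the waiter must re-lock itself):

    #include <locks.hfa>                    // path assumed

    fast_block_lock l;
    fast_cond_var( fast_block_lock ) cv;
    bool ready = false;

    void consumer() {                       // runs on one thread
        lock( l );
        while ( ! ready ) {
            wait( cv, l );                  // releases l and parks; NOT reacquired
            lock( l );                      // so re-lock before re-checking
        }
        unlock( l );
    }

    void producer() {                       // runs on another thread
        lock( l );
        ready = true;
        notify_one( cv );                   // branded lock held: safe; signalling
        unlock( l );                        // without it is documented as unsafe
    }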
libcfa/src/concurrency/locks.hfa
r29d8c02 r74ec742 73 73 static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); } 74 74 75 //----------------------------------------------------------------------------- 76 // MCS Lock 75 77 struct mcs_node { 76 78 mcs_node * volatile next; … … 98 100 } 99 101 102 //----------------------------------------------------------------------------- 103 // Linear backoff Spinlock 100 104 struct linear_backoff_then_block_lock { 101 105 // Spin lock used for mutual exclusion … … 199 203 200 204 //----------------------------------------------------------------------------- 205 // Fast Block Lock 206 207 // High efficiency minimal blocking lock 208 // - No reacquire for cond var 209 // - No recursive acquisition 210 // - No ownership 211 struct fast_block_lock { 212 // Spin lock used for mutual exclusion 213 __spinlock_t lock; 214 215 // List of blocked threads 216 dlist( thread$ ) blocked_threads; 217 218 bool held:1; 219 }; 220 221 static inline void ?{}( fast_block_lock & this ) with(this) { 222 lock{}; 223 blocked_threads{}; 224 held = false; 225 } 226 static inline void ^?{}( fast_block_lock & this ) {} 227 static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void; 228 static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void; 229 230 // if this is called recursively IT WILL DEADLOCK!!!!! 231 static inline void lock(fast_block_lock & this) with(this) { 232 lock( lock __cfaabi_dbg_ctx2 ); 233 if (held) { 234 insert_last( blocked_threads, *active_thread() ); 235 unlock( lock ); 236 park( ); 237 return; 238 } 239 held = true; 240 unlock( lock ); 241 } 242 243 static inline void unlock(fast_block_lock & this) with(this) { 244 lock( lock __cfaabi_dbg_ctx2 ); 245 /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this ); 246 thread$ * t = &try_pop_front( blocked_threads ); 247 held = ( t ? true : false ); 248 unpark( t ); 249 unlock( lock ); 250 } 251 252 static inline void on_notify(fast_block_lock & this, struct thread$ * t ) { unpark(t); } 253 static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; } 254 static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { } 255 256 //----------------------------------------------------------------------------- 201 257 // is_blocking_lock 202 258 trait is_blocking_lock(L & | sized(L)) { … … 226 282 // Synchronization Locks 227 283 forall(L & | is_blocking_lock(L)) { 284 285 //----------------------------------------------------------------------------- 286 // condition_variable 287 288 // The multi-tool condition variable 289 // - can pass timeouts to wait for either a signal or timeout 290 // - can wait without passing a lock 291 // - can have waiters reacquire different locks while waiting on the same cond var 292 // - has shadow queue 293 // - can be signalled outside of critical sections with no locks held 228 294 struct condition_variable { 229 295 // Spin lock used for mutual exclusion … … 258 324 bool wait( condition_variable(L) & this, L & l, Duration duration ); 259 325 bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration ); 260 } 326 327 //----------------------------------------------------------------------------- 328 // fast_cond_var 329 330 // The trimmed and slim condition variable 331 // - no internal lock so you must hold a lock while using this cond var 332 // - signalling without holding branded lock is UNSAFE! 
333 // - only allows usage of one lock, cond var is branded after usage 334 struct fast_cond_var { 335 // List of blocked threads 336 dlist( info_thread(L) ) blocked_threads; 337 338 #ifdef __CFA_DEBUG__ 339 L * lock_used; 340 #endif 341 }; 342 343 344 void ?{}( fast_cond_var(L) & this ); 345 void ^?{}( fast_cond_var(L) & this ); 346 347 bool notify_one( fast_cond_var(L) & this ); 348 bool notify_all( fast_cond_var(L) & this ); 349 350 uintptr_t front( fast_cond_var(L) & this ); 351 352 bool empty ( fast_cond_var(L) & this ); 353 354 void wait( fast_cond_var(L) & this, L & l ); 355 void wait( fast_cond_var(L) & this, L & l, uintptr_t info ); 356 } -
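Note: fast_block_lock, used above, is the minimal blocking lock these condition variables pair with: a spinlock protects a held flag and a list of parked threads; lock() parks instead of spinning when the lock is held, and unlock() pops the first waiter and transfers ownership to it directly (held stays true), so a released lock goes to the oldest waiter rather than a barging newcomer. Because no owner is recorded, recursive acquisition self-deadlocks, as the comment warns. Usage sketch (hypothetical surrounding code):

    fast_block_lock fbl;

    void critical() {
        lock( fbl );       // parks if already held; no owner check, no recursion
        // ... critical section; calling lock( fbl ) again here would deadlock
        unlock( fbl );     // wakes the first waiter, handing it ownership directly
    }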
libcfa/src/concurrency/monitor.cfa
r29d8c02 r74ec742 44 44 static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] ); 45 45 46 static inline void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ); 47 static inline void ?{}(__condition_criterion_t & this ); 48 static inline void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner ); 49 46 50 static inline void init ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ); 47 51 static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ); … … 243 247 244 248 // Leave single monitor 245 void __leave( monitor$ * this ) {249 static void __leave( monitor$ * this ) { 246 250 // Lock the monitor spinlock 247 251 lock( this->lock __cfaabi_dbg_ctx2 ); … … 278 282 279 283 // Leave single monitor for the last time 280 void __dtor_leave( monitor$ * this, bool join ) {284 static void __dtor_leave( monitor$ * this, bool join ) { 281 285 __cfaabi_dbg_debug_do( 282 286 if( active_thread() != this->owner ) { … … 344 348 // Ctor for monitor guard 345 349 // Sorts monitors before entering 346 void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) {350 void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) libcfa_public { 347 351 thread$ * thrd = active_thread(); 348 352 … … 369 373 } 370 374 371 void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) {375 void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) libcfa_public { 372 376 this{ m, count, 0p }; 373 377 } … … 375 379 376 380 // Dtor for monitor guard 377 void ^?{}( monitor_guard_t & this ) {381 void ^?{}( monitor_guard_t & this ) libcfa_public { 378 382 // __cfaabi_dbg_print_safe( "MGUARD : leaving %d\n", this.count); 379 383 … … 389 393 // Ctor for monitor guard 390 394 // Sorts monitors before entering 391 void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) {395 void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) libcfa_public { 392 396 // optimization 393 397 thread$ * thrd = active_thread(); … … 409 413 410 414 // Dtor for monitor guard 411 void ^?{}( monitor_dtor_guard_t & this ) {415 void ^?{}( monitor_dtor_guard_t & this ) libcfa_public { 412 416 // Leave the monitors in order 413 417 __dtor_leave( this.m, this.join ); … … 419 423 //----------------------------------------------------------------------------- 420 424 // Internal scheduling types 421 void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {425 static void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) { 422 426 this.waiting_thread = waiting_thread; 423 427 this.count = count; … … 426 430 } 427 431 428 void ?{}(__condition_criterion_t & this ) with( this ) {432 static void ?{}(__condition_criterion_t & this ) with( this ) { 429 433 ready = false; 430 434 target = 0p; … … 433 437 } 434 438 435 void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {439 static void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) { 436 440 this.ready = false; 437 441 this.target = target; … … 442 446 
//----------------------------------------------------------------------------- 443 447 // Internal scheduling 444 void wait( condition & this, uintptr_t user_info = 0 ) {448 void wait( condition & this, uintptr_t user_info = 0 ) libcfa_public { 445 449 brand_condition( this ); 446 450 … … 496 500 } 497 501 498 bool signal( condition & this ) {502 bool signal( condition & this ) libcfa_public { 499 503 if( is_empty( this ) ) { return false; } 500 504 … … 538 542 } 539 543 540 bool signal_block( condition & this ) {544 bool signal_block( condition & this ) libcfa_public { 541 545 if( !this.blocked.head ) { return false; } 542 546 … … 586 590 587 591 // Access the user_info of the thread waiting at the front of the queue 588 uintptr_t front( condition & this ) {592 uintptr_t front( condition & this ) libcfa_public { 589 593 verifyf( !is_empty(this), 590 594 "Attempt to access user data on an empty condition.\n" … … 608 612 // setup mask 609 613 // block 610 void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {614 void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) libcfa_public { 611 615 // This statment doesn't have a contiguous list of monitors... 612 616 // Create one! … … 994 998 // Can't be accepted since a mutex stmt is effectively an anonymous routine 995 999 // Thus we do not need a monitor group 996 void lock( monitor$ * this ) {1000 void lock( monitor$ * this ) libcfa_public { 997 1001 thread$ * thrd = active_thread(); 998 1002 … … 1046 1050 // Leave routine for mutex stmt 1047 1051 // Is just a wrapper around __leave for the is_lock trait to see 1048 void unlock( monitor$ * this ) { __leave( this ); }1052 void unlock( monitor$ * this ) libcfa_public { __leave( this ); } 1049 1053 1050 1054 // Local Variables: // -
libcfa/src/concurrency/monitor.hfa
r29d8c02 r74ec742 119 119 } 120 120 121 void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );122 void ?{}(__condition_criterion_t & this );123 void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );121 // void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ); 122 // void ?{}(__condition_criterion_t & this ); 123 // void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner ); 124 124 125 125 struct condition { -
libcfa/src/concurrency/preemption.cfa
r29d8c02 r74ec742 38 38 #endif 39 39 40 __attribute__((weak)) Duration default_preemption() {40 __attribute__((weak)) Duration default_preemption() libcfa_public { 41 41 const char * preempt_rate_s = getenv("CFA_DEFAULT_PREEMPTION"); 42 42 if(!preempt_rate_s) { … … 238 238 //---------- 239 239 // special case for preemption since used often 240 __attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() {240 __attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() libcfa_public { 241 241 // create a assembler label before 242 242 // marked as clobber all to avoid movement … … 276 276 // Get data from the TLS block 277 277 // struct asm_region __cfaasm_get; 278 uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__ )); //no inline to avoid problems278 uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__, visibility("default"))); //no inline to avoid problems 279 279 uintptr_t __cfatls_get( unsigned long int offset ) { 280 280 // create a assembler label before … … 295 295 extern "C" { 296 296 // Disable interrupts by incrementing the counter 297 void disable_interrupts(){297 __attribute__((__noinline__, visibility("default"))) void disable_interrupts() libcfa_public { 298 298 // create a assembler label before 299 299 // marked as clobber all to avoid movement … … 326 326 // Enable interrupts by decrementing the counter 327 327 // If counter reaches 0, execute any pending __cfactx_switch 328 void enable_interrupts( bool poll ) {328 void enable_interrupts( bool poll ) libcfa_public { 329 329 // Cache the processor now since interrupts can start happening after the atomic store 330 330 processor * proc = __cfaabi_tls.this_processor; … … 362 362 //----------------------------------------------------------------------------- 363 363 // Kernel Signal Debug 364 void __cfaabi_check_preemption() {364 void __cfaabi_check_preemption() libcfa_public { 365 365 bool ready = __preemption_enabled(); 366 366 if(!ready) { abort("Preemption should be ready"); } -
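Note: the preemption hunk keeps the interrupt toggles exported (libcfathread calls them) while the surrounding comments document the discipline: disable_interrupts increments a per-thread counter, enable_interrupts decrements it, and a deferred preemption (the pending __cfactx_switch) may only run when the counter returns to zero, so the pair nests safely. A toy model of that counting discipline, not the runtime's actual TLS code:

    static _Thread_local unsigned disable_count = 0;   /* toy stand-in for the runtime TLS */

    void toy_disable_interrupts( void ) {
        disable_count += 1;                  /* nested disables just deepen the count */
    }

    void toy_enable_interrupts( void ) {
        disable_count -= 1;
        if ( disable_count == 0 ) {
            /* only the outermost enable may act on a pending preemption;
               inner enables merely unwind the nesting */
        }
    }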
libcfa/src/concurrency/ready_subqueue.hfa
r29d8c02 r74ec742 83 83 /* paranoid */ verify( node->link.ts != 0 ); 84 84 /* paranoid */ verify( this.anchor.ts != 0 ); 85 /* paranoid */ verify( (this.anchor.ts == MAX) == is_empty ); 85 86 return [node, this.anchor.ts]; 86 87 } … … 93 94 // Return the timestamp 94 95 static inline unsigned long long ts(__intrusive_lane_t & this) { 95 // Cannot verify here since it may not be locked96 // Cannot verify 'emptiness' here since it may not be locked 96 97 /* paranoid */ verify(this.anchor.ts != 0); 97 98 return this.anchor.ts; -
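Note: the new assertion strengthens the subqueue invariant: the anchor timestamp equals the MAX sentinel exactly when the lane is empty, and a timestamp of zero is never valid. The invariant in isolation (toy model, not the runtime's types):

    #include <assert.h>
    #include <stdint.h>

    #define TS_MAX UINT64_MAX                /* stand-in for the runtime's MAX sentinel */

    struct lane { uint64_t anchor_ts; unsigned count; };

    void check_invariant( struct lane * l ) {
        assert( l->anchor_ts != 0 );                           /* 0 is never a valid ts */
        assert( (l->anchor_ts == TS_MAX) == (l->count == 0) ); /* empty <=> sentinel */
    }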
libcfa/src/concurrency/thread.cfa
r29d8c02 r74ec742 26 26 27 27 extern uint32_t __global_random_seed, __global_random_prime, __global_random_mask; 28 29 #pragma GCC visibility push(default) 28 30 29 31 //----------------------------------------------------------------------------- -
libcfa/src/containers/maybe.cfa
r29d8c02 r74ec742 17 17 #include <assert.h> 18 18 19 #pragma GCC visibility push(default) 19 20 20 21 forall(T) -
libcfa/src/containers/result.cfa
r29d8c02 r74ec742 17 17 #include <assert.h> 18 18 19 #pragma GCC visibility push(default) 19 20 20 21 forall(T, E) -
libcfa/src/containers/string.cfa
r29d8c02 r74ec742 18 18 #include <stdlib.hfa> 19 19 20 #pragma GCC visibility push(default) 20 21 21 22 /* -
libcfa/src/containers/string_sharectx.hfa
r29d8c02 r74ec742 16 16 #pragma once 17 17 18 #pragma GCC visibility push(default) 19 18 20 //######################### String Sharing Context ######################### 19 21 20 22 struct VbyteHeap; 21 23 22 // A string_sharectx 24 // A string_sharectx 23 25 // 24 26 // Usage: -
libcfa/src/containers/vector.cfa
r29d8c02 r74ec742 18 18 #include <stdlib.hfa> 19 19 20 #pragma GCC visibility push(default) 21 20 22 forall(T, allocator_t | allocator_c(T, allocator_t)) 21 void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other);23 static void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other); 22 24 23 25 //------------------------------------------------------------------------------ … … 83 85 84 86 forall(T, allocator_t | allocator_c(T, allocator_t)) 85 void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other)87 static void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other) 86 88 { 87 89 this->size = other->size; -
libcfa/src/device/cpu.cfa
r29d8c02 r74ec742 31 31 } 32 32 33 #include "bits/defs.hfa" 33 34 #include "algorithms/range_iterator.hfa" 34 35 … … 456 457 } 457 458 458 cpu_info_t cpu_info;459 libcfa_public cpu_info_t cpu_info; -
libcfa/src/exception.c
r29d8c02 r74ec742 27 27 #include "stdhdr/assert.h" 28 28 #include "virtual.h" 29 30 #pragma GCC visibility push(default) 31 29 32 #include "lsda.h" 30 33 … … 261 264 #else // defined( __ARM_ARCH ) 262 265 // The return code from _Unwind_RaiseException seems to be corrupt on ARM at end of stack. 263 // This workaround tries to keep default exception handling working. 266 // This workaround tries to keep default exception handling working. 264 267 if ( ret == _URC_FATAL_PHASE1_ERROR || ret == _URC_FATAL_PHASE2_ERROR ) { 265 268 #endif -
libcfa/src/fstream.cfa
r29d8c02 r74ec742 22 22 #include <assert.h> 23 23 #include <errno.h> // errno 24 25 #pragma GCC visibility push(default) 24 26 25 27 // *********************************** ofstream *********************************** … … 118 120 // abort | IO_MSG "open output file \"" | name | "\"" | nl | strerror( errno ); 119 121 } // if 120 (os){ file }; // initialize 122 (os){ file }; // initialize 121 123 } // open 122 124 … … 157 159 va_list args; 158 160 va_start( args, format ); 159 161 160 162 int len; 161 163 for ( cnt; 10 ) { … … 241 243 // abort | IO_MSG "open input file \"" | name | "\"" | nl | strerror( errno ); 242 244 } // if 243 (is){ file }; // initialize 245 (is){ file }; // initialize 244 246 } // open 245 247 -
libcfa/src/fstream.hfa
r29d8c02 r74ec742 18 18 #include "bits/weakso_locks.hfa" // mutex_lock 19 19 #include "iostream.hfa" 20 #include <exception.hfa>21 20 22 21 -
libcfa/src/heap.cfa
r29d8c02 r74ec742 10 10 // Created On : Tue Dec 19 21:58:35 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Apr 25 18:51:36202213 // Update Count : 11 4712 // Last Modified On : Fri Apr 29 19:05:03 2022 13 // Update Count : 1167 14 14 // 15 15 … … 36 36 static bool traceHeap = false; 37 37 38 inline bool traceHeap() { return traceHeap; }39 40 bool traceHeapOn() {38 inline bool traceHeap() libcfa_public { return traceHeap; } 39 40 bool traceHeapOn() libcfa_public { 41 41 bool temp = traceHeap; 42 42 traceHeap = true; … … 44 44 } // traceHeapOn 45 45 46 bool traceHeapOff() {46 bool traceHeapOff() libcfa_public { 47 47 bool temp = traceHeap; 48 48 traceHeap = false; … … 50 50 } // traceHeapOff 51 51 52 bool traceHeapTerm() { return false; }52 bool traceHeapTerm() libcfa_public { return false; } 53 53 54 54 55 55 static bool prtFree = false; 56 56 57 bool prtFree() {57 static bool prtFree() { 58 58 return prtFree; 59 59 } // prtFree 60 60 61 bool prtFreeOn() {61 static bool prtFreeOn() { 62 62 bool temp = prtFree; 63 63 prtFree = true; … … 65 65 } // prtFreeOn 66 66 67 bool prtFreeOff() {67 static bool prtFreeOff() { 68 68 bool temp = prtFree; 69 69 prtFree = false; … … 87 87 88 88 89 #ifdef __CFA_DEBUG__ 90 static size_t allocUnfreed; // running total of allocations minus frees 91 92 static void prtUnfreed() { 93 if ( allocUnfreed != 0 ) { 94 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT. 95 char helpText[512]; 96 int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %zu(0x%zx) bytes of storage allocated but not freed.\n" 97 "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n", 98 (long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid 99 __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug 100 } // if 101 } // prtUnfreed 102 103 extern int cfa_main_returned; // from interpose.cfa 104 extern "C" { 105 void heapAppStart() { // called by __cfaabi_appready_startup 106 allocUnfreed = 0; 107 } // heapAppStart 108 109 void heapAppStop() { // called by __cfaabi_appready_startdown 110 fclose( stdin ); fclose( stdout ); 111 if ( cfa_main_returned ) prtUnfreed(); // do not check unfreed storage if exit called 112 } // heapAppStop 113 } // extern "C" 114 #endif // __CFA_DEBUG__ 115 116 117 // statically allocated variables => zero filled. 118 static size_t heapExpand; // sbrk advance 119 static size_t mmapStart; // cross over point for mmap 120 static unsigned int maxBucketsUsed; // maximum number of buckets in use 121 // extern visibility, used by runtime kernel 122 size_t __page_size; // architecture pagesize 123 int __map_prot; // common mmap/mprotect protection 124 125 126 #define SPINLOCK 0 127 #define LOCKFREE 1 128 #define BUCKETLOCK SPINLOCK 129 #if BUCKETLOCK == SPINLOCK 130 #elif BUCKETLOCK == LOCKFREE 131 #include <stackLockFree.hfa> 132 #else 133 #error undefined lock type for bucket lock 134 #endif // LOCKFREE 135 136 // Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage. 137 // Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined. 
138 enum { NoBucketSizes = 91 }; // number of buckets sizes 139 140 struct Heap { 141 struct Storage { 142 struct Header { // header 143 union Kind { 144 struct RealHeader { 145 union { 146 struct { // 4-byte word => 8-byte header, 8-byte word => 16-byte header 147 union { 148 // 2nd low-order bit => zero filled, 3rd low-order bit => mmapped 149 // FreeHeader * home; // allocated block points back to home locations (must overlay alignment) 150 void * home; // allocated block points back to home locations (must overlay alignment) 151 size_t blockSize; // size for munmap (must overlay alignment) 152 #if BUCKETLOCK == SPINLOCK 153 Storage * next; // freed block points to next freed block of same size 154 #endif // SPINLOCK 155 }; 156 size_t size; // allocation size in bytes 157 }; 158 #if BUCKETLOCK == LOCKFREE 159 Link(Storage) next; // freed block points next freed block of same size (double-wide) 160 #endif // LOCKFREE 161 }; 162 } real; // RealHeader 163 164 struct FakeHeader { 165 uintptr_t alignment; // 1st low-order bit => fake header & alignment 166 uintptr_t offset; 167 } fake; // FakeHeader 168 } kind; // Kind 169 } header; // Header 170 171 char pad[libAlign() - sizeof( Header )]; 172 char data[0]; // storage 173 }; // Storage 174 175 static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" ); 176 177 struct FreeHeader { 178 #if BUCKETLOCK == SPINLOCK 179 __spinlock_t lock; // must be first field for alignment 180 Storage * freeList; 181 #else 182 StackLF(Storage) freeList; 183 #endif // BUCKETLOCK 184 size_t blockSize; // size of allocations on this list 185 }; // FreeHeader 186 187 // must be first fields for alignment 188 __spinlock_t extlock; // protects allocation-buffer extension 189 FreeHeader freeLists[NoBucketSizes]; // buckets for different allocation sizes 190 191 void * heapBegin; // start of heap 192 void * heapEnd; // logical end of heap 193 size_t heapRemaining; // amount of storage not allocated in the current chunk 194 }; // Heap 195 196 #if BUCKETLOCK == LOCKFREE 197 static inline { 198 Link(Heap.Storage) * ?`next( Heap.Storage * this ) { return &this->header.kind.real.next; } 199 void ?{}( Heap.FreeHeader & ) {} 200 void ^?{}( Heap.FreeHeader & ) {} 201 } // distribution 202 #endif // LOCKFREE 203 204 static inline size_t getKey( const Heap.FreeHeader & freeheader ) { return freeheader.blockSize; } 205 206 207 #ifdef FASTLOOKUP 208 enum { LookupSizes = 65_536 + sizeof(Heap.Storage) }; // number of fast lookup sizes 209 static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes 210 #endif // FASTLOOKUP 211 212 static const off_t mmapFd = -1; // fake or actual fd for anonymous file 213 #ifdef __CFA_DEBUG__ 214 static bool heapBoot = 0; // detect recursion during boot 215 #endif // __CFA_DEBUG__ 216 217 218 // Size of array must harmonize with NoBucketSizes and individual bucket sizes must be multiple of 16. 219 // Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size. 220 // malloc(0) returns 0p, so no bucket is necessary for 0 bytes returning an address that can be freed. 
221 static const unsigned int bucketSizes[] @= { // different bucket sizes 222 16 + sizeof(Heap.Storage), 32 + sizeof(Heap.Storage), 48 + sizeof(Heap.Storage), 64 + sizeof(Heap.Storage), // 4 223 96 + sizeof(Heap.Storage), 112 + sizeof(Heap.Storage), 128 + sizeof(Heap.Storage), // 3 224 160, 192, 224, 256 + sizeof(Heap.Storage), // 4 225 320, 384, 448, 512 + sizeof(Heap.Storage), // 4 226 640, 768, 896, 1_024 + sizeof(Heap.Storage), // 4 227 1_536, 2_048 + sizeof(Heap.Storage), // 2 228 2_560, 3_072, 3_584, 4_096 + sizeof(Heap.Storage), // 4 229 6_144, 8_192 + sizeof(Heap.Storage), // 2 230 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap.Storage), // 8 231 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap.Storage), // 8 232 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap.Storage), // 8 233 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap.Storage), // 8 234 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap.Storage), // 8 235 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap.Storage), // 8 236 655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap.Storage), // 4 237 1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap.Storage), // 8 238 2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap.Storage), // 4 239 }; 240 241 static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" ); 242 243 // The constructor for heapManager is called explicitly in memory_startup. 244 static Heap heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing 245 246 247 //####################### Memory Allocation Routines Helpers #################### 89 //####################### Heap Statistics #################### 248 90 249 91 … … 307 149 return lhs; 308 150 } // ?+=? 309 151 #endif // __STATISTICS__ 152 153 154 #define SPINLOCK 0 155 #define LOCKFREE 1 156 #define BUCKETLOCK SPINLOCK 157 #if BUCKETLOCK == SPINLOCK 158 #elif BUCKETLOCK == LOCKFREE 159 #include <stackLockFree.hfa> 160 #else 161 #error undefined lock type for bucket lock 162 #endif // LOCKFREE 163 164 // Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage. 165 // Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined. 
166 enum { NoBucketSizes = 91 }; // number of buckets sizes 167 168 struct Heap { 169 struct Storage { 170 struct Header { // header 171 union Kind { 172 struct RealHeader { 173 union { 174 struct { // 4-byte word => 8-byte header, 8-byte word => 16-byte header 175 union { 176 // 2nd low-order bit => zero filled, 3rd low-order bit => mmapped 177 // FreeHeader * home; // allocated block points back to home locations (must overlay alignment) 178 void * home; // allocated block points back to home locations (must overlay alignment) 179 size_t blockSize; // size for munmap (must overlay alignment) 180 #if BUCKETLOCK == SPINLOCK 181 Storage * next; // freed block points to next freed block of same size 182 #endif // SPINLOCK 183 }; 184 size_t size; // allocation size in bytes 185 }; 186 #if BUCKETLOCK == LOCKFREE 187 Link(Storage) next; // freed block points next freed block of same size (double-wide) 188 #endif // LOCKFREE 189 }; 190 } real; // RealHeader 191 192 struct FakeHeader { 193 uintptr_t alignment; // 1st low-order bit => fake header & alignment 194 uintptr_t offset; 195 } fake; // FakeHeader 196 } kind; // Kind 197 } header; // Header 198 199 char pad[libAlign() - sizeof( Header )]; 200 char data[0]; // storage 201 }; // Storage 202 203 static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" ); 204 205 struct FreeHeader { 206 size_t blockSize __attribute__(( aligned (8) )); // size of allocations on this list 207 #if BUCKETLOCK == SPINLOCK 208 __spinlock_t lock; 209 Storage * freeList; 210 #else 211 StackLF(Storage) freeList; 212 #endif // BUCKETLOCK 213 } __attribute__(( aligned (8) )); // FreeHeader 214 215 FreeHeader freeLists[NoBucketSizes]; // buckets for different allocation sizes 216 217 __spinlock_t extlock; // protects allocation-buffer extension 218 void * heapBegin; // start of heap 219 void * heapEnd; // logical end of heap 220 size_t heapRemaining; // amount of storage not allocated in the current chunk 221 }; // Heap 222 223 #if BUCKETLOCK == LOCKFREE 224 static inline { 225 Link(Heap.Storage) * ?`next( Heap.Storage * this ) { return &this->header.kind.real.next; } 226 void ?{}( Heap.FreeHeader & ) {} 227 void ^?{}( Heap.FreeHeader & ) {} 228 } // distribution 229 #endif // LOCKFREE 230 231 static inline size_t getKey( const Heap.FreeHeader & freeheader ) { return freeheader.blockSize; } 232 233 234 #ifdef FASTLOOKUP 235 enum { LookupSizes = 65_536 + sizeof(Heap.Storage) }; // number of fast lookup sizes 236 static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes 237 #endif // FASTLOOKUP 238 239 static const off_t mmapFd = -1; // fake or actual fd for anonymous file 240 #ifdef __CFA_DEBUG__ 241 static bool heapBoot = 0; // detect recursion during boot 242 #endif // __CFA_DEBUG__ 243 244 245 // Size of array must harmonize with NoBucketSizes and individual bucket sizes must be multiple of 16. 246 // Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size. 247 // malloc(0) returns 0p, so no bucket is necessary for 0 bytes returning an address that can be freed. 
248 static const unsigned int bucketSizes[] @= { // different bucket sizes 249 16 + sizeof(Heap.Storage), 32 + sizeof(Heap.Storage), 48 + sizeof(Heap.Storage), 64 + sizeof(Heap.Storage), // 4 250 96 + sizeof(Heap.Storage), 112 + sizeof(Heap.Storage), 128 + sizeof(Heap.Storage), // 3 251 160, 192, 224, 256 + sizeof(Heap.Storage), // 4 252 320, 384, 448, 512 + sizeof(Heap.Storage), // 4 253 640, 768, 896, 1_024 + sizeof(Heap.Storage), // 4 254 1_536, 2_048 + sizeof(Heap.Storage), // 2 255 2_560, 3_072, 3_584, 4_096 + sizeof(Heap.Storage), // 4 256 6_144, 8_192 + sizeof(Heap.Storage), // 2 257 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap.Storage), // 8 258 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap.Storage), // 8 259 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap.Storage), // 8 260 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap.Storage), // 8 261 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap.Storage), // 8 262 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap.Storage), // 8 263 655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap.Storage), // 4 264 1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap.Storage), // 8 265 2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap.Storage), // 4 266 }; 267 268 static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" ); 269 270 // The constructor for heapManager is called explicitly in memory_startup. 271 static Heap heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing 272 273 274 //####################### Memory Allocation Routines Helpers #################### 275 276 277 #ifdef __CFA_DEBUG__ 278 static size_t allocUnfreed; // running total of allocations minus frees 279 280 static void prtUnfreed() { 281 if ( allocUnfreed != 0 ) { 282 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT. 283 char helpText[512]; 284 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 285 "CFA warning (UNIX pid:%ld) : program terminating with %zu(0x%zx) bytes of storage allocated but not freed.\n" 286 "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n", 287 (long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid 288 } // if 289 } // prtUnfreed 290 291 extern int cfa_main_returned; // from interpose.cfa 292 extern "C" { 293 void heapAppStart() { // called by __cfaabi_appready_startup 294 allocUnfreed = 0; 295 } // heapAppStart 296 297 void heapAppStop() { // called by __cfaabi_appready_startdown 298 fclose( stdin ); fclose( stdout ); 299 if ( cfa_main_returned ) prtUnfreed(); // do not check unfreed storage if exit called 300 } // heapAppStop 301 } // extern "C" 302 #endif // __CFA_DEBUG__ 303 304 305 #ifdef __STATISTICS__ 310 306 static HeapStatistics stats; // zero filled 311 307 static unsigned int sbrk_calls; … … 387 383 388 384 385 // statically allocated variables => zero filled. 
386 static size_t heapExpand; // sbrk advance 387 static size_t mmapStart; // cross over point for mmap 388 static unsigned int maxBucketsUsed; // maximum number of buckets in use 389 // extern visibility, used by runtime kernel 390 // would be cool to remove libcfa_public but it's needed for libcfathread 391 libcfa_public size_t __page_size; // architecture pagesize 392 libcfa_public int __map_prot; // common mmap/mprotect protection 393 394 389 395 // thunk problem 390 396 size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) { … … 490 496 } else { 491 497 fakeHeader( header, alignment ); 492 if ( unlikely( MmappedBit( header ) ) ) { 493 assert( addr < heapBegin || heapEnd < addr );498 if ( unlikely( MmappedBit( header ) ) ) { // mmapped ? 499 verify( addr < heapBegin || heapEnd < addr ); 494 500 size = ClearStickyBits( header->kind.real.blockSize ); // mmap size 495 501 return true; … … 503 509 checkHeader( header < (Heap.Storage.Header *)heapBegin || (Heap.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -) 504 510 505 if ( freeHead < &freeLists[0] || &freeLists[NoBucketSizes] <= freeHead ) { 506 abort( "Attempt to %s storage %p with corrupted header.\n" 507 "Possible cause is duplicate free on same block or overwriting of header information.", 508 name, addr ); 509 } // if 511 Heap * homeManager; 512 if ( unlikely( freeHead == 0p || // freed and only free-list node => null link 513 // freed and link points at another free block not to a bucket in the bucket array. 514 freeHead < &freeLists[0] || &freeLists[NoBucketSizes] <= freeHead ) ) { 515 abort( "**** Error **** attempt to %s storage %p with corrupted header.\n" 516 "Possible cause is duplicate free on same block or overwriting of header information.", 517 name, addr ); 518 } // if 510 519 #endif // __CFA_DEBUG__ 511 520 … … 560 569 sbrk_storage += increase; 561 570 #endif // __STATISTICS__ 571 562 572 #ifdef __CFA_DEBUG__ 563 573 // Set new memory to garbage so subsequent uninitialized usages might fail. … … 565 575 //Memset( (char *)heapEnd + heapRemaining, increase ); 566 576 #endif // __CFA_DEBUG__ 577 567 578 rem = heapRemaining + increase - size; 568 579 } // if … … 651 662 __atomic_add_fetch( &allocUnfreed, tsize, __ATOMIC_SEQ_CST ); 652 663 if ( traceHeap() ) { 653 enum { BufferSize = 64 }; 654 char helpText[BufferSize]; 655 int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize ); 656 __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug 664 char helpText[64]; 665 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 666 "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize ); // print debug/nodebug 657 667 } // if 658 668 #endif // __CFA_DEBUG__ … … 711 721 if ( traceHeap() ) { 712 722 char helpText[64]; 713 int len = snprintf( helpText, sizeof(helpText), "Free( %p ) size:%zu\n", addr, size );714 __cfaabi_bits_write( STDERR_FILENO, helpText, len); // print debug/nodebug723 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 724 "Free( %p ) size:%zu\n", addr, size ); // print debug/nodebug 715 725 } // if 716 726 #endif // __CFA_DEBUG__ … … 718 728 719 729 720 s ize_t prtFree( Heap & manager ) with( manager ) {730 static size_t prtFree( Heap & manager ) with( manager ) { 721 731 size_t total = 0; 722 732 #ifdef __STATISTICS__ … … 870 880 // Allocates size bytes and returns a pointer to the allocated memory. The contents are undefined. 
If size is 0, 871 881 // then malloc() returns a unique pointer value that can later be successfully passed to free(). 872 void * malloc( size_t size ) {882 void * malloc( size_t size ) libcfa_public { 873 883 #ifdef __STATISTICS__ 874 884 if ( likely( size > 0 ) ) { … … 885 895 886 896 // Same as malloc() except size bytes is an array of dim elements each of elemSize bytes. 887 void * aalloc( size_t dim, size_t elemSize ) {897 void * aalloc( size_t dim, size_t elemSize ) libcfa_public { 888 898 size_t size = dim * elemSize; 889 899 #ifdef __STATISTICS__ … … 901 911 902 912 // Same as aalloc() with memory set to zero. 903 void * calloc( size_t dim, size_t elemSize ) {913 void * calloc( size_t dim, size_t elemSize ) libcfa_public { 904 914 size_t size = dim * elemSize; 905 915 if ( unlikely( size ) == 0 ) { // 0 BYTE ALLOCATION RETURNS NULL POINTER … … 942 952 // not 0p, then the call is equivalent to free(oaddr). Unless oaddr is 0p, it must have been returned by an earlier 943 953 // call to malloc(), alloc(), calloc() or realloc(). If the area pointed to was moved, a free(oaddr) is done. 944 void * resize( void * oaddr, size_t size ) {954 void * resize( void * oaddr, size_t size ) libcfa_public { 945 955 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 946 956 if ( unlikely( size == 0 ) ) { // special cases … … 987 997 // Same as resize() but the contents are unchanged in the range from the start of the region up to the minimum of 988 998 // the old and new sizes. 989 void * realloc( void * oaddr, size_t size ) {999 void * realloc( void * oaddr, size_t size ) libcfa_public { 990 1000 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 991 1001 if ( unlikely( size == 0 ) ) { // special cases … … 1051 1061 1052 1062 // Same as realloc() except the new allocation size is large enough for an array of nelem elements of size elsize. 1053 void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) {1063 void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) libcfa_public { 1054 1064 return realloc( oaddr, dim * elemSize ); 1055 1065 } // reallocarray … … 1057 1067 1058 1068 // Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. (obsolete) 1059 void * memalign( size_t alignment, size_t size ) {1069 void * memalign( size_t alignment, size_t size ) libcfa_public { 1060 1070 #ifdef __STATISTICS__ 1061 1071 if ( likely( size > 0 ) ) { … … 1072 1082 1073 1083 // Same as aalloc() with memory alignment. 1074 void * amemalign( size_t alignment, size_t dim, size_t elemSize ) {1084 void * amemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public { 1075 1085 size_t size = dim * elemSize; 1076 1086 #ifdef __STATISTICS__ … … 1088 1098 1089 1099 // Same as calloc() with memory alignment. 1090 void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) {1100 void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public { 1091 1101 size_t size = dim * elemSize; 1092 1102 if ( unlikely( size ) == 0 ) { // 0 BYTE ALLOCATION RETURNS NULL POINTER … … 1127 1137 // Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple 1128 1138 // of alignment. This requirement is universally ignored. 
1129 void * aligned_alloc( size_t alignment, size_t size ) {1139 void * aligned_alloc( size_t alignment, size_t size ) libcfa_public { 1130 1140 return memalign( alignment, size ); 1131 1141 } // aligned_alloc … … 1136 1146 // is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later be successfully passed to 1137 1147 // free(3). 1138 int posix_memalign( void ** memptr, size_t alignment, size_t size ) {1148 int posix_memalign( void ** memptr, size_t alignment, size_t size ) libcfa_public { 1139 1149 if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) return EINVAL; // check alignment 1140 1150 *memptr = memalign( alignment, size ); … … 1145 1155 // Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the 1146 1156 // page size. It is equivalent to memalign(sysconf(_SC_PAGESIZE),size). 1147 void * valloc( size_t size ) {1157 void * valloc( size_t size ) libcfa_public { 1148 1158 return memalign( __page_size, size ); 1149 1159 } // valloc … … 1151 1161 1152 1162 // Same as valloc but rounds size to multiple of page size. 1153 void * pvalloc( size_t size ) {1163 void * pvalloc( size_t size ) libcfa_public { 1154 1164 return memalign( __page_size, ceiling2( size, __page_size ) ); // round size to multiple of page size 1155 1165 } // pvalloc … … 1159 1169 // or realloc(). Otherwise, or if free(ptr) has already been called before, undefined behaviour occurs. If ptr is 1160 1170 // 0p, no operation is performed. 1161 void free( void * addr ) {1171 void free( void * addr ) libcfa_public { 1162 1172 if ( unlikely( addr == 0p ) ) { // special case 1163 1173 #ifdef __STATISTICS__ … … 1180 1190 1181 1191 // Returns the alignment of an allocation. 1182 size_t malloc_alignment( void * addr ) {1192 size_t malloc_alignment( void * addr ) libcfa_public { 1183 1193 if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment 1184 1194 Heap.Storage.Header * header = HeaderAddr( addr ); … … 1192 1202 1193 1203 // Returns true if the allocation is zero filled, e.g., allocated by calloc(). 1194 bool malloc_zero_fill( void * addr ) {1204 bool malloc_zero_fill( void * addr ) libcfa_public { 1195 1205 if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill 1196 1206 Heap.Storage.Header * header = HeaderAddr( addr ); … … 1203 1213 1204 1214 // Returns original total allocation size (not bucket size) => array size is dimension * sizeof(T). 1205 size_t malloc_size( void * addr ) {1215 size_t malloc_size( void * addr ) libcfa_public { 1206 1216 if ( unlikely( addr == 0p ) ) return 0; // null allocation has zero size 1207 1217 Heap.Storage.Header * header = HeaderAddr( addr ); … … 1215 1225 // Returns the number of usable bytes in the block pointed to by ptr, a pointer to a block of memory allocated by 1216 1226 // malloc or a related function. 1217 size_t malloc_usable_size( void * addr ) {1227 size_t malloc_usable_size( void * addr ) libcfa_public { 1218 1228 if ( unlikely( addr == 0p ) ) return 0; // null allocation has 0 size 1219 1229 Heap.Storage.Header * header; … … 1227 1237 1228 1238 // Prints (on default standard error) statistics about memory allocated by malloc and related functions. 1229 void malloc_stats( void ) {1239 void malloc_stats( void ) libcfa_public { 1230 1240 #ifdef __STATISTICS__ 1231 1241 printStats(); … … 1236 1246 1237 1247 // Changes the file descriptor where malloc_stats() writes statistics. 
1238 int malloc_stats_fd( int fd __attribute__(( unused )) ) {1248 int malloc_stats_fd( int fd __attribute__(( unused )) ) libcfa_public { 1239 1249 #ifdef __STATISTICS__ 1240 1250 int temp = stats_fd; … … 1250 1260 // The string is printed on the file stream stream. The exported string includes information about all arenas (see 1251 1261 // malloc). 1252 int malloc_info( int options, FILE * stream __attribute__(( unused )) ) {1262 int malloc_info( int options, FILE * stream __attribute__(( unused )) ) libcfa_public { 1253 1263 if ( options != 0 ) { errno = EINVAL; return -1; } 1254 1264 #ifdef __STATISTICS__ … … 1262 1272 // Adjusts parameters that control the behaviour of the memory-allocation functions (see malloc). The param argument 1263 1273 // specifies the parameter to be modified, and value specifies the new value for that parameter. 1264 int mallopt( int option, int value ) {1274 int mallopt( int option, int value ) libcfa_public { 1265 1275 if ( value < 0 ) return 0; 1266 1276 choose( option ) { … … 1276 1286 1277 1287 // Attempt to release free memory at the top of the heap (by calling sbrk with a suitable argument). 1278 int malloc_trim( size_t ) {1288 int malloc_trim( size_t ) libcfa_public { 1279 1289 return 0; // => impossible to release memory 1280 1290 } // malloc_trim … … 1285 1295 // structure dynamically allocated via malloc, and a pointer to that data structure is returned as the function 1286 1296 // result. (The caller must free this memory.) 1287 void * malloc_get_state( void ) {1297 void * malloc_get_state( void ) libcfa_public { 1288 1298 return 0p; // unsupported 1289 1299 } // malloc_get_state … … 1292 1302 // Restores the state of all malloc internal bookkeeping variables to the values recorded in the opaque data 1293 1303 // structure pointed to by state. 1294 int malloc_set_state( void * ) {1304 int malloc_set_state( void * ) libcfa_public { 1295 1305 return 0; // unsupported 1296 1306 } // malloc_set_state … … 1298 1308 1299 1309 // Sets the amount (bytes) to extend the heap when there is insufficent free storage to service an allocation. 1300 __attribute__((weak)) size_t malloc_expansion() { return __CFA_DEFAULT_HEAP_EXPANSION__; }1310 __attribute__((weak)) size_t malloc_expansion() libcfa_public { return __CFA_DEFAULT_HEAP_EXPANSION__; } 1301 1311 1302 1312 // Sets the crossover point between allocations occuring in the sbrk area or separately mmapped. 1303 __attribute__((weak)) size_t malloc_mmap_start() { return __CFA_DEFAULT_MMAP_START__; }1313 __attribute__((weak)) size_t malloc_mmap_start() libcfa_public { return __CFA_DEFAULT_MMAP_START__; } 1304 1314 1305 1315 // Amount subtracted to adjust for unfreed program storage (debug only). 1306 __attribute__((weak)) size_t malloc_unfreed() { return __CFA_DEFAULT_HEAP_UNFREED__; }1316 __attribute__((weak)) size_t malloc_unfreed() libcfa_public { return __CFA_DEFAULT_HEAP_UNFREED__; } 1307 1317 } // extern "C" 1308 1318 1309 1319 1310 1320 // Must have CFA linkage to overload with C linkage realloc. 1311 void * resize( void * oaddr, size_t nalign, size_t size ) {1321 void * resize( void * oaddr, size_t nalign, size_t size ) libcfa_public { 1312 1322 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 
1313 1323 if ( unlikely( size == 0 ) ) { // special cases … … 1371 1381 1372 1382 1373 void * realloc( void * oaddr, size_t nalign, size_t size ) {1383 void * realloc( void * oaddr, size_t nalign, size_t size ) libcfa_public { 1374 1384 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 1375 1385 if ( unlikely( size == 0 ) ) { // special cases -
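Note: the heap changes above export each user-facing allocation routine with libcfa_public while helpers such as prtFree become static, and size-class selection is routed through Bsearchl() over the bucketSizes table. A minimal sketch of that first-fit lookup, in plain C with an illustrative bucket table (bucket_index and buckets are hypothetical names, not the library's):

#include <stddef.h>

static const unsigned int buckets[] = { 16, 32, 48, 64, 96, 128 };  // illustrative sizes only

static size_t bucket_index( unsigned int key, const unsigned int vals[], size_t dim ) {
    size_t lo = 0, hi = dim;                    // search the half-open range [lo, hi)
    while ( lo < hi ) {
        size_t mid = lo + ( hi - lo ) / 2;
        if ( vals[mid] < key ) lo = mid + 1;    // bucket too small: search right half
        else hi = mid;                          // bucket fits: keep as candidate, search left
    }
    return lo;                                  // index of the smallest bucket >= key
}

A request of a given size is then served from buckets[bucket_index( size, buckets, sizeof(buckets)/sizeof(buckets[0]) )]; requests beyond the largest bucket fall through to mmap, which is the crossover the mmapStart variable above controls.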
libcfa/src/interpose.cfa
r29d8c02 r74ec742 36 36 //============================================================================================= 37 37 38 void preload_libgcc(void) {38 static void preload_libgcc(void) { 39 39 dlopen( "libgcc_s.so.1", RTLD_NOW ); 40 40 if ( const char * error = dlerror() ) abort( "interpose_symbol : internal error pre-loading libgcc, %s\n", error ); … … 42 42 43 43 typedef void (* generic_fptr_t)(void); 44 generic_fptr_t interpose_symbol( const char symbol[], const char version[] ) {44 static generic_fptr_t interpose_symbol( const char symbol[], const char version[] ) { 45 45 const char * error; 46 46 … … 83 83 //============================================================================================= 84 84 85 void sigHandler_segv( __CFA_SIGPARMS__ );86 void sigHandler_ill ( __CFA_SIGPARMS__ );87 void sigHandler_fpe ( __CFA_SIGPARMS__ );88 void sigHandler_abrt( __CFA_SIGPARMS__ );89 void sigHandler_term( __CFA_SIGPARMS__ );90 91 st ruct {85 static void sigHandler_segv( __CFA_SIGPARMS__ ); 86 static void sigHandler_ill ( __CFA_SIGPARMS__ ); 87 static void sigHandler_fpe ( __CFA_SIGPARMS__ ); 88 static void sigHandler_abrt( __CFA_SIGPARMS__ ); 89 static void sigHandler_term( __CFA_SIGPARMS__ ); 90 91 static struct { 92 92 void (* exit)( int ) __attribute__(( __noreturn__ )); 93 93 void (* abort)( void ) __attribute__(( __noreturn__ )); 94 94 } __cabi_libc; 95 95 96 int cfa_main_returned;96 libcfa_public int cfa_main_returned; 97 97 98 98 extern "C" { … … 148 148 149 149 // Forward declare abort after the __typeof__ call to avoid ambiguities 150 void exit( int status, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));151 void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ ));152 void abort( bool signalAbort, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));153 void __abort( bool signalAbort, const char fmt[], va_list args ) __attribute__(( __nothrow__, __leaf__, __noreturn__ ));150 libcfa_public void exit( int status, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ )); 151 libcfa_public void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )); 152 libcfa_public void abort( bool signalAbort, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ )); 153 libcfa_public void __abort( bool signalAbort, const char fmt[], va_list args ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )); 154 154 155 155 extern "C" { 156 void abort( void ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) {156 libcfa_public void abort( void ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) { 157 157 abort( false, "%s", "" ); 158 158 } 159 159 160 void __cabi_abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )) {160 libcfa_public void __cabi_abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )) { 161 161 va_list argp; 162 162 va_start( argp, fmt ); … … 165 165 } 166 166 167 void exit( int status ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) {167 libcfa_public void exit( int status ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) { 168 168 __cabi_libc.exit( status ); 169 169 } -
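Note: interpose_symbol() above resolves symbols through the dynamic linker, taking an optional version string. A hedged sketch of the usual glibc pattern behind such a helper (lookup_next is a hypothetical name; the library's actual lookup and error handling are not shown in full in this diff):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>

typedef void (* generic_fptr_t)( void );

static generic_fptr_t lookup_next( const char symbol[], const char version[] ) {
    dlerror();                                                   // clear any stale error state
    generic_fptr_t fptr = version
        ? (generic_fptr_t)dlvsym( RTLD_NEXT, symbol, version )   // versioned lookup
        : (generic_fptr_t)dlsym( RTLD_NEXT, symbol );            // unversioned lookup
    const char * error = dlerror();
    if ( error ) { fprintf( stderr, "interpose: %s\n", error ); exit( EXIT_FAILURE ); }
    return fptr;
}

RTLD_NEXT binds to the next definition of the symbol in link order, which is what lets the library wrap exit() and abort() yet still reach the real ones; the __cabi_libc table above caches exactly those two pointers.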
libcfa/src/iostream.cfa
r29d8c02 r74ec742 32 32 #include "bitmanip.hfa" // high1 33 33 34 #pragma GCC visibility push(default) 34 35 35 36 // *********************************** ostream *********************************** -
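Note: this is the first of many files in this changeset that re-export their whole interface with a visibility pragma (limits.cfa, memory.cfa, parseargs.cfa, parseconfig.cfa, rational.cfa, stdlib.cfa, strstream.cfa, time.cfa, and virtual.c below do the same). Assuming the library is now compiled with hidden default visibility, two export idioms appear side by side; a sketch in C, where the libcfa_public definition shown is an assumption (its real definition is not part of this diff) and the other names are illustrative:

#define libcfa_public __attribute__(( visibility("default") ))

#pragma GCC visibility push(default)    // every declaration below is exported ...
int whole_file_api( void );
#pragma GCC visibility pop              // ... then hidden-by-default resumes

static int helper( void );              // internal linkage: never exported
libcfa_public int one_symbol( void );   // exported selectively

Files whose entire surface is public take the pragma route; files mixing internal and public symbols, such as heap.cfa and interpose.cfa above, tag individual definitions with libcfa_public instead.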
libcfa/src/limits.cfa
r29d8c02 r74ec742 20 20 #include <complex.h> 21 21 #include "limits.hfa" 22 23 #pragma GCC visibility push(default) 22 24 23 25 // Integral Constants -
libcfa/src/memory.cfa
r29d8c02 r74ec742 16 16 #include "memory.hfa" 17 17 #include "stdlib.hfa" 18 19 #pragma GCC visibility push(default) 18 20 19 21 // Internal data object. -
libcfa/src/parseargs.cfa
r29d8c02 r74ec742 24 24 #include "common.hfa" 25 25 #include "limits.hfa" 26 27 #pragma GCC visibility push(default) 26 28 27 29 extern int cfa_args_argc __attribute__((weak)); … … 208 210 } 209 211 212 if(strcmp(arg, "Y") == 0) { 213 value = true; 214 return true; 215 } 216 217 if(strcmp(arg, "y") == 0) { 218 value = true; 219 return true; 220 } 221 210 222 if(strcmp(arg, "no") == 0) { 223 value = false; 224 return true; 225 } 226 227 if(strcmp(arg, "N") == 0) { 228 value = false; 229 return true; 230 } 231 232 if(strcmp(arg, "n") == 0) { 211 233 value = false; 212 234 return true; -
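Note: parseargs now accepts single-letter boolean spellings in addition to yes/no. A standalone sketch of the resulting acceptance logic (parse_yes_no is a hypothetical name; the library's parser differs in structure):

#include <stdbool.h>
#include <string.h>

static bool parse_yes_no( const char * arg, bool * value ) {
    if ( strcmp( arg, "yes" ) == 0 || strcmp( arg, "Y" ) == 0 || strcmp( arg, "y" ) == 0 ) {
        *value = true;                  // any affirmative spelling
        return true;
    }
    if ( strcmp( arg, "no" ) == 0 || strcmp( arg, "N" ) == 0 || strcmp( arg, "n" ) == 0 ) {
        *value = false;                 // any negative spelling
        return true;
    }
    return false;                       // unrecognized: leave value untouched
}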
libcfa/src/parseconfig.cfa
r29d8c02 r74ec742 14 14 15 15 16 #pragma GCC visibility push(default) 17 16 18 // *********************************** exceptions *********************************** 17 19 18 20 19 21 // TODO: Add names of missing config entries to exception (see further below) 20 staticvtable(Missing_Config_Entries) Missing_Config_Entries_vt;22 vtable(Missing_Config_Entries) Missing_Config_Entries_vt; 21 23 22 24 [ void ] ?{}( & Missing_Config_Entries this, unsigned int num_missing ) { … … 31 33 32 34 33 staticvtable(Parse_Failure) Parse_Failure_vt;35 vtable(Parse_Failure) Parse_Failure_vt; 34 36 35 37 [ void ] ?{}( & Parse_Failure this, [] char failed_key, [] char failed_value ) { … … 53 55 54 56 55 staticvtable(Validation_Failure) Validation_Failure_vt;57 vtable(Validation_Failure) Validation_Failure_vt; 56 58 57 59 [ void ] ?{}( & Validation_Failure this, [] char failed_key, [] char failed_value ) { … … 110 112 111 113 112 [ bool ] comments( & ifstream in, [] char name ) {114 static [ bool ] comments( & ifstream in, [] char name ) { 113 115 while () { 114 116 in | name; -
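Note: alongside the pragma, the three exception vtables above lose their static qualifier, presumably so they become visible outside the library: static gives a symbol internal linkage, which no visibility pragma can override. A small C illustration of that interaction (both names are illustrative):

#pragma GCC visibility push(default)
static int still_hidden = 0;    // internal linkage: the pragma has no effect
int now_exported = 0;           // external linkage + default visibility: exported
#pragma GCC visibility pop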
libcfa/src/rational.cfa
r29d8c02 r74ec742 17 17 #include "fstream.hfa" 18 18 #include "stdlib.hfa" 19 20 #pragma GCC visibility push(default) 19 21 20 22 forall( T | Arithmetic( T ) ) { -
libcfa/src/startup.cfa
r29d8c02 r74ec742 41 41 } // __cfaabi_appready_shutdown 42 42 43 void disable_interrupts() __attribute__(( weak )) {}44 void enable_interrupts() __attribute__(( weak )) {}43 void disable_interrupts() __attribute__(( weak )) libcfa_public {} 44 void enable_interrupts() __attribute__(( weak )) libcfa_public {} 45 45 46 46 … … 64 64 struct __spinlock_t; 65 65 extern "C" { 66 void __cfaabi_dbg_record_lock(struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) {}66 void __cfaabi_dbg_record_lock(struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) libcfa_public {} 67 67 } 68 68 -
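Note: disable_interrupts() and enable_interrupts() stay weak while gaining libcfa_public: the base library ships exported no-op defaults that a strong definition elsewhere (presumably the threading runtime, as with the other libcfathread hooks in this changeset) replaces at link time. The general C pattern:

// Base library: a weak, overridable no-op default.
void disable_interrupts( void ) __attribute__(( weak ));
void disable_interrupts( void ) {}      // does nothing when no threading runtime is linked

// Overriding library (hypothetical): a strong definition of the same symbol
// wins over the weak one, with no change needed in the base library.
// void disable_interrupts( void ) { /* real implementation */ }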
libcfa/src/stdlib.cfa
r29d8c02 r74ec742 25 25 #include <complex.h> // _Complex_I 26 26 #include <assert.h> 27 28 #pragma GCC visibility push(default) 27 29 28 30 //--------------------------------------- … … 225 227 #define GENERATOR LCG 226 228 227 uint32_t __global_random_seed; // sequential/concurrent 228 uint32_t __global_random_state; // sequential only 229 // would be cool to make hidden but it's needed for libcfathread 230 __attribute__((visibility("default"))) uint32_t __global_random_seed; // sequential/concurrent 231 __attribute__((visibility("hidden"))) uint32_t __global_random_state; // sequential only 229 232 230 233 void set_seed( PRNG & prng, uint32_t seed_ ) with( prng ) { state = seed = seed_; GENERATOR( state ); } // set seed -
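Note: the random-number globals are now split by audience: __global_random_seed is exported because libcfathread needs it, while __global_random_state stays hidden since it only backs the sequential generator. The diff selects GENERATOR LCG; a minimal sketch of a 32-bit linear-congruential step of that general shape (lcg is a hypothetical helper, and the constants are the Numerical Recipes ones, illustrative rather than the library's):

#include <stdint.h>

static inline uint32_t lcg( uint32_t * state ) {
    *state = *state * 1664525u + 1013904223u;  // x' = a*x + c (mod 2^32)
    return *state;
}

As in the diff's set_seed(), the pattern is to seed once and then advance the state on every draw.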
libcfa/src/strstream.cfa
r29d8c02 r74ec742 1 // 1 // 2 2 // Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo 3 // 3 // 4 4 // The contents of this file are covered under the licence agreement in the 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // strstream.cfa -- 8 // 7 // strstream.cfa -- 8 // 9 9 // Author : Peter A. Buhr 10 10 // Created On : Thu Apr 22 22:24:35 2021 … … 12 12 // Last Modified On : Sun Oct 10 16:13:20 2021 13 13 // Update Count : 101 14 // 14 // 15 15 16 16 #include "strstream.hfa" … … 24 24 #include <unistd.h> // sbrk, sysconf 25 25 26 #pragma GCC visibility push(default) 26 27 27 28 // *********************************** strstream *********************************** -
libcfa/src/time.cfa
r29d8c02 r74ec742 18 18 #include <stdio.h> // snprintf 19 19 #include <assert.h> 20 21 #pragma GCC visibility push(default) 20 22 21 23 static char * nanomsd( long int ns, char * buf ) { // most significant digits -
libcfa/src/virtual.c
r29d8c02 r74ec742 16 16 #include "virtual.h" 17 17 #include "assert.h" 18 19 #pragma GCC visibility push(default) 18 20 19 21 int __cfavir_is_parent(