Changeset 74ec742 for libcfa/src


Timestamp:
May 20, 2022, 10:36:45 AM (3 years ago)
Author:
m3zulfiq <m3zulfiq@…>
Branches:
ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
Children:
25fa20a
Parents:
29d8c02 (diff), 7831e8fb (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Location:
libcfa/src
Files:
51 edited

  • libcfa/src/Makefile.am

    r29d8c02 r74ec742  
    3333# The built sources must not depend on the installed inst_headers_src
    3434AM_CFAFLAGS = -quiet -cfalib -I$(srcdir)/stdhdr -I$(srcdir)/concurrency $(if $(findstring ${gdbwaittarget}, ${@}), -XCFA --gdb) @CONFIG_CFAFLAGS@
    35 AM_CFLAGS = -g -Wall -Werror=return-type -Wno-unused-function -fPIC -fexceptions -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@
     35AM_CFLAGS = -g -Wall -Werror=return-type -Wno-unused-function -fPIC -fexceptions -fvisibility=hidden -pthread @ARCH_FLAGS@ @CONFIG_CFLAGS@
    3636AM_CCASFLAGS = -g -Wall -Werror=return-type -Wno-unused-function @ARCH_FLAGS@ @CONFIG_CFLAGS@
    3737CFACC = @CFACC@
     
    194194
    195195prelude.o : prelude.cfa extras.cf gcc-builtins.cf builtins.cf @LOCAL_CFACC@ @CFACPP@
    196         ${AM_V_GEN}$(CFACOMPILE) -quiet -XCFA,-l ${<} -c -o ${@}
     196        ${AM_V_GEN}$(CFACOMPILE) -quiet -XCFA,-l ${<} -c -fvisibility=default -o ${@}
    197197
    198198prelude.lo: prelude.cfa extras.cf gcc-builtins.cf builtins.cf @LOCAL_CFACC@ @CFACPP@
    199199        ${AM_V_GEN}$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile \
    200         $(CFACOMPILE) -quiet -XCFA,-l ${<} -c -o ${@}
     200        $(CFACOMPILE) -quiet -XCFA,-l ${<} -c -fvisibility=default -o ${@}
    201201
    202202concurrency/io/call.cfa: $(srcdir)/concurrency/io/call.cfa.in
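
Note: the unifying change in this merge is symbol-visibility control. libcfa is now built with -fvisibility=hidden, so every symbol is hidden unless the source opts it back in (via the new libcfa_public macro or a visibility pragma), while the prelude keeps -fvisibility=default, presumably because its symbols must stay reachable. A minimal standalone sketch of the mechanism, with invented file and function names, not taken from the libcfa build:

    /* visibility_demo.c -- illustrative sketch only, not part of the libcfa build.
     * Build a shared object with symbols hidden by default:
     *     gcc -fvisibility=hidden -fPIC -shared visibility_demo.c -o libdemo.so
     * Inspect what the library actually exports:
     *     nm -D --defined-only libdemo.so
     * Only demo_public shows up; demo_helper is compiled with hidden visibility
     * and never enters the dynamic symbol table. */

    int demo_helper( int x );                       // no annotation: hidden under -fvisibility=hidden

    __attribute__((visibility("default")))          // opted back in, like the new libcfa_public marker
    int demo_public( int x ) {
            return demo_helper( x ) + 1;
    }

    int demo_helper( int x ) {
            return x * 2;
    }
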
  • libcfa/src/algorithms/range_iterator.cfa

    r29d8c02 r74ec742  
    2020#include <fstream.hfa>
    2121
    22 void main(RangeIter & this) {
     22#include "bits/defs.hfa"
     23
     24void main(RangeIter & this) libcfa_public {
    2325        for() {
    2426                this._start = -1;
  • libcfa/src/assert.cfa

    r29d8c02 r74ec742  
    1919#include <unistd.h>                                                             // STDERR_FILENO
    2020#include "bits/debug.hfa"
     21#include "bits/defs.hfa"
    2122
    2223extern "C" {
     
    2627
    2728        // called by macro assert in assert.h
    28         void __assert_fail( const char assertion[], const char file[], unsigned int line, const char function[] ) {
     29        // would be cool to remove libcfa_public but it's needed for libcfathread
     30        void __assert_fail( const char assertion[], const char file[], unsigned int line, const char function[] ) libcfa_public {
    2931                __cfaabi_bits_print_safe( STDERR_FILENO, CFA_ASSERT_FMT ".\n", assertion, __progname, function, line, file );
    3032                abort();
     
    3234
    3335        // called by macro assertf
    34         void __assert_fail_f( const char assertion[], const char file[], unsigned int line, const char function[], const char fmt[], ... ) {
     36        // would be cool to remove libcfa_public but it's needed for libcfathread
     37        void __assert_fail_f( const char assertion[], const char file[], unsigned int line, const char function[], const char fmt[], ... ) libcfa_public {
    3538                __cfaabi_bits_acquire();
    3639                __cfaabi_bits_print_nolock( STDERR_FILENO, CFA_ASSERT_FMT ": ", assertion, __progname, function, line, file );
  • libcfa/src/bits/align.hfa

    r29d8c02 r74ec742  
    1010// Created On       : Mon Nov 28 12:27:26 2016
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sat Nov 16 18:58:22 2019
    13 // Update Count     : 3
     12// Last Modified On : Fri Apr 29 19:14:43 2022
     13// Update Count     : 4
    1414//
    1515// This  library is free  software; you  can redistribute  it and/or  modify it
     
    3535//#define libAlign() (sizeof(double))
    3636// gcc-7 uses xmms instructions, which require 16 byte alignment.
    37 #define libAlign() (16)
     37#define libAlign() (__BIGGEST_ALIGNMENT__)
    3838
    3939// Check for power of 2
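
Note: __BIGGEST_ALIGNMENT__ is a predefined GCC/Clang macro reporting the largest alignment any fundamental type needs on the target, so libAlign() now follows the target ABI instead of a hard-coded 16. A standalone check (not libcfa code) to see the value a given toolchain uses:

    #include <stdio.h>
    #include <stddef.h>                             // max_align_t
    #include <stdalign.h>                           // alignof

    // Prints the toolchain's idea of the largest required alignment.  On common
    // x86-64 compilers both lines print 16, matching the old hard-coded value;
    // on targets with stricter requirements the macro adjusts automatically.
    int main( void ) {
            printf( "__BIGGEST_ALIGNMENT__ = %d\n", __BIGGEST_ALIGNMENT__ );
            printf( "alignof(max_align_t)  = %zu\n", alignof( max_align_t ) );
            return 0;
    }
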
  • libcfa/src/bits/debug.cfa

    r29d8c02 r74ec742  
    2121#include <unistd.h>
    2222
     23#include "bits/defs.hfa"
     24
    2325enum { buffer_size = 4096 };
    2426static char buffer[ buffer_size ];
    2527
    2628extern "C" {
    27         void __cfaabi_bits_write( int fd, const char in_buffer[], int len ) {
     29        // would be cool to remove libcfa_public but it's needed for libcfathread
     30        void __cfaabi_bits_write( int fd, const char in_buffer[], int len ) libcfa_public {
    2831                // ensure all data is written
    2932                for ( int count = 0, retcode; count < len; count += retcode ) {
     
    4447        void __cfaabi_bits_release() __attribute__((__weak__)) {}
    4548
    46         int __cfaabi_bits_print_safe  ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) {
     49        // would be cool to remove libcfa_public but it's needed for libcfathread
     50        int __cfaabi_bits_print_safe  ( int fd, const char fmt[], ... ) __attribute__(( format(printf, 2, 3) )) libcfa_public {
    4751                va_list args;
    4852
  • libcfa/src/bits/defs.hfa

    r29d8c02 r74ec742  
    3636#define __cfa_dlink(x) struct { struct x * next; struct x * back; } __dlink_substitute
    3737#endif
     38
     39#define libcfa_public __attribute__((visibility("default")))
    3840
    3941#ifdef __cforall
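
Note: the new libcfa_public macro is simply a short spelling of the GCC default-visibility attribute. The Cforall sources in this changeset attach it after the declarator (for example, void park( void ) libcfa_public { ... }), while the plain-C invoke.c puts it in front. A hypothetical C-side sketch of the same idiom, with made-up names:

    /* Hypothetical example in plain C (names are not libcfa's); the attribute
     * is written up front, mirroring how invoke.c applies the marker. */
    #define my_public __attribute__((visibility("default")))

    my_public int api_entry( int x );               // exported API

    static int internal_step( int x ) {             // static: never exported regardless of flags
            return x * 3;
    }

    my_public int api_entry( int x ) {
            return internal_step( x ) + 1;
    }
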
  • libcfa/src/bits/weakso_locks.cfa

    r29d8c02 r74ec742  
    1818#include "bits/weakso_locks.hfa"
    1919
     20#pragma GCC visibility push(default)
     21
    2022void  ?{}( blocking_lock &, bool, bool ) {}
    2123void ^?{}( blocking_lock & ) {}
  • libcfa/src/common.cfa

    r29d8c02 r74ec742  
    1818#include <stdlib.h>                                     // div_t, *div
    1919
     20#pragma GCC visibility push(default)
     21
    2022//---------------------------------------
    2123
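
Note: several translation units (common.cfa above, and later the lock, container, and stream sources) take the coarser route: a single "#pragma GCC visibility push(default)" near the top of the file re-exports everything that follows, so individual definitions need no annotation. These files push once and never pop, which leaves default visibility in effect for the rest of the unit. A minimal standalone C illustration with invented names:

    /* With the file compiled under -fvisibility=hidden, everything between
     * push(default) and pop is exported; anything outside stays hidden. */
    #pragma GCC visibility push(default)

    int exported_one( void ) { return 1; }          // exported
    int exported_two( void ) { return 2; }          // exported

    #pragma GCC visibility pop

    int still_hidden( void ) { return 3; }          // hidden: outside the pushed region
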
  • libcfa/src/concurrency/alarm.cfa

    r29d8c02 r74ec742  
    141141//=============================================================================================
    142142
    143 void sleep( Duration duration ) {
     143void sleep( Duration duration ) libcfa_public {
    144144        alarm_node_t node = { active_thread(), duration, 0`s };
    145145
  • libcfa/src/concurrency/clib/cfathread.cfa

    r29d8c02 r74ec742  
    326326}
    327327
     328#pragma GCC visibility push(default)
     329
    328330//================================================================================
    329331// Main Api
    330332extern "C" {
    331         int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) {
     333        int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) libcfa_public {
    332334                *cl = new();
    333335                return 0;
    334336        }
    335337
    336         cfathread_cluster_t cfathread_cluster_self(void) {
     338        cfathread_cluster_t cfathread_cluster_self(void) libcfa_public {
    337339                return active_cluster();
    338340        }
    339341
    340         int cfathread_cluster_print_stats( cfathread_cluster_t cl ) {
     342        int cfathread_cluster_print_stats( cfathread_cluster_t cl ) libcfa_public {
    341343                #if !defined(__CFA_NO_STATISTICS__)
    342344                        print_stats_at_exit( *cl, CFA_STATS_READY_Q | CFA_STATS_IO );
  • libcfa/src/concurrency/coroutine.cfa

    r29d8c02 r74ec742  
    4848//-----------------------------------------------------------------------------
    4949forall(T &)
    50 void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) {
     50void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) libcfa_public {
    5151        dst->virtual_table = src->virtual_table;
    5252        dst->the_coroutine = src->the_coroutine;
     
    5555
    5656forall(T &)
    57 const char * msg(CoroutineCancelled(T) *) {
     57const char * msg(CoroutineCancelled(T) *) libcfa_public {
    5858        return "CoroutineCancelled(...)";
    5959}
     
    6262forall(T & | is_coroutine(T))
    6363void __cfaehm_cancelled_coroutine(
    64                 T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) {
     64                T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) libcfa_public {
    6565        verify( desc->cancellation );
    6666        desc->state = Cancelled;
     
    8989
    9090void __stack_prepare( __stack_info_t * this, size_t create_size );
    91 void __stack_clean  ( __stack_info_t * this );
     91static void __stack_clean  ( __stack_info_t * this );
    9292
    9393//-----------------------------------------------------------------------------
     
    114114}
    115115
    116 void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) with( this ) {
     116void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) libcfa_public with( this ) {
    117117        (this.context){0p, 0p};
    118118        (this.stack){storage, storageSize};
     
    124124}
    125125
    126 void ^?{}(coroutine$& this) {
     126void ^?{}(coroutine$& this) libcfa_public {
    127127        if(this.state != Halted && this.state != Start && this.state != Primed) {
    128128                coroutine$ * src = active_coroutine();
     
    147147// Not inline since only ever called once per coroutine
    148148forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); })
    149 void prime(T& cor) {
     149void prime(T& cor) libcfa_public {
    150150        coroutine$* this = get_coroutine(cor);
    151151        assert(this->state == Start);
     
    155155}
    156156
    157 [void *, size_t] __stack_alloc( size_t storageSize ) {
     157static [void *, size_t] __stack_alloc( size_t storageSize ) {
    158158        const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
    159159        assert(__page_size != 0l);
     
    193193}
    194194
    195 void __stack_clean  ( __stack_info_t * this ) {
     195static void __stack_clean  ( __stack_info_t * this ) {
    196196        void * storage = this->storage->limit;
    197197
     
    215215}
    216216
    217 void __stack_prepare( __stack_info_t * this, size_t create_size ) {
     217void __stack_prepare( __stack_info_t * this, size_t create_size ) libcfa_public {
    218218        const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
    219219        bool userStack;
  • libcfa/src/concurrency/coroutine.hfa

    r29d8c02 r74ec742  
    113113
    114114extern void __stack_prepare( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
    115 extern void __stack_clean  ( __stack_info_t * this );
    116 
    117115
    118116// Suspend implementation inlined for performance
  • libcfa/src/concurrency/exception.cfa

    r29d8c02 r74ec742  
    6464extern "C" {
    6565
    66 struct exception_context_t * this_exception_context(void) {
     66struct exception_context_t * this_exception_context(void) libcfa_public {
    6767        return &__get_stack( active_coroutine() )->exception_context;
    6868}
    6969
    70 _Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) {
     70_Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) libcfa_public {
    7171        _Unwind_Stop_Fn stop_func;
    7272        void * stop_param;
  • libcfa/src/concurrency/invoke.c

    r29d8c02 r74ec742  
    3636extern void enable_interrupts( _Bool poll );
    3737
    38 void __cfactx_invoke_coroutine(
     38libcfa_public void __cfactx_invoke_coroutine(
    3939        void (*main)(void *),
    4040        void *this
     
    7070}
    7171
    72 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__));
     72libcfa_public void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__));
    7373void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) {
    7474        _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor );
     
    7777}
    7878
    79 void __cfactx_invoke_thread(
     79libcfa_public void __cfactx_invoke_thread(
    8080        void (*main)(void *),
    8181        void *this
     
    9898}
    9999
    100 void __cfactx_start(
     100libcfa_public void __cfactx_start(
    101101        void (*main)(void *),
    102102        struct coroutine$ * cor,
  • libcfa/src/concurrency/io.cfa

    r29d8c02 r74ec742  
    244244
    245245                                        remote = true;
    246                                         __STATS__( false, io.calls.helped++; )
     246                                        __STATS__( true, io.calls.helped++; )
    247247                                }
    248248                                proc->io.target = MAX;
     
    340340        // for convenience, return both the index and the pointer to the sqe
    341341        // sqe == &sqes[idx]
    342         struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) {
     342        struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) libcfa_public {
    343343                // __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);
    344344
     
    419419        }
    420420
    421         void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) {
     421        void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) libcfa_public {
    422422                // __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");
    423423
  • libcfa/src/concurrency/io/call.cfa.in

    r29d8c02 r74ec742  
    139139// I/O Interface
    140140//=============================================================================================
     141#pragma GCC visibility push(default)
    141142"""
    142143
  • libcfa/src/concurrency/io/setup.cfa

    r29d8c02 r74ec742  
    2626
    2727#if !defined(CFA_HAVE_LINUX_IO_URING_H)
    28         void ?{}(io_context_params & this) {}
     28        void ?{}(io_context_params & this) libcfa_public {}
    2929
    3030        void  ?{}($io_context & this, struct cluster & cl) {}
     
    6666#pragma GCC diagnostic pop
    6767
    68         void ?{}(io_context_params & this) {
     68        void ?{}(io_context_params & this) libcfa_public {
    6969                this.num_entries = 256;
    7070        }
  • libcfa/src/concurrency/kernel.cfa

    r29d8c02 r74ec742  
    389389
    390390// KERNEL_ONLY
    391 void returnToKernel() {
     391static void returnToKernel() {
    392392        /* paranoid */ verify( ! __preemption_enabled() );
    393393        coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
     
    547547}
    548548
    549 void unpark( thread$ * thrd, unpark_hint hint ) {
     549void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public {
    550550        if( !thrd ) return;
    551551
     
    558558}
    559559
    560 void park( void ) {
     560void park( void ) libcfa_public {
    561561        __disable_interrupts_checked();
    562562                /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
     
    601601
    602602// KERNEL ONLY
    603 bool force_yield( __Preemption_Reason reason ) {
     603bool force_yield( __Preemption_Reason reason ) libcfa_public {
    604604        __disable_interrupts_checked();
    605605                thread$ * thrd = kernelTLS().this_thread;
     
    849849//-----------------------------------------------------------------------------
    850850// Debug
    851 bool threading_enabled(void) __attribute__((const)) {
     851bool threading_enabled(void) __attribute__((const)) libcfa_public {
    852852        return true;
    853853}
     
    856856// Statistics
    857857#if !defined(__CFA_NO_STATISTICS__)
    858         void print_halts( processor & this ) {
     858        void print_halts( processor & this ) libcfa_public {
    859859                this.print_halts = true;
    860860        }
     
    873873        }
    874874
    875         void crawl_cluster_stats( cluster & this ) {
     875        static void crawl_cluster_stats( cluster & this ) {
    876876                // Stop the world, otherwise stats could get really messed-up
    877877                // this doesn't solve all problems but does solve many
     
    889889
    890890
    891         void print_stats_now( cluster & this, int flags ) {
     891        void print_stats_now( cluster & this, int flags ) libcfa_public {
    892892                crawl_cluster_stats( this );
    893893                __print_stats( this.stats, flags, "Cluster", this.name, (void*)&this );
  • libcfa/src/concurrency/kernel.hfa

    r29d8c02 r74ec742  
    4949
    5050// Coroutine used py processors for the 2-step context switch
    51 coroutine processorCtx_t {
     51
     52struct processorCtx_t {
     53        struct coroutine$ self;
    5254        struct processor * proc;
    5355};
  • libcfa/src/concurrency/kernel/cluster.cfa

    r29d8c02 r74ec742  
    4949
    5050// returns the maximum number of processors the RWLock support
    51 __attribute__((weak)) unsigned __max_processors() {
     51__attribute__((weak)) unsigned __max_processors() libcfa_public {
    5252        const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
    5353        if(!max_cores_s) {
  • libcfa/src/concurrency/kernel/private.hfa

    r29d8c02 r74ec742  
    109109//-----------------------------------------------------------------------------
    110110// Processor
    111 void main(processorCtx_t *);
     111void main(processorCtx_t &);
     112static inline coroutine$* get_coroutine(processorCtx_t & this) { return &this.self; }
    112113
    113114void * __create_pthread( pthread_t *, void * (*)(void *), void * );
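
Note: in kernel.hfa the processor context drops the coroutine type-generator sugar and becomes an ordinary struct embedding a coroutine$ self, so the get_coroutine accessor is now spelled out by hand here. A rough plain-C sketch of the same embed-the-descriptor-and-expose-an-accessor shape, with invented names and no claim to match the runtime's real layout:

    struct descriptor { int state; };               // stand-in for the runtime's coroutine descriptor

    struct worker_ctx {
            struct descriptor self;                 // embedded descriptor replaces the generated wrapper
            void * proc;                            // extra per-context data
    };

    static inline struct descriptor * get_descriptor( struct worker_ctx * this ) {
            return &this->self;                     // accessor the type-generator sugar used to provide
    }
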
  • libcfa/src/concurrency/kernel/startup.cfa

    r29d8c02 r74ec742  
    120120#endif
    121121
    122 cluster              * mainCluster;
     122cluster              * mainCluster libcfa_public;
    123123processor            * mainProcessor;
    124124thread$              * mainThread;
     
    169169};
    170170
    171 void ?{}( current_stack_info_t & this ) {
     171static void ?{}( current_stack_info_t & this ) {
    172172        __stack_context_t ctx;
    173173        CtxGet( ctx );
     
    209209        // Construct the processor context of the main processor
    210210        void ?{}(processorCtx_t & this, processor * proc) {
    211                 (this.__cor){ "Processor" };
    212                 this.__cor.starter = 0p;
     211                (this.self){ "Processor" };
     212                this.self.starter = 0p;
    213213                this.proc = proc;
    214214        }
     
    526526// Construct the processor context of non-main processors
    527527static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
    528         (this.__cor){ info };
     528        (this.self){ info };
    529529        this.proc = proc;
    530530}
     
    578578}
    579579
    580 void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) {
     580void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) libcfa_public {
    581581        ( this.terminated ){};
    582582        ( this.runner ){};
     
    591591}
    592592
    593 void ?{}(processor & this, const char name[], cluster & _cltr) {
     593void ?{}(processor & this, const char name[], cluster & _cltr) libcfa_public {
    594594        (this){name, _cltr, 0p};
    595595}
    596596
    597597extern size_t __page_size;
    598 void ^?{}(processor & this) with( this ){
     598void ^?{}(processor & this) libcfa_public with( this ) {
    599599        /* paranoid */ verify( !__atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) );
    600600        __cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this);
     
    623623}
    624624
    625 void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) with( this ) {
     625void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) libcfa_public with( this ) {
    626626        this.name = name;
    627627        this.preemption_rate = preemption_rate;
     
    658658}
    659659
    660 void ^?{}(cluster & this) {
     660void ^?{}(cluster & this) libcfa_public {
    661661        destroy(this.io.arbiter);
    662662
  • libcfa/src/concurrency/locks.cfa

    r29d8c02 r74ec742  
    2424#include <stdlib.hfa>
    2525
     26#pragma GCC visibility push(default)
     27
    2628//-----------------------------------------------------------------------------
    2729// info_thread
     
    116118}
    117119
    118 void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
     120static void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
    119121        thread$ * t = &try_pop_front( blocked_threads );
    120122        owner = t;
     
    192194        void ^?{}( alarm_node_wrap(L) & this ) { }
    193195
    194         void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {
     196        static void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {
    195197                // This condition_variable member is called from the kernel, and therefore, cannot block, but it can spin.
    196198                lock( cond->lock __cfaabi_dbg_ctx2 );
     
    216218
    217219        // this casts the alarm node to our wrapped type since we used type erasure
    218         void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }
     220        static void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }
    219221}
    220222
    221223//-----------------------------------------------------------------------------
    222 // condition variable
     224// Synchronization Locks
    223225forall(L & | is_blocking_lock(L)) {
    224226
     227        //-----------------------------------------------------------------------------
     228        // condition variable
    225229        void ?{}( condition_variable(L) & this ){
    226230                this.lock{};
     
    231235        void ^?{}( condition_variable(L) & this ){ }
    232236
    233         void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
     237        static void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
    234238                if(&popped != 0p) {
    235239                        popped.signalled = true;
     
    276280        int counter( condition_variable(L) & this ) with(this) { return count; }
    277281
    278         size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
     282        static size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
    279283                // add info_thread to waiting queue
    280284                insert_last( blocked_threads, *i );
     
    289293
    290294        // helper for wait()'s' with no timeout
    291         void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
     295        static void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
    292296                lock( lock __cfaabi_dbg_ctx2 );
    293297                size_t recursion_count = queue_and_get_recursion(this, &i);
     
    306310
    307311        // helper for wait()'s' with a timeout
    308         void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
     312        static void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
    309313                lock( lock __cfaabi_dbg_ctx2 );
    310314                size_t recursion_count = queue_and_get_recursion(this, &info);
     
    337341        bool wait( condition_variable(L) & this, L & l, Duration duration                 ) with(this) { WAIT_TIME( 0   , &l , duration ) }
    338342        bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration ) with(this) { WAIT_TIME( info, &l , duration ) }
     343
     344        //-----------------------------------------------------------------------------
     345        // fast_cond_var
     346        void  ?{}( fast_cond_var(L) & this ){
     347                this.blocked_threads{};
     348                #ifdef __CFA_DEBUG__
     349                this.lock_used = 0p;
     350                #endif
     351        }
     352        void ^?{}( fast_cond_var(L) & this ){ }
     353
     354        bool notify_one( fast_cond_var(L) & this ) with(this) {
     355                bool ret = ! blocked_threads`isEmpty;
     356                if ( ret ) {
     357                        info_thread(L) & popped = try_pop_front( blocked_threads );
     358                        on_notify(*popped.lock, popped.t);
     359                }
     360                return ret;
     361        }
     362        bool notify_all( fast_cond_var(L) & this ) with(this) {
     363                bool ret = ! blocked_threads`isEmpty;
     364                while( ! blocked_threads`isEmpty ) {
     365                        info_thread(L) & popped = try_pop_front( blocked_threads );
     366                        on_notify(*popped.lock, popped.t);
     367                }
     368                return ret;
     369        }
     370
     371        uintptr_t front( fast_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty ? NULL : blocked_threads`first.info; }
     372        bool empty ( fast_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty; }
     373
     374        void wait( fast_cond_var(L) & this, L & l ) {
     375                wait( this, l, 0 );
     376        }
     377
     378        void wait( fast_cond_var(L) & this, L & l, uintptr_t info ) with(this) {
     379                // brand cond lock with lock
     380                #ifdef __CFA_DEBUG__
     381                        if ( lock_used == 0p ) lock_used = &l;
     382                        else { assert(lock_used == &l); }
     383                #endif
     384                info_thread( L ) i = { active_thread(), info, &l };
     385                insert_last( blocked_threads, i );
     386                size_t recursion_count = on_wait( *i.lock );
     387                park( );
     388                on_wakeup(*i.lock, recursion_count);
     389        }
    339390}
    340391
  • libcfa/src/concurrency/locks.hfa

    r29d8c02 r74ec742  
    7373static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
    7474
     75//-----------------------------------------------------------------------------
     76// MCS Lock
    7577struct mcs_node {
    7678        mcs_node * volatile next;
     
    98100}
    99101
     102//-----------------------------------------------------------------------------
     103// Linear backoff Spinlock
    100104struct linear_backoff_then_block_lock {
    101105        // Spin lock used for mutual exclusion
     
    199203
    200204//-----------------------------------------------------------------------------
     205// Fast Block Lock
     206
     207// High efficiency minimal blocking lock
     208// - No reacquire for cond var
     209// - No recursive acquisition
     210// - No ownership
     211struct fast_block_lock {
     212        // Spin lock used for mutual exclusion
     213        __spinlock_t lock;
     214
     215        // List of blocked threads
     216        dlist( thread$ ) blocked_threads;
     217
     218        bool held:1;
     219};
     220
     221static inline void  ?{}( fast_block_lock & this ) with(this) {
     222        lock{};
     223        blocked_threads{};
     224        held = false;
     225}
     226static inline void ^?{}( fast_block_lock & this ) {}
     227static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
     228static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;
     229
     230// if this is called recursively IT WILL DEADLOCK!!!!!
     231static inline void lock(fast_block_lock & this) with(this) {
     232        lock( lock __cfaabi_dbg_ctx2 );
     233        if (held) {
     234                insert_last( blocked_threads, *active_thread() );
     235                unlock( lock );
     236                park( );
     237                return;
     238        }
     239        held = true;
     240        unlock( lock );
     241}
     242
     243static inline void unlock(fast_block_lock & this) with(this) {
     244        lock( lock __cfaabi_dbg_ctx2 );
     245        /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
     246        thread$ * t = &try_pop_front( blocked_threads );
     247        held = ( t ? true : false );
     248        unpark( t );
     249        unlock( lock );
     250}
     251
     252static inline void on_notify(fast_block_lock & this, struct thread$ * t ) { unpark(t); }
     253static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
     254static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
     255
     256//-----------------------------------------------------------------------------
    201257// is_blocking_lock
    202258trait is_blocking_lock(L & | sized(L)) {
     
    226282// Synchronization Locks
    227283forall(L & | is_blocking_lock(L)) {
     284
     285        //-----------------------------------------------------------------------------
     286        // condition_variable
     287
     288        // The multi-tool condition variable
     289        // - can pass timeouts to wait for either a signal or timeout
     290        // - can wait without passing a lock
     291        // - can have waiters reacquire different locks while waiting on the same cond var
     292        // - has shadow queue
     293        // - can be signalled outside of critical sections with no locks held
    228294        struct condition_variable {
    229295                // Spin lock used for mutual exclusion
     
    258324        bool wait( condition_variable(L) & this, L & l, Duration duration );
    259325        bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
    260 }
     326
     327        //-----------------------------------------------------------------------------
     328        // fast_cond_var
     329
     330        // The trimmed and slim condition variable
     331        // - no internal lock so you must hold a lock while using this cond var
     332        // - signalling without holding branded lock is UNSAFE!
     333        // - only allows usage of one lock, cond var is branded after usage
     334        struct fast_cond_var {
     335                // List of blocked threads
     336                dlist( info_thread(L) ) blocked_threads;
     337
     338                #ifdef __CFA_DEBUG__
     339                L * lock_used;
     340                #endif
     341        };
     342
     343
     344        void  ?{}( fast_cond_var(L) & this );
     345        void ^?{}( fast_cond_var(L) & this );
     346
     347        bool notify_one( fast_cond_var(L) & this );
     348        bool notify_all( fast_cond_var(L) & this );
     349
     350        uintptr_t front( fast_cond_var(L) & this );
     351
     352        bool empty  ( fast_cond_var(L) & this );
     353
     354        void wait( fast_cond_var(L) & this, L & l );
     355        void wait( fast_cond_var(L) & this, L & l, uintptr_t info );
     356}
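
Note: the new fast_cond_var drops the general condition variable's internal lock in favour of a branding rule: the first lock used with the variable becomes its lock, and both waiting and notifying must happen while holding it (the header comment above marks signalling without it as unsafe). For illustration only, the same discipline expressed with POSIX primitives rather than the CFA runtime's park/unpark:

    /* pthread analogue, for illustration only: both the waiter and the notifier
     * operate under the single mutex the condition variable is tied to, mirroring
     * the "hold the branded lock" rule documented for fast_cond_var. */
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
    static bool ready = false;

    void producer( void ) {
            pthread_mutex_lock( &m );
            ready = true;
            pthread_cond_signal( &c );              // signalled while holding the lock
            pthread_mutex_unlock( &m );
    }

    void consumer( void ) {
            pthread_mutex_lock( &m );
            while ( ! ready )                       // guard against spurious wakeups
                    pthread_cond_wait( &c, &m );    // atomically releases and reacquires m
            pthread_mutex_unlock( &m );
    }
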
  • libcfa/src/concurrency/monitor.cfa

    r29d8c02 r74ec742  
    4444static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
    4545
     46static inline void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
     47static inline void ?{}(__condition_criterion_t & this );
     48static inline void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
     49
    4650static inline void init     ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    4751static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     
    243247
    244248// Leave single monitor
    245 void __leave( monitor$ * this ) {
     249static void __leave( monitor$ * this ) {
    246250        // Lock the monitor spinlock
    247251        lock( this->lock __cfaabi_dbg_ctx2 );
     
    278282
    279283// Leave single monitor for the last time
    280 void __dtor_leave( monitor$ * this, bool join ) {
     284static void __dtor_leave( monitor$ * this, bool join ) {
    281285        __cfaabi_dbg_debug_do(
    282286                if( active_thread() != this->owner ) {
     
    344348// Ctor for monitor guard
    345349// Sorts monitors before entering
    346 void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) {
     350void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) libcfa_public {
    347351        thread$ * thrd = active_thread();
    348352
     
    369373}
    370374
    371 void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) {
     375void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) libcfa_public {
    372376        this{ m, count, 0p };
    373377}
     
    375379
    376380// Dtor for monitor guard
    377 void ^?{}( monitor_guard_t & this ) {
     381void ^?{}( monitor_guard_t & this ) libcfa_public {
    378382        // __cfaabi_dbg_print_safe( "MGUARD : leaving %d\n", this.count);
    379383
     
    389393// Ctor for monitor guard
    390394// Sorts monitors before entering
    391 void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) {
     395void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) libcfa_public {
    392396        // optimization
    393397        thread$ * thrd = active_thread();
     
    409413
    410414// Dtor for monitor guard
    411 void ^?{}( monitor_dtor_guard_t & this ) {
     415void ^?{}( monitor_dtor_guard_t & this ) libcfa_public {
    412416        // Leave the monitors in order
    413417        __dtor_leave( this.m, this.join );
     
    419423//-----------------------------------------------------------------------------
    420424// Internal scheduling types
    421 void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
     425static void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
    422426        this.waiting_thread = waiting_thread;
    423427        this.count = count;
     
    426430}
    427431
    428 void ?{}(__condition_criterion_t & this ) with( this ) {
     432static void ?{}(__condition_criterion_t & this ) with( this ) {
    429433        ready  = false;
    430434        target = 0p;
     
    433437}
    434438
    435 void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
     439static void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
    436440        this.ready  = false;
    437441        this.target = target;
     
    442446//-----------------------------------------------------------------------------
    443447// Internal scheduling
    444 void wait( condition & this, uintptr_t user_info = 0 ) {
     448void wait( condition & this, uintptr_t user_info = 0 ) libcfa_public {
    445449        brand_condition( this );
    446450
     
    496500}
    497501
    498 bool signal( condition & this ) {
     502bool signal( condition & this ) libcfa_public {
    499503        if( is_empty( this ) ) { return false; }
    500504
     
    538542}
    539543
    540 bool signal_block( condition & this ) {
     544bool signal_block( condition & this ) libcfa_public {
    541545        if( !this.blocked.head ) { return false; }
    542546
     
    586590
    587591// Access the user_info of the thread waiting at the front of the queue
    588 uintptr_t front( condition & this ) {
     592uintptr_t front( condition & this ) libcfa_public {
    589593        verifyf( !is_empty(this),
    590594                "Attempt to access user data on an empty condition.\n"
     
    608612//              setup mask
    609613//              block
    610 void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {
     614void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) libcfa_public {
    611615        // This statment doesn't have a contiguous list of monitors...
    612616        // Create one!
     
    994998// Can't be accepted since a mutex stmt is effectively an anonymous routine
    995999// Thus we do not need a monitor group
    996 void lock( monitor$ * this ) {
     1000void lock( monitor$ * this ) libcfa_public {
    9971001        thread$ * thrd = active_thread();
    9981002
     
    10461050// Leave routine for mutex stmt
    10471051// Is just a wrapper around __leave for the is_lock trait to see
    1048 void unlock( monitor$ * this ) { __leave( this ); }
     1052void unlock( monitor$ * this ) libcfa_public { __leave( this ); }
    10491053
    10501054// Local Variables: //
  • libcfa/src/concurrency/monitor.hfa

    r29d8c02 r74ec742  
    119119}
    120120
    121 void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
    122 void ?{}(__condition_criterion_t & this );
    123 void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
     121// void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
     122// void ?{}(__condition_criterion_t & this );
     123// void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
    124124
    125125struct condition {
  • libcfa/src/concurrency/preemption.cfa

    r29d8c02 r74ec742  
    3838#endif
    3939
    40 __attribute__((weak)) Duration default_preemption() {
     40__attribute__((weak)) Duration default_preemption() libcfa_public {
    4141        const char * preempt_rate_s = getenv("CFA_DEFAULT_PREEMPTION");
    4242        if(!preempt_rate_s) {
     
    238238//----------
    239239// special case for preemption since used often
    240 __attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() {
     240__attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() libcfa_public {
    241241        // create a assembler label before
    242242        // marked as clobber all to avoid movement
     
    276276// Get data from the TLS block
    277277// struct asm_region __cfaasm_get;
    278 uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__)); //no inline to avoid problems
     278uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__, visibility("default"))); //no inline to avoid problems
    279279uintptr_t __cfatls_get( unsigned long int offset ) {
    280280        // create a assembler label before
     
    295295extern "C" {
    296296        // Disable interrupts by incrementing the counter
    297         void disable_interrupts() {
     297        __attribute__((__noinline__, visibility("default"))) void disable_interrupts() libcfa_public {
    298298                // create a assembler label before
    299299                // marked as clobber all to avoid movement
     
    326326        // Enable interrupts by decrementing the counter
    327327        // If counter reaches 0, execute any pending __cfactx_switch
    328         void enable_interrupts( bool poll ) {
     328        void enable_interrupts( bool poll ) libcfa_public {
    329329                // Cache the processor now since interrupts can start happening after the atomic store
    330330                processor   * proc = __cfaabi_tls.this_processor;
     
    362362//-----------------------------------------------------------------------------
    363363// Kernel Signal Debug
    364 void __cfaabi_check_preemption() {
     364void __cfaabi_check_preemption() libcfa_public {
    365365        bool ready = __preemption_enabled();
    366366        if(!ready) { abort("Preemption should be ready"); }
  • libcfa/src/concurrency/ready_subqueue.hfa

    r29d8c02 r74ec742  
    8383        /* paranoid */ verify( node->link.ts   != 0  );
    8484        /* paranoid */ verify( this.anchor.ts  != 0  );
     85        /* paranoid */ verify( (this.anchor.ts  == MAX) == is_empty );
    8586        return [node, this.anchor.ts];
    8687}
     
    9394// Return the timestamp
    9495static inline unsigned long long ts(__intrusive_lane_t & this) {
    95         // Cannot verify here since it may not be locked
     96        // Cannot verify 'emptiness' here since it may not be locked
    9697        /* paranoid */ verify(this.anchor.ts != 0);
    9798        return this.anchor.ts;
  • libcfa/src/concurrency/thread.cfa

    r29d8c02 r74ec742  
    2626
    2727extern uint32_t __global_random_seed, __global_random_prime, __global_random_mask;
     28
     29#pragma GCC visibility push(default)
    2830
    2931//-----------------------------------------------------------------------------
  • libcfa/src/containers/maybe.cfa

    r29d8c02 r74ec742  
    1717#include <assert.h>
    1818
     19#pragma GCC visibility push(default)
    1920
    2021forall(T)
  • libcfa/src/containers/result.cfa

    r29d8c02 r74ec742  
    1717#include <assert.h>
    1818
     19#pragma GCC visibility push(default)
    1920
    2021forall(T, E)
  • libcfa/src/containers/string.cfa

    r29d8c02 r74ec742  
    1818#include <stdlib.hfa>
    1919
     20#pragma GCC visibility push(default)
    2021
    2122/*
  • libcfa/src/containers/string_sharectx.hfa

    r29d8c02 r74ec742  
    1616#pragma once
    1717
     18#pragma GCC visibility push(default)
     19
    1820//######################### String Sharing Context #########################
    1921
    2022struct VbyteHeap;
    2123
    22 // A string_sharectx 
     24// A string_sharectx
    2325//
    2426// Usage:
  • libcfa/src/containers/vector.cfa

    r29d8c02 r74ec742  
    1818#include <stdlib.hfa>
    1919
     20#pragma GCC visibility push(default)
     21
    2022forall(T, allocator_t | allocator_c(T, allocator_t))
    21 void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other);
     23static void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other);
    2224
    2325//------------------------------------------------------------------------------
     
    8385
    8486forall(T, allocator_t | allocator_c(T, allocator_t))
    85 void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other)
     87static void copy_internal(vector(T, allocator_t)* this, vector(T, allocator_t)* other)
    8688{
    8789        this->size = other->size;
  • libcfa/src/device/cpu.cfa

    r29d8c02 r74ec742  
    3131}
    3232
     33#include "bits/defs.hfa"
    3334#include "algorithms/range_iterator.hfa"
    3435
     
    456457}
    457458
    458 cpu_info_t cpu_info;
     459libcfa_public cpu_info_t cpu_info;
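
Note: the visibility marker also applies to data: cpu_info here, like mainCluster in startup.cfa, is tagged so it stays in the library's dynamic symbol table. In plain C the attribute attaches to a variable the same way (invented names below):

    __attribute__((visibility("default"))) int exported_counter;   // stays exported
    int internal_counter;        // hidden when the file is built with -fvisibility=hidden
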
  • libcfa/src/exception.c

    r29d8c02 r74ec742  
    2727#include "stdhdr/assert.h"
    2828#include "virtual.h"
     29
     30#pragma GCC visibility push(default)
     31
    2932#include "lsda.h"
    3033
     
    261264#else // defined( __ARM_ARCH )
    262265        // The return code from _Unwind_RaiseException seems to be corrupt on ARM at end of stack.
    263         // This workaround tries to keep default exception handling working. 
     266        // This workaround tries to keep default exception handling working.
    264267        if ( ret == _URC_FATAL_PHASE1_ERROR || ret == _URC_FATAL_PHASE2_ERROR ) {
    265268#endif
  • libcfa/src/fstream.cfa

    r29d8c02 r74ec742  
    2222#include <assert.h>
    2323#include <errno.h>                                                                              // errno
     24
     25#pragma GCC visibility push(default)
    2426
    2527// *********************************** ofstream ***********************************
     
    118120                // abort | IO_MSG "open output file \"" | name | "\"" | nl | strerror( errno );
    119121        } // if
    120         (os){ file };                                                                           // initialize 
     122        (os){ file };                                                                           // initialize
    121123} // open
    122124
     
    157159        va_list args;
    158160        va_start( args, format );
    159                
     161
    160162        int len;
    161163    for ( cnt; 10 ) {
     
    241243                // abort | IO_MSG "open input file \"" | name | "\"" | nl | strerror( errno );
    242244        } // if
    243         (is){ file };                                                                           // initialize 
     245        (is){ file };                                                                           // initialize
    244246} // open
    245247
  • libcfa/src/fstream.hfa

    r29d8c02 r74ec742  
    1818#include "bits/weakso_locks.hfa"                                                // mutex_lock
    1919#include "iostream.hfa"
    20 #include <exception.hfa>
    2120
    2221
  • libcfa/src/heap.cfa

    r29d8c02 r74ec742  
    1010// Created On       : Tue Dec 19 21:58:35 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Mon Apr 25 18:51:36 2022
    13 // Update Count     : 1147
     12// Last Modified On : Fri Apr 29 19:05:03 2022
     13// Update Count     : 1167
    1414//
    1515
     
    3636static bool traceHeap = false;
    3737
    38 inline bool traceHeap() { return traceHeap; }
    39 
    40 bool traceHeapOn() {
     38inline bool traceHeap() libcfa_public { return traceHeap; }
     39
     40bool traceHeapOn() libcfa_public {
    4141        bool temp = traceHeap;
    4242        traceHeap = true;
     
    4444} // traceHeapOn
    4545
    46 bool traceHeapOff() {
     46bool traceHeapOff() libcfa_public {
    4747        bool temp = traceHeap;
    4848        traceHeap = false;
     
    5050} // traceHeapOff
    5151
    52 bool traceHeapTerm() { return false; }
     52bool traceHeapTerm() libcfa_public { return false; }
    5353
    5454
    5555static bool prtFree = false;
    5656
    57 bool prtFree() {
     57static bool prtFree() {
    5858        return prtFree;
    5959} // prtFree
    6060
    61 bool prtFreeOn() {
     61static bool prtFreeOn() {
    6262        bool temp = prtFree;
    6363        prtFree = true;
     
    6565} // prtFreeOn
    6666
    67 bool prtFreeOff() {
     67static bool prtFreeOff() {
    6868        bool temp = prtFree;
    6969        prtFree = false;
     
    8787
    8888
    89 #ifdef __CFA_DEBUG__
    90 static size_t allocUnfreed;                                                             // running total of allocations minus frees
    91 
    92 static void prtUnfreed() {
    93         if ( allocUnfreed != 0 ) {
    94                 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
    95                 char helpText[512];
    96                 int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %zu(0x%zx) bytes of storage allocated but not freed.\n"
    97                                                         "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
    98                                                         (long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid
    99                 __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
    100         } // if
    101 } // prtUnfreed
    102 
    103 extern int cfa_main_returned;                                                   // from interpose.cfa
    104 extern "C" {
    105         void heapAppStart() {                                                           // called by __cfaabi_appready_startup
    106                 allocUnfreed = 0;
    107         } // heapAppStart
    108 
    109         void heapAppStop() {                                                            // called by __cfaabi_appready_startdown
    110                 fclose( stdin ); fclose( stdout );
    111                 if ( cfa_main_returned ) prtUnfreed();                  // do not check unfreed storage if exit called
    112         } // heapAppStop
    113 } // extern "C"
    114 #endif // __CFA_DEBUG__
    115 
    116 
    117 // statically allocated variables => zero filled.
    118 static size_t heapExpand;                                                               // sbrk advance
    119 static size_t mmapStart;                                                                // cross over point for mmap
    120 static unsigned int maxBucketsUsed;                                             // maximum number of buckets in use
    121 // extern visibility, used by runtime kernel
    122 size_t __page_size;                                                                             // architecture pagesize
    123 int __map_prot;                                                                                 // common mmap/mprotect protection
    124 
    125 
    126 #define SPINLOCK 0
    127 #define LOCKFREE 1
    128 #define BUCKETLOCK SPINLOCK
    129 #if BUCKETLOCK == SPINLOCK
    130 #elif BUCKETLOCK == LOCKFREE
    131 #include <stackLockFree.hfa>
    132 #else
    133         #error undefined lock type for bucket lock
    134 #endif // LOCKFREE
    135 
    136 // Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage.
    137 // Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined.
    138 enum { NoBucketSizes = 91 };                                                    // number of buckets sizes
    139 
    140 struct Heap {
    141         struct Storage {
    142                 struct Header {                                                                 // header
    143                         union Kind {
    144                                 struct RealHeader {
    145                                         union {
    146                                                 struct {                                                // 4-byte word => 8-byte header, 8-byte word => 16-byte header
    147                                                         union {
    148                                                                 // 2nd low-order bit => zero filled, 3rd low-order bit => mmapped
    149                                                                 // FreeHeader * home;           // allocated block points back to home locations (must overlay alignment)
    150                                                                 void * home;                    // allocated block points back to home locations (must overlay alignment)
    151                                                                 size_t blockSize;               // size for munmap (must overlay alignment)
    152                                                                 #if BUCKETLOCK == SPINLOCK
    153                                                                 Storage * next;                 // freed block points to next freed block of same size
    154                                                                 #endif // SPINLOCK
    155                                                         };
    156                                                         size_t size;                            // allocation size in bytes
    157                                                 };
    158                                                 #if BUCKETLOCK == LOCKFREE
    159                                                 Link(Storage) next;                             // freed block points next freed block of same size (double-wide)
    160                                                 #endif // LOCKFREE
    161                                         };
    162                                 } real; // RealHeader
    163 
    164                                 struct FakeHeader {
    165                                         uintptr_t alignment;                            // 1st low-order bit => fake header & alignment
    166                                         uintptr_t offset;
    167                                 } fake; // FakeHeader
    168                         } kind; // Kind
    169                 } header; // Header
    170 
    171                 char pad[libAlign() - sizeof( Header )];
    172                 char data[0];                                                                   // storage
    173         }; // Storage
    174 
    175         static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" );
    176 
    177         struct FreeHeader {
    178                 #if BUCKETLOCK == SPINLOCK
    179                 __spinlock_t lock;                                                              // must be first field for alignment
    180                 Storage * freeList;
    181                 #else
    182                 StackLF(Storage) freeList;
    183                 #endif // BUCKETLOCK
    184                 size_t blockSize;                                                               // size of allocations on this list
    185         }; // FreeHeader
    186 
    187         // must be first fields for alignment
    188         __spinlock_t extlock;                                                           // protects allocation-buffer extension
    189         FreeHeader freeLists[NoBucketSizes];                            // buckets for different allocation sizes
    190 
    191         void * heapBegin;                                                                       // start of heap
    192         void * heapEnd;                                                                         // logical end of heap
    193         size_t heapRemaining;                                                           // amount of storage not allocated in the current chunk
    194 }; // Heap
    195 
    196 #if BUCKETLOCK == LOCKFREE
    197 static inline {
    198         Link(Heap.Storage) * ?`next( Heap.Storage * this ) { return &this->header.kind.real.next; }
    199         void ?{}( Heap.FreeHeader & ) {}
    200         void ^?{}( Heap.FreeHeader & ) {}
    201 } // distribution
    202 #endif // LOCKFREE
    203 
    204 static inline size_t getKey( const Heap.FreeHeader & freeheader ) { return freeheader.blockSize; }
    205 
    206 
    207 #ifdef FASTLOOKUP
    208 enum { LookupSizes = 65_536 + sizeof(Heap.Storage) }; // number of fast lookup sizes
    209 static unsigned char lookup[LookupSizes];                               // O(1) lookup for small sizes
    210 #endif // FASTLOOKUP
    211 
    212 static const off_t mmapFd = -1;                                                 // fake or actual fd for anonymous file
    213 #ifdef __CFA_DEBUG__
    214 static bool heapBoot = 0;                                                               // detect recursion during boot
    215 #endif // __CFA_DEBUG__
    216 
    217 
    218 // Size of array must harmonize with NoBucketSizes and individual bucket sizes must be multiple of 16.
    219 // Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size.
    220 // malloc(0) returns 0p, so no bucket is necessary for 0 bytes returning an address that can be freed.
    221 static const unsigned int bucketSizes[] @= {                    // different bucket sizes
    222         16 + sizeof(Heap.Storage), 32 + sizeof(Heap.Storage), 48 + sizeof(Heap.Storage), 64 + sizeof(Heap.Storage), // 4
    223         96 + sizeof(Heap.Storage), 112 + sizeof(Heap.Storage), 128 + sizeof(Heap.Storage), // 3
    224         160, 192, 224, 256 + sizeof(Heap.Storage), // 4
    225         320, 384, 448, 512 + sizeof(Heap.Storage), // 4
    226         640, 768, 896, 1_024 + sizeof(Heap.Storage), // 4
    227         1_536, 2_048 + sizeof(Heap.Storage), // 2
    228         2_560, 3_072, 3_584, 4_096 + sizeof(Heap.Storage), // 4
    229         6_144, 8_192 + sizeof(Heap.Storage), // 2
    230         9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap.Storage), // 8
    231         18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap.Storage), // 8
    232         36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap.Storage), // 8
    233         73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap.Storage), // 8
    234         147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap.Storage), // 8
    235         294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap.Storage), // 8
    236         655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap.Storage), // 4
    237         1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap.Storage), // 8
    238         2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap.Storage), // 4
    239 };
    240 
    241 static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" );
    242 
    243 // The constructor for heapManager is called explicitly in memory_startup.
    244 static Heap heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing
    245 
    246 
    247 //####################### Memory Allocation Routines Helpers ####################
     89//####################### Heap Statistics ####################
    24890
    24991
     
    307149        return lhs;
    308150} // ?+=?
    309 
     151#endif // __STATISTICS__
     152
     153
     154#define SPINLOCK 0
     155#define LOCKFREE 1
     156#define BUCKETLOCK SPINLOCK
     157#if BUCKETLOCK == SPINLOCK
     158#elif BUCKETLOCK == LOCKFREE
     159#include <stackLockFree.hfa>
     160#else
     161        #error undefined lock type for bucket lock
     162#endif // LOCKFREE
     163
     164// Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage.
     165// Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined.
      166enum { NoBucketSizes = 91 };                                                    // number of bucket sizes
     167
     168struct Heap {
     169        struct Storage {
     170                struct Header {                                                                 // header
     171                        union Kind {
     172                                struct RealHeader {
     173                                        union {
     174                                                struct {                                                // 4-byte word => 8-byte header, 8-byte word => 16-byte header
     175                                                        union {
     176                                                                // 2nd low-order bit => zero filled, 3rd low-order bit => mmapped
     177                                                                // FreeHeader * home;           // allocated block points back to home locations (must overlay alignment)
     178                                                                void * home;                    // allocated block points back to home locations (must overlay alignment)
     179                                                                size_t blockSize;               // size for munmap (must overlay alignment)
     180                                                                #if BUCKETLOCK == SPINLOCK
     181                                                                Storage * next;                 // freed block points to next freed block of same size
     182                                                                #endif // SPINLOCK
     183                                                        };
     184                                                        size_t size;                            // allocation size in bytes
     185                                                };
     186                                                #if BUCKETLOCK == LOCKFREE
      187                                                Link(Storage) next;                             // freed block points to next freed block of same size (double-wide)
     188                                                #endif // LOCKFREE
     189                                        };
     190                                } real; // RealHeader
     191
     192                                struct FakeHeader {
     193                                        uintptr_t alignment;                            // 1st low-order bit => fake header & alignment
     194                                        uintptr_t offset;
     195                                } fake; // FakeHeader
     196                        } kind; // Kind
     197                } header; // Header
     198
     199                char pad[libAlign() - sizeof( Header )];
     200                char data[0];                                                                   // storage
     201        }; // Storage
     202
     203        static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" );
     204
     205        struct FreeHeader {
     206                size_t blockSize __attribute__(( aligned (8) )); // size of allocations on this list
     207                #if BUCKETLOCK == SPINLOCK
     208                __spinlock_t lock;
     209                Storage * freeList;
     210                #else
     211                StackLF(Storage) freeList;
     212                #endif // BUCKETLOCK
     213        } __attribute__(( aligned (8) )); // FreeHeader
     214
     215        FreeHeader freeLists[NoBucketSizes];                            // buckets for different allocation sizes
     216
     217        __spinlock_t extlock;                                                           // protects allocation-buffer extension
     218        void * heapBegin;                                                                       // start of heap
     219        void * heapEnd;                                                                         // logical end of heap
     220        size_t heapRemaining;                                                           // amount of storage not allocated in the current chunk
     221}; // Heap
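Editor's note: the header comments above describe packing status into the low-order bits of an aligned header word -- the 1st bit marks a fake (alignment) header, the 2nd zero-filled storage, the 3rd an mmapped block -- with the home pointer or block size overlaying the same word. A minimal C sketch of that encoding; the names (FAKE_BIT, ZERO_BIT, MMAP_BIT, tag/untag) are illustrative only and are not libcfa code.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    enum {
        FAKE_BIT = 0x1,         // 1st low-order bit => fake header & alignment
        ZERO_BIT = 0x2,         // 2nd low-order bit => zero filled
        MMAP_BIT = 0x4,         // 3rd low-order bit => mmapped
        TAG_MASK = 0x7,
    };

    // Header values (pointers/sizes) are at least 16-byte aligned, so the
    // bottom bits are guaranteed clear and free to carry the flags.
    static uintptr_t tag( uintptr_t word, uintptr_t bits ) { return word | bits; }
    static uintptr_t untag( uintptr_t word ) { return word & ~(uintptr_t)TAG_MASK; }
    static int has( uintptr_t word, uintptr_t bit ) { return ( word & bit ) != 0; }

    int main( void ) {
        uintptr_t blockSize = 4096;                 // multiple of 16 => low bits clear
        uintptr_t word = tag( blockSize, ZERO_BIT | MMAP_BIT );
        assert( untag( word ) == 4096 );            // size recovered intact
        printf( "size %zu zero-filled %d mmapped %d fake %d\n",
                (size_t)untag( word ), has( word, ZERO_BIT ),
                has( word, MMAP_BIT ), has( word, FAKE_BIT ) );
        return 0;
    }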
     222
     223#if BUCKETLOCK == LOCKFREE
     224static inline {
     225        Link(Heap.Storage) * ?`next( Heap.Storage * this ) { return &this->header.kind.real.next; }
     226        void ?{}( Heap.FreeHeader & ) {}
     227        void ^?{}( Heap.FreeHeader & ) {}
     228} // distribution
     229#endif // LOCKFREE
     230
     231static inline size_t getKey( const Heap.FreeHeader & freeheader ) { return freeheader.blockSize; }
     232
     233
     234#ifdef FASTLOOKUP
     235enum { LookupSizes = 65_536 + sizeof(Heap.Storage) }; // number of fast lookup sizes
     236static unsigned char lookup[LookupSizes];                               // O(1) lookup for small sizes
     237#endif // FASTLOOKUP
     238
     239static const off_t mmapFd = -1;                                                 // fake or actual fd for anonymous file
     240#ifdef __CFA_DEBUG__
     241static bool heapBoot = 0;                                                               // detect recursion during boot
     242#endif // __CFA_DEBUG__
     243
     244
     245// Size of array must harmonize with NoBucketSizes and individual bucket sizes must be multiple of 16.
     246// Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size.
     247// malloc(0) returns 0p, so no bucket is necessary for 0 bytes returning an address that can be freed.
     248static const unsigned int bucketSizes[] @= {                    // different bucket sizes
     249        16 + sizeof(Heap.Storage), 32 + sizeof(Heap.Storage), 48 + sizeof(Heap.Storage), 64 + sizeof(Heap.Storage), // 4
     250        96 + sizeof(Heap.Storage), 112 + sizeof(Heap.Storage), 128 + sizeof(Heap.Storage), // 3
     251        160, 192, 224, 256 + sizeof(Heap.Storage), // 4
     252        320, 384, 448, 512 + sizeof(Heap.Storage), // 4
     253        640, 768, 896, 1_024 + sizeof(Heap.Storage), // 4
     254        1_536, 2_048 + sizeof(Heap.Storage), // 2
     255        2_560, 3_072, 3_584, 4_096 + sizeof(Heap.Storage), // 4
     256        6_144, 8_192 + sizeof(Heap.Storage), // 2
     257        9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap.Storage), // 8
     258        18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap.Storage), // 8
     259        36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap.Storage), // 8
     260        73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap.Storage), // 8
     261        147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap.Storage), // 8
     262        294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap.Storage), // 8
     263        655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap.Storage), // 4
     264        1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap.Storage), // 8
     265        2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap.Storage), // 4
     266};
     267
     268static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" );
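Editor's note: to make the bucket machinery above concrete, an allocation request is rounded up to the smallest bucket that fits, using an O(1) byte table for small sizes (the FASTLOOKUP path) and a binary search (Bsearchl) otherwise. A standalone sketch of that selection with a much smaller, purely illustrative bucket table (not the libcfa implementation):

    #include <stddef.h>
    #include <stdio.h>

    static const unsigned int buckets[] = { 16, 32, 48, 64, 96, 128, 256, 1024, 4096 };
    enum { NBUCKETS = sizeof(buckets) / sizeof(buckets[0]), LOOKUP_MAX = 256 };
    static unsigned char lookup[LOOKUP_MAX + 1];    // O(1) size -> bucket index

    static void build_lookup( void ) {
        size_t b = 0;
        for ( size_t s = 0; s <= LOOKUP_MAX; s += 1 ) {
            while ( buckets[b] < s ) b += 1;        // buckets are sorted ascending
            lookup[s] = (unsigned char)b;
        }
    }

    static size_t bucket_index( size_t size ) {
        if ( size <= LOOKUP_MAX ) return lookup[size];  // fast path for small sizes
        size_t lo = 0, hi = NBUCKETS;                   // binary search, like Bsearchl
        while ( lo < hi ) {
            size_t mid = lo + ( hi - lo ) / 2;
            if ( buckets[mid] < size ) lo = mid + 1; else hi = mid;
        }
        return lo;                                      // first bucket >= size
    }

    int main( void ) {
        build_lookup();
        printf( "42 -> %u bytes\n", buckets[bucket_index( 42 )] );   // 48
        printf( "300 -> %u bytes\n", buckets[bucket_index( 300 )] ); // 1024
        return 0;
    }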
     269
     270// The constructor for heapManager is called explicitly in memory_startup.
     271static Heap heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing
     272
     273
     274//####################### Memory Allocation Routines Helpers ####################
     275
     276
     277#ifdef __CFA_DEBUG__
     278static size_t allocUnfreed;                                                             // running total of allocations minus frees
     279
     280static void prtUnfreed() {
     281        if ( allocUnfreed != 0 ) {
     282                // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
     283                char helpText[512];
     284                __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
     285                                                                        "CFA warning (UNIX pid:%ld) : program terminating with %zu(0x%zx) bytes of storage allocated but not freed.\n"
     286                                                                        "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n",
     287                                                                        (long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid
     288        } // if
     289} // prtUnfreed
     290
     291extern int cfa_main_returned;                                                   // from interpose.cfa
     292extern "C" {
     293        void heapAppStart() {                                                           // called by __cfaabi_appready_startup
     294                allocUnfreed = 0;
     295        } // heapAppStart
     296
     297        void heapAppStop() {                                                            // called by __cfaabi_appready_startdown
     298                fclose( stdin ); fclose( stdout );
     299                if ( cfa_main_returned ) prtUnfreed();                  // do not check unfreed storage if exit called
     300        } // heapAppStop
     301} // extern "C"
     302#endif // __CFA_DEBUG__
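Editor's note: the debug block above keeps a running allocUnfreed total that prtUnfreed() reports at shutdown when main returns. A minimal sketch of that accounting pattern; on_alloc/on_free and the variable name are illustrative, not libcfa functions.

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic size_t unfreed;                  // running total of allocations minus frees

    static void on_alloc( size_t tsize ) { atomic_fetch_add( &unfreed, tsize ); }
    static void on_free ( size_t tsize ) { atomic_fetch_sub( &unfreed, tsize ); }

    int main( void ) {
        on_alloc( 128 ); on_alloc( 64 ); on_free( 128 );
        if ( unfreed != 0 )                         // report only when something leaked
            fprintf( stderr, "warning: %zu bytes allocated but not freed\n", (size_t)unfreed );
        return 0;
    }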
     303
     304
     305#ifdef __STATISTICS__
    310306static HeapStatistics stats;                                                    // zero filled
    311307static unsigned int sbrk_calls;
     
    387383
    388384
     385// statically allocated variables => zero filled.
     386static size_t heapExpand;                                                               // sbrk advance
     387static size_t mmapStart;                                                                // cross over point for mmap
     388static unsigned int maxBucketsUsed;                                             // maximum number of buckets in use
     389// extern visibility, used by runtime kernel
     390// would be cool to remove libcfa_public but it's needed for libcfathread
     391libcfa_public size_t __page_size;                                                       // architecture pagesize
     392libcfa_public int __map_prot;                                                           // common mmap/mprotect protection
     393
     394
    389395// thunk problem
    390396size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) {
     
    490496        } else {
    491497                fakeHeader( header, alignment );
    492                 if ( unlikely( MmappedBit( header ) ) ) {
    493                         assert( addr < heapBegin || heapEnd < addr );
     498                if ( unlikely( MmappedBit( header ) ) ) {               // mmapped ?
     499                        verify( addr < heapBegin || heapEnd < addr );
    494500                        size = ClearStickyBits( header->kind.real.blockSize ); // mmap size
    495501                        return true;
     
    503509        checkHeader( header < (Heap.Storage.Header *)heapBegin || (Heap.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -)
    504510
    505         if ( freeHead < &freeLists[0] || &freeLists[NoBucketSizes] <= freeHead ) {
    506                 abort( "Attempt to %s storage %p with corrupted header.\n"
    507                            "Possible cause is duplicate free on same block or overwriting of header information.",
    508                            name, addr );
    509         } // if
     511        Heap * homeManager;
     512        if ( unlikely( freeHead == 0p || // freed and only free-list node => null link
     513                                   // freed and link points at another free block not to a bucket in the bucket array.
     514                                   freeHead < &freeLists[0] || &freeLists[NoBucketSizes] <= freeHead ) ) {
     515                abort( "**** Error **** attempt to %s storage %p with corrupted header.\n"
     516                           "Possible cause is duplicate free on same block or overwriting of header information.",
     517                           name, addr );
     518        } // if
    510519        #endif // __CFA_DEBUG__
    511520
     
    560569                sbrk_storage += increase;
    561570                #endif // __STATISTICS__
     571
    562572                #ifdef __CFA_DEBUG__
    563573                // Set new memory to garbage so subsequent uninitialized usages might fail.
     
    565575                //Memset( (char *)heapEnd + heapRemaining, increase );
    566576                #endif // __CFA_DEBUG__
     577
    567578                rem = heapRemaining + increase - size;
    568579        } // if
     
    651662        __atomic_add_fetch( &allocUnfreed, tsize, __ATOMIC_SEQ_CST );
    652663        if ( traceHeap() ) {
    653                 enum { BufferSize = 64 };
    654                 char helpText[BufferSize];
    655                 int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize );
    656                 __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
     664                char helpText[64];
     665                __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
     666                                                                        "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize ); // print debug/nodebug
    657667        } // if
    658668        #endif // __CFA_DEBUG__
     
    711721        if ( traceHeap() ) {
    712722                char helpText[64];
    713                 int len = snprintf( helpText, sizeof(helpText), "Free( %p ) size:%zu\n", addr, size );
    714                 __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug
     723                __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText),
     724                                                                        "Free( %p ) size:%zu\n", addr, size ); // print debug/nodebug
    715725        } // if
    716726        #endif // __CFA_DEBUG__
     
    718728
    719729
    720 size_t prtFree( Heap & manager ) with( manager ) {
     730static size_t prtFree( Heap & manager ) with( manager ) {
    721731        size_t total = 0;
    722732        #ifdef __STATISTICS__
     
    870880        // Allocates size bytes and returns a pointer to the allocated memory.  The contents are undefined. If size is 0,
    871881        // then malloc() returns a unique pointer value that can later be successfully passed to free().
    872         void * malloc( size_t size ) {
     882        void * malloc( size_t size ) libcfa_public {
    873883                #ifdef __STATISTICS__
    874884                if ( likely( size > 0 ) ) {
     
    885895
    886896        // Same as malloc() except size bytes is an array of dim elements each of elemSize bytes.
    887         void * aalloc( size_t dim, size_t elemSize ) {
     897        void * aalloc( size_t dim, size_t elemSize ) libcfa_public {
    888898                size_t size = dim * elemSize;
    889899                #ifdef __STATISTICS__
     
    901911
    902912        // Same as aalloc() with memory set to zero.
    903         void * calloc( size_t dim, size_t elemSize ) {
     913        void * calloc( size_t dim, size_t elemSize ) libcfa_public {
    904914                size_t size = dim * elemSize;
    905915          if ( unlikely( size == 0 ) ) {                        // 0 BYTE ALLOCATION RETURNS NULL POINTER
     
    942952        // not 0p, then the call is equivalent to free(oaddr). Unless oaddr is 0p, it must have been returned by an earlier
    943953        // call to malloc(), alloc(), calloc() or realloc(). If the area pointed to was moved, a free(oaddr) is done.
    944         void * resize( void * oaddr, size_t size ) {
     954        void * resize( void * oaddr, size_t size ) libcfa_public {
    945955                // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
    946956          if ( unlikely( size == 0 ) ) {                                        // special cases
     
    987997        // Same as resize() but the contents are unchanged in the range from the start of the region up to the minimum of
    988998        // the old and new sizes.
    989         void * realloc( void * oaddr, size_t size ) {
     999        void * realloc( void * oaddr, size_t size ) libcfa_public {
    9901000                // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
    9911001          if ( unlikely( size == 0 ) ) {                                        // special cases
     
    10511061
    10521062        // Same as realloc() except the new allocation size is large enough for an array of nelem elements of size elsize.
    1053         void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) {
     1063        void * reallocarray( void * oaddr, size_t dim, size_t elemSize ) libcfa_public {
    10541064                return realloc( oaddr, dim * elemSize );
    10551065        } // reallocarray
     
    10571067
    10581068        // Same as malloc() except the memory address is a multiple of alignment, which must be a power of two. (obsolete)
    1059         void * memalign( size_t alignment, size_t size ) {
     1069        void * memalign( size_t alignment, size_t size ) libcfa_public {
    10601070                #ifdef __STATISTICS__
    10611071                if ( likely( size > 0 ) ) {
     
    10721082
    10731083        // Same as aalloc() with memory alignment.
    1074         void * amemalign( size_t alignment, size_t dim, size_t elemSize ) {
     1084        void * amemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public {
    10751085                size_t size = dim * elemSize;
    10761086                #ifdef __STATISTICS__
     
    10881098
    10891099        // Same as calloc() with memory alignment.
    1090         void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) {
     1100        void * cmemalign( size_t alignment, size_t dim, size_t elemSize ) libcfa_public {
    10911101                size_t size = dim * elemSize;
    10921102          if ( unlikely( size ) == 0 ) {                                        // 0 BYTE ALLOCATION RETURNS NULL POINTER
     
    11271137        // Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple
    11281138        // of alignment. This requirement is universally ignored.
    1129         void * aligned_alloc( size_t alignment, size_t size ) {
     1139        void * aligned_alloc( size_t alignment, size_t size ) libcfa_public {
    11301140                return memalign( alignment, size );
    11311141        } // aligned_alloc
     
    11361146        // is 0, then posix_memalign() returns either 0p, or a unique pointer value that can later be successfully passed to
    11371147        // free(3).
    1138         int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
     1148        int posix_memalign( void ** memptr, size_t alignment, size_t size ) libcfa_public {
    11391149          if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) return EINVAL; // check alignment
    11401150                *memptr = memalign( alignment, size );
     
    11451155        // Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the
    11461156        // page size.  It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
    1147         void * valloc( size_t size ) {
     1157        void * valloc( size_t size ) libcfa_public {
    11481158                return memalign( __page_size, size );
    11491159        } // valloc
     
    11511161
    11521162        // Same as valloc but rounds size to multiple of page size.
    1153         void * pvalloc( size_t size ) {
     1163        void * pvalloc( size_t size ) libcfa_public {
    11541164                return memalign( __page_size, ceiling2( size, __page_size ) ); // round size to multiple of page size
    11551165        } // pvalloc
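Editor's note: a short usage example for the aligned-allocation entry points documented above (plain C; behaves the same against this allocator or glibc):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main( void ) {
        void * p = NULL;
        int rc = posix_memalign( &p, 64, 1000 );    // alignment: power of two >= sizeof(void *)
        if ( rc != 0 ) {                            // EINVAL for bad alignment, ENOMEM on exhaustion
            fprintf( stderr, "posix_memalign failed: %d\n", rc );
            return 1;
        }
        printf( "%p is 64-byte aligned: %d\n", p, (int)( (uintptr_t)p % 64 == 0 ) );
        free( p );                                  // aligned blocks are released with ordinary free()
        return 0;
    }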
     
    11591169        // or realloc().  Otherwise, or if free(ptr) has already been called before, undefined behaviour occurs. If ptr is
    11601170        // 0p, no operation is performed.
    1161         void free( void * addr ) {
     1171        void free( void * addr ) libcfa_public {
    11621172          if ( unlikely( addr == 0p ) ) {                                       // special case
    11631173                        #ifdef __STATISTICS__
     
    11801190
    11811191        // Returns the alignment of an allocation.
    1182         size_t malloc_alignment( void * addr ) {
     1192        size_t malloc_alignment( void * addr ) libcfa_public {
    11831193          if ( unlikely( addr == 0p ) ) return libAlign();      // minimum alignment
    11841194                Heap.Storage.Header * header = HeaderAddr( addr );
     
    11921202
    11931203        // Returns true if the allocation is zero filled, e.g., allocated by calloc().
    1194         bool malloc_zero_fill( void * addr ) {
     1204        bool malloc_zero_fill( void * addr ) libcfa_public {
    11951205          if ( unlikely( addr == 0p ) ) return false;           // null allocation is not zero fill
    11961206                Heap.Storage.Header * header = HeaderAddr( addr );
     
    12031213
    12041214        // Returns original total allocation size (not bucket size) => array size is dimension * sizeof(T).
    1205         size_t malloc_size( void * addr ) {
     1215        size_t malloc_size( void * addr ) libcfa_public {
    12061216          if ( unlikely( addr == 0p ) ) return 0;                       // null allocation has zero size
    12071217                Heap.Storage.Header * header = HeaderAddr( addr );
     
    12151225        // Returns the number of usable bytes in the block pointed to by ptr, a pointer to a block of memory allocated by
    12161226        // malloc or a related function.
    1217         size_t malloc_usable_size( void * addr ) {
     1227        size_t malloc_usable_size( void * addr ) libcfa_public {
    12181228          if ( unlikely( addr == 0p ) ) return 0;                       // null allocation has 0 size
    12191229                Heap.Storage.Header * header;
     
    12271237
    12281238        // Prints (on default standard error) statistics about memory allocated by malloc and related functions.
    1229         void malloc_stats( void ) {
     1239        void malloc_stats( void ) libcfa_public {
    12301240                #ifdef __STATISTICS__
    12311241                printStats();
     
    12361246
    12371247        // Changes the file descriptor where malloc_stats() writes statistics.
    1238         int malloc_stats_fd( int fd __attribute__(( unused )) ) {
     1248        int malloc_stats_fd( int fd __attribute__(( unused )) ) libcfa_public {
    12391249                #ifdef __STATISTICS__
    12401250                int temp = stats_fd;
     
    12501260        // The string is printed on the file stream stream.  The exported string includes information about all arenas (see
    12511261        // malloc).
    1252         int malloc_info( int options, FILE * stream __attribute__(( unused )) ) {
     1262        int malloc_info( int options, FILE * stream __attribute__(( unused )) ) libcfa_public {
    12531263          if ( options != 0 ) { errno = EINVAL; return -1; }
    12541264                #ifdef __STATISTICS__
     
    12621272        // Adjusts parameters that control the behaviour of the memory-allocation functions (see malloc). The param argument
    12631273        // specifies the parameter to be modified, and value specifies the new value for that parameter.
    1264         int mallopt( int option, int value ) {
     1274        int mallopt( int option, int value ) libcfa_public {
    12651275          if ( value < 0 ) return 0;
    12661276                choose( option ) {
     
    12761286
    12771287        // Attempt to release free memory at the top of the heap (by calling sbrk with a suitable argument).
    1278         int malloc_trim( size_t ) {
     1288        int malloc_trim( size_t ) libcfa_public {
    12791289                return 0;                                                                               // => impossible to release memory
    12801290        } // malloc_trim
     
    12851295        // structure dynamically allocated via malloc, and a pointer to that data structure is returned as the function
    12861296        // result.  (The caller must free this memory.)
    1287         void * malloc_get_state( void ) {
     1297        void * malloc_get_state( void ) libcfa_public {
    12881298                return 0p;                                                                              // unsupported
    12891299        } // malloc_get_state
     
    12921302        // Restores the state of all malloc internal bookkeeping variables to the values recorded in the opaque data
    12931303        // structure pointed to by state.
    1294         int malloc_set_state( void * ) {
     1304        int malloc_set_state( void * ) libcfa_public {
    12951305                return 0;                                                                               // unsupported
    12961306        } // malloc_set_state
     
    12981308
    12991309        // Sets the amount (bytes) to extend the heap when there is insufficient free storage to service an allocation.
    1300         __attribute__((weak)) size_t malloc_expansion() { return __CFA_DEFAULT_HEAP_EXPANSION__; }
     1310        __attribute__((weak)) size_t malloc_expansion() libcfa_public { return __CFA_DEFAULT_HEAP_EXPANSION__; }
    13011311
    13021312        // Sets the crossover point between allocations occurring in the sbrk area or separately mmapped.
    1303         __attribute__((weak)) size_t malloc_mmap_start() { return __CFA_DEFAULT_MMAP_START__; }
     1313        __attribute__((weak)) size_t malloc_mmap_start() libcfa_public { return __CFA_DEFAULT_MMAP_START__; }
    13041314
    13051315        // Amount subtracted to adjust for unfreed program storage (debug only).
    1306         __attribute__((weak)) size_t malloc_unfreed() { return __CFA_DEFAULT_HEAP_UNFREED__; }
     1316        __attribute__((weak)) size_t malloc_unfreed() libcfa_public { return __CFA_DEFAULT_HEAP_UNFREED__; }
    13071317} // extern "C"
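Editor's note: because malloc_expansion(), malloc_mmap_start() and malloc_unfreed() above are weak symbols, an application can tune the allocator simply by linking strong definitions of its own. A sketch with illustrative values:

    #include <stddef.h>

    size_t malloc_expansion( void ) {       // grow the sbrk region 8 MiB at a time
        return 8 * 1024 * 1024;
    }

    size_t malloc_mmap_start( void ) {      // mmap allocations of 512 KiB and larger
        return 512 * 1024;
    }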
    13081318
    13091319
    13101320// Must have CFA linkage to overload with C linkage realloc.
    1311 void * resize( void * oaddr, size_t nalign, size_t size ) {
     1321void * resize( void * oaddr, size_t nalign, size_t size ) libcfa_public {
    13121322        // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
    13131323  if ( unlikely( size == 0 ) ) {                                                // special cases
     
    13711381
    13721382
    1373 void * realloc( void * oaddr, size_t nalign, size_t size ) {
     1383void * realloc( void * oaddr, size_t nalign, size_t size ) libcfa_public {
    13741384        // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
    13751385  if ( unlikely( size == 0 ) ) {                                                // special cases
  • libcfa/src/interpose.cfa

    r29d8c02 r74ec742  
    3636//=============================================================================================
    3737
    38 void preload_libgcc(void) {
     38static void preload_libgcc(void) {
    3939        dlopen( "libgcc_s.so.1", RTLD_NOW );
    4040        if ( const char * error = dlerror() ) abort( "interpose_symbol : internal error pre-loading libgcc, %s\n", error );
     
    4242
    4343typedef void (* generic_fptr_t)(void);
    44 generic_fptr_t interpose_symbol( const char symbol[], const char version[] ) {
     44static generic_fptr_t interpose_symbol( const char symbol[], const char version[] ) {
    4545        const char * error;
    4646
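Editor's note: interpose_symbol() above resolves the underlying libc definition of a symbol so the runtime can wrap it. An illustrative, standalone sketch of the same technique using dlsym/dlvsym on RTLD_NEXT -- not the libcfa implementation (link with -ldl on older glibc):

    #define _GNU_SOURCE                     // RTLD_NEXT, dlvsym
    #include <dlfcn.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef void (* generic_fptr_t)( void );

    static generic_fptr_t find_next( const char symbol[], const char version[] ) {
        dlerror();                                          // clear any stale error
        generic_fptr_t fptr;
        if ( version ) {
            fptr = (generic_fptr_t)dlvsym( RTLD_NEXT, symbol, version );
        } else {
            fptr = (generic_fptr_t)dlsym( RTLD_NEXT, symbol );
        }
        const char * error = dlerror();
        if ( error ) {
            fprintf( stderr, "interpose: %s\n", error );
            abort();
        }
        return fptr;
    }

    int main( void ) {
        // Resolve the libc exit underneath any interposed definition.
        void (* real_exit)( int ) = (void (*)( int ))find_next( "exit", NULL );
        real_exit( 0 );
    }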
     
    8383//=============================================================================================
    8484
    85 void sigHandler_segv( __CFA_SIGPARMS__ );
    86 void sigHandler_ill ( __CFA_SIGPARMS__ );
    87 void sigHandler_fpe ( __CFA_SIGPARMS__ );
    88 void sigHandler_abrt( __CFA_SIGPARMS__ );
    89 void sigHandler_term( __CFA_SIGPARMS__ );
    90 
    91 struct {
     85static void sigHandler_segv( __CFA_SIGPARMS__ );
     86static void sigHandler_ill ( __CFA_SIGPARMS__ );
     87static void sigHandler_fpe ( __CFA_SIGPARMS__ );
     88static void sigHandler_abrt( __CFA_SIGPARMS__ );
     89static void sigHandler_term( __CFA_SIGPARMS__ );
     90
     91static struct {
    9292        void (* exit)( int ) __attribute__(( __noreturn__ ));
    9393        void (* abort)( void ) __attribute__(( __noreturn__ ));
    9494} __cabi_libc;
    9595
    96 int cfa_main_returned;
     96libcfa_public int cfa_main_returned;
    9797
    9898extern "C" {
     
    148148
    149149// Forward declare abort after the __typeof__ call to avoid ambiguities
    150 void exit( int status, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));
    151 void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ ));
    152 void abort( bool signalAbort, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));
    153 void __abort( bool signalAbort, const char fmt[], va_list args ) __attribute__(( __nothrow__, __leaf__, __noreturn__ ));
     150libcfa_public void exit( int status, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));
     151libcfa_public void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ ));
     152libcfa_public void abort( bool signalAbort, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ ));
     153libcfa_public void __abort( bool signalAbort, const char fmt[], va_list args ) __attribute__(( __nothrow__, __leaf__, __noreturn__ ));
    154154
    155155extern "C" {
    156         void abort( void ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) {
     156        libcfa_public void abort( void ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) {
    157157                abort( false, "%s", "" );
    158158        }
    159159
    160         void __cabi_abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )) {
     160        libcfa_public void __cabi_abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )) {
    161161                va_list argp;
    162162                va_start( argp, fmt );
     
    165165        }
    166166
    167         void exit( int status ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) {
     167        libcfa_public void exit( int status ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) {
    168168                __cabi_libc.exit( status );
    169169        }
  • libcfa/src/iostream.cfa

    r29d8c02 r74ec742  
    3232#include "bitmanip.hfa"                                                                 // high1
    3333
     34#pragma GCC visibility push(default)
    3435
    3536// *********************************** ostream ***********************************
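Editor's note: the pragma added above (and to the other translation units below) pairs with the new -fvisibility=hidden flag in Makefile.am: only symbols declared while default visibility is in force are exported. The libcfa files push for the remainder of the file; the small illustration below also shows the push/pop form (file and function names are made up):

    // vis_demo.c -- illustrative only; build: gcc -fvisibility=hidden -fPIC -shared vis_demo.c
    #pragma GCC visibility push(default)    // everything declared below is exported...

    int public_entry( int x );              // part of the shared library's ABI

    #pragma GCC visibility pop              // ...back to the hidden default

    static int helper( int x ) {            // internal: never exported
        return x * 2;
    }

    int public_entry( int x ) {             // keeps the default visibility of its declaration
        return helper( x );
    }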
  • libcfa/src/limits.cfa

    r29d8c02 r74ec742  
    2020#include <complex.h>
    2121#include "limits.hfa"
     22
     23#pragma GCC visibility push(default)
    2224
    2325// Integral Constants
  • libcfa/src/memory.cfa

    r29d8c02 r74ec742  
    1616#include "memory.hfa"
    1717#include "stdlib.hfa"
     18
     19#pragma GCC visibility push(default)
    1820
    1921// Internal data object.
  • libcfa/src/parseargs.cfa

    r29d8c02 r74ec742  
    2424#include "common.hfa"
    2525#include "limits.hfa"
     26
     27#pragma GCC visibility push(default)
    2628
    2729extern int cfa_args_argc __attribute__((weak));
     
    208210        }
    209211
     212        if(strcmp(arg, "Y") == 0) {
     213                value = true;
     214                return true;
     215        }
     216
     217        if(strcmp(arg, "y") == 0) {
     218                value = true;
     219                return true;
     220        }
     221
    210222        if(strcmp(arg, "no") == 0) {
     223                value = false;
     224                return true;
     225        }
     226
     227        if(strcmp(arg, "N") == 0) {
     228                value = false;
     229                return true;
     230        }
     231
     232        if(strcmp(arg, "n") == 0) {
    211233                value = false;
    212234                return true;
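Editor's note: the hunk above extends the boolean-option parser to accept the short spellings Y/y and N/n alongside yes/no. A standalone sketch of the same accept-set (not libcfa code; the helper name is illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool parse_yesno( const char * arg, bool * value ) {
        static const char * yes[] = { "yes", "Y", "y" };
        static const char * no [] = { "no",  "N", "n" };
        for ( size_t i = 0; i < sizeof(yes) / sizeof(yes[0]); i += 1 ) {
            if ( strcmp( arg, yes[i] ) == 0 ) { *value = true;  return true; }
            if ( strcmp( arg, no [i] ) == 0 ) { *value = false; return true; }
        }
        return false;                               // unrecognised spelling; value untouched
    }

    int main( void ) {
        bool flag = false;
        printf( "parsed: %d value: %d\n", parse_yesno( "Y", &flag ), flag );
        printf( "parsed: %d value: %d\n", parse_yesno( "maybe", &flag ), flag );
        return 0;
    }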
  • libcfa/src/parseconfig.cfa

    r29d8c02 r74ec742  
    1414
    1515
     16#pragma GCC visibility push(default)
     17
    1618// *********************************** exceptions ***********************************
    1719
    1820
    1921// TODO: Add names of missing config entries to exception (see further below)
    20 static vtable(Missing_Config_Entries) Missing_Config_Entries_vt;
     22vtable(Missing_Config_Entries) Missing_Config_Entries_vt;
    2123
    2224[ void ] ?{}( & Missing_Config_Entries this, unsigned int num_missing ) {
     
    3133
    3234
    33 static vtable(Parse_Failure) Parse_Failure_vt;
     35vtable(Parse_Failure) Parse_Failure_vt;
    3436
    3537[ void ] ?{}( & Parse_Failure this, [] char failed_key, [] char failed_value ) {
     
    5355
    5456
    55 static vtable(Validation_Failure) Validation_Failure_vt;
     57vtable(Validation_Failure) Validation_Failure_vt;
    5658
    5759[ void ] ?{}( & Validation_Failure this, [] char failed_key, [] char failed_value ) {
     
    110112
    111113
    112 [ bool ] comments( & ifstream in, [] char name ) {
     114static [ bool ] comments( & ifstream in, [] char name ) {
    113115        while () {
    114116                in | name;
  • libcfa/src/rational.cfa

    r29d8c02 r74ec742  
    1717#include "fstream.hfa"
    1818#include "stdlib.hfa"
     19
     20#pragma GCC visibility push(default)
    1921
    2022forall( T | Arithmetic( T ) ) {
  • libcfa/src/startup.cfa

    r29d8c02 r74ec742  
    4141        } // __cfaabi_appready_shutdown
    4242
    43         void disable_interrupts() __attribute__(( weak )) {}
    44         void enable_interrupts() __attribute__(( weak )) {}
     43        void disable_interrupts() __attribute__(( weak )) libcfa_public {}
     44        void enable_interrupts() __attribute__(( weak )) libcfa_public {}
    4545
    4646
     
    6464struct __spinlock_t;
    6565extern "C" {
    66         void __cfaabi_dbg_record_lock(struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) {}
     66        void __cfaabi_dbg_record_lock(struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) libcfa_public {}
    6767}
    6868
  • libcfa/src/stdlib.cfa

    r29d8c02 r74ec742  
    2525#include <complex.h>                                                                    // _Complex_I
    2626#include <assert.h>
     27
     28#pragma GCC visibility push(default)
    2729
    2830//---------------------------------------
     
    225227#define GENERATOR LCG
    226228
    227 uint32_t __global_random_seed;                                                  // sequential/concurrent
    228 uint32_t __global_random_state;                                                 // sequential only
     229// would be cool to make hidden but it's needed for libcfathread
     230__attribute__((visibility("default"))) uint32_t __global_random_seed;                                                   // sequential/concurrent
     231__attribute__((visibility("hidden"))) uint32_t __global_random_state;                                                   // sequential only
    229232
    230233void set_seed( PRNG & prng, uint32_t seed_ ) with( prng ) { state = seed = seed_; GENERATOR( state ); } // set seed
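Editor's note: for context on the seed/state pair above, the sequential generator keeps one global state that set_seed() initialises and each draw advances. A minimal LCG sketch; the constants are the classic Numerical Recipes ones, chosen only for illustration and not necessarily what libcfa's LCG uses:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t seed, state;                    // mirrors __global_random_seed/state

    static void set_seed_sketch( uint32_t s ) { state = seed = s; }

    static uint32_t lcg_next( void ) {              // advance and return the state
        state = state * 1664525u + 1013904223u;
        return state;
    }

    int main( void ) {
        set_seed_sketch( 1009 );
        printf( "%u %u %u\n", lcg_next(), lcg_next(), lcg_next() );
        return 0;
    }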
  • libcfa/src/strstream.cfa

    r29d8c02 r74ec742  
    1 // 
     1//
    22// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
    3 // 
     3//
    44// The contents of this file are covered under the licence agreement in the
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // strstream.cfa -- 
    8 // 
     7// strstream.cfa --
     8//
    99// Author           : Peter A. Buhr
    1010// Created On       : Thu Apr 22 22:24:35 2021
     
    1212// Last Modified On : Sun Oct 10 16:13:20 2021
    1313// Update Count     : 101
    14 // 
     14//
    1515
    1616#include "strstream.hfa"
     
    2424#include <unistd.h>                                                                             // sbrk, sysconf
    2525
     26#pragma GCC visibility push(default)
    2627
    2728// *********************************** strstream ***********************************
  • libcfa/src/time.cfa

    r29d8c02 r74ec742  
    1818#include <stdio.h>                                                                              // snprintf
    1919#include <assert.h>
     20
     21#pragma GCC visibility push(default)
    2022
    2123static char * nanomsd( long int ns, char * buf ) {              // most significant digits
  • libcfa/src/virtual.c

    r29d8c02 r74ec742  
    1616#include "virtual.h"
    1717#include "assert.h"
     18
     19#pragma GCC visibility push(default)
    1820
    1921int __cfavir_is_parent(