Changeset c28ea4e for libcfa


Timestamp:
Nov 4, 2020, 2:56:30 PM
Author:
Colby Alexander Parsons <caparsons@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
eeb5023
Parents:
4b30e8cc, a3f5208a
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Location:
libcfa
Files:
1 added
22 edited

Legend:

  (no marker)   Unmodified
  +             Added
  -             Removed
  • libcfa/prelude/builtins.c

      // Author           : Peter A. Buhr
      // Created On       : Fri Jul 21 16:21:03 2017
    - // Last Modified By : Peter A. Buhr
    - // Last Modified On : Fri Oct  9 18:26:19 2020
    - // Update Count     : 110
    + // Last Modified By : Andrew Beach
    + // Last Modified On : Tue Oct 27 14:42:00 2020
    + // Update Count     : 111
      //
    +
    + #define __cforall_builtins__

      // type that wraps a pointer and a destructor-like function - used in generating implicit destructor calls for struct members in user-defined functions
  • libcfa/src/concurrency/clib/cfathread.cfa

      void cfathread_setproccnt( int ncnt ) {
              assert( ncnt >= 1 );
    -         adelete(proc_cnt, procs);
    +         adelete( procs );

              proc_cnt = ncnt - 1;
  • libcfa/src/concurrency/coroutine.cfa

      // Created On       : Mon Nov 28 12:27:26 2016
      // Last Modified By : Peter A. Buhr
    - // Last Modified On : Tue May 26 22:06:09 2020
    - // Update Count     : 21
    + // Last Modified On : Fri Oct 23 23:05:24 2020
    + // Update Count     : 22
      //

    …
      #include <unistd.h>
      #include <sys/mman.h>                           // mprotect
    - extern "C" {
    - // use this define to make unwind.h play nice, definitely a hack
    - #define HIDE_EXPORTS
      #include <unwind.h>
    - #undef HIDE_EXPORTS
    - }

      #include "kernel_private.hfa"
    + #include "exception.hfa"

      #define __CFA_INVOKE_PRIVATE__
    …
      FORALL_DATA_INSTANCE(CoroutineCancelled, (dtype coroutine_t), (coroutine_t))

    - struct __cfaehm_node {
    -         struct _Unwind_Exception unwind_exception;
    -         struct __cfaehm_node * next;
    -         int handler_index;
    - };
    -
      forall(dtype T)
      void mark_exception(CoroutineCancelled(T) *) {}
    …
      forall(dtype T)
      void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) {
    +         dst->virtual_table = src->virtual_table;
              dst->the_coroutine = src->the_coroutine;
              dst->the_exception = src->the_exception;
    …
      verify( desc->cancellation );
      desc->state = Cancelled;
    - exception_t * except = (exception_t *)(1 + (__cfaehm_node *)desc->cancellation);
    + exception_t * except = __cfaehm_cancellation_exception( desc->cancellation );

      // TODO: Remove explitate vtable set once trac#186 is fixed.
    …
      // minimum feasible stack size in bytes
    - #define MinStackSize 1000
    + static const size_t MinStackSize = 1000;
      extern size_t __page_size;                      // architecture pagesize HACK, should go in proper runtime singleton
    …
              size = libFloor(create_size - stack_data_size - diff, libAlign());
      } // if
    - assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize );
    + assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %zd bytes for a stack.", size, MinStackSize );

      this->storage = (__stack_t *)((intptr_t)storage + size);
  • libcfa/src/concurrency/exception.cfa

      // Created On       : Mon Aug 17 10:41:00 2020
      // Last Modified By : Andrew Beach
    - // Last Modified On : Tue Aug 25 14:41:00 2020
    - // Update Count     : 0
    + // Last Modified On : Wed Oct 28 14:34:00 2020
    + // Update Count     : 1
      //

    - extern "C" {
    - // use this define to make unwind.h play nice, definitely a hack
    - #define HIDE_EXPORTS
    - #include <unwind.h>
    - #undef HIDE_EXPORTS
    - }
    + #define __cforall_thread__

    - #include "invoke.h"
      #include "exception.hfa"
    +
      #include "coroutine.hfa"

      extern struct $thread * mainThread;
    + extern "C" {
    + extern void __cfactx_thrd_leave();
    + }

      // Common pattern for all the stop functions, wait until the end then act.
    …

      STOP_AT_END_FUNCTION(thread_cancelstop,
    -         // TODO: Instead pass information to the joiner.
    -         abort();
    +         __cfactx_thrd_leave();
    +         __cabi_abort( "Resumed cancelled thread" );
      )

    …
              stop_param = (void *)0x22;
      } else {
    +         this_thread->self_cor.cancellation = unwind_exception;
    +
              stop_func = thread_cancelstop;
              stop_param = this_thread;
  • libcfa/src/concurrency/exception.hfa

      #pragma once

    + // This is an internal bridge between the two modes and must be C compatable.
    +
    + #include <unwind.h>
      #include "bits/defs.hfa"
      #include "invoke.h"
    + #include "exception.h"

      #ifdef __cforall
      extern "C" {
    -
    - #define HIDE_EXPORTS
      #endif
    - #include "unwind.h"

      struct exception_context_t * this_exception_context(void) OPTIONAL_THREAD;
    …

      #ifdef __cforall
    - #undef HIDE_EXPORTS
      }
      #endif
  • libcfa/src/concurrency/invoke.h

              // current execution status for coroutine
    +         // Possible values are:
    +         //    - TICKET_BLOCKED (-1) thread is blocked
    +         //    - TICKET_RUNNING ( 0) thread is running
    +         //    - TICKET_UNBLOCK ( 1) thread should ignore next block
              volatile int ticket;
              enum __Coroutine_State state:8;
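    For context: the ticket field implements the park/unpark handshake used throughout kernel.cfa below. Here is a minimal stand-alone sketch of that handshake, assuming hypothetical helper names (park_decision/unpark_decision); the real logic lives in __run_thread and unpark in kernel.cfa.

        // Sketch only: `ticket` stands in for $thread.ticket.
        #define TICKET_BLOCKED (-1) // thread is blocked
        #define TICKET_RUNNING ( 0) // thread is running
        #define TICKET_UNBLOCK ( 1) // thread should ignore next block

        // Parking side (cf. __run_thread): atomically decrement the ticket.
        // Old value TICKET_RUNNING means the park won the race: really block.
        // Old value TICKET_UNBLOCK means an unpark already arrived: run again.
        static inline int park_decision( volatile int * ticket ) {
                int old = __atomic_fetch_sub( ticket, 1, __ATOMIC_SEQ_CST );
                return old == TICKET_RUNNING;   // 1 => block, 0 => rerun
        }

        // Unparking side (cf. unpark): atomically increment the ticket.
        // Old value TICKET_BLOCKED means the thread already blocked: reschedule it.
        // Old value TICKET_RUNNING means the wake won the race: nothing to do.
        static inline int unpark_decision( volatile int * ticket ) {
                int old = __atomic_fetch_add( ticket, 1, __ATOMIC_SEQ_CST );
                return old == TICKET_BLOCKED;   // 1 => caller must reschedule
        }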
  • libcfa/src/concurrency/io/call.cfa.in

      #include "kernel/fwd.hfa"

    -         #if defined(CFA_HAVE_IOSQE_FIXED_FILE) && defined(CFA_HAVE_IOSQE_IO_DRAIN) && defined(CFA_HAVE_IOSQE_ASYNC)
    -                 #define REGULAR_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_DRAIN | IOSQE_ASYNC)
    -         #elif defined(CFA_HAVE_IOSQE_FIXED_FILE) && defined(CFA_HAVE_IOSQE_ASYNC)
    -                 #define REGULAR_FLAGS (IOSQE_FIXED_FILE | IOSQE_ASYNC)
    -         #elif defined(CFA_HAVE_IOSQE_FIXED_FILE) && defined(CFA_HAVE_IOSQE_IO_DRAIN)
    -                 #define REGULAR_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_DRAIN)
    -         #elif defined(CFA_HAVE_IOSQE_IO_DRAIN) && defined(CFA_HAVE_IOSQE_ASYNC)
    -                 #define REGULAR_FLAGS (IOSQE_IO_DRAIN | IOSQE_ASYNC)
    -         #elif defined(CFA_HAVE_IOSQE_FIXED_FILE)
    -                 #define REGULAR_FLAGS (IOSQE_FIXED_FILE)
    -         #elif defined(CFA_HAVE_IOSQE_IO_DRAIN)
    -                 #define REGULAR_FLAGS (IOSQE_IO_DRAIN)
    -         #elif defined(CFA_HAVE_IOSQE_ASYNC)
    -                 #define REGULAR_FLAGS (IOSQE_ASYNC)
    -         #else
    -                 #define REGULAR_FLAGS (0)
    -         #endif
    -
    -         #if defined(CFA_HAVE_IOSQE_IO_LINK) && defined(CFA_HAVE_IOSQE_IO_HARDLINK)
    -                 #define LINK_FLAGS (IOSQE_IO_LINK | IOSQE_IO_HARDLINK)
    -         #elif defined(CFA_HAVE_IOSQE_IO_LINK)
    -                 #define LINK_FLAGS (IOSQE_IO_LINK)
    -         #elif defined(CFA_HAVE_IOSQE_IO_HARDLINK)
    -                 #define LINK_FLAGS (IOSQE_IO_HARDLINK)
    -         #else
    -                 #define LINK_FLAGS (0)
    -         #endif
    -
    -         #if defined(CFA_HAVE_SPLICE_F_FD_IN_FIXED)
    -                 #define SPLICE_FLAGS (SPLICE_F_FD_IN_FIXED)
    -         #else
    -                 #define SPLICE_FLAGS (0)
    -         #endif
    +         static const __u8 REGULAR_FLAGS = 0
    +                 #if defined(CFA_HAVE_IOSQE_FIXED_FILE)
    +                         | IOSQE_FIXED_FILE
    +                 #endif
    +                 #if defined(CFA_HAVE_IOSQE_IO_DRAIN)
    +                         | IOSQE_IO_DRAIN
    +                 #endif
    +                 #if defined(CFA_HAVE_IOSQE_ASYNC)
    +                         | IOSQE_ASYNC
    +                 #endif
    +         ;
    +
    +         static const __u32 LINK_FLAGS = 0
    +                 #if defined(CFA_HAVE_IOSQE_IO_LINK)
    +                         | IOSQE_IO_LINK
    +                 #endif
    +                 #if defined(CFA_HAVE_IOSQE_IO_HARDLINK)
    +                         | IOSQE_IO_HARDLINK
    +                 #endif
    +         ;
    +
    +         static const __u32 SPLICE_FLAGS = 0
    +                 #if defined(CFA_HAVE_SPLICE_F_FD_IN_FIXED)
    +                         | SPLICE_F_FD_IN_FIXED
    +                 #endif
    +         ;

              extern [* struct io_uring_sqe, __u32] __submit_alloc( struct __io_data & ring, __u64 data );
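    The rewrite above replaces an #if/#elif cascade, which must enumerate every combination of available features, with one guarded bit-or term per feature. A minimal sketch of the pattern, using hypothetical FEATURE_A/FEATURE_B probes in place of the CFA_HAVE_* ones:

        #include <stdint.h>

        #define FEATURE_A_BIT 0x1
        #define FEATURE_B_BIT 0x2

        // N independent probes need N #if blocks instead of 2^N - 1 #elif branches.
        static const uint32_t FLAGS = 0
                #if defined(FEATURE_A)
                        | FEATURE_A_BIT
                #endif
                #if defined(FEATURE_B)
                        | FEATURE_B_BIT
                #endif
        ;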
  • libcfa/src/concurrency/io/setup.cfa

              id.full_proc = false;
              id.id = doregister(&id);
    +         kernelTLS.this_proc_id = &id;
              __cfaabi_dbg_print_safe( "Kernel : IO poller thread starting\n" );

    …
                              kernelTLS.this_stats = io_ctx->self.curr_cluster->stats;
                      #endif
    -                 __post( io_ctx->sem, &id );
    +                 post( io_ctx->sem );
              }
      }
    …
              if( thrd.state == Ready || thrd.preempted != __NO_PREEMPTION ) {

    -                 ready_schedule_lock( (struct __processor_id_t *)active_processor() );
    +                 ready_schedule_lock();

                              // This is the tricky case
    …
                              // Fixup the thread state
                              thrd.state = Blocked;
    -                         thrd.ticket = 0;
    +                         thrd.ticket = TICKET_BLOCKED;
                              thrd.preempted = __NO_PREEMPTION;

    -                 ready_schedule_unlock( (struct __processor_id_t *)active_processor() );
    +                 ready_schedule_unlock();

                      // Pretend like the thread was blocked all along
    …
              }
      } else {
    -         unpark( &thrd );
    +         post( this.thrd.sem );
      }
  • libcfa/src/concurrency/kernel.cfa

      static $thread * __next_thread_slow(cluster * this);
      static void __run_thread(processor * this, $thread * dst);
    - static void __wake_one(struct __processor_id_t * id, cluster * cltr);
    + static void __wake_one(cluster * cltr);

      static void push  (__cluster_idles & idles, processor & proc);
    …
              /* paranoid */ verify( kernelTLS.this_thread == thrd_dst );
              /* paranoid */ verify( thrd_dst->context.SP );
    +         /* paranoid */ verify( thrd_dst->state != Halted );
              /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
              /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
    …
              if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
                      // The thread was preempted, reschedule it and reset the flag
    -                 __schedule_thread( (__processor_id_t*)this, thrd_dst );
    +                 __schedule_thread( thrd_dst );
                      break RUNNING;
              }
    …
              if(unlikely(thrd_dst->state == Halted)) {
                      // The thread has halted, it should never be scheduled/run again
    -                 // We may need to wake someone up here since
    -                 unpark( this->destroyer );
    -                 this->destroyer = 0p;
    +                 // finish the thread
    +                 __thread_finish( thrd_dst );
                      break RUNNING;
              }
    …
              int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
              switch(old_ticket) {
    -                 case 1:
    +                 case TICKET_RUNNING:
                              // This is case 1, the regular case, nothing more is needed
                              break RUNNING;
    -                 case 2:
    +                 case TICKET_UNBLOCK:
                              // This is case 2, the racy case, someone tried to run this thread before it finished blocking
                              // In this case, just run it again.
    …
      // Scheduler routines
      // KERNEL ONLY
    - void __schedule_thread( struct __processor_id_t * id, $thread * thrd ) {
    + void __schedule_thread( $thread * thrd ) {
              /* paranoid */ verify( thrd );
              /* paranoid */ verify( thrd->state != Halted );
              /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    +         /* paranoid */ verify( kernelTLS.this_proc_id );
              /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
              /* paranoid */  if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
    …
              if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

    -         ready_schedule_lock  ( id );
    +         ready_schedule_lock();
                      push( thrd->curr_cluster, thrd );
    -                 __wake_one(id, thrd->curr_cluster);
    -         ready_schedule_unlock( id );
    +                 __wake_one(thrd->curr_cluster);
    +         ready_schedule_unlock();

              /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    …
      // KERNEL ONLY
      static inline $thread * __next_thread(cluster * this) with( *this ) {
    -         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    -
    -         ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
    +         /* paranoid */ verify( kernelTLS.this_proc_id );
    +         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    +
    +         ready_schedule_lock();
                      $thread * thrd = pop( this );
    -         ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
    -
    -         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    +         ready_schedule_unlock();
    +
    +         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    +         /* paranoid */ verify( kernelTLS.this_proc_id );
              return thrd;
      }
    …
      // KERNEL ONLY
      static inline $thread * __next_thread_slow(cluster * this) with( *this ) {
    -         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    -
    -         ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
    +         /* paranoid */ verify( kernelTLS.this_proc_id );
    +         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    +
    +         ready_schedule_lock();
                      $thread * thrd = pop_slow( this );
    -         ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
    -
    -         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    +         ready_schedule_unlock();
    +
    +         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    +         /* paranoid */ verify( kernelTLS.this_proc_id );
              return thrd;
      }

    - // KERNEL ONLY unpark with out disabling interrupts
    - void __unpark(  struct __processor_id_t * id, $thread * thrd ) {
    + void unpark( $thread * thrd ) {
    +         if( !thrd ) return;
    +
    +         /* paranoid */ verify( kernelTLS.this_proc_id );
    +         bool full = kernelTLS.this_proc_id->full_proc;
    +         if(full) disable_interrupts();
    +
    +         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
              int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
              switch(old_ticket) {
    -                 case 1:
    +                 case TICKET_RUNNING:
                              // Wake won the race, the thread will reschedule/rerun itself
                              break;
    -                 case 0:
    +                 case TICKET_BLOCKED:
                              /* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
                              /* paranoid */ verify( thrd->state == Blocked );

                              // Wake lost the race,
    -                         __schedule_thread( id, thrd );
    +                         __schedule_thread( thrd );
                              break;
                      default:
                              // This makes no sense, something is wrong abort
    -                         abort();
    -         }
    - }
    -
    - void unpark( $thread * thrd ) {
    -         if( !thrd ) return;
    -
    -         disable_interrupts();
    -         __unpark( (__processor_id_t*)kernelTLS.this_processor, thrd );
    -         enable_interrupts( __cfaabi_dbg_ctx );
    +                         abort("Thread %p (%s) has mismatch park/unpark\n", thrd, thrd->self_cor.name);
    +         }
    +         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    +
    +         if(full) enable_interrupts( __cfaabi_dbg_ctx );
    +         /* paranoid */ verify( kernelTLS.this_proc_id );
      }

    …
      }

    - // KERNEL ONLY
    - void __leave_thread() {
    -         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    -         returnToKernel();
    -         abort();
    + extern "C" {
    +         // Leave the thread monitor
    +         // last routine called by a thread.
    +         // Should never return
    +         void __cfactx_thrd_leave() {
    +                 $thread * thrd = TL_GET( this_thread );
    +                 $monitor * this = &thrd->self_mon;
    +
    +                 // Lock the monitor now
    +                 lock( this->lock __cfaabi_dbg_ctx2 );
    +
    +                 disable_interrupts();
    +
    +                 thrd->state = Halted;
    +                 if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
    +                 if( thrd != this->owner || this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }
    +
    +                 // Leave the thread
    +                 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    +                 returnToKernel();
    +                 abort();
    +
    +                 // Control flow should never reach here!
    +         }
      }

    …
      //=============================================================================================
      // Wake a thread from the front if there are any
    - static void __wake_one(struct __processor_id_t * id, cluster * this) {
    -         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    -         /* paranoid */ verify( ready_schedule_islocked( id ) );
    + static void __wake_one(cluster * this) {
    +         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    +         /* paranoid */ verify( ready_schedule_islocked() );

              // Check if there is a sleeping processor
    …
              #endif

    -         /* paranoid */ verify( ready_schedule_islocked( id ) );
    +         /* paranoid */ verify( ready_schedule_islocked() );
              /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

    …
              this.print_halts = true;
      }
    +
    + void print_stats_now( cluster & this, int flags ) {
    +         __print_stats( this.stats, this.print_stats, true, this.name, (void*)&this );
    + }
      #endif
      // Local Variables: //
  • libcfa/src/concurrency/kernel.hfa

      // Handle to pthreads
      pthread_t kernel_thread;
    -
    - // RunThread data
    - // Action to do after a thread is ran
    - $thread * destroyer;

      // Preemption data
  • libcfa/src/concurrency/kernel/fwd.hfa

      extern "Cforall" {
              extern __attribute__((aligned(128))) thread_local struct KernelThreadData {
    -                 struct $thread    * volatile this_thread;
    -                 struct processor  * volatile this_processor;
    -                 struct __stats_t  * volatile this_stats;
    +                 struct $thread          * volatile this_thread;
    +                 struct processor        * volatile this_processor;
    +                 struct __processor_id_t * volatile this_proc_id;
    +                 struct __stats_t        * volatile this_stats;

                      struct {
  • libcfa/src/concurrency/kernel/startup.cfa

              NULL,
              NULL,
    +         NULL,
              { 1, false, false },
      };
    …
      //initialize the global state variables
      kernelTLS.this_processor = mainProcessor;
    + kernelTLS.this_proc_id   = (__processor_id_t*)mainProcessor;
      kernelTLS.this_thread    = mainThread;

    …
      // Add the main thread to the ready queue
      // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
    - __schedule_thread((__processor_id_t *)mainProcessor, mainThread);
    + __schedule_thread(mainThread);

      // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
    …
      processor * proc = (processor *) arg;
      kernelTLS.this_processor = proc;
    + kernelTLS.this_proc_id   = (__processor_id_t*)proc;
      kernelTLS.this_thread    = 0p;
      kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
    …

      static void ?{}( $thread & this, current_stack_info_t * info) with( this ) {
    -         ticket = 1;
    +         ticket = TICKET_RUNNING;
              state = Start;
              self_cor{ info };
    …
      this.cltr = &_cltr;
      full_proc = true;
    - destroyer = 0p;
      do_terminate = false;
      preemption_alarm = 0p;
  • libcfa/src/concurrency/kernel_private.hfa

      }

    - void __schedule_thread( struct __processor_id_t *, $thread * )
    + void __schedule_thread( $thread * )
      #if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
    -         __attribute__((nonnull (2)))
    +         __attribute__((nonnull (1)))
      #endif
      ;

    - //Block current thread and release/wake-up the following resources
    - void __leave_thread() __attribute__((noreturn));
    + //release/wake-up the following resources
    + void __thread_finish( $thread * thrd );

      //-----------------------------------------------------------------------------
    …
      )

    - // KERNEL ONLY unpark with out disabling interrupts
    - void __unpark( struct __processor_id_t *, $thread * thrd );
    -
    - static inline bool __post(single_sem & this, struct __processor_id_t * id) {
    -         for() {
    -                 struct $thread * expected = this.ptr;
    -                 if(expected == 1p) return false;
    -                 if(expected == 0p) {
    -                         if(__atomic_compare_exchange_n(&this.ptr, &expected, 1p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
    -                                 return false;
    -                         }
    -                 }
    -                 else {
    -                         if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
    -                                 __unpark( id, expected );
    -                                 return true;
    -                         }
    -                 }
    -         }
    - }
    + #define TICKET_BLOCKED (-1) // thread is blocked
    + #define TICKET_RUNNING ( 0) // thread is running
    + #define TICKET_UNBLOCK ( 1) // thread should ignore next block

      //-----------------------------------------------------------------------------
    …
      // Reader side : acquire when using the ready queue to schedule but not
      //  creating/destroying queues
    - static inline void ready_schedule_lock( struct __processor_id_t * proc) with(*__scheduler_lock) {
    -         unsigned iproc = proc->id;
    -         /*paranoid*/ verify(data[iproc].handle == proc);
    + static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
    +         /*paranoid*/ verify( kernelTLS.this_proc_id );
    +
    +         unsigned iproc = kernelTLS.this_proc_id->id;
    +         /*paranoid*/ verify(data[iproc].handle == kernelTLS.this_proc_id);
              /*paranoid*/ verify(iproc < ready);

    …
      }

    - static inline void ready_schedule_unlock( struct __processor_id_t * proc) with(*__scheduler_lock) {
    -         unsigned iproc = proc->id;
    -         /*paranoid*/ verify(data[iproc].handle == proc);
    + static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
    +         /*paranoid*/ verify( kernelTLS.this_proc_id );
    +
    +         unsigned iproc = kernelTLS.this_proc_id->id;
    +         /*paranoid*/ verify(data[iproc].handle == kernelTLS.this_proc_id);
              /*paranoid*/ verify(iproc < ready);
              /*paranoid*/ verify(data[iproc].lock);
    …

      #ifdef __CFA_WITH_VERIFY__
    -         static inline bool ready_schedule_islocked( struct __processor_id_t * proc) {
    +         static inline bool ready_schedule_islocked(void) {
    +                 /*paranoid*/ verify( kernelTLS.this_proc_id );
    +                 __processor_id_t * proc = kernelTLS.this_proc_id;
                      return __scheduler_lock->data[proc->id].owned;
              }
  • libcfa/src/concurrency/monitor.cfa

      }

    - extern "C" {
    -         // Leave the thread monitor
    -         // last routine called by a thread.
    -         // Should never return
    -         void __cfactx_thrd_leave() {
    -                 $thread * thrd = TL_GET( this_thread );
    -                 $monitor * this = &thrd->self_mon;
    -
    -                 // Lock the monitor now
    -                 lock( this->lock __cfaabi_dbg_ctx2 );
    -
    -                 disable_interrupts();
    -
    -                 thrd->state = Halted;
    -
    -                 /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
    -
    -                 // Leaving a recursion level, decrement the counter
    -                 this->recursion -= 1;
    -
    -                 // If we haven't left the last level of recursion
    -                 // it must mean there is an error
    -                 if( this->recursion != 0) { abort( "Thread internal monitor has unbalanced recursion" ); }
    -
    -                 // Fetch the next thread, can be null
    -                 $thread * new_owner = next_thread( this );
    -
    -                 // Release the monitor lock
    -                 unlock( this->lock );
    -
    -                 // Unpark the next owner if needed
    -                 /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
    -                 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    -                 /* paranoid */ verify( ! kernelTLS.this_processor->destroyer );
    -                 /* paranoid */ verify( thrd->state == Halted );
    -
    -                 kernelTLS.this_processor->destroyer = new_owner;
    -
    -                 // Leave the thread
    -                 __leave_thread();
    -
    -                 // Control flow should never reach here!
    -         }
    - }
    -
    - // Join a thread
    - forall( dtype T | is_thread(T) )
    - T & join( T & this ) {
    -         $monitor *    m = get_monitor(this);
    -         void (*dtor)(T& mutex this) = ^?{};
    -         monitor_dtor_guard_t __guard = { &m, (fptr_t)dtor, true };
    -         {
    -                 return this;
    -         }
    + void __thread_finish( $thread * thrd ) {
    +         $monitor * this = &thrd->self_mon;
    +
    +         // Lock the monitor now
    +         /* paranoid */ verify( this->lock.lock );
    +         /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
    +         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    +         /* paranoid */ verify( thrd->state == Halted );
    +         /* paranoid */ verify( this->recursion == 1 );
    +
    +         // Leaving a recursion level, decrement the counter
    +         this->recursion -= 1;
    +         this->owner = 0p;
    +
    +         // Fetch the next thread, can be null
    +         $thread * new_owner = next_thread( this );
    +
    +         // Release the monitor lock
    +         unlock( this->lock );
    +
    +         // Unpark the next owner if needed
    +         /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
    +         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    +         /* paranoid */ verify( thrd->state == Halted );
    +         unpark( new_owner );
      }
  • libcfa/src/concurrency/preemption.cfa

      // FwdDeclarations : timeout handlers
      static void preempt( processor   * this );
    - static void timeout( struct __processor_id_t * id, $thread * this );
    + static void timeout( $thread * this );

      // FwdDeclarations : Signal handlers
    …

      // Tick one frame of the Discrete Event Simulation for alarms
    - static void tick_preemption( struct __processor_id_t * id ) {
    + static void tick_preemption(void) {
              alarm_node_t * node = 0p;                       // Used in the while loop but cannot be declared in the while condition
              alarm_list_t * alarms = &event_kernel->alarms;  // Local copy for ease of reading
    …
              }
              else if( node->type == User ) {
    -                 timeout( id, node->thrd );
    +                 timeout( node->thrd );
              }
              else {
                      bool unpark_thd = node->callback(*node);
    -                 if (unpark_thd) timeout( id, node->thrd );
    +                 if (unpark_thd) timeout( node->thrd );
              }

    …

      // reserved for future use
    - static void timeout( struct __processor_id_t * id, $thread * this ) {
    + static void timeout( $thread * this ) {
              #if !defined( __CFA_NO_STATISTICS__ )
                      kernelTLS.this_stats = this->curr_cluster->stats;
              #endif
    -         __unpark( id, this );
    +         unpark( this );
      }

    …
      id.full_proc = false;
      id.id = doregister(&id);
    + kernelTLS.this_proc_id = &id;

      // Block sigalrms to control when they arrive
    …
                      // __cfaabi_dbg_print_safe( "Kernel : Preemption thread tick\n" );
                      lock( event_kernel->lock __cfaabi_dbg_ctx2 );
    -                 tick_preemption( &id );
    +                 tick_preemption();
                      unlock( event_kernel->lock );
                      break;
  • libcfa/src/concurrency/snzi.hfa

      static inline void depart( __snzi_node_t & );

    - #define __snzi_half -1
    + static const int __snzi_half = -1;

      //--------------------------------------------------
  • libcfa/src/concurrency/thread.cfa

      #include "kernel_private.hfa"
    + #include "exception.hfa"

      #define __CFA_INVOKE_PRIVATE__
    …
              context{ 0p, 0p };
              self_cor{ name, storage, storageSize };
    -         ticket = 1;
    +         ticket = TICKET_RUNNING;
              state = Start;
              preempted = __NO_PREEMPTION;
    …
      }

    + FORALL_DATA_INSTANCE(ThreadCancelled, (dtype thread_t), (thread_t))
    +
    + forall(dtype T)
    + void copy(ThreadCancelled(T) * dst, ThreadCancelled(T) * src) {
    +         dst->virtual_table = src->virtual_table;
    +         dst->the_thread = src->the_thread;
    +         dst->the_exception = src->the_exception;
    + }
    +
    + forall(dtype T)
    + const char * msg(ThreadCancelled(T) *) {
    +         return "ThreadCancelled";
    + }
    +
    + forall(dtype T)
    + static void default_thread_cancel_handler(ThreadCancelled(T) & ) {
    +         abort( "Unhandled thread cancellation.\n" );
    + }
    +
    + forall(dtype T | is_thread(T) | IS_EXCEPTION(ThreadCancelled, (T)))
    + void ?{}( thread_dtor_guard_t & this,
    +                 T & thrd, void(*defaultResumptionHandler)(ThreadCancelled(T) &)) {
    +         $monitor * m = get_monitor(thrd);
    +         void (*dtor)(T& mutex this) = ^?{};
    +         bool join = defaultResumptionHandler != (void(*)(ThreadCancelled(T)&))0;
    +         (this.mg){&m, (void(*)())dtor, join};
    +
    +         // After the guard set-up and any wait, check for cancellation.
    +         $thread * desc = get_thread(thrd);
    +         struct _Unwind_Exception * cancellation = desc->self_cor.cancellation;
    +         if ( likely( 0p == cancellation ) ) {
    +                 return;
    +         } else if ( Cancelled == desc->state ) {
    +                 return;
    +         }
    +         desc->state = Cancelled;
    +         if (!join) {
    +                 defaultResumptionHandler = default_thread_cancel_handler;
    +         }
    +
    +         ThreadCancelled(T) except;
    +         // TODO: Remove explitate vtable set once trac#186 is fixed.
    +         except.virtual_table = &get_exception_vtable(&except);
    +         except.the_thread = &thrd;
    +         except.the_exception = __cfaehm_cancellation_exception( cancellation );
    +         throwResume except;
    +
    +         except.the_exception->virtual_table->free( except.the_exception );
    +         free( cancellation );
    +         desc->self_cor.cancellation = 0p;
    + }
    +
    + void ^?{}( thread_dtor_guard_t & this ) {
    +         ^(this.mg){};
    + }
    +
      //-----------------------------------------------------------------------------
      // Starting and stopping threads
    …
              verify( this_thrd->context.SP );

    -         __schedule_thread( (__processor_id_t *)kernelTLS.this_processor, this_thrd);
    +         __schedule_thread( this_thrd );
              enable_interrupts( __cfaabi_dbg_ctx );
      }
    …
      }

    + //-----------------------------------------------------------------------------
    + forall(dtype T | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled, (T)))
    + T & join( T & this ) {
    +         thread_dtor_guard_t guard = { this, defaultResumptionHandler };
    +         return this;
    + }
    +
      // Local Variables: //
      // mode: c //
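    A minimal usage sketch for the new join, assuming a hypothetical Worker type (the thread keyword generates the is_thread plumbing):

        thread Worker {};
        void main( Worker & ) {
                // thread body runs here
        }

        int main() {
                Worker w;          // thread starts once construction completes
                join( w );         // block until w's main returns; if w was
                                   // cancelled, ThreadCancelled(Worker) is
                                   // resumed at the joiner
        }                          // destructor also waits for termination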
  • libcfa/src/concurrency/thread.hfa

      #include "kernel.hfa"
      #include "monitor.hfa"
    + #include "exception.hfa"

      //-----------------------------------------------------------------------------
      // thread trait
      trait is_thread(dtype T) {
    -       void ^?{}(T& mutex this);
    -       void main(T& this);
    -       $thread* get_thread(T& this);
    +         void ^?{}(T& mutex this);
    +         void main(T& this);
    +         $thread* get_thread(T& this);
      };
    +
    + FORALL_DATA_EXCEPTION(ThreadCancelled, (dtype thread_t), (thread_t)) (
    +         thread_t * the_thread;
    +         exception_t * the_exception;
    + );
    +
    + forall(dtype T)
    + void copy(ThreadCancelled(T) * dst, ThreadCancelled(T) * src);
    +
    + forall(dtype T)
    + const char * msg(ThreadCancelled(T) *);

      // define that satisfies the trait without using the thread keyword
    …
      static inline void ?{}($thread & this, const char * const name, struct cluster & cl )                   { this{ name, cl, 0p, 65000 }; }
      static inline void ?{}($thread & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; }
    +
    + struct thread_dtor_guard_t {
    +         monitor_dtor_guard_t mg;
    + };
    +
    + forall( dtype T | is_thread(T) | IS_EXCEPTION(ThreadCancelled, (T)) )
    + void ?{}( thread_dtor_guard_t & this, T & thrd, void(*)(ThreadCancelled(T) &) );
    + void ^?{}( thread_dtor_guard_t & this );

      //-----------------------------------------------------------------------------
    …
      //----------
      // join
    - forall( dtype T | is_thread(T) )
    + forall( dtype T | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled, (T)) )
      T & join( T & this );
  • libcfa/src/exception.c

      // Author           : Andrew Beach
      // Created On       : Mon Jun 26 15:13:00 2017
    - // Last Modified By : Peter A. Buhr
    - // Last Modified On : Sat Aug 29 15:52:22 2020
    - // Update Count     : 34
    + // Last Modified By : Andrew Beach
    + // Last Modified On : Tue Oct 27 16:27:00 2020
    + // Update Count     : 35
      //
    …
      #include <stddef.h> // for size_t

    + #include <unwind.h> // for struct _Unwind_Exception {...};
    +
      #include "exception.h"

      #include <stdlib.h>
      #include <stdio.h>
    - #include <unwind.h>
      #include <bits/debug.hfa>
      #include "concurrency/invoke.h"
    …

      // MEMORY MANAGEMENT =========================================================
    -
    - struct __cfaehm_node {
    -         struct _Unwind_Exception unwind_exception;
    -         struct __cfaehm_node * next;
    -         int handler_index;
    - };

      #define NODE_TO_EXCEPT(node) ((exception_t *)(1 + (node)))
  • libcfa/src/exception.h

      // file "LICENCE" distributed with Cforall.
      //
    - // exception.h -- Builtins for exception handling.
    + // exception.h -- Internal exception handling definitions.
      //
      // Author           : Andrew Beach
      // Created On       : Mon Jun 26 15:11:00 2017
      // Last Modified By : Andrew Beach
    - // Last Modified On : Tue May 19 14:17:00 2020
    - // Update Count     : 10
    + // Last Modified On : Tue Oct 27 14:45:00 2020
    + // Update Count     : 11
      //

      #pragma once

    + // This could be considered several headers. All are internal to the exception
    + // system but needed to depending on whether they are C/Cforall code and
    + // whether or not they are part of the builtins.

      #ifdef __cforall
      extern "C" {
      #endif
    +
    + // Included in C code or the built-ins.
    + #if !defined(__cforall) || defined(__cforall_builtins__)

      struct __cfaehm_base_exception_t;
    …
      // Function catches termination exceptions.
      void __cfaehm_try_terminate(
    -     void (*try_block)(),
    -     void (*catch_block)(int index, exception_t * except),
    -     int (*match_block)(exception_t * except));
    +         void (*try_block)(),
    +         void (*catch_block)(int index, exception_t * except),
    +         int (*match_block)(exception_t * except));

      // Clean-up the exception in catch blocks.
    …
      // Data structure creates a list of resume handlers.
      struct __cfaehm_try_resume_node {
    -     struct __cfaehm_try_resume_node * next;
    -     _Bool (*handler)(exception_t * except);
    +         struct __cfaehm_try_resume_node * next;
    +         _Bool (*handler)(exception_t * except);
      };

      // These act as constructor and destructor for the resume node.
      void __cfaehm_try_resume_setup(
    -     struct __cfaehm_try_resume_node * node,
    -     _Bool (*handler)(exception_t * except));
    +         struct __cfaehm_try_resume_node * node,
    +         _Bool (*handler)(exception_t * except));
      void __cfaehm_try_resume_cleanup(
    -     struct __cfaehm_try_resume_node * node);
    +         struct __cfaehm_try_resume_node * node);

      // Check for a standard way to call fake deconstructors.
      struct __cfaehm_cleanup_hook {};

    + #endif
    +
    + // Included in C code and the library.
    + #if !defined(__cforall) || !defined(__cforall_builtins__)
    + struct __cfaehm_node {
    +         struct _Unwind_Exception unwind_exception;
    +         struct __cfaehm_node * next;
    +         int handler_index;
    + };
    +
    + static inline exception_t * __cfaehm_cancellation_exception(
    +                 struct _Unwind_Exception * unwind_exception ) {
    +         return (exception_t *)(1 + (struct __cfaehm_node *)unwind_exception);
    + }
    + #endif
    +
      #ifdef __cforall
      }
    +
    + // Built-ins not visible in C.
    + #if defined(__cforall_builtins__)

      // Not all the built-ins can be expressed in C. These can't be
    …

      #endif
    +
    + #endif
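    The new __cfaehm_cancellation_exception helper relies on the layout the exception code already uses (cf. NODE_TO_EXCEPT in exception.c): each allocation is a __cfaehm_node header immediately followed by the user-visible exception, so `1 + (struct __cfaehm_node *)p` is pointer arithmetic on the node type, stepping over exactly sizeof(struct __cfaehm_node) bytes:

        //   +---------------------------+ <- struct __cfaehm_node *
        //   | _Unwind_Exception         |
        //   | next                      |
        //   | handler_index             |
        //   +---------------------------+ <- (exception_t *)(node + 1)
        //   | exception data ...        |
        //   +---------------------------+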
  • libcfa/src/stdlib.cfa

      forall( dtype T | sized(T) | { void ^?{}( T & ); } )
    - void adelete( size_t dim, T arr[] ) {
    + void adelete( T arr[] ) {
              if ( arr ) {                                            // ignore null
    +                 size_t dim = malloc_size( arr ) / sizeof( T );
                      for ( int i = dim - 1; i >= 0; i -= 1 ) {       // reverse allocation order, must be unsigned
                              ^(arr[i]){};                            // run destructor
    …

      forall( dtype T | sized(T) | { void ^?{}( T & ); }, ttype Params | { void adelete( Params ); } )
    - void adelete( size_t dim, T arr[], Params rest ) {
    + void adelete( T arr[], Params rest ) {
              if ( arr ) {                                            // ignore null
    +                 size_t dim = malloc_size( arr ) / sizeof( T );
                      for ( int i = dim - 1; i >= 0; i -= 1 ) {       // reverse allocation order, must be unsigned
                              ^(arr[i]){};                            // run destructor
  • libcfa/src/stdlib.hfa

      // Cforall allocation/deallocation and constructor/destructor, array types
      forall( dtype T | sized(T), ttype Params | { void ?{}( T &, Params ); } ) T * anew( size_t dim, Params p );
    - forall( dtype T | sized(T) | { void ^?{}( T & ); } ) void adelete( size_t dim, T arr[] );
    - forall( dtype T | sized(T) | { void ^?{}( T & ); }, ttype Params | { void adelete( Params ); } ) void adelete( size_t dim, T arr[], Params rest );
    + forall( dtype T | sized(T) | { void ^?{}( T & ); } ) void adelete( T arr[] );
    + forall( dtype T | sized(T) | { void ^?{}( T & ); }, ttype Params | { void adelete( Params ); } ) void adelete( T arr[], Params rest );

      //---------------------------------------
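    A minimal sketch of the new adelete interface, assuming a hypothetical Point type; adelete no longer takes a dimension because it recovers it from the allocation size via malloc_size:

        #include <stdlib.hfa>

        struct Point { int x, y; };
        void  ?{}( Point & p ) { p.x = 0; p.y = 0; }    // default constructor
        void ^?{}( Point & p ) {}                       // destructor

        int main() {
                Point * ps = anew( 10 );   // allocate and default-construct 10 Points
                ps[3].x = 42;
                adelete( ps );             // destruct in reverse order and free; the
                                           // dimension is recovered via malloc_size
        }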