Timestamp:
Nov 2, 2020, 12:44:43 PM (4 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
58688bf, 82f791f
Parents:
f7136f7
Message:

Removed __unpark and added support for unpark from the kernel (removing the distinction between the two).
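The pattern visible in the diff below: scheduler entry points stop taking an explicit struct __processor_id_t * argument and instead read the caller's identity from kernelTLS.this_proc_id, which is what lets the kernel itself unpark a thread through the same path. A minimal C sketch of that refactor, under hypothetical names (proc_id, this_proc, wake_before, wake_after are illustrations, not CFA APIs):

#include <assert.h>
#include <stddef.h>

struct proc_id { unsigned id; };                 /* hypothetical processor identity */
static _Thread_local struct proc_id *this_proc;  /* set when a kernel thread starts */

/* Before: every wake-up path must be handed the processor identity. */
void wake_before(struct proc_id *proc, void *thrd) {
    assert(proc != NULL && thrd != NULL);
    /* ... enqueue thrd on the ready queue, stamped with proc->id ... */
}

/* After: the identity comes from thread-local storage, so any code
   running on a processor, including the kernel, can call this. */
void wake_after(void *thrd) {
    assert(this_proc != NULL && thrd != NULL);
    /* ... enqueue thrd, stamped with this_proc->id ... */
}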

File:
1 edited

Legend:

Unmodified lines are shown with no prefix, removed lines with a leading '-',
and added lines with a leading '+'.
  • libcfa/src/concurrency/kernel_private.hfa

--- libcfa/src/concurrency/kernel_private.hfa (rf7136f7)
+++ libcfa/src/concurrency/kernel_private.hfa (re873838)

@@ -33,7 +33,7 @@
 }
 
-void __schedule_thread( struct __processor_id_t *, $thread * )
+void __schedule_thread( $thread * )
 #if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
-    __attribute__((nonnull (2)))
+    __attribute__((nonnull (1)))
 #endif
 ;
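The nonnull index moves from 2 to 1 simply because the attribute counts parameters by position: with the leading __processor_id_t * removed, the $thread * argument shifts from parameter 2 to parameter 1. For reference, a small C illustration of the attribute (the function name is hypothetical):

/* nonnull takes 1-based parameter indices; compilers that support it
   warn when a caller passes NULL for a listed parameter. */
__attribute__((nonnull(1)))
void wake(void *thrd);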
     
@@ -63,28 +63,7 @@
 )
 
-// KERNEL ONLY unpark with out disabling interrupts
-void __unpark( struct __processor_id_t *, $thread * thrd );
-
 #define TICKET_BLOCKED (-1) // thread is blocked
 #define TICKET_RUNNING ( 0) // thread is running
 #define TICKET_UNBLOCK ( 1) // thread should ignore next block
-
-static inline bool __post(single_sem & this, struct __processor_id_t * id) {
-    for() {
-        struct $thread * expected = this.ptr;
-        if(expected == 1p) return false;
-        if(expected == 0p) {
-            if(__atomic_compare_exchange_n(&this.ptr, &expected, 1p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-                return false;
-            }
-        }
-        else {
-            if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-                __unpark( id, expected );
-                return true;
-            }
-        }
-    }
-}
 
 //-----------------------------------------------------------------------------
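The removed __post encoded a binary semaphore in a single pointer: 0p means idle, 1p means a post is pending, and any other value is a blocked thread to hand to __unpark; with __unpark gone, this id-threading variant is presumably no longer needed. A C11 sketch of the same protocol, under stand-in names (thread, single_sem, wake are illustrations, not the CFA types):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct thread;                                   /* stand-in for CFA's $thread */
struct single_sem { _Atomic(struct thread *) ptr; };

#define SEM_POSTED ((struct thread *)1)          /* counterpart of CFA's 1p */

extern void wake(struct thread *);               /* stand-in for __unpark */

/* Sketch of the removed __post: NULL (0p) = no waiter, no pending post;
   SEM_POSTED (1p) = a post is pending; anything else = a parked thread. */
static bool post(struct single_sem *s) {
    for (;;) {
        struct thread *expected = atomic_load(&s->ptr);
        if (expected == SEM_POSTED) return false;        /* already posted */
        if (expected == NULL) {
            /* leave a token so the next wait returns immediately */
            if (atomic_compare_exchange_strong(&s->ptr, &expected, SEM_POSTED))
                return false;
        } else {
            /* a thread is parked here: claim it and wake it */
            if (atomic_compare_exchange_strong(&s->ptr, &expected, NULL)) {
                wake(expected);
                return true;
            }
        }
    }
}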
     
@@ -201,7 +180,9 @@
 // Reader side : acquire when using the ready queue to schedule but not
 //  creating/destroying queues
-static inline void ready_schedule_lock( struct __processor_id_t * proc) with(*__scheduler_lock) {
-    unsigned iproc = proc->id;
-    /*paranoid*/ verify(data[iproc].handle == proc);
+static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
+    /*paranoid*/ verify( kernelTLS.this_proc_id );
+
+    unsigned iproc = kernelTLS.this_proc_id->id;
+    /*paranoid*/ verify(data[iproc].handle == kernelTLS.this_proc_id);
     /*paranoid*/ verify(iproc < ready);
 
     
@@ -225,7 +206,9 @@
 }
 
-static inline void ready_schedule_unlock( struct __processor_id_t * proc) with(*__scheduler_lock) {
-    unsigned iproc = proc->id;
-    /*paranoid*/ verify(data[iproc].handle == proc);
+static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
+    /*paranoid*/ verify( kernelTLS.this_proc_id );
+
+    unsigned iproc = kernelTLS.this_proc_id->id;
+    /*paranoid*/ verify(data[iproc].handle == kernelTLS.this_proc_id);
 /*paranoid*/ verify(iproc < ready);
 /*paranoid*/ verify(data[iproc].lock);
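On both the lock and unlock side, the reader's slot in the scheduler lock is now found through kernelTLS.this_proc_id instead of an argument, with the paranoid verifies checking that the TLS identity is set and still owns its slot. A compact C sketch of that reader-side shape, deliberately simplified and with hypothetical names (the real lock's writer coordination is not shown):

#include <assert.h>
#include <stdatomic.h>

struct proc_id { unsigned id; };
struct slot { struct proc_id *handle; atomic_bool lock; };

static _Thread_local struct proc_id *this_proc;  /* per-kernel-thread identity */
static struct slot data[64];                     /* one slot per registered processor */

/* Reader-side acquire: locate our own slot via TLS and take its flag. */
static void reader_lock(void) {
    assert(this_proc);                           /* caller must be registered */
    unsigned i = this_proc->id;
    assert(data[i].handle == this_proc);         /* slot still belongs to us */
    while (atomic_exchange(&data[i].lock, true))
        ;                                        /* spin while the slot is held */
}

static void reader_unlock(void) {
    unsigned i = this_proc->id;
    assert(atomic_load(&data[i].lock));          /* must hold the slot to release */
    atomic_store(&data[i].lock, false);
}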
     
@@ -239,5 +222,7 @@
 
 #ifdef __CFA_WITH_VERIFY__
-    static inline bool ready_schedule_islocked( struct __processor_id_t * proc) {
+    static inline bool ready_schedule_islocked(void) {
+        /*paranoid*/ verify( kernelTLS.this_proc_id );
+        __processor_id_t * proc = kernelTLS.this_proc_id;
         return __scheduler_lock->data[proc->id].owned;
     }
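Since ready_schedule_islocked only exists under __CFA_WITH_VERIFY__, its evident purpose is assertion support; a plausible call site, following the /*paranoid*/ verify style used throughout this file (the surrounding context is hypothetical):

/* in a routine that assumes the caller already holds the reader lock */
/* paranoid */ verify( ready_schedule_islocked() );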