Ignore:
Timestamp:
Apr 14, 2021, 3:41:06 PM (3 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
a7504db
Parents:
634a5c2
Message:

Minor changes so that using the global RWlock is more concise.

Location:
libcfa/src/concurrency
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/kernel/startup.cfa

    r634a5c2 ra33c113  
    493493        unlock( this.cltr->idles );
    494494
    495         id = doregister((__processor_id_t*)&this);
    496 
    497         // Lock the RWlock so no-one pushes/pops while we are changing the queue
    498         uint_fast32_t last_size = ready_mutate_lock();
     495        // Register and Lock the RWlock so no-one pushes/pops while we are changing the queue
     496        uint_fast32_t last_size = ready_mutate_register((__processor_id_t*)&this);
    499497
    500498                // Adjust the ready queue size
     
    519517                ready_queue_shrink( this.cltr, target );
    520518
    521         // Unlock the RWlock
    522         ready_mutate_unlock( last_size );
    523 
    524         // Finally we don't need the read_lock any more
    525         unregister((__processor_id_t*)&this);
     519        // Unlock the RWlock and unregister: we don't need the read_lock any more
     520        ready_mutate_unregister((__processor_id_t*)&this, last_size );
    526521
    527522        close(this.idle);
  • libcfa/src/concurrency/kernel_private.hfa

    r634a5c2 ra33c113  
    8383// Cluster lock API
    8484//=======================================================================
    85 // Cells use by the reader writer lock
    86 // while not generic it only relies on a opaque pointer
    87 struct __attribute__((aligned(128))) __scheduler_lock_id_t {
    88         // Spin lock used as the underlying lock
    89         volatile bool lock;
    90 
    91         // Handle pointing to the proc owning this cell
    92         // Used for allocating cells and debugging
    93         __processor_id_t * volatile handle;
    94 
    95         #ifdef __CFA_WITH_VERIFY__
    96                 // Debug, check if this is owned for reading
    97                 bool owned;
    98         #endif
    99 };
    100 
    101 static_assert( sizeof(struct __scheduler_lock_id_t) <= __alignof(struct __scheduler_lock_id_t));
    102 
    10385// Lock-Free registering/unregistering of threads
    10486// Register a processor to a given cluster and get its unique id in return
    105 unsigned doregister( struct __processor_id_t * proc );
     87void register_proc_id( struct __processor_id_t * );
    10688
    10789// Unregister a processor from a given cluster using its id, getting back the original pointer
    108 void     unregister( struct __processor_id_t * proc );
     90void unregister_proc_id( struct __processor_id_t * proc );
    10991
    11092//-----------------------------------------------------------------------
     
    153135}
    154136
     137// Cells use by the reader writer lock
     138// while not generic it only relies on a opaque pointer
     139struct __attribute__((aligned(128))) __scheduler_lock_id_t {
     140        // Spin lock used as the underlying lock
     141        volatile bool lock;
     142
     143        // Handle pointing to the proc owning this cell
     144        // Used for allocating cells and debugging
     145        __processor_id_t * volatile handle;
     146
     147        #ifdef __CFA_WITH_VERIFY__
     148                // Debug, check if this is owned for reading
     149                bool owned;
     150        #endif
     151};
     152
     153static_assert( sizeof(struct __scheduler_lock_id_t) <= __alignof(struct __scheduler_lock_id_t));
     154
    155155//-----------------------------------------------------------------------
    156156// Reader-Writer lock protecting the ready-queues
     
    247247void ready_mutate_unlock( uint_fast32_t /* value returned by lock */ );
    248248
     249//-----------------------------------------------------------------------
     250// Lock-Free registering/unregistering of threads
     251// Register a processor to a given cluster and get its unique id in return
     252// For convenience, also acquires the lock
     253static inline uint_fast32_t ready_mutate_register( struct __processor_id_t * proc ) {
     254        register_proc_id( proc );
     255        return ready_mutate_lock();
     256}
     257
     258// Unregister a processor from a given cluster using its id, getting back the original pointer
     259// assumes the lock is acquired
     260static inline void ready_mutate_unregister( struct __processor_id_t * proc, uint_fast32_t last_s ) {
     261        ready_mutate_unlock( last_s );
     262        unregister_proc_id( proc );
     263}
     264
    249265//=======================================================================
    250266// Ready-Queue API
  • libcfa/src/concurrency/preemption.cfa

    r634a5c2 ra33c113  
    712712static void * alarm_loop( __attribute__((unused)) void * args ) {
    713713        __processor_id_t id;
    714         id.id = doregister(&id);
     714        register_proc_id(&id);
    715715        __cfaabi_tls.this_proc_id = &id;
    716716
     
    773773EXIT:
    774774        __cfaabi_dbg_print_safe( "Kernel : Preemption thread stopping\n" );
    775         unregister(&id);
 775        unregister_proc_id(&id);
    776776
    777777        return 0p;
  • libcfa/src/concurrency/ready_queue.cfa

    r634a5c2 ra33c113  
    9494//=======================================================================
    9595// Lock-Free registering/unregistering of threads
    96 unsigned doregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
     96void register_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
    9797        __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);
    9898
     
    108108                        /*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
    109109                        /*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
    110                         return i;
     110                        proc->id = i;
    111111                }
    112112        }
     
    135135        /*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
    136136        /*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
    137         return n;
    138 }
    139 
    140 void unregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
     137        proc->id = n;
     138}
     139
     140void unregister_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
    141141        unsigned id = proc->id;
    142142        /*paranoid*/ verify(id < ready);
Note: See TracChangeset for help on using the changeset viewer.