Changes in / [17c6edeb:71cf630]


Ignore:
Location:
libcfa/src/concurrency/kernel
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/kernel/cluster.cfa

    r17c6edeb r71cf630  
    9393//=======================================================================
    9494void  ?{}(__scheduler_RWLock_t & this) {
    95         this.max   = __max_processors();
    96         this.alloc = 0;
    97         this.ready = 0;
    98         this.data  = alloc(this.max);
    99         this.write_lock  = false;
    100 
    101         /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
    102         /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));
     95        this.lock.max   = __max_processors();
     96        this.lock.alloc = 0;
     97        this.lock.ready = 0;
     98        this.lock.data  = alloc(this.lock.max);
     99        this.lock.write_lock  = false;
     100
     101        /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.lock.alloc), &this.lock.alloc));
     102        /*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.lock.ready), &this.lock.ready));
    103103
    104104}
    105105void ^?{}(__scheduler_RWLock_t & this) {
    106         free(this.data);
     106        free(this.lock.data);
    107107}
    108108
     
    110110//=======================================================================
    111111// Lock-Free registering/unregistering of threads
    112 unsigned register_proc_id( void ) with(*__scheduler_lock) {
     112unsigned register_proc_id( void ) with(__scheduler_lock->lock) {
    113113        __kernel_rseq_register();
    114114
     
    132132        }
    133133
    134         if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock->max);
     134        if(max <= alloc) abort("Trying to create more than %ud processors", __scheduler_lock->lock.max);
    135135
    136136        // Step - 2 : F&A to get a new spot in the array.
    137137        uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
    138         if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock->max);
     138        if(max <= n) abort("Trying to create more than %ud processors", __scheduler_lock->lock.max);
    139139
    140140        // Step - 3 : Mark space as used and then publish it.
     
    154154}
    155155
    156 void unregister_proc_id( unsigned id ) with(*__scheduler_lock) {
     156void unregister_proc_id( unsigned id ) with(__scheduler_lock->lock) {
    157157        /* paranoid */ verify(id < ready);
    158158        /* paranoid */ verify(id == kernelTLS().sched_id);
     
    169169// Writer side : acquire when changing the ready queue, e.g. adding more
    170170//  queues or removing them.
    171 uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
     171uint_fast32_t ready_mutate_lock( void ) with(__scheduler_lock->lock) {
    172172        /* paranoid */ verify( ! __preemption_enabled() );
    173173
     
    196196}
    197197
    198 void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
     198void ready_mutate_unlock( uint_fast32_t last_s ) with(__scheduler_lock->lock) {
    199199        /* paranoid */ verify( ! __preemption_enabled() );
    200200
  • libcfa/src/concurrency/kernel/cluster.hfa

    r17c6edeb r71cf630  
    2424// Calc moving average based on existing average, before and current time.
    2525static inline unsigned long long moving_average(unsigned long long currtsc, unsigned long long instsc, unsigned long long old_avg) {
    26         /* paranoid */ verifyf( currtsc < 45000000000000000, "Suspiciously large current time: %'llu (%llx)\n", currtsc, currtsc );
    27         /* paranoid */ verifyf( instsc  < 45000000000000000, "Suspiciously large insert time: %'llu (%llx)\n", instsc, instsc );
    2826        /* paranoid */ verifyf( old_avg < 15000000000000, "Suspiciously large previous average: %'llu (%llx)\n", old_avg, old_avg );
    2927
  • libcfa/src/concurrency/kernel/private.hfa

    r17c6edeb r71cf630  
    184184// have been hard-coded to for the ready-queue for
    185185// simplicity and performance
    186 struct __scheduler_RWLock_t {
    187         // total cachelines allocated
    188         unsigned int max;
    189 
    190         // cachelines currently in use
    191         volatile unsigned int alloc;
    192 
    193         // cachelines ready to iterate over
    194         // (!= to alloc when thread is in second half of doregister)
    195         volatile unsigned int ready;
    196 
    197         // writer lock
    198         volatile bool write_lock;
    199 
    200         // data pointer
    201         volatile bool * volatile * data;
     186union __attribute__((aligned(64))) __scheduler_RWLock_t {
     187        struct {
     188                // total cachelines allocated
     189                unsigned int max;
     190
     191                // cachelines currently in use
     192                volatile unsigned int alloc;
     193
     194                // cachelines ready to iterate over
     195                // (!= to alloc when thread is in second half of doregister)
     196                volatile unsigned int ready;
     197
     198                // writer lock
     199                volatile bool write_lock;
     200
     201                // data pointer
     202                volatile bool * volatile * data;
     203        } lock;
     204        char pad[192];
    202205};
    203206
     
    210213// Reader side : acquire when using the ready queue to schedule but not
    211214//  creating/destroying queues
    212 static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
     215static inline void ready_schedule_lock(void) with(__scheduler_lock->lock) {
    213216        /* paranoid */ verify( ! __preemption_enabled() );
    214217        /* paranoid */ verify( ! kernelTLS().in_sched_lock );
     
    235238}
    236239
    237 static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
     240static inline void ready_schedule_unlock(void) with(__scheduler_lock->lock) {
    238241        /* paranoid */ verify( ! __preemption_enabled() );
    239242        /* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
     
    256259
    257260        static inline bool ready_mutate_islocked() {
    258                 return __scheduler_lock->write_lock;
     261                return __scheduler_lock->lock.write_lock;
    259262        }
    260263#endif
Note: See TracChangeset for help on using the changeset viewer.