Timestamp: Nov 14, 2022, 11:52:44 AM
Author: caparson <caparson@…>
Branches: ADT, ast-experimental, master
Children: 7d9598d8
Parents: b77f0e1 (diff), 19a8c40 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
Message: Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc
Location: libcfa/src/concurrency
Files: 2 added, 15 edited

  • libcfa/src/concurrency/clib/cfathread.cfa

    rb77f0e1 r63be3387  
    172172
    173173                pthread_attr_t attr;
    174                 if (int ret = pthread_attr_init(&attr); 0 != ret) {
     174                if (int ret = __cfaabi_pthread_attr_init(&attr); 0 != ret) {
    175175                        abort | "failed to create master epoll thread attr: " | ret | strerror(ret);
    176176                }
    177177
    178                 if (int ret = pthread_create(&master_poller, &attr, master_epoll, 0p); 0 != ret) {
     178                if (int ret = __cfaabi_pthread_create(&master_poller, &attr, master_epoll, 0p); 0 != ret) {
    179179                        abort | "failed to create master epoll thread: " | ret | strerror(ret);
    180180                }
  • libcfa/src/concurrency/invoke.h

    rb77f0e1 r63be3387  
    146146
    147147        // Link lists fields
    148         // instrusive link field for threads
     148        // intrusive link field for threads in the ready-queue
    149149        struct __thread_desc_link {
    150150                struct thread$ * next;
    151151                volatile unsigned long long ts;
    152152        };
     153
     154        // Link lists fields
     155        // intrusive link field for threads in the user_link/cltr_link
     156        struct __thread_user_link {
     157                #ifdef __cforall
     158                        inline dlink(thread$);
     159                #else
     160                        struct thread$ * next; struct thread$ * back;
     161                #endif
     162        };
     163        _Static_assert(sizeof(struct __thread_user_link) == 2 * sizeof(struct thread$ *), "__thread_user_link should be consistent in C and Cforall");
    153164
    154165        struct thread$ {
     
    159170                // Link lists fields
    160171                // instrusive link field for threads
    161                 struct __thread_desc_link link;
     172                struct __thread_desc_link rdy_link;
    162173
    163174                // current execution status for coroutine
     
    195206                struct __monitor_group_t monitors;
    196207
    197                 // used to put threads on dlist data structure
    198                 __cfa_dlink(thread$);
    199 
    200                 struct {
    201                         struct thread$ * next;
    202                         struct thread$ * prev;
    203                 } node;
     208                // intrusive link fields, used for locks, monitors and any user defined data structure
     209                // default link fields for dlist
     210                struct __thread_user_link user_link;
     211
     212                // secondary intrusive link fields, used for global cluster list
     213                // default link fields for dlist
     214                struct __thread_user_link cltr_link;
    204215
    205216                // used to store state between clh lock/unlock
     
    214225
    215226                #if defined( __CFA_WITH_VERIFY__ )
     227                        struct processor * volatile executing;
    216228                        void * canary;
    217229                #endif
    218230        };
    219         #ifdef __cforall
    220                 P9_EMBEDDED( thread$, dlink(thread$) )
    221         #endif
     231
    222232        // Wrapper for gdb
    223233        struct cfathread_thread_t { struct thread$ debug; };
     
    231241        #ifdef __cforall
    232242        extern "Cforall" {
     243                static inline thread$ * volatile & ?`next ( thread$ * this ) {
     244                        return this->user_link.next;
     245                }
    233246
    234247                static inline thread$ *& get_next( thread$ & this ) __attribute__((const)) {
    235                         return this.link.next;
    236                 }
    237 
    238                 static inline [thread$ *&, thread$ *& ] __get( thread$ & this ) __attribute__((const)) {
    239                         return this.node.[next, prev];
    240                 }
     248                        return this.user_link.next;
     249                }
     250
     251                static inline tytagref( dlink(thread$), dlink(thread$) ) ?`inner( thread$ & this ) {
     252                        dlink(thread$) & b = this.user_link;
     253                        tytagref( dlink(thread$), dlink(thread$) ) result = { b };
     254                        return result;
     255                }
     256
     257                static inline tytagref(struct __thread_user_link, dlink(thread$)) ?`inner( struct thread$ & this ) {
     258                        struct __thread_user_link & ib = this.cltr_link;
     259                        dlink(thread$) & b = ib`inner;
     260                        tytagref(struct __thread_user_link, dlink(thread$)) result = { b };
     261                        return result;
     262                }
     263
     264                P9_EMBEDDED(struct __thread_user_link, dlink(thread$))
    241265
    242266                static inline void ?{}(__monitor_group_t & this) {
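
The __thread_user_link type added above packages the user-facing intrusive link so the same two-pointer layout is visible from both C and Cforall, with the _Static_assert guarding that contract. A minimal plain-C sketch of the intrusive-link idea (the names elem and link2 are illustrative, not from the runtime):

        #include <stddef.h>
        #include <stdio.h>

        /* Intrusive link field: the element embeds its own list node,
         * so enqueueing never allocates. */
        struct link2 {
            struct elem * next;
            struct elem * prev;
        };

        struct elem {
            int value;
            struct link2 user_link;   /* lives inside the element itself */
        };

        /* The layout contract the runtime asserts: exactly two pointers wide. */
        _Static_assert(sizeof(struct link2) == 2 * sizeof(struct elem *),
                       "link field must stay two pointers wide");

        int main(void) {
            struct elem a = { 1, { 0, 0 } }, b = { 2, { 0, 0 } };
            a.user_link.next = &b;   /* splice b after a, no allocation needed */
            b.user_link.prev = &a;
            printf("%d -> %d\n", a.value, a.user_link.next->value);
            return 0;
        }
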
  • libcfa/src/concurrency/io.cfa

    rb77f0e1 r63be3387  
    610610                if( we ) {
    611611                        sigval_t value = { PREEMPT_IO };
    612                         pthread_sigqueue(ctx->proc->kernel_thread, SIGUSR1, value);
     612                        __cfaabi_pthread_sigqueue(ctx->proc->kernel_thread, SIGUSR1, value);
    613613                }
    614614
     
    639639                }
    640640        }
    641 
    642         #if defined(CFA_WITH_IO_URING_IDLE)
    643                 bool __kernel_read(struct processor * proc, io_future_t & future, iovec & iov, int fd) {
    644                         io_context$ * ctx = proc->io.ctx;
    645                         /* paranoid */ verify( ! __preemption_enabled() );
    646                         /* paranoid */ verify( proc == __cfaabi_tls.this_processor );
    647                         /* paranoid */ verify( ctx );
    648 
    649                         __u32 idx;
    650                         struct io_uring_sqe * sqe;
    651 
    652                         // We can proceed to the fast path
    653                         if( !__alloc(ctx, &idx, 1) ) {
    654                                 /* paranoid */ verify( false ); // for now check if this happens, next time just abort the sleep.
    655                                 return false;
    656                         }
    657 
    658                         // Allocation was successful
    659                         __fill( &sqe, 1, &idx, ctx );
    660 
    661                         sqe->user_data = (uintptr_t)&future;
    662                         sqe->flags = 0;
    663                         sqe->fd = fd;
    664                         sqe->off = 0;
    665                         sqe->ioprio = 0;
    666                         sqe->fsync_flags = 0;
    667                         sqe->__pad2[0] = 0;
    668                         sqe->__pad2[1] = 0;
    669                         sqe->__pad2[2] = 0;
    670 
    671                         #if defined(CFA_HAVE_IORING_OP_READ)
    672                                 sqe->opcode = IORING_OP_READ;
    673                                 sqe->addr = (uint64_t)iov.iov_base;
    674                                 sqe->len = iov.iov_len;
    675                         #elif defined(CFA_HAVE_READV) && defined(CFA_HAVE_IORING_OP_READV)
    676                                 sqe->opcode = IORING_OP_READV;
    677                                 sqe->addr = (uintptr_t)&iov;
    678                                 sqe->len = 1;
    679                         #else
    680                                 #error CFA_WITH_IO_URING_IDLE but none of CFA_HAVE_READV, CFA_HAVE_IORING_OP_READV or CFA_HAVE_IORING_OP_READ defined
    681                         #endif
    682 
    683                         asm volatile("": : :"memory");
    684 
    685                         /* paranoid */ verify( sqe->user_data == (uintptr_t)&future );
    686                         __submit_only( ctx, &idx, 1 );
    687 
    688                         /* paranoid */ verify( proc == __cfaabi_tls.this_processor );
    689                         /* paranoid */ verify( ! __preemption_enabled() );
    690 
    691                         return true;
    692                 }
    693 
    694                 void __cfa_io_idle( struct processor * proc ) {
    695                         iovec iov;
    696                         __atomic_acquire( &proc->io.ctx->cq.lock );
    697 
    698                         __attribute__((used)) volatile bool was_reset = false;
    699 
    700                         with( proc->idle_wctx) {
    701 
    702                                 // Do we already have a pending read
    703                                 if(available(*ftr)) {
    704                                         // There is no pending read, we need to add one
    705                                         reset(*ftr);
    706 
    707                                         iov.iov_base = rdbuf;
    708                                         iov.iov_len  = sizeof(eventfd_t);
    709                                         __kernel_read(proc, *ftr, iov, evfd );
    710                                         ftr->result = 0xDEADDEAD;
    711                                         *((eventfd_t *)rdbuf) = 0xDEADDEADDEADDEAD;
    712                                         was_reset = true;
    713                                 }
    714                         }
    715 
    716                         if( !__atomic_load_n( &proc->do_terminate, __ATOMIC_SEQ_CST ) ) {
    717                                 __ioarbiter_flush( *proc->io.ctx );
    718                                 proc->idle_wctx.sleep_time = rdtscl();
    719                                 ioring_syscsll( *proc->io.ctx, 1, IORING_ENTER_GETEVENTS);
    720                         }
    721 
    722                         ready_schedule_lock();
    723                         __cfa_do_drain( proc->io.ctx, proc->cltr );
    724                         ready_schedule_unlock();
    725 
    726                         asm volatile ("" :: "m" (was_reset));
    727                 }
    728         #endif
    729641#endif
  • libcfa/src/concurrency/io/setup.cfa

    rb77f0e1 r63be3387  
    3434        bool __cfa_io_flush( processor * proc ) { return false; }
    3535        bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1))) { return false; }
    36         void __cfa_io_idle ( processor * ) __attribute__((nonnull (1))) {}
    3736        void __cfa_io_stop ( processor * proc ) {}
    3837
     
    317316        }
    318317
    319 //=============================================================================================
    320 // I/O Context Sleep
    321 //=============================================================================================
    322         // static inline void __epoll_ctl(io_context$ & ctx, int op, const char * error) {
    323         //      struct epoll_event ev;
    324         //      ev.events = EPOLLIN | EPOLLONESHOT;
    325         //      ev.data.u64 = (__u64)&ctx;
    326         //      int ret = epoll_ctl(iopoll.epollfd, op, ctx.efd, &ev);
    327         //      if (ret < 0) {
    328         //              abort( "KERNEL ERROR: EPOLL %s - (%d) %s\n", error, (int)errno, strerror(errno) );
    329         //      }
    330         // }
    331 
    332         // static void __epoll_register(io_context$ & ctx) {
    333         //      __epoll_ctl(ctx, EPOLL_CTL_ADD, "ADD");
    334         // }
    335 
    336         // static void __epoll_unregister(io_context$ & ctx) {
    337         //      // Read the current epoch so we know when to stop
    338         //      size_t curr = __atomic_load_n(&iopoll.epoch, __ATOMIC_SEQ_CST);
    339 
    340         //      // Remove the fd from the iopoller
    341         //      __epoll_ctl(ctx, EPOLL_CTL_DEL, "REMOVE");
    342 
    343         //      // Notify the io poller thread of the shutdown
    344         //      iopoll.run = false;
    345         //      sigval val = { 1 };
    346         //      pthread_sigqueue( iopoll.thrd, SIGUSR1, val );
    347 
    348         //      // Make sure all this is done
    349         //      __atomic_thread_fence(__ATOMIC_SEQ_CST);
    350 
    351         //      // Wait for the next epoch
    352         //      while(curr == iopoll.epoch && !iopoll.stopped) Pause();
    353         // }
    354 
    355         // void __ioctx_prepare_block(io_context$ & ctx) {
    356         //      __cfadbg_print_safe(io_core, "Kernel I/O - epoll : Re-arming io poller %d (%p)\n", ctx.fd, &ctx);
    357         //      __epoll_ctl(ctx, EPOLL_CTL_MOD, "REARM");
    358         // }
    359 
    360318
    361319//=============================================================================================
  • libcfa/src/concurrency/kernel.cfa

    rb77f0e1 r63be3387  
    138138extern bool __cfa_io_drain( processor * proc ) __attribute__((nonnull (1)));
    139139extern bool __cfa_io_flush( processor * ) __attribute__((nonnull (1)));
    140 extern void __cfa_io_idle( processor * ) __attribute__((nonnull (1)));
    141 
    142 #if defined(CFA_WITH_IO_URING_IDLE)
    143         extern bool __kernel_read(processor * proc, io_future_t & future, iovec &, int fd);
    144 #endif
     140
    145141
    146142extern void __disable_interrupts_hard();
     
    162158        verify(this);
    163159
    164         /* paranoid */ verify( this->idle_wctx.ftr   != 0p );
    165         /* paranoid */ verify( this->idle_wctx.rdbuf != 0p );
    166 
    167         // used for idle sleep when io_uring is present
    168         // mark it as already fulfilled so we know if there is a pending request or not
    169         this->idle_wctx.ftr->self.ptr = 1p;
    170 
    171160        __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
    172161        #if !defined(__CFA_NO_STATISTICS__)
     
    291280        /* paranoid */ verify( ! __preemption_enabled() );
    292281        /* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
    293         /* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next );
     282        /* paranoid */ verifyf( thrd_dst->rdy_link.next == 0p, "Expected null got %p", thrd_dst->rdy_link.next );
    294283        __builtin_prefetch( thrd_dst->context.SP );
    295284
     
    321310                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
    322311                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
     312                /* paranoid */ verify( __atomic_exchange_n( &thrd_dst->executing, this, __ATOMIC_SEQ_CST) == 0p );
    323313                /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
    324314
     
    332322
    333323                /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
     324                /* paranoid */ verify( __atomic_exchange_n( &thrd_dst->executing, 0p, __ATOMIC_SEQ_CST) == this );
    334325                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
    335326                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
     327                /* paranoid */ verify( thrd_dst->state != Halted );
    336328                /* paranoid */ verify( thrd_dst->context.SP );
    337                 /* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
    338329                /* paranoid */ verify( kernelTLS().this_thread == thrd_dst );
    339330                /* paranoid */ verify( ! __preemption_enabled() );
     
    452443                                        "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
    453444        /* paranoid */ #endif
    454         /* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
     445        /* paranoid */ verifyf( thrd->rdy_link.next == 0p, "Expected null got %p", thrd->rdy_link.next );
    455446        /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary );
    456447
     
    600591                /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );
    601592
    602                 thrd->state = Halting;
    603593                if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
    604594                if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
    605595                if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }
     596
     597                thrd->state = Halting;
     598                thrd->ticket = TICKET_DEAD;
    606599
    607600                // Leave the thread
     
    624617                // If that is the case, abandon the preemption.
    625618                bool preempted = false;
    626                 if(thrd->link.next == 0p) {
     619                if(thrd->rdy_link.next == 0p) {
    627620                        preempted = true;
    628621                        thrd->preempted = reason;
     
    726719
    727720
    728         #if !defined(CFA_WITH_IO_URING_IDLE)
    729                 #if !defined(__CFA_NO_STATISTICS__)
    730                         if(this->print_halts) {
    731                                 __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
     721        #if !defined(__CFA_NO_STATISTICS__)
     722                if(this->print_halts) {
     723                        __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
     724                }
     725        #endif
     726
     727        __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
     728
     729        {
     730                eventfd_t val;
     731                ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
     732                if(ret < 0) {
     733                        switch((int)errno) {
     734                        case EAGAIN:
     735                        #if EAGAIN != EWOULDBLOCK
     736                                case EWOULDBLOCK:
     737                        #endif
     738                        case EINTR:
     739                                // No need to do anything special here, just assume it's a legitimate wake-up
     740                                break;
     741                        default:
     742                                abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
    732743                        }
    733                 #endif
    734 
    735                 __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
    736 
    737                 {
    738                         eventfd_t val;
    739                         ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
    740                         if(ret < 0) {
    741                                 switch((int)errno) {
    742                                 case EAGAIN:
    743                                 #if EAGAIN != EWOULDBLOCK
    744                                         case EWOULDBLOCK:
    745                                 #endif
    746                                 case EINTR:
    747                                         // No need to do anything special here, just assume it's a legitimate wake-up
    748                                         break;
    749                                 default:
    750                                         abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
    751                                 }
    752                         }
    753                 }
    754 
    755                 #if !defined(__CFA_NO_STATISTICS__)
    756                         if(this->print_halts) {
    757                                 __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
    758                         }
    759                 #endif
    760         #else
    761                 __cfa_io_idle( this );
     744                }
     745        }
     746
     747        #if !defined(__CFA_NO_STATISTICS__)
     748                if(this->print_halts) {
     749                        __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
     750                }
    762751        #endif
    763752}
     
    775764                insert_first(this.idles, proc);
    776765
     766                // update the pointer to the head wait context, which should now point to this proc.
    777767                __atomic_store_n(&this.fdw, &proc.idle_wctx, __ATOMIC_SEQ_CST);
    778768        unlock( this );
     
    791781
    792782                {
     783                        // update the pointer to the head wait context
    793784                        struct __fd_waitctx * wctx = 0;
    794785                        if(!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx;
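
With the CFA_WITH_IO_URING_IDLE variant gone, the idle path above always parks by blocking in read() on the processor's eventfd and treats EAGAIN/EWOULDBLOCK/EINTR as legitimate wake-ups. A self-contained C sketch of that eventfd park/wake handshake (single process, with the write issued up front purely so the demo does not block):

        #include <sys/eventfd.h>
        #include <unistd.h>
        #include <errno.h>
        #include <string.h>
        #include <stdio.h>

        int main(void) {
            /* One eventfd per idle processor: read() blocks until a waker writes. */
            int evfd = eventfd(0, 0);
            if (evfd < 0) { perror("eventfd"); return 1; }

            /* Waker side (normally another thread): bump the counter to unpark. */
            if (eventfd_write(evfd, 1) < 0) { perror("eventfd_write"); return 1; }

            /* Sleeper side: blocks until the counter is non-zero, then clears it. */
            eventfd_t val = 0;
            ssize_t ret = read(evfd, &val, sizeof(val));
            if (ret < 0) {
                if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) {
                    /* spurious wake-up: a real loop would simply go around again */
                } else {
                    fprintf(stderr, "idle read failed: %s\n", strerror(errno));
                    return 1;
                }
            }
            printf("woken, counter was %llu\n", (unsigned long long)val);
            close(evfd);
            return 0;
        }
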
  • libcfa/src/concurrency/kernel.hfa

    rb77f0e1 r63be3387  
    6464        // 1 - means the proc should wake-up immediately
    6565        // FD - means the proc is going asleep and should be woken by writing to the FD.
     66        //      The FD value should always be the evfd field just below.
    6667        volatile int sem;
    6768
     
    6970        int evfd;
    7071
    71         // buffer into which the proc will read from evfd
    72         // unused if not using io_uring for idle sleep
    73         void * rdbuf;
    74 
    75         // future use to track the read of the eventfd
    76         // unused if not using io_uring for idle sleep
    77         io_future_t * ftr;
    78 
     72        // Used for debugging, should be removed eventually.
    7973        volatile unsigned long long wake__time;
    8074        volatile unsigned long long sleep_time;
     
    160154// P9_EMBEDDED( processor, dlink(processor) )
    161155static inline tytagref( dlink(processor), dlink(processor) ) ?`inner( processor & this ) {
    162     dlink(processor) & b = this.link;
    163     tytagref( dlink(processor), dlink(processor) ) result = { b };
    164     return result;
     156        dlink(processor) & b = this.link;
     157        tytagref( dlink(processor), dlink(processor) ) result = { b };
     158        return result;
    165159}
    166160
     
    256250        // List of threads
    257251        __spinlock_t thread_list_lock;
    258         __dllist_t(struct thread$) threads;
     252        dlist(struct thread$, struct __thread_user_link) threads;
    259253        unsigned int nthreads;
    260254
     
    269263                io_context_params params;
    270264        } io;
     265
     266        struct {
     267                struct processor ** procs;
     268                unsigned cnt;
     269        } managed;
    271270
    272271        #if !defined(__CFA_NO_STATISTICS__)
     
    298297static inline struct cluster   * active_cluster  () { return publicTLS_get( this_processor )->cltr; }
    299298
     299// set the number of internal processors
     300// these processors are in addition to any explicitly declared processors
     301unsigned set_concurrency( cluster & this, unsigned new_count );
     302
    300303#if !defined(__CFA_NO_STATISTICS__)
    301304        void print_stats_now( cluster & this, int flags );
  • libcfa/src/concurrency/kernel/cluster.cfa

    rb77f0e1 r63be3387  
    483483
    484484        // We add a boat-load of assertions here because the anchor code is very fragile
    485         /* paranoid */ _Static_assert( offsetof( thread$, link ) == nested_offsetof(__intrusive_lane_t, l.anchor) );
    486         /* paranoid */ verify( offsetof( thread$, link ) == nested_offsetof(__intrusive_lane_t, l.anchor) );
    487         /* paranoid */ verify( ((uintptr_t)( mock_head(this) ) + offsetof( thread$, link )) == (uintptr_t)(&this.l.anchor) );
    488         /* paranoid */ verify( &mock_head(this)->link.next == &this.l.anchor.next );
    489         /* paranoid */ verify( &mock_head(this)->link.ts   == &this.l.anchor.ts   );
    490         /* paranoid */ verify( mock_head(this)->link.next == 0p );
    491         /* paranoid */ verify( mock_head(this)->link.ts   == MAX );
     485        /* paranoid */ _Static_assert( offsetof( thread$, rdy_link ) == nested_offsetof(__intrusive_lane_t, l.anchor) );
     486        /* paranoid */ verify( offsetof( thread$, rdy_link ) == nested_offsetof(__intrusive_lane_t, l.anchor) );
     487        /* paranoid */ verify( ((uintptr_t)( mock_head(this) ) + offsetof( thread$, rdy_link )) == (uintptr_t)(&this.l.anchor) );
     488        /* paranoid */ verify( &mock_head(this)->rdy_link.next == &this.l.anchor.next );
     489        /* paranoid */ verify( &mock_head(this)->rdy_link.ts   == &this.l.anchor.ts   );
     490        /* paranoid */ verify( mock_head(this)->rdy_link.next == 0p );
     491        /* paranoid */ verify( mock_head(this)->rdy_link.ts   == MAX );
    492492        /* paranoid */ verify( mock_head(this) == this.l.prev );
    493493        /* paranoid */ verify( __alignof__(__intrusive_lane_t) == 64 );
  • libcfa/src/concurrency/kernel/private.hfa

    rb77f0e1 r63be3387  
    2020#endif
    2121
     22#include <signal.h>
     23
    2224#include "kernel.hfa"
    2325#include "thread.hfa"
     
    3941}
    4042
    41 // Defines whether or not we *want* to use io_uring_enter as the idle_sleep blocking call
    42 // #define CFA_WANT_IO_URING_IDLE
    43 
    44 // Defines whether or not we *can* use io_uring_enter as the idle_sleep blocking call
    45 #if defined(CFA_WANT_IO_URING_IDLE) && defined(CFA_HAVE_LINUX_IO_URING_H)
    46         #if defined(CFA_HAVE_IORING_OP_READ) || (defined(CFA_HAVE_READV) && defined(CFA_HAVE_IORING_OP_READV))
    47                 #define CFA_WITH_IO_URING_IDLE
    48         #endif
    49 #endif
    50 
    5143// #define READYQ_USE_LINEAR_AVG
    5244#define READYQ_USE_LOGDBL_AVG
     
    6355#endif
    6456
     57extern "C" {
     58        __attribute__((visibility("protected"))) int __cfaabi_pthread_create(pthread_t *_thread, const pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
     59        __attribute__((visibility("protected"))) int __cfaabi_pthread_join(pthread_t _thread, void **retval);
     60        __attribute__((visibility("protected"))) pthread_t __cfaabi_pthread_self(void);
     61        __attribute__((visibility("protected"))) int __cfaabi_pthread_attr_init(pthread_attr_t *attr);
     62        __attribute__((visibility("protected"))) int __cfaabi_pthread_attr_destroy(pthread_attr_t *attr);
     63        __attribute__((visibility("protected"))) int __cfaabi_pthread_attr_setstack( pthread_attr_t *attr, void *stackaddr, size_t stacksize );
     64        __attribute__((visibility("protected"))) int __cfaabi_pthread_attr_getstacksize( const pthread_attr_t *attr, size_t *stacksize );
     65        __attribute__((visibility("protected"))) int __cfaabi_pthread_sigqueue(pthread_t _thread, int sig, const union sigval value);
     66        __attribute__((visibility("protected"))) int __cfaabi_pthread_sigmask( int how, const sigset_t *set, sigset_t *oset);
     67}
     68
    6569//-----------------------------------------------------------------------------
    6670// Scheduler
     
    153157#define TICKET_RUNNING ( 0) // thread is running
    154158#define TICKET_UNBLOCK ( 1) // thread should ignore next block
     159#define TICKET_DEAD    (0xDEAD) // thread should never be unparked
    155160
    156161//-----------------------------------------------------------------------------
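
These __cfaabi_pthread_* declarations give the runtime its own protected-visibility entry points into libpthread, and the call sites throughout this changeset switch over to them. Their definitions are not part of the hunks shown here, so the forwarding wrappers below are only a guess at the shape, not the actual implementation:

        #include <pthread.h>

        /* Hypothetical forwarding wrappers (the real definitions are not shown in
         * this changeset).  Protected visibility keeps the symbols from being
         * interposed, so the runtime reaches the libpthread it was built against
         * even if the application wraps pthread_create itself. */
        __attribute__((visibility("protected")))
        int __cfaabi_pthread_attr_init(pthread_attr_t * attr) {
            return pthread_attr_init(attr);
        }

        __attribute__((visibility("protected")))
        int __cfaabi_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
                                    void * (*start_routine)(void *), void * arg) {
            return pthread_create(thread, attr, start_routine, arg);
        }
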
  • libcfa/src/concurrency/kernel/startup.cfa

    rb77f0e1 r63be3387  
    1616#define __cforall_thread__
    1717#define _GNU_SOURCE
     18
     19// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__
    1820
    1921// C Includes
     
    113115KERNEL_STORAGE(thread$,              mainThread);
    114116KERNEL_STORAGE(__stack_t,            mainThreadCtx);
    115 // KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
    116 KERNEL_STORAGE(eventfd_t,            mainIdleEventFd);
    117 KERNEL_STORAGE(io_future_t,          mainIdleFuture);
    118117#if !defined(__CFA_NO_STATISTICS__)
    119118KERNEL_STORAGE(__stats_t, mainProcStats);
     
    222221                ( this.runner ){};
    223222                init( this, "Main Processor", *mainCluster, 0p );
    224                 kernel_thread = pthread_self();
     223                kernel_thread = __cfaabi_pthread_self();
    225224
    226225                runner{ &this };
     
    232231        mainProcessor = (processor *)&storage_mainProcessor;
    233232        (*mainProcessor){};
    234 
    235         mainProcessor->idle_wctx.rdbuf = &storage_mainIdleEventFd;
    236         mainProcessor->idle_wctx.ftr   = (io_future_t*)&storage_mainIdleFuture;
    237         /* paranoid */ verify( sizeof(storage_mainIdleEventFd) == sizeof(eventfd_t) );
    238233
    239234        __cfa_io_start( mainProcessor );
     
    283278}
    284279
     280extern "C"{
     281        void pthread_delete_kernel_threads_();
     282}
     283
     284
    285285static void __kernel_shutdown(void) {
    286286        if(!cfa_main_returned) return;
     287
     288        //delete kernel threads for pthread_concurrency
     289        pthread_delete_kernel_threads_();
     290
    287291        /* paranoid */ verify( __preemption_enabled() );
    288292        disable_interrupts();
     
    327331
    328332                /* paranoid */ verify( this.do_terminate == true );
    329                 __cfaabi_dbg_print_safe("Kernel : destroyed main processor context %p\n", &runner);
     333                __cfadbg_print_safe(runtime_core, "Kernel : destroyed main processor context %p\n", &runner);
    330334        }
    331335
     
    373377        register_tls( proc );
    374378
    375         // used for idle sleep when io_uring is present
    376         io_future_t future;
    377         eventfd_t idle_buf;
    378         proc->idle_wctx.ftr = &future;
    379         proc->idle_wctx.rdbuf = &idle_buf;
    380 
    381 
    382379        // SKULLDUGGERY: We want to create a context for the processor coroutine
    383380        // which is needed for the 2-step context switch. However, there is no reason
     
    388385        (proc->runner){ proc, &info };
    389386
    390         __cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);
     387        __cfadbg_print_safe(runtime_core, "Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);
    391388
    392389        //Set global state
     
    514511        self_mon.recursion = 1;
    515512        self_mon_p = &self_mon;
    516         link.next = 0p;
    517         link.ts   = MAX;
     513        rdy_link.next = 0p;
     514        rdy_link.ts   = MAX;
    518515        preferred = ready_queue_new_preferred();
    519516        last_proc = 0p;
    520517        random_state = __global_random_mask ? __global_random_prime : __global_random_prime ^ rdtscl();
    521518        #if defined( __CFA_WITH_VERIFY__ )
     519                executing = 0p;
    522520                canary = 0x0D15EA5E0D15EA5Ep;
    523521        #endif
    524522
    525         node.next = 0p;
    526         node.prev = 0p;
    527523        doregister(curr_cluster, this);
    528524
     
    647643        #endif
    648644
    649         threads{ __get };
     645        threads{};
    650646
    651647        io.arbiter = create();
    652648        io.params = io_params;
     649
     650        managed.procs = 0p;
     651        managed.cnt = 0;
    653652
    654653        doregister(this);
     
    667666
    668667void ^?{}(cluster & this) libcfa_public {
     668        set_concurrency( this, 0 );
     669
    669670        destroy(this.io.arbiter);
    670671
     
    722723        lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
    723724        cltr->nthreads += 1;
    724         push_front(cltr->threads, thrd);
     725        insert_first(cltr->threads, thrd);
    725726        unlock    (cltr->thread_list_lock);
    726727}
     
    728729void unregister( cluster * cltr, thread$ & thrd ) {
    729730        lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
    730         remove(cltr->threads, thrd );
    731         cltr->nthreads -= 1;
     731        {
     732                tytagref( dlink(thread$), dlink(thread$) ) ?`inner( thread$ & this ) = void;
     733                with( DLINK_VIA( thread$, struct __thread_user_link ) )
     734                        remove( thrd );
     735                cltr->nthreads -= 1;
     736        }
    732737        unlock(cltr->thread_list_lock);
    733738}
     
    777782        pthread_attr_t attr;
    778783
    779         check( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute
     784        check( __cfaabi_pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute
    780785
    781786        size_t stacksize = max( PTHREAD_STACK_MIN, DEFAULT_STACK_SIZE );
     
    804809        #endif
    805810
    806         check( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" );
    807         check( pthread_create( pthread, &attr, start, arg ), "pthread_create" );
     811        check( __cfaabi_pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" );
     812        check( __cfaabi_pthread_create( pthread, &attr, start, arg ), "pthread_create" );
    808813        return stack;
    809814}
    810815
    811816void __destroy_pthread( pthread_t pthread, void * stack, void ** retval ) {
    812         int err = pthread_join( pthread, retval );
     817        int err = __cfaabi_pthread_join( pthread, retval );
    813818        if( err != 0 ) abort("KERNEL ERROR: joining pthread %p caused error %s\n", (void*)pthread, strerror(err));
    814819
     
    816821                pthread_attr_t attr;
    817822
    818                 check( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute
     823                check( __cfaabi_pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute
    819824
    820825                size_t stacksize;
    821826                // default stack size, normally defined by shell limit
    822                 check( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" );
     827                check( __cfaabi_pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" );
    823828                assert( stacksize >= PTHREAD_STACK_MIN );
    824829                stacksize += __page_size;
     
    838843}
    839844
     845unsigned set_concurrency( cluster & this, unsigned new ) libcfa_public {
     846        unsigned old = this.managed.cnt;
     847
     848        __cfadbg_print_safe(runtime_core, "Kernel : resizing cluster from %u to %u\n", old, (unsigned)new);
     849
     850        // Delete all the old unneeded procs
     851        if(old > new) for(i; (unsigned)new ~ old) {
     852                __cfadbg_print_safe(runtime_core, "Kernel : destroying %u\n", i);
     853                delete( this.managed.procs[i] );
     854        }
     855
     856        // Allocate new array (uses realloc and memcpies the data)
     857        this.managed.procs = alloc( new, this.managed.procs`realloc );
     858        this.managed.cnt = new;
     859
     860        // Create the desired new procs
     861        if(old < new) for(i; old ~ new) {
     862                __cfadbg_print_safe(runtime_core, "Kernel : constructing %u\n", i);
     863                (*(this.managed.procs[i] = alloc())){ this };
     864        }
     865
     866        // return the old count
     867        return old;
     868}
     869
    840870#if defined(__CFA_WITH_VERIFY__)
    841871static bool verify_fwd_bck_rng(void) {
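
The new set_concurrency above destroys surplus managed processors, reallocates the procs array, and constructs any newly requested ones, returning the previous count; the cluster destructor then calls set_concurrency(this, 0) to tear them all down. A rough plain-C analogue of that resize pattern, with a stand-in worker type:

        #include <stdlib.h>
        #include <stdio.h>

        struct worker { unsigned id; };   /* stand-in for a managed processor */

        struct pool {
            struct worker ** procs;
            unsigned cnt;
        };

        /* Destroy surplus workers, resize the array, construct new ones;
         * return the old count. */
        static unsigned set_concurrency(struct pool * p, unsigned new_count) {
            unsigned old = p->cnt;

            for (unsigned i = new_count; i < old; i++)   /* shrink: delete extras */
                free(p->procs[i]);

            if (new_count > 0) {
                p->procs = realloc(p->procs, new_count * sizeof(struct worker *));
            } else {
                free(p->procs);
                p->procs = NULL;
            }
            p->cnt = new_count;

            for (unsigned i = old; i < new_count; i++) { /* grow: create new ones */
                p->procs[i] = malloc(sizeof(struct worker));
                p->procs[i]->id = i;
            }
            return old;
        }

        int main(void) {
            struct pool p = { 0, 0 };
            set_concurrency(&p, 4);
            printf("managed workers: %u\n", p.cnt);
            set_concurrency(&p, 0);   /* like the cluster destructor */
            return 0;
        }
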
  • libcfa/src/concurrency/locks.hfa

    rb77f0e1 r63be3387  
    2121
    2222#include "bits/weakso_locks.hfa"
    23 #include "containers/queueLockFree.hfa"
     23#include "containers/lockfree.hfa"
    2424#include "containers/list.hfa"
    2525
     
    498498}
    499499
    500 static inline size_t on_wait(simple_owner_lock & this) with(this) { 
     500static inline size_t on_wait(simple_owner_lock & this) with(this) {
    501501        lock( lock __cfaabi_dbg_ctx2 );
    502502        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
  • libcfa/src/concurrency/monitor.cfa

    rb77f0e1 r63be3387  
    122122
    123123                // Some one else has the monitor, wait in line for it
    124                 /* paranoid */ verify( thrd->link.next == 0p );
     124                /* paranoid */ verify( thrd->user_link.next == 0p );
    125125                append( this->entry_queue, thrd );
    126                 /* paranoid */ verify( thrd->link.next == 1p );
     126                /* paranoid */ verify( thrd->user_link.next == 1p );
    127127
    128128                unlock( this->lock );
     
    233233
    234234                // Some one else has the monitor, wait in line for it
    235                 /* paranoid */ verify( thrd->link.next == 0p );
     235                /* paranoid */ verify( thrd->user_link.next == 0p );
    236236                append( this->entry_queue, thrd );
    237                 /* paranoid */ verify( thrd->link.next == 1p );
     237                /* paranoid */ verify( thrd->user_link.next == 1p );
    238238                unlock( this->lock );
    239239
     
    791791        thread$ * new_owner = pop_head( this->entry_queue );
    792792        /* paranoid */ verifyf( !this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
    793         /* paranoid */ verify( !new_owner || new_owner->link.next == 0p );
     793        /* paranoid */ verify( !new_owner || new_owner->user_link.next == 0p );
    794794        __set_owner( this, new_owner );
    795795
     
    935935        __queue_t(thread$) & entry_queue = monitors[0]->entry_queue;
    936936
     937        #if defined( __CFA_WITH_VERIFY__ )
     938                thread$ * last = 0p;
     939        #endif
    937940        // For each thread in the entry-queue
    938941        for(    thread$ ** thrd_it = &entry_queue.head;
    939942                (*thrd_it) != 1p;
    940                 thrd_it = &(*thrd_it)->link.next
     943                thrd_it = &get_next(**thrd_it)
    941944        ) {
     945                thread$ * curr = *thrd_it;
     946
     947                /* paranoid */ verifyf( !last || last->user_link.next == curr, "search not making progress, from %p (%p) to %p", last, last->user_link.next, curr );
     948                /* paranoid */ verifyf( curr != last, "search not making progress, from %p to %p", last, curr );
     949
    942950                // For each acceptable check if it matches
    943951                int i = 0;
     
    946954                for( __acceptable_t * it = begin; it != end; it++, i++ ) {
    947955                        // Check if we have a match
    948                         if( *it == (*thrd_it)->monitors ) {
     956                        if( *it == curr->monitors ) {
    949957
    950958                                // If we have a match return it
     
    953961                        }
    954962                }
     963
     964                #if defined( __CFA_WITH_VERIFY__ )
     965                        last = curr;
     966                #endif
    955967        }
    956968
     
    10251037
    10261038                // Some one else has the monitor, wait in line for it
    1027                 /* paranoid */ verify( thrd->link.next == 0p );
     1039                /* paranoid */ verify( thrd->user_link.next == 0p );
    10281040                append( this->entry_queue, thrd );
    1029                 /* paranoid */ verify( thrd->link.next == 1p );
     1041                /* paranoid */ verify( thrd->user_link.next == 1p );
    10301042
    10311043                unlock( this->lock );
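
The entry-queue scan in this file now advances through get_next and, under __CFA_WITH_VERIFY__, keeps a last pointer to assert the search makes progress. The loop iterates via a pointer to the link field itself, which also makes in-place unlinking cheap; a small C sketch of that style over an intrusive singly-linked queue (the 1-valued sentinel mirrors the 1p terminator, other names are illustrative):

        #include <stdio.h>

        struct thd {
            int id;
            struct thd * next;   /* intrusive queue link */
        };

        #define END ((struct thd *)1)   /* sentinel, akin to the 1p terminator */

        int main(void) {
            struct thd c = { 3, END }, b = { 2, &c }, a = { 1, &b };
            struct thd * head = &a;

            /* Walk via a pointer to the link field itself; the matching node can
             * then be unlinked in place without tracking a separate prev pointer. */
            for (struct thd ** it = &head; *it != END; it = &(*it)->next) {
                printf("visiting %d\n", (*it)->id);
                if ((*it)->id == 2) {
                    *it = (*it)->next;   /* unlink node 2 from the queue */
                    break;
                }
            }

            for (struct thd * t = head; t != END; t = t->next)
                printf("remaining %d\n", t->id);
            return 0;
        }
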
  • libcfa/src/concurrency/preemption.cfa

    rb77f0e1 r63be3387  
    352352        sigset_t oldset;
    353353        int ret;
    354         ret = pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset);  // workaround trac#208: cast should be unnecessary
     354        ret = __cfaabi_pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset);  // workaround trac#208: cast should be unnecessary
    355355        if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
    356356
     
    385385        sigaddset( &mask, sig );
    386386
    387         if ( pthread_sigmask( SIG_UNBLOCK, &mask, 0p ) == -1 ) {
     387        if ( __cfaabi_pthread_sigmask( SIG_UNBLOCK, &mask, 0p ) == -1 ) {
    388388            abort( "internal error, pthread_sigmask" );
    389389        }
     
    396396        sigaddset( &mask, sig );
    397397
    398         if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
     398        if ( __cfaabi_pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
    399399                abort( "internal error, pthread_sigmask" );
    400400        }
     
    404404static void preempt( processor * this ) {
    405405        sigval_t value = { PREEMPT_NORMAL };
    406         pthread_sigqueue( this->kernel_thread, SIGUSR1, value );
     406        __cfaabi_pthread_sigqueue( this->kernel_thread, SIGUSR1, value );
    407407}
    408408
     
    415415        sigset_t oldset;
    416416        int ret;
    417         ret = pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset);  // workaround trac#208: cast should be unnecessary
     417        ret = __cfaabi_pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset);  // workaround trac#208: cast should be unnecessary
    418418        if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
    419419
     
    434434        sigset_t oldset;
    435435        int ret;
    436         ret = pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset);  // workaround trac#208: cast should be unnecessary
     436        ret = __cfaabi_pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset);  // workaround trac#208: cast should be unnecessary
    437437        if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
    438438
     
    505505        sigval val;
    506506        val.sival_int = 0;
    507         pthread_sigqueue( alarm_thread, SIGALRM, val );
     507        __cfaabi_pthread_sigqueue( alarm_thread, SIGALRM, val );
    508508
    509509        // Wait for the preemption thread to finish
     
    579579        static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" );
    580580        #endif
    581         if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), 0p ) == -1 ) {
     581        if ( __cfaabi_pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), 0p ) == -1 ) {
    582582                abort( "internal error, sigprocmask" );
    583583        }
     
    607607        sigset_t mask;
    608608        sigfillset(&mask);
    609         if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
     609        if ( __cfaabi_pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
    610610            abort( "internal error, pthread_sigmask" );
    611611        }
  • libcfa/src/concurrency/ready_subqueue.hfa

    rb77f0e1 r63be3387  
    2525static inline thread$ * mock_head(const __intrusive_lane_t & this) {
    2626        thread$ * rhead = (thread$ *)(
    27                 (uintptr_t)( &this.l.anchor ) - __builtin_offsetof( thread$, link )
     27                (uintptr_t)( &this.l.anchor ) - __builtin_offsetof( thread$, rdy_link )
    2828        );
    2929        return rhead;
     
    3434static inline void push( __intrusive_lane_t & this, thread$ * node ) {
    3535        /* paranoid */ verify( this.l.lock );
    36         /* paranoid */ verify( node->link.next == 0p );
    37         /* paranoid */ verify( __atomic_load_n(&node->link.ts, __ATOMIC_RELAXED) == MAX  );
    38         /* paranoid */ verify( this.l.prev->link.next == 0p );
    39         /* paranoid */ verify( __atomic_load_n(&this.l.prev->link.ts, __ATOMIC_RELAXED)   == MAX  );
     36        /* paranoid */ verify( node->rdy_link.next == 0p );
     37        /* paranoid */ verify( __atomic_load_n(&node->rdy_link.ts, __ATOMIC_RELAXED) == MAX  );
     38        /* paranoid */ verify( this.l.prev->rdy_link.next == 0p );
     39        /* paranoid */ verify( __atomic_load_n(&this.l.prev->rdy_link.ts, __ATOMIC_RELAXED)   == MAX  );
    4040        if( this.l.anchor.next == 0p ) {
    4141                /* paranoid */ verify( this.l.anchor.next == 0p );
     
    5151
    5252        // Get the relevant nodes locally
    53         this.l.prev->link.next = node;
    54         __atomic_store_n(&this.l.prev->link.ts, rdtscl(), __ATOMIC_RELAXED);
     53        this.l.prev->rdy_link.next = node;
     54        __atomic_store_n(&this.l.prev->rdy_link.ts, rdtscl(), __ATOMIC_RELAXED);
    5555        this.l.prev = node;
    5656        #if !defined(__CFA_NO_STATISTICS__)
     
    7070        // Get the relevant nodes locally
    7171        thread$ * node = this.l.anchor.next;
    72         this.l.anchor.next = node->link.next;
    73         __atomic_store_n(&this.l.anchor.ts, __atomic_load_n(&node->link.ts, __ATOMIC_RELAXED), __ATOMIC_RELAXED);
     72        this.l.anchor.next = node->rdy_link.next;
     73        __atomic_store_n(&this.l.anchor.ts, __atomic_load_n(&node->rdy_link.ts, __ATOMIC_RELAXED), __ATOMIC_RELAXED);
    7474        bool is_empty = this.l.anchor.next == 0p;
    75         node->link.next = 0p;
    76         __atomic_store_n(&node->link.ts, ULLONG_MAX, __ATOMIC_RELAXED);
     75        node->rdy_link.next = 0p;
     76        __atomic_store_n(&node->rdy_link.ts, ULLONG_MAX, __ATOMIC_RELAXED);
    7777        #if !defined(__CFA_NO_STATISTICS__)
    7878                this.l.cnt--;
     
    8383
    8484        unsigned long long ats = __atomic_load_n(&this.l.anchor.ts, __ATOMIC_RELAXED);
    85         /* paranoid */ verify( node->link.next == 0p );
    86         /* paranoid */ verify( __atomic_load_n(&node->link.ts , __ATOMIC_RELAXED) == MAX );
    87         /* paranoid */ verify( __atomic_load_n(&node->link.ts , __ATOMIC_RELAXED) != 0   );
     85        /* paranoid */ verify( node->rdy_link.next == 0p );
     86        /* paranoid */ verify( __atomic_load_n(&node->rdy_link.ts , __ATOMIC_RELAXED) == MAX );
     87        /* paranoid */ verify( __atomic_load_n(&node->rdy_link.ts , __ATOMIC_RELAXED) != 0   );
    8888        /* paranoid */ verify( ats != 0 );
    8989        /* paranoid */ verify( (ats == MAX) == is_empty );
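
mock_head above manufactures a thread$ pointer whose rdy_link field aliases the lane's anchor by subtracting offsetof(thread$, rdy_link); the cluster.cfa assertions earlier in this changeset check exactly that arithmetic. This is the classic container-of idiom, sketched here in plain C (when the recovered container is fake, as it is for the anchor, only its link field may be touched):

        #include <stddef.h>
        #include <stdio.h>

        struct link { struct node * next; unsigned long long ts; };

        struct node {
            int payload;
            struct link rdy_link;   /* field the queue actually threads through */
        };

        /* Recover the enclosing node from a pointer to its embedded link field. */
        #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

        int main(void) {
            struct node n = { 42, { 0, 0 } };
            struct link * l = &n.rdy_link;

            struct node * back = container_of(l, struct node, rdy_link);
            printf("%d\n", back->payload);             /* prints 42 */
            printf("same object: %d\n", back == &n);   /* prints 1 */
            return 0;
        }
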
  • libcfa/src/concurrency/thread.cfa

    rb77f0e1 r63be3387  
    4444        self_mon_p = &self_mon;
    4545        curr_cluster = &cl;
    46         link.next = 0p;
    47         link.ts   = MAX;
     46        rdy_link.next = 0p;
     47        rdy_link.ts   = MAX;
    4848        preferred = ready_queue_new_preferred();
    4949        last_proc = 0p;
    5050        random_state = __global_random_mask ? __global_random_prime : __global_random_prime ^ rdtscl();
    5151        #if defined( __CFA_WITH_VERIFY__ )
     52                executing = 0p;
    5253                canary = 0x0D15EA5E0D15EA5Ep;
    5354        #endif
    54 
    55         node.next = 0p;
    56         node.prev = 0p;
    5755
    5856        clh_node = malloc( );
     
    177175
    178176//-----------------------------------------------------------------------------
     177bool migrate( thread$ * thrd, struct cluster & cl ) {
     178
     179        monitor$ * tmon = get_monitor(thrd);
     180        monitor$ * __monitors[] = { tmon };
     181        monitor_guard_t __guard = { __monitors, 1 };
     182
     183
     184        {
     185                // if nothing needs to be done, return false
     186                if( thrd->curr_cluster == &cl ) return false;
     187
     188                // are we migrating ourself?
     189                const bool local = thrd == active_thread();
     190
     191                /* paranoid */ verify( !local || &cl != active_cluster() );
     192                /* paranoid */ verify( !local || thrd->curr_cluster == active_cluster() );
     193                /* paranoid */ verify( !local || thrd->curr_cluster == active_processor()->cltr );
     194                /* paranoid */ verify( local || tmon->signal_stack.top->owner->waiting_thread == thrd );
     195                /* paranoid */ verify( local || tmon->signal_stack.top );
     196
     197                // make sure we aren't interrupted while doing this
     198                // not as important if we aren't local
     199                disable_interrupts();
     200
     201                // actually move the thread
     202                unregister( thrd->curr_cluster, *thrd );
     203                thrd->curr_cluster = &cl;
     204                doregister( thrd->curr_cluster, *thrd );
     205
     206                // restore interrupts
     207                enable_interrupts();
     208
     209                // if this is the local thread, we are still running on the old cluster
     210                if(local) yield();
     211
     212                /* paranoid */ verify( !local || &cl == active_cluster() );
     213                /* paranoid */ verify( !local || thrd->curr_cluster == active_cluster() );
     214                /* paranoid */ verify( !local || thrd->curr_cluster == active_processor()->cltr );
     215                /* paranoid */ verify(  local || tmon->signal_stack.top );
     216                /* paranoid */ verify(  local || tmon->signal_stack.top->owner->waiting_thread == thrd );
     217
     218                return true;
     219        }
     220}
     221
     222//-----------------------------------------------------------------------------
    179223#define GENERATOR LCG
    180224
  • libcfa/src/concurrency/thread.hfa

    rb77f0e1 r63be3387  
    132132
    133133//----------
     134// misc
     135bool migrate( thread$ * thrd, struct cluster & cl );
     136
     137forall( T & | is_thread(T) )
     138static inline bool migrate( T & mutex thrd, struct cluster & cl ) { return migrate( &(thread&)thrd, cl ); }
     139
     140
     141//----------
    134142// prng
    135143static inline {