Changeset 65deb18 for src


Timestamp: Jan 30, 2018, 3:52:42 PM (6 years ago)
Author: Thierry Delisle <tdelisle@…>
Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
Children: f792cb8
Parents: 5b51f5e
Message: Kernel now properly uses with statements
Location: src/libcfa
Files: 6 edited

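Across all six files, the change replaces explicit this.member / this->member qualification with CFA's with clause, which opens an aggregate so its members can be named directly inside a routine body. A minimal sketch of the idiom, using a hypothetical counter type that is not part of this changeset:

    struct counter { int value; int step; };

    // old style: every member access is qualified through the parameter
    void reset_qualified( counter & this ) {
            this.value = 0;
            this.step  = 1;
    }

    // new style: with( this ) opens the aggregate, so members are named directly
    void reset_with( counter & this ) with( this ) {
            value = 0;
            step  = 1;
    }

The two routines are equivalent; the diffs below apply the same mechanical rewrite to the kernel code.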
  • src/libcfa/bits/containers.h (r5b51f5e → r65deb18)

    @@ -140,15 +140,16 @@
     
     #ifdef __cforall
    +
             forall(dtype T | is_node(T))
    -        static inline void ?{}( __queue(T) & this ) {
    -                (this.head){ NULL };
    -                (this.tail){ &this.head };
    +        static inline void ?{}( __queue(T) & this ) with( this ) {
    +                head{ NULL };
    +                tail{ &head };
             }
     
             forall(dtype T | is_node(T) | sized(T))
    -        static inline void append( __queue(T) & this, T * val ) {
    -                verify(this.tail != NULL);
    -                *this.tail = val;
    -                this.tail = &get_next( *val );
    +        static inline void append( __queue(T) & this, T * val ) with( this ) {
    +                verify(tail != NULL);
    +                *tail = val;
    +                tail = &get_next( *val );
             }
     
    @@ -167,5 +168,5 @@
     
             forall(dtype T | is_node(T) | sized(T))
    -        static inline T * remove( __queue(T) & this, T ** it ) {
    +        static inline T * remove( __queue(T) & this, T ** it ) with( this ) {
                     T * val = *it;
                     verify( val );
    @@ -173,13 +174,20 @@
                     (*it) = get_next( *val );
     
    -                if( this.tail == &get_next( *val ) ) {
    -                        this.tail = it;
    +                if( tail == &get_next( *val ) ) {
    +                        tail = it;
                     }
     
                     get_next( *val ) = NULL;
     
    -                verify( (this.head == NULL) == (&this.head == this.tail) );
    -                verify( *this.tail == NULL );
    +                verify( (head == NULL) == (&head == tail) );
    +                verify( *tail == NULL );
                     return val;
             }
     #endif
    +
    +//-----------------------------------------------------------------------------
    +// Tools
    +//-----------------------------------------------------------------------------
    +#ifdef __cforall
    +
    +#endif
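In the __queue constructor above, head{ NULL } and tail{ &head } are member-constructor calls; the with( this ) clause is what lets them drop the (this.head) / (this.tail) qualification. A hedged sketch of the same pattern on a hypothetical pair type:

    struct pair { int first; int second; };

    // ?{} is the constructor operator; under with( this ) the member
    // constructors are invoked without naming this
    void ?{}( pair & this ) with( this ) {
            first{ 0 };         // constructs this.first
            second{ 0 };        // constructs this.second
    }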
  • src/libcfa/concurrency/coroutine.c (r5b51f5e → r65deb18)

    @@ -118,5 +118,5 @@
     } //ctxSwitchDirect
     
    -void create_stack( coStack_t* this, unsigned int storageSize ) {
    +void create_stack( coStack_t* this, unsigned int storageSize ) with( *this ) {
             //TEMP HACK do this on proper kernel startup
             if(pageSize == 0ul) pageSize = sysconf( _SC_PAGESIZE );
    @@ -124,40 +124,40 @@
             size_t cxtSize = libCeiling( sizeof(machine_context_t), 8 ); // minimum alignment
     
    -        if ( (intptr_t)this->storage == 0 ) {
    -                this->userStack = false;
    -                this->size = libCeiling( storageSize, 16 );
    +        if ( (intptr_t)storage == 0 ) {
    +                userStack = false;
    +                size = libCeiling( storageSize, 16 );
                     // use malloc/memalign because "new" raises an exception for out-of-memory
     
                     // assume malloc has 8 byte alignment so add 8 to allow rounding up to 16 byte alignment
    -                __cfaabi_dbg_debug_do( this->storage = memalign( pageSize, cxtSize + this->size + pageSize ) );
    -                __cfaabi_dbg_no_debug_do( this->storage = malloc( cxtSize + this->size + 8 ) );
    +                __cfaabi_dbg_debug_do( storage = memalign( pageSize, cxtSize + size + pageSize ) );
    +                __cfaabi_dbg_no_debug_do( storage = malloc( cxtSize + size + 8 ) );
     
                     __cfaabi_dbg_debug_do(
    -                        if ( mprotect( this->storage, pageSize, PROT_NONE ) == -1 ) {
    +                        if ( mprotect( storage, pageSize, PROT_NONE ) == -1 ) {
                                     abortf( "(uMachContext &)%p.createContext() : internal error, mprotect failure, error(%d) %s.", this, (int)errno, strerror( (int)errno ) );
                             } // if
                     );
     
    -                if ( (intptr_t)this->storage == 0 ) {
    -                        abortf( "Attempt to allocate %d bytes of storage for coroutine or task execution-state but insufficient memory available.", this->size );
    +                if ( (intptr_t)storage == 0 ) {
    +                        abortf( "Attempt to allocate %d bytes of storage for coroutine or task execution-state but insufficient memory available.", size );
                     } // if
     
    -                __cfaabi_dbg_debug_do( this->limit = (char *)this->storage + pageSize );
    -                __cfaabi_dbg_no_debug_do( this->limit = (char *)libCeiling( (unsigned long)this->storage, 16 ) ); // minimum alignment
    +                __cfaabi_dbg_debug_do( limit = (char *)storage + pageSize );
    +                __cfaabi_dbg_no_debug_do( limit = (char *)libCeiling( (unsigned long)storage, 16 ) ); // minimum alignment
     
             } else {
    -                assertf( ((size_t)this->storage & (libAlign() - 1)) != 0ul, "Stack storage %p for task/coroutine must be aligned on %d byte boundary.", this->storage, (int)libAlign() );
    -                this->userStack = true;
    -                this->size = storageSize - cxtSize;
    +                assertf( ((size_t)storage & (libAlign() - 1)) != 0ul, "Stack storage %p for task/coroutine must be aligned on %d byte boundary.", storage, (int)libAlign() );
    +                userStack = true;
    +                size = storageSize - cxtSize;
     
    -                if ( this->size % 16 != 0u ) this->size -= 8;
    +                if ( size % 16 != 0u ) size -= 8;
     
    -                this->limit = (char *)libCeiling( (unsigned long)this->storage, 16 ); // minimum alignment
    +                limit = (char *)libCeiling( (unsigned long)storage, 16 ); // minimum alignment
             } // if
    -        assertf( this->size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", this->size, MinStackSize );
    +        assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize );
     
    -        this->base = (char *)this->limit + this->size;
    -        this->context = this->base;
    -        this->top = (char *)this->context + cxtSize;
    +        base = (char *)limit + size;
    +        context = base;
    +        top = (char *)context + cxtSize;
     }
     
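create_stack takes a pointer rather than a reference, so the clause dereferences it: with( *this ) opens the pointed-to coStack_t, and each unqualified member stands for this->member. A small sketch with a hypothetical buffer type:

    struct buffer { char * data; int size; };

    // pointer receiver: with( *this ) opens the pointed-to aggregate,
    // so size below is equivalent to this->size
    void shrink( buffer * this, int newSize ) with( *this ) {
            if ( newSize < size ) size = newSize;
    }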
  • src/libcfa/concurrency/kernel.c (r5b51f5e → r65deb18)

    @@ -85,24 +85,24 @@
     }
     
    -void ?{}( coStack_t & this, current_stack_info_t * info) {
    -        this.size = info->size;
    -        this.storage = info->storage;
    -        this.limit = info->limit;
    -        this.base = info->base;
    -        this.context = info->context;
    -        this.top = info->top;
    -        this.userStack = true;
    -}
    -
    -void ?{}( coroutine_desc & this, current_stack_info_t * info) {
    -        (this.stack){ info };
    -        this.name = "Main Thread";
    -        this.errno_ = 0;
    -        this.state = Start;
    -        this.starter = NULL;
    -}
    -
    -void ?{}( thread_desc & this, current_stack_info_t * info) {
    -        (this.self_cor){ info };
    +void ?{}( coStack_t & this, current_stack_info_t * info) with( this ) {
    +        size      = info->size;
    +        storage   = info->storage;
    +        limit     = info->limit;
    +        base      = info->base;
    +        context   = info->context;
    +        top       = info->top;
    +        userStack = true;
    +}
    +
    +void ?{}( coroutine_desc & this, current_stack_info_t * info) with( this ) {
    +        stack{ info };
    +        name = "Main Thread";
    +        errno_ = 0;
    +        state = Start;
    +        starter = NULL;
    +}
    +
    +void ?{}( thread_desc & this, current_stack_info_t * info) with( this ) {
    +        self_cor{ info };
     }
     
    @@ -131,5 +131,5 @@
     void ?{}(processor & this, cluster * cltr) {
             this.cltr = cltr;
    -        (this.terminated){ 0 };
    +        this.terminated{ 0 };
             this.do_terminate = false;
             this.preemption_alarm = NULL;
    @@ -141,5 +141,5 @@
     void ?{}(processor & this, cluster * cltr, processorCtx_t & runner) {
             this.cltr = cltr;
    -        (this.terminated){ 0 };
    +        this.terminated{ 0 };
             this.do_terminate = false;
             this.preemption_alarm = NULL;
    @@ -152,18 +152,18 @@
     }
     
    -void ^?{}(processor & this) {
    -        if( ! this.do_terminate ) {
    +void ^?{}(processor & this) with( this ){
    +        if( ! do_terminate ) {
                     __cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);
    -                this.do_terminate = true;
    -                P( this.terminated );
    -                pthread_join( this.kernel_thread, NULL );
    -        }
    -}
    -
    -void ?{}(cluster & this) {
    -        (this.ready_queue){};
    -        ( this.ready_queue_lock ){};
    -
    -        this.preemption = default_preemption();
    +                do_terminate = true;
    +                P( terminated );
    +                pthread_join( kernel_thread, NULL );
    +        }
    +}
    +
    +void ?{}(cluster & this) with( this ) {
    +        ready_queue{};
    +        ready_queue_lock{};
    +
    +        preemption = default_preemption();
     }
     
    @@ -238,30 +238,30 @@
     // Once a thread has finished running, some of
     // its final actions must be executed from the kernel
    -void finishRunning(processor * this) {
    -        if( this->finish.action_code == Release ) {
    -                unlock( *this->finish.lock );
    -        }
    -        else if( this->finish.action_code == Schedule ) {
    -                ScheduleThread( this->finish.thrd );
    -        }
    -        else if( this->finish.action_code == Release_Schedule ) {
    -                unlock( *this->finish.lock );
    -                ScheduleThread( this->finish.thrd );
    -        }
    -        else if( this->finish.action_code == Release_Multi ) {
    -                for(int i = 0; i < this->finish.lock_count; i++) {
    -                        unlock( *this->finish.locks[i] );
    +void finishRunning(processor * this) with( this->finish ) {
    +        if( action_code == Release ) {
    +                unlock( *lock );
    +        }
    +        else if( action_code == Schedule ) {
    +                ScheduleThread( thrd );
    +        }
    +        else if( action_code == Release_Schedule ) {
    +                unlock( *lock );
    +                ScheduleThread( thrd );
    +        }
    +        else if( action_code == Release_Multi ) {
    +                for(int i = 0; i < lock_count; i++) {
    +                        unlock( *locks[i] );
                     }
             }
    -        else if( this->finish.action_code == Release_Multi_Schedule ) {
    -                for(int i = 0; i < this->finish.lock_count; i++) {
    -                        unlock( *this->finish.locks[i] );
    +        else if( action_code == Release_Multi_Schedule ) {
    +                for(int i = 0; i < lock_count; i++) {
    +                        unlock( *locks[i] );
                     }
    -                for(int i = 0; i < this->finish.thrd_count; i++) {
    -                        ScheduleThread( this->finish.thrds[i] );
    +                for(int i = 0; i < thrd_count; i++) {
    +                        ScheduleThread( thrds[i] );
                     }
             }
             else {
    -                assert(this->finish.action_code == No_Action);
    +                assert(action_code == No_Action);
             }
     }
    @@ -332,16 +332,18 @@
             verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );
     
    -        lock(   this_processor->cltr->ready_queue_lock __cfaabi_dbg_ctx2 );
    -        append( this_processor->cltr->ready_queue, thrd );
    -        unlock( this_processor->cltr->ready_queue_lock );
    -
    -        verify( disable_preempt_count > 0 );
    -}
    -
    -thread_desc * nextThread(cluster * this) {
    -        verify( disable_preempt_count > 0 );
    -        lock( this->ready_queue_lock __cfaabi_dbg_ctx2 );
    -        thread_desc * head = pop_head( this->ready_queue );
    -        unlock( this->ready_queue_lock );
    +        with( *this_processor->cltr ) {
    +                lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
    +                append( ready_queue, thrd );
    +                unlock( ready_queue_lock );
    +        }
    +
    +        verify( disable_preempt_count > 0 );
    +}
    +
    +thread_desc * nextThread(cluster * this) with( *this ) {
    +        verify( disable_preempt_count > 0 );
    +        lock( ready_queue_lock __cfaabi_dbg_ctx2 );
    +        thread_desc * head = pop_head( ready_queue );
    +        unlock( ready_queue_lock );
             verify( disable_preempt_count > 0 );
             return head;
    @@ -359,5 +361,5 @@
             disable_interrupts();
             this_processor->finish.action_code = Release;
    -        this_processor->finish.lock = lock;
    +        this_processor->finish.lock        = lock;
     
             verify( disable_preempt_count > 0 );
    @@ -369,9 +371,7 @@
     
     void BlockInternal( thread_desc * thrd ) {
    -        assert(thrd);
             disable_interrupts();
    -        assert( thrd->self_cor.state != Halted );
             this_processor->finish.action_code = Schedule;
    -        this_processor->finish.thrd = thrd;
    +        this_processor->finish.thrd        = thrd;
     
             verify( disable_preempt_count > 0 );
    @@ -386,6 +386,6 @@
             disable_interrupts();
             this_processor->finish.action_code = Release_Schedule;
    -        this_processor->finish.lock = lock;
    -        this_processor->finish.thrd = thrd;
    +        this_processor->finish.lock        = lock;
    +        this_processor->finish.thrd        = thrd;
     
             verify( disable_preempt_count > 0 );
    @@ -399,6 +399,6 @@
             disable_interrupts();
             this_processor->finish.action_code = Release_Multi;
    -        this_processor->finish.locks = locks;
    -        this_processor->finish.lock_count = count;
    +        this_processor->finish.locks       = locks;
    +        this_processor->finish.lock_count  = count;
     
             verify( disable_preempt_count > 0 );
    @@ -412,8 +412,8 @@
             disable_interrupts();
             this_processor->finish.action_code = Release_Multi_Schedule;
    -        this_processor->finish.locks = locks;
    -        this_processor->finish.lock_count = lock_count;
    -        this_processor->finish.thrds = thrds;
    -        this_processor->finish.thrd_count = thrd_count;
    +        this_processor->finish.locks       = locks;
    +        this_processor->finish.lock_count  = lock_count;
    +        this_processor->finish.thrds       = thrds;
    +        this_processor->finish.thrd_count  = thrd_count;
     
             verify( disable_preempt_count > 0 );
    @@ -427,6 +427,6 @@
             verify( disable_preempt_count > 0 );
             this_processor->finish.action_code = thrd ? Release_Schedule : Release;
    -        this_processor->finish.lock = lock;
    -        this_processor->finish.thrd = thrd;
    +        this_processor->finish.lock        = lock;
    +        this_processor->finish.thrd        = thrd;
     
             suspend();
    @@ -579,29 +579,29 @@
     void ^?{}(semaphore & this) {}
     
    -void P(semaphore & this) {
    -        lock( this.lock __cfaabi_dbg_ctx2 );
    -        this.count -= 1;
    -        if ( this.count < 0 ) {
    +void P(semaphore & this) with( this ){
    +        lock( lock __cfaabi_dbg_ctx2 );
    +        count -= 1;
    +        if ( count < 0 ) {
                     // queue current task
    -                append( this.waiting, (thread_desc *)this_thread );
    +                append( waiting, (thread_desc *)this_thread );
     
                     // atomically release spin lock and block
    -                BlockInternal( &this.lock );
    +                BlockInternal( &lock );
             }
             else {
    -            unlock( this.lock );
    -        }
    -}
    -
    -void V(semaphore & this) {
    +            unlock( lock );
    +        }
    +}
    +
    +void V(semaphore & this) with( this ) {
             thread_desc * thrd = NULL;
    -        lock( this.lock __cfaabi_dbg_ctx2 );
    -        this.count += 1;
    -        if ( this.count <= 0 ) {
    +        lock( lock __cfaabi_dbg_ctx2 );
    +        count += 1;
    +        if ( count <= 0 ) {
                     // remove task at head of waiting list
    -                thrd = pop_head( this.waiting );
    -        }
    -
    -        unlock( this.lock );
    +                thrd = pop_head( waiting );
    +        }
    +
    +        unlock( lock );
     
             // make new owner
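kernel.c also shows two further forms: the clause can open a nested member, as in finishRunning's with( this->finish ), and with can be used as a statement to open an aggregate for just one block, as in ScheduleThread. A hedged sketch with hypothetical types:

    struct totals { int passed; int failed; };
    struct runner { totals finish; };

    // clause on a nested member: unqualified names resolve into this->finish
    void report( runner * this ) with( this->finish ) {
            passed += 1;
    }

    // statement form: the aggregate is open only inside the braces
    void clear( runner & r ) {
            with( r.finish ) {
                    passed = 0;
                    failed = 0;
            }
    }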
  • src/libcfa/concurrency/preemption.c (r5b51f5e → r65deb18)

    @@ -70,7 +70,7 @@
     static pthread_t alarm_thread;                        // pthread handle to alarm thread
     
    -void ?{}(event_kernel_t & this) {
    -        (this.alarms){};
    -        (this.lock){};
    +void ?{}(event_kernel_t & this) with( this ) {
    +        alarms{};
    +        lock{};
     }
     
    @@ -159,5 +159,5 @@
             // If counter reaches 0, execute any pending CtxSwitch
             void enable_interrupts( __cfaabi_dbg_ctx_param ) {
    -                processor * proc  = this_processor;      // Cache the processor now since interrupts can start happening after the atomic add
    +                processor   * proc = this_processor;      // Cache the processor now since interrupts can start happening after the atomic add
                     thread_desc * thrd = this_thread;         // Cache the thread now since interrupts can start happening after the atomic add
     
  • src/libcfa/concurrency/thread.c (r5b51f5e → r65deb18)

    @@ -31,34 +31,34 @@
     // Thread ctors and dtors
     
    -void ?{}(thread_desc& this) {
    -        (this.self_cor){};
    -        this.self_cor.name = "Anonymous Coroutine";
    -        this.self_mon.owner = &this;
    -        this.self_mon.recursion = 1;
    -        this.self_mon_p = &this.self_mon;
    -        this.next = NULL;
    +void ?{}(thread_desc& this) with( this ) {
    +        self_cor{};
    +        self_cor.name = "Anonymous Coroutine";
    +        self_mon.owner = &this;
    +        self_mon.recursion = 1;
    +        self_mon_p = &self_mon;
    +        next = NULL;
     
    -        (this.monitors){ &this.self_mon_p, 1, (fptr_t)0 };
    +        monitors{ &self_mon_p, 1, (fptr_t)0 };
     }
     
    -void ^?{}(thread_desc& this) {
    -        ^(this.self_cor){};
    +void ^?{}(thread_desc& this) with( this ) {
    +        ^self_cor{};
     }
     
     forall( dtype T | sized(T) | is_thread(T) | { void ?{}(T&); } )
    -void ?{}( scoped(T)& this ) {
    -        (this.handle){};
    -        __thrd_start(this.handle);
    +void ?{}( scoped(T)& this ) with( this ) {
    +        handle{};
    +        __thrd_start(handle);
     }
     
     forall( dtype T, ttype P | sized(T) | is_thread(T) | { void ?{}(T&, P); } )
    -void ?{}( scoped(T)& this, P params ) {
    -        (this.handle){ params };
    -        __thrd_start(this.handle);
    +void ?{}( scoped(T)& this, P params ) with( this ) {
    +        handle{ params };
    +        __thrd_start(handle);
     }
     
     forall( dtype T | sized(T) | is_thread(T) )
    -void ^?{}( scoped(T)& this ) {
    -        ^(this.handle){};
    +void ^?{}( scoped(T)& this ) with( this ) {
    +        ^handle{};
     }
     
    @@ -68,5 +68,5 @@
     void __thrd_start( T& this ) {
             coroutine_desc* thrd_c = get_coroutine(this);
    -        thread_desc* thrd_h = get_thread   (this);
    +        thread_desc   * thrd_h = get_thread   (this);
             thrd_c->last = this_coroutine;
     
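The destructor changes in thread.c follow the same pattern: ^?{} is the destructor operator, and ^handle{} runs a member's destructor explicitly; under with( this ) the this. prefix disappears. A hypothetical sketch:

    struct resource { int fd; };
    void ^?{}( resource & this ) { this.fd = -1; }       // hypothetical cleanup

    struct wrapper { resource handle; };
    void ^?{}( wrapper & this ) with( this ) {
            ^handle{};                                    // destroys this.handle
    }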
  • src/libcfa/interpose.h (r5b51f5e → r65deb18)

    @@ -16,5 +16,5 @@
     #pragma once
     
    -void * interpose_symbol( const char* symbol, , const char *version );
    +void * interpose_symbol( const char* symbol, const char *version );
     
     extern __typeof__( abort ) libc_abort __attribute__(( noreturn ));