Changeset 633a642


Timestamp: Jan 30, 2018, 4:52:54 PM (6 years ago)
Author: Peter A. Buhr <pabuhr@…>
Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
Children: 1449d83, 5ff188f
Parents: 320eb73a (diff), 7416d46a (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
Message: Merge branch 'master' of plg2:software/cfa/cfa-cc
Location: src
Files: 8 edited

Legend: unchanged context lines are unmarked, added lines are prefixed with +, removed lines with -, and elided regions with …
  • src/libcfa/bits/containers.h

    r320eb73a r633a642  
     
     #ifdef __cforall
    +
             forall(dtype T | is_node(T))
    -        static inline void ?{}( __queue(T) & this ) {
    -                (this.head){ NULL };
    -                (this.tail){ &this.head };
    +        static inline void ?{}( __queue(T) & this ) with( this ) {
    +                head{ NULL };
    +                tail{ &head };
             }
     
             forall(dtype T | is_node(T) | sized(T))
    -        static inline void append( __queue(T) & this, T * val ) {
    -                verify(this.tail != NULL);
    -                *this.tail = val;
    -                this.tail = &get_next( *val );
    +        static inline void append( __queue(T) & this, T * val ) with( this ) {
    +                verify(tail != NULL);
    +                *tail = val;
    +                tail = &get_next( *val );
             }
     
    …
     
             forall(dtype T | is_node(T) | sized(T))
    -        static inline T * remove( __queue(T) & this, T ** it ) {
    +        static inline T * remove( __queue(T) & this, T ** it ) with( this ) {
                     T * val = *it;
                     verify( val );
    …
                     (*it) = get_next( *val );
     
    -                if( this.tail == &get_next( *val ) ) {
    -                        this.tail = it;
    +                if( tail == &get_next( *val ) ) {
    +                        tail = it;
                     }
     
                     get_next( *val ) = NULL;
     
    -                verify( (this.head == NULL) == (&this.head == this.tail) );
    -                verify( *this.tail == NULL );
    +                verify( (head == NULL) == (&head == tail) );
    +                verify( *tail == NULL );
                     return val;
             }
     #endif
    +
    +//-----------------------------------------------------------------------------
    +// Tools
    +//-----------------------------------------------------------------------------
    +#ifdef __cforall
    +
    +#endif
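
    The edits in this file (and throughout this changeset) replace explicit "this." qualification with Cforall's with clause, which opens an aggregate's fields so members can be named directly inside a routine body. A minimal sketch of the pattern, using a hypothetical struct Point that is not part of this changeset:

        struct Point { int x, y; };

        // before: every member access spells out the aggregate
        void ?{}( Point & this ) {
                (this.x){ 0 };
                (this.y){ 0 };
        }

        // after: with( this ) brings x and y into scope for the routine body
        void ?{}( Point & this ) with( this ) {
                x{ 0 };
                y{ 0 };
        }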
  • src/libcfa/concurrency/coroutine.c

    r320eb73a r633a642  
     } //ctxSwitchDirect
     
    -void create_stack( coStack_t* this, unsigned int storageSize ) {
    +void create_stack( coStack_t* this, unsigned int storageSize ) with( *this ) {
             //TEMP HACK do this on proper kernel startup
             if(pageSize == 0ul) pageSize = sysconf( _SC_PAGESIZE );
    …
             size_t cxtSize = libCeiling( sizeof(machine_context_t), 8 ); // minimum alignment
     
    -        if ( (intptr_t)this->storage == 0 ) {
    -                this->userStack = false;
    -                this->size = libCeiling( storageSize, 16 );
    +        if ( (intptr_t)storage == 0 ) {
    +                userStack = false;
    +                size = libCeiling( storageSize, 16 );
                     // use malloc/memalign because "new" raises an exception for out-of-memory
     
                     // assume malloc has 8 byte alignment so add 8 to allow rounding up to 16 byte alignment
    -                __cfaabi_dbg_debug_do( this->storage = memalign( pageSize, cxtSize + this->size + pageSize ) );
    -                __cfaabi_dbg_no_debug_do( this->storage = malloc( cxtSize + this->size + 8 ) );
    +                __cfaabi_dbg_debug_do( storage = memalign( pageSize, cxtSize + size + pageSize ) );
    +                __cfaabi_dbg_no_debug_do( storage = malloc( cxtSize + size + 8 ) );
     
                     __cfaabi_dbg_debug_do(
    -                        if ( mprotect( this->storage, pageSize, PROT_NONE ) == -1 ) {
    +                        if ( mprotect( storage, pageSize, PROT_NONE ) == -1 ) {
                                     abortf( "(uMachContext &)%p.createContext() : internal error, mprotect failure, error(%d) %s.", this, (int)errno, strerror( (int)errno ) );
                             } // if
                     );
     
    -                if ( (intptr_t)this->storage == 0 ) {
    -                        abortf( "Attempt to allocate %d bytes of storage for coroutine or task execution-state but insufficient memory available.", this->size );
    +                if ( (intptr_t)storage == 0 ) {
    +                        abortf( "Attempt to allocate %d bytes of storage for coroutine or task execution-state but insufficient memory available.", size );
                     } // if
     
    -                __cfaabi_dbg_debug_do( this->limit = (char *)this->storage + pageSize );
    -                __cfaabi_dbg_no_debug_do( this->limit = (char *)libCeiling( (unsigned long)this->storage, 16 ) ); // minimum alignment
    +                __cfaabi_dbg_debug_do( limit = (char *)storage + pageSize );
    +                __cfaabi_dbg_no_debug_do( limit = (char *)libCeiling( (unsigned long)storage, 16 ) ); // minimum alignment
     
             } else {
    -                assertf( ((size_t)this->storage & (libAlign() - 1)) != 0ul, "Stack storage %p for task/coroutine must be aligned on %d byte boundary.", this->storage, (int)libAlign() );
    -                this->userStack = true;
    -                this->size = storageSize - cxtSize;
    +                assertf( ((size_t)storage & (libAlign() - 1)) != 0ul, "Stack storage %p for task/coroutine must be aligned on %d byte boundary.", storage, (int)libAlign() );
    +                userStack = true;
    +                size = storageSize - cxtSize;
     
    -                if ( this->size % 16 != 0u ) this->size -= 8;
    +                if ( size % 16 != 0u ) size -= 8;
     
    -                this->limit = (char *)libCeiling( (unsigned long)this->storage, 16 ); // minimum alignment
    +                limit = (char *)libCeiling( (unsigned long)storage, 16 ); // minimum alignment
             } // if
    -        assertf( this->size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", this->size, MinStackSize );
    +        assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize );
     
    -        this->base = (char *)this->limit + this->size;
    -        this->context = this->base;
    -        this->top = (char *)this->context + cxtSize;
    +        base = (char *)limit + size;
    +        context = base;
    +        top = (char *)context + cxtSize;
     }
     
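
    create_stack takes its coStack_t by pointer, so the clause is written with( *this ): the pointer is dereferenced once in the clause and every former this->field access becomes a plain field name. A small sketch under the same assumption of a hypothetical struct, not the real coStack_t layout:

        struct Stack { size_t size; size_t limit; };

        void init( Stack * this, size_t n ) with( *this ) {
                size  = n;          // was this->size = n
                limit = n / 2;      // was this->limit = n / 2
        }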
  • src/libcfa/concurrency/kernel.c

    r320eb73a r633a642  
     }
     
    -void ?{}( coStack_t & this, current_stack_info_t * info) {
    -        this.size = info->size;
    -        this.storage = info->storage;
    -        this.limit = info->limit;
    -        this.base = info->base;
    -        this.context = info->context;
    -        this.top = info->top;
    -        this.userStack = true;
    -}
    -
    -void ?{}( coroutine_desc & this, current_stack_info_t * info) {
    -        (this.stack){ info };
    -        this.name = "Main Thread";
    -        this.errno_ = 0;
    -        this.state = Start;
    -        this.starter = NULL;
    -}
    -
    -void ?{}( thread_desc & this, current_stack_info_t * info) {
    -        (this.self_cor){ info };
    +void ?{}( coStack_t & this, current_stack_info_t * info) with( this ) {
    +        size      = info->size;
    +        storage   = info->storage;
    +        limit     = info->limit;
    +        base      = info->base;
    +        context   = info->context;
    +        top       = info->top;
    +        userStack = true;
    +}
    +
    +void ?{}( coroutine_desc & this, current_stack_info_t * info) with( this ) {
    +        stack{ info };
    +        name = "Main Thread";
    +        errno_ = 0;
    +        state = Start;
    +        starter = NULL;
    +}
    +
    +void ?{}( thread_desc & this, current_stack_info_t * info) with( this ) {
    +        self_cor{ info };
     }
     
    …
     void ?{}(processor & this, cluster * cltr) {
             this.cltr = cltr;
    -        (this.terminated){ 0 };
    +        this.terminated{ 0 };
             this.do_terminate = false;
             this.preemption_alarm = NULL;
    …
     void ?{}(processor & this, cluster * cltr, processorCtx_t & runner) {
             this.cltr = cltr;
    -        (this.terminated){ 0 };
    +        this.terminated{ 0 };
             this.do_terminate = false;
             this.preemption_alarm = NULL;
    …
     }
     
    -void ^?{}(processor & this) {
    -        if( ! this.do_terminate ) {
    +void ^?{}(processor & this) with( this ){
    +        if( ! do_terminate ) {
                     __cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);
    -                this.do_terminate = true;
    -                P( this.terminated );
    -                pthread_join( this.kernel_thread, NULL );
    -        }
    -}
    -
    -void ?{}(cluster & this) {
    -        (this.ready_queue){};
    -        ( this.ready_queue_lock ){};
    -
    -        this.preemption = default_preemption();
    +                do_terminate = true;
    +                P( terminated );
    +                pthread_join( kernel_thread, NULL );
    +        }
    +}
    +
    +void ?{}(cluster & this) with( this ) {
    +        ready_queue{};
    +        ready_queue_lock{};
    +
    +        preemption = default_preemption();
     }
     
    …
     // Once a thread has finished running, some of
     // its final actions must be executed from the kernel
    -void finishRunning(processor * this) {
    -        if( this->finish.action_code == Release ) {
    +void finishRunning(processor * this) with( this->finish ) {
    +        if( action_code == Release ) {
                     verify( disable_preempt_count > 1 );
    -                unlock( *this->finish.lock );
    -        }
    -        else if( this->finish.action_code == Schedule ) {
    -                ScheduleThread( this->finish.thrd );
    -        }
    -        else if( this->finish.action_code == Release_Schedule ) {
    +                unlock( *lock );
    +        }
    +        else if( action_code == Schedule ) {
    +                ScheduleThread( thrd );
    +        }
    +        else if( action_code == Release_Schedule ) {
                     verify( disable_preempt_count > 1 );
    -                unlock( *this->finish.lock );
    -                ScheduleThread( this->finish.thrd );
    -        }
    -        else if( this->finish.action_code == Release_Multi ) {
    -                verify( disable_preempt_count > this->finish.lock_count );
    -                for(int i = 0; i < this->finish.lock_count; i++) {
    -                        unlock( *this->finish.locks[i] );
    +                unlock( *lock );
    +                ScheduleThread( thrd );
    +        }
    +        else if( action_code == Release_Multi ) {
    +                verify( disable_preempt_count > lock_count );
    +                for(int i = 0; i < lock_count; i++) {
    +                        unlock( *locks[i] );
                     }
             }
    -        else if( this->finish.action_code == Release_Multi_Schedule ) {
    -                verify( disable_preempt_count > this->finish.lock_count );
    -                for(int i = 0; i < this->finish.lock_count; i++) {
    -                        unlock( *this->finish.locks[i] );
    +        else if( action_code == Release_Multi_Schedule ) {
    +                for(int i = 0; i < lock_count; i++) {
    +                        unlock( *locks[i] );
                     }
    -                for(int i = 0; i < this->finish.thrd_count; i++) {
    -                        ScheduleThread( this->finish.thrds[i] );
    +                for(int i = 0; i < thrd_count; i++) {
    +                        ScheduleThread( thrds[i] );
                     }
             }
             else {
    -                assert(this->finish.action_code == No_Action);
    +                assert(action_code == No_Action);
             }
     }
    …
             verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );
     
    -        lock(   this_processor->cltr->ready_queue_lock __cfaabi_dbg_ctx2 );
    -        append( this_processor->cltr->ready_queue, thrd );
    -        unlock( this_processor->cltr->ready_queue_lock );
    -
    -        verify( disable_preempt_count > 0 );
    -}
    -
    -thread_desc * nextThread(cluster * this) {
    -        verify( disable_preempt_count > 0 );
    -        lock( this->ready_queue_lock __cfaabi_dbg_ctx2 );
    -        thread_desc * head = pop_head( this->ready_queue );
    -        unlock( this->ready_queue_lock );
    +        with( *this_processor->cltr ) {
    +                lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
    +                append( ready_queue, thrd );
    +                unlock( ready_queue_lock );
    +        }
    +
    +        verify( disable_preempt_count > 0 );
    +}
    +
    +thread_desc * nextThread(cluster * this) with( *this ) {
    +        verify( disable_preempt_count > 0 );
    +        lock( ready_queue_lock __cfaabi_dbg_ctx2 );
    +        thread_desc * head = pop_head( ready_queue );
    +        unlock( ready_queue_lock );
             verify( disable_preempt_count > 0 );
             return head;
    …
             disable_interrupts();
             this_processor->finish.action_code = Release;
    -        this_processor->finish.lock = lock;
    +        this_processor->finish.lock        = lock;
     
             verify( disable_preempt_count > 1 );
    …
     
     void BlockInternal( thread_desc * thrd ) {
    -        assert(thrd);
             disable_interrupts();
    -        assert( thrd->self_cor.state != Halted );
             this_processor->finish.action_code = Schedule;
    -        this_processor->finish.thrd = thrd;
    +        this_processor->finish.thrd        = thrd;
     
             verify( disable_preempt_count > 0 );
    …
             disable_interrupts();
             this_processor->finish.action_code = Release_Schedule;
    -        this_processor->finish.lock = lock;
    -        this_processor->finish.thrd = thrd;
    +        this_processor->finish.lock        = lock;
    +        this_processor->finish.thrd        = thrd;
     
             verify( disable_preempt_count > 1 );
    …
             disable_interrupts();
             this_processor->finish.action_code = Release_Multi;
    -        this_processor->finish.locks = locks;
    -        this_processor->finish.lock_count = count;
    +        this_processor->finish.locks       = locks;
    +        this_processor->finish.lock_count  = count;
     
             verify( disable_preempt_count > 0 );
    …
             disable_interrupts();
             this_processor->finish.action_code = Release_Multi_Schedule;
    -        this_processor->finish.locks = locks;
    -        this_processor->finish.lock_count = lock_count;
    -        this_processor->finish.thrds = thrds;
    -        this_processor->finish.thrd_count = thrd_count;
    +        this_processor->finish.locks       = locks;
    +        this_processor->finish.lock_count  = lock_count;
    +        this_processor->finish.thrds       = thrds;
    +        this_processor->finish.thrd_count  = thrd_count;
     
             verify( disable_preempt_count > 0 );
    …
             verify( disable_preempt_count > 0 );
             this_processor->finish.action_code = thrd ? Release_Schedule : Release;
    -        this_processor->finish.lock = lock;
    -        this_processor->finish.thrd = thrd;
    +        this_processor->finish.lock        = lock;
    +        this_processor->finish.thrd        = thrd;
     
             suspend();
    …
     void ^?{}(semaphore & this) {}
     
    -void P(semaphore & this) {
    -        lock( this.lock __cfaabi_dbg_ctx2 );
    -        this.count -= 1;
    -        if ( this.count < 0 ) {
    +void P(semaphore & this) with( this ){
    +        lock( lock __cfaabi_dbg_ctx2 );
    +        count -= 1;
    +        if ( count < 0 ) {
                     // queue current task
    -                append( this.waiting, (thread_desc *)this_thread );
    +                append( waiting, (thread_desc *)this_thread );
     
                     // atomically release spin lock and block
    -                BlockInternal( &this.lock );
    +                BlockInternal( &lock );
             }
             else {
    -                unlock( this.lock );
    -        }
    -}
    -
    -void V(semaphore & this) {
    +                unlock( lock );
    +        }
    +}
    +
    +void V(semaphore & this) with( this ) {
             thread_desc * thrd = NULL;
    -        lock( this.lock __cfaabi_dbg_ctx2 );
    -        this.count += 1;
    -        if ( this.count <= 0 ) {
    +        lock( lock __cfaabi_dbg_ctx2 );
    +        count += 1;
    +        if ( count <= 0 ) {
                     // remove task at head of waiting list
    -                thrd = pop_head( this.waiting );
    -        }
    -
    -        unlock( this.lock );
    +                thrd = pop_head( waiting );
    +        }
    +
    +        unlock( lock );
     
             // make new owner
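
    kernel.c also shows two further forms of the clause: opening a nested member (with( this->finish ) in finishRunning) and a block-scoped with statement (with( *this_processor->cltr ) in ScheduleThread), which limits the opened names to a single compound statement. A small sketch using hypothetical types, not the real kernel structures:

        struct Queue { int lock; int length; };
        struct Cluster { Queue ready; };

        void push( Cluster & c ) {
                with( c.ready ) {          // lock and length refer to c.ready's fields
                        lock = 1;          // only inside this block
                        length += 1;
                        lock = 0;
                }
        }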
  • src/libcfa/concurrency/preemption.c

    r320eb73a r633a642  
     static pthread_t alarm_thread;                        // pthread handle to alarm thread
     
    -void ?{}(event_kernel_t & this) {
    -        (this.alarms){};
    -        (this.lock){};
    +void ?{}(event_kernel_t & this) with( this ) {
    +        alarms{};
    +        lock{};
     }
     
    …
             // If counter reaches 0, execute any pending CtxSwitch
             void enable_interrupts( __cfaabi_dbg_ctx_param ) {
    -                processor * proc  = this_processor;      // Cache the processor now since interrupts can start happening after the atomic add
    +                processor   * proc = this_processor;      // Cache the processor now since interrupts can start happening after the atomic add
                     thread_desc * thrd = this_thread;         // Cache the thread now since interrupts can start happening after the atomic add
     
  • src/libcfa/concurrency/thread.c

    r320eb73a r633a642  
     // Thread ctors and dtors
     
    -void ?{}(thread_desc& this) {
    -        (this.self_cor){};
    -        this.self_cor.name = "Anonymous Thread";
    -        this.self_mon.owner = &this;
    -        this.self_mon.recursion = 1;
    -        this.self_mon_p = &this.self_mon;
    -        this.next = NULL;
    +void ?{}(thread_desc& this) with( this ) {
    +        self_cor{};
    +        self_cor.name = "Anonymous Coroutine";
    +        self_mon.owner = &this;
    +        self_mon.recursion = 1;
    +        self_mon_p = &self_mon;
    +        next = NULL;
             __cfaabi_dbg_debug_do(
    -                this.dbg_next = NULL;
    -                this.dbg_prev = NULL;
    +                dbg_next = NULL;
    +                dbg_prev = NULL;
                     __cfaabi_dbg_thread_register(&this);
             )
     
    -        (this.monitors){ &this.self_mon_p, 1, (fptr_t)0 };
    +        monitors{ &self_mon_p, 1, (fptr_t)0 };
     }
     
    -void ^?{}(thread_desc& this) {
    -        __cfaabi_dbg_debug_do(
    -                __cfaabi_dbg_thread_unregister(&this);
    -        )
    -        ^(this.self_cor){};
    +void ^?{}(thread_desc& this) with( this ) {
    +        ^self_cor{};
     }
     
     forall( dtype T | sized(T) | is_thread(T) | { void ?{}(T&); } )
    -void ?{}( scoped(T)& this ) {
    -        (this.handle){};
    -        __thrd_start(this.handle);
    +void ?{}( scoped(T)& this ) with( this ) {
    +        handle{};
    +        __thrd_start(handle);
     }
     
     forall( dtype T, ttype P | sized(T) | is_thread(T) | { void ?{}(T&, P); } )
    -void ?{}( scoped(T)& this, P params ) {
    -        (this.handle){ params };
    -        __thrd_start(this.handle);
    +void ?{}( scoped(T)& this, P params ) with( this ) {
    +        handle{ params };
    +        __thrd_start(handle);
     }
     
     forall( dtype T | sized(T) | is_thread(T) )
    -void ^?{}( scoped(T)& this ) {
    -        ^(this.handle){};
    +void ^?{}( scoped(T)& this ) with( this ) {
    +        ^handle{};
     }
     
    …
     void __thrd_start( T& this ) {
             coroutine_desc* thrd_c = get_coroutine(this);
    -        thread_desc* thrd_h = get_thread   (this);
    +        thread_desc   * thrd_h = get_thread   (this);
             thrd_c->last = this_coroutine;
     
  • src/libcfa/interpose.h

    r320eb73a r633a642  
     #pragma once
     
    -void * interpose_symbol( const char* symbol, , const char *version );
    +void * interpose_symbol( const char* symbol, const char *version );
     
     extern __typeof__( abort ) libc_abort __attribute__(( noreturn ));
  • src/tests/Makefile.am

    r320eb73a r633a642  
     
     concurrency :
    -        @+python test.py --debug=${debug} ${concurrent} ${concurrent_test}
    +        @+python test.py --debug=${debug} -Iconcurrent
     
     .dummy : .dummy.c @CFA_BINDIR@/@CFA_NAME@
  • src/tests/Makefile.in

    r320eb73a r633a642  
     
     concurrency :
    -        @+python test.py --debug=${debug} ${concurrent} ${concurrent_test}
    +        @+python test.py --debug=${debug} -Iconcurrent
     
     .dummy : .dummy.c @CFA_BINDIR@/@CFA_NAME@