Changes in [c4c8571:3f95dab]
Files: 17 edited

benchmark/rmit.py (modified) (1 diff)
libcfa/src/concurrency/io.cfa (modified) (1 diff)
libcfa/src/concurrency/io/setup.cfa (modified) (1 diff)
libcfa/src/concurrency/io/types.hfa (modified) (2 diffs)
libcfa/src/concurrency/kernel.hfa (modified) (3 diffs)
libcfa/src/concurrency/kernel/cluster.cfa (modified) (6 diffs)
libcfa/src/concurrency/kernel/cluster.hfa (modified) (2 diffs)
libcfa/src/concurrency/kernel/fwd.hfa (modified) (1 diff)
libcfa/src/concurrency/kernel/private.hfa (modified) (1 diff)
libcfa/src/concurrency/kernel/startup.cfa (modified) (1 diff)
libcfa/src/concurrency/ready_queue.cfa (modified) (7 diffs)
libcfa/src/concurrency/ready_subqueue.hfa (modified) (6 diffs)
libcfa/src/concurrency/stats.hfa (modified) (1 diff)
src/AST/Decl.hpp (modified) (2 diffs)
src/GenPoly/SpecializeNew.cpp (modified) (3 diffs)
src/InitTweak/InitTweak.cc (modified) (10 diffs)
src/SymTab/Autogen.cc (modified) (1 diff)
Legend: unprefixed lines are unmodified context, lines prefixed with + were added in r3f95dab, and lines prefixed with - were removed.
benchmark/rmit.py
--- rc4c8571
+++ r3f95dab
@@ -183,10 +183,4 @@
         range( 97, 145) : "24-95,120-191",
         range(145, 193) : "0-95,96-191",
-    },
-    "nasus": {
-        range(  1,  65) : "64-127",
-        range( 65, 129) : "64-127,192-255",
-        range(129, 193) : "64-255",
-        range(193, 257) : "0-255",
     },
 }
libcfa/src/concurrency/io.cfa
--- rc4c8571
+++ r3f95dab
@@ -241,8 +241,8 @@
     else {
         const unsigned target = proc->io.target;
-        /* paranoid */ verify( io.tscs[target].t.tv != ULLONG_MAX );
+        /* paranoid */ verify( io.tscs[target].tv != ULLONG_MAX );
         HELP: if(target < ctxs_count) {
             const unsigned long long cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io);
-            const unsigned long long age = moving_average(ctsc, io.tscs[target].t.tv, io.tscs[target].t.ma);
+            const unsigned long long age = moving_average(ctsc, io.tscs[target].tv, io.tscs[target].ma);
             __cfadbg_print_safe(io, "Kernel I/O: Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, ctx->cq.id, age, cutoff, age > cutoff ? "yes" : "no");
             if(age <= cutoff) break HELP;
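This hunk and the matching one in ready_queue.cfa implement the same helping heuristic: a processor compares the smoothed age of another shard against a cluster-wide cutoff before stealing work on its behalf. A minimal C++ sketch of that decision; the smoothing weight is an assumption for illustration (the real moving_average and calc_cutoff live in kernel/cluster.hfa):

    #include <cstdint>

    // Hypothetical EMA: blend the newest service age into the running average.
    // The 1/16 weight is an assumed constant, not libcfa's.
    static uint64_t moving_average(uint64_t now, uint64_t ts, uint64_t avg) {
        return ((now - ts) + 15 * avg) / 16;
    }

    // Help a shard only when its smoothed age exceeds the cutoff, mirroring
    // 'if(age <= cutoff) break HELP;' in the hunk above.
    static bool should_help(uint64_t ctsc, uint64_t tv, uint64_t ma, uint64_t cutoff) {
        return moving_average(ctsc, tv, ma) > cutoff;
    }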
libcfa/src/concurrency/io/setup.cfa
--- rc4c8571
+++ r3f95dab
@@ -359,5 +359,5 @@
 }
 
-void ^?{}( $io_arbiter & mutex this ) {}
+void ^?{}( $io_arbiter & this ) {}
 
 $io_arbiter * create(void) {
libcfa/src/concurrency/io/types.hfa
--- rc4c8571
+++ r3f95dab
@@ -125,5 +125,5 @@
 
 
-struct __attribute__((aligned(64))) $io_context {
+struct __attribute__((aligned(128))) $io_context {
     $io_arbiter * arbiter;
     processor * proc;
@@ -153,5 +153,5 @@
 };
 
-monitor __attribute__((aligned(64))) $io_arbiter {
+struct __attribute__((aligned(128))) $io_arbiter {
     __outstanding_io_queue pending;
 };
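The second hunk demotes $io_arbiter from a Cforall monitor to a plain struct, and the setup.cfa hunk above drops the matching mutex qualifier from its destructor. A monitor implicitly serializes calls through mutex-qualified parameters; a rough C++ analogy of what the demotion gives up, purely illustrative:

    #include <mutex>

    // A Cforall monitor is roughly a struct plus a hidden lock that every
    // 'mutex' parameter acquires on entry and releases on exit.
    struct io_arbiter_monitorish {
        std::mutex mtx;              // implicit in the monitor version
        // ... pending-request queue ...
    };

    void submit(io_arbiter_monitorish & a) {
        std::scoped_lock guard(a.mtx);   // what a 'mutex this' parameter buys
        // ... mutate a's queues under exclusion ...
    }
    // As a plain struct, callers get no implicit exclusion; correctness now
    // depends on the surrounding code's own synchronization.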
libcfa/src/concurrency/kernel.hfa
--- rc4c8571
+++ r3f95dab
@@ -83,5 +83,5 @@
 
 // Wrapper around kernel threads
-struct __attribute__((aligned(64))) processor {
+struct __attribute__((aligned(128))) processor {
     // Cluster from which to get threads
     struct cluster * cltr;
@@ -171,17 +171,14 @@
 
 // Intrusives lanes which are used by the ready queue
-union __attribute__((aligned(64))) __intrusive_lane_t;
+struct __attribute__((aligned(128))) __intrusive_lane_t;
 void ?{}(__intrusive_lane_t & this);
 void ^?{}(__intrusive_lane_t & this);
 
 // Aligned timestamps which are used by the ready queue and io subsystem
-union __attribute__((aligned(64))) __timestamp_t {
-    struct {
-        volatile unsigned long long tv;
-        volatile unsigned long long ma;
-    } t;
-    char __padding[192];
-};
-
-static inline void ?{}(__timestamp_t & this) { this.t.tv = 0; this.t.ma = 0; }
+struct __attribute__((aligned(128))) __timestamp_t {
+    volatile unsigned long long tv;
+    volatile unsigned long long ma;
+};
+
+static inline void ?{}(__timestamp_t & this) { this.tv = 0; this.ma = 0; }
 static inline void ^?{}(__timestamp_t &) {}
@@ -215,5 +212,5 @@
 //-----------------------------------------------------------------------------
 // Cluster
-struct __attribute__((aligned(64))) cluster {
+struct __attribute__((aligned(128))) cluster {
     struct {
         struct {
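Two patterns recur across this changeset and are both visible here: every 64-byte alignment becomes 128 bytes, and the union-with-__padding[192] wrapper around __timestamp_t is flattened into a plain aligned struct. Widening to 128 plausibly keeps two hot objects out of the same adjacent-cache-line prefetch pair; that rationale is an inference, not stated in the changeset. A standalone C++ sketch of the equivalence:

    #include <cstdint>

    // alignas on the type both aligns every instance and rounds sizeof up to a
    // multiple of the alignment, which the old union { struct {...} t;
    // char __padding[...]; } shape emulated by hand.
    struct alignas(128) timestamp_t {
        volatile uint64_t tv;   // last observed timestamp
        volatile uint64_t ma;   // moving average of service age
    };

    static_assert(alignof(timestamp_t) == 128, "instances start a fresh line pair");
    static_assert(sizeof(timestamp_t) % 128 == 0, "array elements never share one");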
libcfa/src/concurrency/kernel/cluster.cfa
--- rc4c8571
+++ r3f95dab
@@ -229,14 +229,14 @@
     for( idx ; lanes_count ) {
         __intrusive_lane_t & sl = readyQ.data[idx];
-        assert(!readyQ.data[idx].l.lock);
+        assert(!readyQ.data[idx].lock);
 
         if(is_empty(sl)) {
-            assert( sl.l.anchor.next == 0p );
-            assert( sl.l.anchor.ts   == MAX );
-            assert( mock_head(sl) == sl.l.prev );
+            assert( sl.anchor.next == 0p );
+            assert( sl.anchor.ts   == MAX );
+            assert( mock_head(sl) == sl.prev );
         } else {
-            assert( sl.l.anchor.next != 0p );
-            assert( sl.l.anchor.ts   != MAX );
-            assert( mock_head(sl) != sl.l.prev );
+            assert( sl.anchor.next != 0p );
+            assert( sl.anchor.ts   != MAX );
+            assert( mock_head(sl) != sl.prev );
         }
     }
@@ -249,6 +249,6 @@
 static inline void fix(__intrusive_lane_t & ll) {
     if(is_empty(ll)) {
-        verify(ll.l.anchor.next == 0p);
-        ll.l.prev = mock_head(ll);
+        verify(ll.anchor.next == 0p);
+        ll.prev = mock_head(ll);
     }
 }
@@ -299,6 +299,6 @@
     tscs = alloc(count, tscs`realloc);
     for(i; count) {
-        tscs[i].t.tv = rdtscl();
-        tscs[i].t.ma = 0;
+        tscs[i].tv = rdtscl();
+        tscs[i].ma = 0;
     }
 }
@@ -400,4 +400,4 @@
     for( idx; ncount ~ ocount) {
         // Lock is not strictly needed but makes checking invariants much easier
-        __attribute__((unused)) bool locked = __atomic_try_acquire(&readyQ.data[idx].l.lock);
+        __attribute__((unused)) bool locked = __atomic_try_acquire(&readyQ.data[idx].lock);
         verify(locked);
@@ -418,5 +418,5 @@
 
         // Unlock the lane
-        __atomic_unlock(&readyQ.data[idx].l.lock);
+        __atomic_unlock(&readyQ.data[idx].lock);
 
         // TODO print the queue statistics here
@@ -467,38 +467,34 @@
 }
 
-#define nested_offsetof(type, field) ((off_t)(&(((type*)0)->field)))
-
 // Ctor
 void ?{}( __intrusive_lane_t & this ) {
-    this.l.lock = false;
-    this.l.prev = mock_head(this);
-    this.l.anchor.next = 0p;
-    this.l.anchor.ts   = MAX;
+    this.lock = false;
+    this.prev = mock_head(this);
+    this.anchor.next = 0p;
+    this.anchor.ts   = MAX;
     #if !defined(__CFA_NO_STATISTICS__)
-        this.l.cnt  = 0;
+        this.cnt  = 0;
     #endif
 
     // We add a boat-load of assertions here because the anchor code is very fragile
-    /* paranoid */ _Static_assert( offsetof( thread$, link ) == nested_offsetof(__intrusive_lane_t, l.anchor) );
-    /* paranoid */ verify( offsetof( thread$, link ) == nested_offsetof(__intrusive_lane_t, l.anchor) );
-    /* paranoid */ verify( ((uintptr_t)( mock_head(this) ) + offsetof( thread$, link )) == (uintptr_t)(&this.l.anchor) );
-    /* paranoid */ verify( &mock_head(this)->link.next == &this.l.anchor.next );
-    /* paranoid */ verify( &mock_head(this)->link.ts   == &this.l.anchor.ts   );
+    /* paranoid */ _Static_assert( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );
+    /* paranoid */ verify( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );
+    /* paranoid */ verify( ((uintptr_t)( mock_head(this) ) + offsetof( thread$, link )) == (uintptr_t)(&this.anchor) );
+    /* paranoid */ verify( &mock_head(this)->link.next == &this.anchor.next );
+    /* paranoid */ verify( &mock_head(this)->link.ts   == &this.anchor.ts   );
     /* paranoid */ verify( mock_head(this)->link.next == 0p );
    /* paranoid */ verify( mock_head(this)->link.ts   == MAX );
-    /* paranoid */ verify( mock_head(this) == this.l.prev );
-    /* paranoid */ verify( __alignof__(__intrusive_lane_t) == 64 );
-    /* paranoid */ verify( __alignof__(this) == 64 );
-    /* paranoid */ verifyf( ((intptr_t)(&this) % 64) == 0, "Expected address to be aligned %p %% 64 == %zd", &this, ((intptr_t)(&this) % 64) );
-}
-
-#undef nested_offsetof
+    /* paranoid */ verify( mock_head(this) == this.prev );
+    /* paranoid */ verify( __alignof__(__intrusive_lane_t) == 128 );
+    /* paranoid */ verify( __alignof__(this) == 128 );
+    /* paranoid */ verifyf( ((intptr_t)(&this) % 128) == 0, "Expected address to be aligned %p %% 128 == %zd", &this, ((intptr_t)(&this) % 128) );
+}
 
 // Dtor is trivial
 void ^?{}( __intrusive_lane_t & this ) {
     // Make sure the list is empty
-    /* paranoid */ verify( this.l.anchor.next == 0p );
-    /* paranoid */ verify( this.l.anchor.ts   == MAX );
-    /* paranoid */ verify( mock_head(this) == this.l.prev );
+    /* paranoid */ verify( this.anchor.next == 0p );
+    /* paranoid */ verify( this.anchor.ts   == MAX );
+    /* paranoid */ verify( mock_head(this) == this.prev );
 }
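With the wrapper union gone, anchor is a direct member, so the hand-rolled nested_offsetof macro collapses to plain offsetof in the ctor's assertions. Those assertions guard the "mock head" trick: stepping back from the anchor by the link offset yields a pointer that behaves like a node whose link member is the anchor. A simplified C++ sketch with stand-in names for thread$ and __intrusive_lane_t (the same type-punning caveat as the original applies):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct link_t { void * next; uint64_t ts; };
    struct node_t { link_t link; };                                  // stand-in for thread$
    struct lane_t { node_t * prev; volatile bool lock; link_t anchor; };

    node_t * mock_head(lane_t & l) {
        // Back up from the anchor by the link offset: the result is not a real
        // node, but its 'link' member aliases the lane's anchor exactly.
        return reinterpret_cast<node_t *>(
            reinterpret_cast<uintptr_t>(&l.anchor) - offsetof(node_t, link));
    }

    void check(lane_t & l) {   // mirrors the /* paranoid */ verifies above
        assert(&mock_head(l)->link.next == &l.anchor.next);
        assert(&mock_head(l)->link.ts   == &l.anchor.ts);
    }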
libcfa/src/concurrency/kernel/cluster.hfa
--- rc4c8571
+++ r3f95dab
@@ -39,6 +39,6 @@
     if (ts_next == ULLONG_MAX) return;
     unsigned long long now = rdtscl();
-    unsigned long long pma = __atomic_load_n(&tscs[ idx ].t.ma, __ATOMIC_RELAXED);
-    __atomic_store_n(&tscs[ idx ].t.tv, ts_next, __ATOMIC_RELAXED);
-    __atomic_store_n(&tscs[ idx ].t.ma, moving_average(now, ts_prev, pma), __ATOMIC_RELAXED);
+    unsigned long long pma = __atomic_load_n(&tscs[ idx ].ma, __ATOMIC_RELAXED);
+    __atomic_store_n(&tscs[ idx ].tv, ts_next, __ATOMIC_RELAXED);
+    __atomic_store_n(&tscs[ idx ].ma, moving_average(now, ts_prev, pma), __ATOMIC_RELAXED);
 }
@@ -61,5 +61,5 @@
     if(ptsc != ULLONG_MAX) {
         /* paranoid */ verify( start + i < count );
-        unsigned long long tsc = moving_average(ctsc, ptsc, tscs[start + i].t.ma);
+        unsigned long long tsc = moving_average(ctsc, ptsc, tscs[start + i].ma);
         if(tsc > max) max = tsc;
     }
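The first hunk is the producer side of the heuristic sketched under io.cfa: after serving a lane, the processor stores a fresh timestamp and folds the previous one into the moving average, all with relaxed atomics since the values are advisory. A std::atomic rendering of the same update shape (moving_average again a stand-in with an assumed weight):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <limits>

    struct timestamp_t {
        std::atomic<uint64_t> tv;   // timestamp of the lane's current head
        std::atomic<uint64_t> ma;   // smoothed service age
    };

    // Assumed smoothing weight; only the relaxed-ordering shape matters here.
    uint64_t moving_average(uint64_t now, uint64_t ts, uint64_t avg) {
        return ((now - ts) + 15 * avg) / 16;
    }

    void touch_tsc(timestamp_t * tscs, std::size_t idx, uint64_t ts_prev,
                   uint64_t ts_next, uint64_t now) {
        if (ts_next == std::numeric_limits<uint64_t>::max()) return;  // lane empty
        uint64_t pma = tscs[idx].ma.load(std::memory_order_relaxed);
        tscs[idx].tv.store(ts_next, std::memory_order_relaxed);
        tscs[idx].ma.store(moving_average(now, ts_prev, pma), std::memory_order_relaxed);
    }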
libcfa/src/concurrency/kernel/fwd.hfa
--- rc4c8571
+++ r3f95dab
@@ -35,5 +35,5 @@
 extern "C" {
     extern "Cforall" {
-        extern __attribute__((aligned(64))) thread_local struct KernelThreadData {
+        extern __attribute__((aligned(128))) thread_local struct KernelThreadData {
             struct thread$ * volatile this_thread;
             struct processor * volatile this_processor;
libcfa/src/concurrency/kernel/private.hfa
--- rc4c8571
+++ r3f95dab
@@ -88,5 +88,5 @@
 #elif defined(CFA_HAVE_LINUX_RSEQ_H)
     extern "Cforall" {
-        extern __attribute__((aligned(64))) thread_local volatile struct rseq __cfaabi_rseq;
+        extern __attribute__((aligned(128))) thread_local volatile struct rseq __cfaabi_rseq;
     }
 #else
libcfa/src/concurrency/kernel/startup.cfa
--- rc4c8571
+++ r3f95dab
@@ -152,5 +152,5 @@
 #elif defined(CFA_HAVE_LINUX_RSEQ_H)
     extern "Cforall" {
-        __attribute__((aligned(64))) thread_local volatile struct rseq __cfaabi_rseq @= {
+        __attribute__((aligned(128))) thread_local volatile struct rseq __cfaabi_rseq @= {
             .cpu_id : RSEQ_CPU_ID_UNINITIALIZED,
         };
libcfa/src/concurrency/ready_queue.cfa
--- rc4c8571
+++ r3f95dab
@@ -81,9 +81,9 @@
             /* paranoid */ verify( i < lanes_count );
             // If we can't lock it retry
-        } while( !__atomic_try_acquire( &readyQ.data[i].l.lock ) );
+        } while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
     } else {
         do {
             i = __tls_rand() % lanes_count;
-        } while( !__atomic_try_acquire( &readyQ.data[i].l.lock ) );
+        } while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
     }
 } else {
@@ -93,5 +93,5 @@
         /* paranoid */ verify( i < lanes_count );
         // If we can't lock it retry
-    } while( !__atomic_try_acquire( &readyQ.data[i].l.lock ) );
+    } while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
 }
 
@@ -100,5 +100,5 @@
 
     // Unlock and return
-    __atomic_unlock( &readyQ.data[i].l.lock );
+    __atomic_unlock( &readyQ.data[i].lock );
 
     #if !defined(__CFA_NO_STATISTICS__)
@@ -136,9 +136,9 @@
     else {
         const unsigned target = proc->rdq.target;
-        __cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tcsc %llu\n", this, target, readyQ.tscs[target].t.tv);
-        /* paranoid */ verify( readyQ.tscs[target].t.tv != ULLONG_MAX );
+        __cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tcsc %llu\n", this, target, readyQ.tscs[target].tv);
+        /* paranoid */ verify( readyQ.tscs[target].tv != ULLONG_MAX );
         if(target < lanes_count) {
             const unsigned long long cutoff = calc_cutoff(ctsc, proc->rdq.id, lanes_count, cltr->sched.readyQ.data, cltr->sched.readyQ.tscs, __shard_factor.readyq);
-            const unsigned long long age = moving_average(ctsc, readyQ.tscs[target].t.tv, readyQ.tscs[target].t.ma);
+            const unsigned long long age = moving_average(ctsc, readyQ.tscs[target].tv, readyQ.tscs[target].ma);
             __cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? "yes" : "no");
             if(age > cutoff) {
@@ -188,11 +188,11 @@
 
     // If we can't get the lock retry
-    if( !__atomic_try_acquire(&lane.l.lock) ) {
+    if( !__atomic_try_acquire(&lane.lock) ) {
         return 0p;
     }
 
     // If list is empty, unlock and retry
     if( is_empty(lane) ) {
-        __atomic_unlock(&lane.l.lock);
+        __atomic_unlock(&lane.lock);
         return 0p;
     }
@@ -206,8 +206,8 @@
     /* paranoid */ verify(thrd);
     /* paranoid */ verify(ts_next);
-    /* paranoid */ verify(lane.l.lock);
+    /* paranoid */ verify(lane.lock);
 
     // Unlock and return
-    __atomic_unlock(&lane.l.lock);
+    __atomic_unlock(&lane.lock);
 
     // Update statistics
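All seven hunks shorten .l.lock to .lock, but the surrounding pattern is worth noting: a push never spins on a contended lane, it try-acquires and, on failure, simply picks another lane. A compact C++ sketch of that discipline, with std::rand standing in for __tls_rand:

    #include <atomic>
    #include <cstddef>
    #include <cstdlib>

    struct lane_t { std::atomic<bool> lock{false}; /* intrusive queue ... */ };

    bool try_acquire(lane_t & l) {          // like __atomic_try_acquire
        return !l.lock.exchange(true, std::memory_order_acquire);
    }

    void unlock(lane_t & l) {               // like __atomic_unlock
        l.lock.store(false, std::memory_order_release);
    }

    std::size_t lock_any_lane(lane_t * lanes, std::size_t lanes_count) {
        std::size_t i;
        do {
            i = static_cast<std::size_t>(std::rand()) % lanes_count;  // stand-in RNG
        } while (!try_acquire(lanes[i]));   // contended? try a different lane
        return i;                           // caller pushes, then calls unlock()
    }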
libcfa/src/concurrency/ready_subqueue.hfa
--- rc4c8571
+++ r3f95dab
@@ -6,18 +6,15 @@
 
 // Intrusives lanes which are used by the relaxed ready queue
-union __attribute__((aligned(64))) __intrusive_lane_t {
-    struct {
-        struct thread$ * prev;
+struct __attribute__((aligned(128))) __intrusive_lane_t {
+    struct thread$ * prev;
 
-        // spin lock protecting the queue
-        volatile bool lock;
+    // spin lock protecting the queue
+    volatile bool lock;
 
-        __thread_desc_link anchor;
+    __thread_desc_link anchor;
 
-        #if !defined(__CFA_NO_STATISTICS__)
-            unsigned cnt;
-        #endif
-    } l;
-    char __padding[192];
+    #if !defined(__CFA_NO_STATISTICS__)
+        unsigned cnt;
+    #endif
 };
 
@@ -25,5 +22,5 @@
 static inline thread$ * mock_head(const __intrusive_lane_t & this) {
     thread$ * rhead = (thread$ *)(
-        (uintptr_t)( &this.l.anchor ) - __builtin_offsetof( thread$, link )
+        (uintptr_t)( &this.anchor ) - __builtin_offsetof( thread$, link )
     );
     return rhead;
@@ -33,27 +30,27 @@
 // returns true of lane was empty before push, false otherwise
 static inline void push( __intrusive_lane_t & this, thread$ * node ) {
-    /* paranoid */ verify( this.l.lock );
+    /* paranoid */ verify( this.lock );
     /* paranoid */ verify( node->link.next == 0p );
     /* paranoid */ verify( __atomic_load_n(&node->link.ts, __ATOMIC_RELAXED) == MAX );
-    /* paranoid */ verify( this.l.prev->link.next == 0p );
-    /* paranoid */ verify( __atomic_load_n(&this.l.prev->link.ts, __ATOMIC_RELAXED) == MAX );
-    if( this.l.anchor.next == 0p ) {
-        /* paranoid */ verify( this.l.anchor.next == 0p );
-        /* paranoid */ verify( __atomic_load_n(&this.l.anchor.ts, __ATOMIC_RELAXED) == MAX );
-        /* paranoid */ verify( __atomic_load_n(&this.l.anchor.ts, __ATOMIC_RELAXED) != 0 );
-        /* paranoid */ verify( this.l.prev == mock_head( this ) );
+    /* paranoid */ verify( this.prev->link.next == 0p );
+    /* paranoid */ verify( __atomic_load_n(&this.prev->link.ts, __ATOMIC_RELAXED) == MAX );
+    if( this.anchor.next == 0p ) {
+        /* paranoid */ verify( this.anchor.next == 0p );
+        /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) == MAX );
+        /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0 );
+        /* paranoid */ verify( this.prev == mock_head( this ) );
     } else {
-        /* paranoid */ verify( this.l.anchor.next != 0p );
-        /* paranoid */ verify( __atomic_load_n(&this.l.anchor.ts, __ATOMIC_RELAXED) != MAX );
-        /* paranoid */ verify( __atomic_load_n(&this.l.anchor.ts, __ATOMIC_RELAXED) != 0 );
-        /* paranoid */ verify( this.l.prev != mock_head( this ) );
+        /* paranoid */ verify( this.anchor.next != 0p );
+        /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != MAX );
+        /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0 );
+        /* paranoid */ verify( this.prev != mock_head( this ) );
     }
 
     // Get the relevant nodes locally
-    this.l.prev->link.next = node;
-    __atomic_store_n(&this.l.prev->link.ts, rdtscl(), __ATOMIC_RELAXED);
-    this.l.prev = node;
+    this.prev->link.next = node;
+    __atomic_store_n(&this.prev->link.ts, rdtscl(), __ATOMIC_RELAXED);
+    this.prev = node;
     #if !defined(__CFA_NO_STATISTICS__)
-        this.l.cnt++;
+        this.cnt++;
     #endif
 }
@@ -63,24 +60,24 @@
 // returns true of lane was empty before push, false otherwise
 static inline [* thread$, unsigned long long] pop( __intrusive_lane_t & this ) {
-    /* paranoid */ verify( this.l.lock );
-    /* paranoid */ verify( this.l.anchor.next != 0p );
-    /* paranoid */ verify( __atomic_load_n(&this.l.anchor.ts, __ATOMIC_RELAXED) != MAX );
-    /* paranoid */ verify( __atomic_load_n(&this.l.anchor.ts, __ATOMIC_RELAXED) != 0 );
+    /* paranoid */ verify( this.lock );
+    /* paranoid */ verify( this.anchor.next != 0p );
+    /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != MAX );
+    /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0 );
 
     // Get the relevant nodes locally
-    thread$ * node = this.l.anchor.next;
-    this.l.anchor.next = node->link.next;
-    __atomic_store_n(&this.l.anchor.ts, __atomic_load_n(&node->link.ts, __ATOMIC_RELAXED), __ATOMIC_RELAXED);
-    bool is_empty = this.l.anchor.next == 0p;
+    thread$ * node = this.anchor.next;
+    this.anchor.next = node->link.next;
+    __atomic_store_n(&this.anchor.ts, __atomic_load_n(&node->link.ts, __ATOMIC_RELAXED), __ATOMIC_RELAXED);
+    bool is_empty = this.anchor.next == 0p;
     node->link.next = 0p;
     __atomic_store_n(&node->link.ts, ULLONG_MAX, __ATOMIC_RELAXED);
     #if !defined(__CFA_NO_STATISTICS__)
-        this.l.cnt--;
+        this.cnt--;
     #endif
 
     // Update head time stamp
-    if(is_empty) this.l.prev = mock_head( this );
+    if(is_empty) this.prev = mock_head( this );
 
-    unsigned long long ats = __atomic_load_n(&this.l.anchor.ts, __ATOMIC_RELAXED);
+    unsigned long long ats = __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED);
     /* paranoid */ verify( node->link.next == 0p );
     /* paranoid */ verify( __atomic_load_n(&node->link.ts , __ATOMIC_RELAXED) == MAX );
@@ -93,4 +90,4 @@
 // Check whether or not list is empty
 static inline bool is_empty(__intrusive_lane_t & this) {
-    return this.l.anchor.next == 0p;
+    return this.anchor.next == 0p;
 }
@@ -99,6 +96,6 @@
 static inline unsigned long long ts(__intrusive_lane_t & this) {
     // Cannot verify 'emptiness' here since it may not be locked
-    /* paranoid */ verify(this.l.anchor.ts != 0);
-    /* paranoid */ static_assert(__atomic_always_lock_free(sizeof(this.l.anchor.ts), &this.l.anchor.ts));
-    return __atomic_load_n(&this.l.anchor.ts, __ATOMIC_RELAXED);
+    /* paranoid */ verify(this.anchor.ts != 0);
+    /* paranoid */ static_assert(__atomic_always_lock_free(sizeof(this.anchor.ts), &this.anchor.ts));
+    return __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED);
 }
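Beyond the .l. renaming, these hunks show the whole lane protocol: anchor.next is the head, prev is the tail (the mock head while empty), and anchor.ts always carries the current head's timestamp so ts() can read it without holding the lock. A simplified single-threaded C++ sketch of push/pop under those invariants (statistics and atomics elided; names are stand-ins):

    #include <cstdint>

    struct node_t { node_t * next = nullptr; uint64_t ts = UINT64_MAX; };

    struct lane_t {
        node_t   anchor;                 // plays the head node's link fields
        node_t * prev = &anchor;         // tail; the anchor itself while empty

        bool empty() const { return anchor.next == nullptr; }

        void push(node_t * n, uint64_t now) {   // caller holds the lane lock
            prev->next = n;              // append after the current tail
            prev->ts   = now;            // predecessor records this arrival;
            prev       = n;              // for the head it lands in anchor.ts
        }

        node_t * pop() {                 // caller holds the lock; !empty()
            node_t * n  = anchor.next;
            anchor.next = n->next;
            anchor.ts   = n->ts;         // new head's timestamp moves to anchor
            n->next = nullptr; n->ts = UINT64_MAX;
            if (empty()) prev = &anchor; // collapse back to the empty state
            return n;
        }
    };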
libcfa/src/concurrency/stats.hfa
--- rc4c8571
+++ r3f95dab
@@ -132,5 +132,5 @@
 #endif
 
-struct __attribute__((aligned(64))) __stats_t {
+struct __attribute__((aligned(128))) __stats_t {
     __stats_readyQ_t ready;
 #if defined(CFA_HAVE_LINUX_IO_URING_H)
src/AST/Decl.hpp
--- rc4c8571
+++ r3f95dab
@@ -143,5 +143,5 @@
     FunctionDecl( const CodeLocation & loc, const std::string & name, std::vector<ptr<TypeDecl>>&& forall,
         std::vector<ptr<DeclWithType>>&& params, std::vector<ptr<DeclWithType>>&& returns,
-        CompoundStmt * stmts, Storage::Classes storage = {}, Linkage::Spec linkage = Linkage::Cforall,
+        CompoundStmt * stmts, Storage::Classes storage = {}, Linkage::Spec linkage = Linkage::C,
         std::vector<ptr<Attribute>>&& attrs = {}, Function::Specs fs = {}, bool isVarArgs = false);
 
@@ -149,5 +149,5 @@
         std::vector<ptr<TypeDecl>>&& forall, std::vector<ptr<DeclWithType>>&& assertions,
         std::vector<ptr<DeclWithType>>&& params, std::vector<ptr<DeclWithType>>&& returns,
-        CompoundStmt * stmts, Storage::Classes storage = {}, Linkage::Spec linkage = Linkage::Cforall,
+        CompoundStmt * stmts, Storage::Classes storage = {}, Linkage::Spec linkage = Linkage::C,
         std::vector<ptr<Attribute>>&& attrs = {}, Function::Specs fs = {}, bool isVarArgs = false);
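Both hunks flip the defaulted linkage parameter of the ast::FunctionDecl constructors from Linkage::Cforall to Linkage::C. Because defaults apply invisibly at every call site that omits the argument, the change retroactively alters all of them; the Autogen.cc hunk below, which drops its explicit linkage argument in the same changeset, is exactly such a site. A minimal illustration with hypothetical stand-in types:

    #include <cassert>

    enum class Linkage { C, Cforall };

    struct FunctionDecl {
        Linkage linkage;
        explicit FunctionDecl(Linkage l = Linkage::C) : linkage(l) {}  // was Cforall
    };

    int main() {
        FunctionDecl byDefault;                    // silently tracks the default
        assert(byDefault.linkage == Linkage::C);   // now C; previously Cforall
        FunctionDecl spelledOut(Linkage::Cforall); // immune: explicit at the call
        assert(spelledOut.linkage == Linkage::Cforall);
    }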
src/GenPoly/SpecializeNew.cpp
--- rc4c8571
+++ r3f95dab
@@ -230,5 +230,5 @@
     if ( auto tuple = type.as<ast::TupleType>() ) {
         std::vector<ast::ptr<ast::Expr>> exprs;
-        for ( const ast::ptr<ast::Type> &t : *tuple ) {
+        for ( const ast::Type * t : *tuple ) {
             exprs.push_back( structureArg( location, t, begin, end ) );
         }
@@ -248,7 +248,6 @@
     if (typeMap.count(typeInst->base)) {
         ast::TypeInstType * newInst = mutate(typeInst);
-        auto const & pair = typeMap[typeInst->base];
-        newInst->expr_id = pair.first;
-        newInst->formal_usage = pair.second;
+        newInst->expr_id = typeMap[typeInst->base].first;
+        newInst->formal_usage = typeMap[typeInst->base].second;
         return newInst;
     }
@@ -463,4 +462,5 @@
     if ( specialized != expr->arg ) {
         // Assume that the specialization incorporates the cast.
+        std::cerr << expr <<std::endl;
         return specialized;
     } else {
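The middle hunk replaces a cached reference into typeMap with two direct operator[] lookups; both variants are correct only because the preceding count() guarantees the key exists. A find()-based alternative probes the map once and needs no prior count(), shown here with stand-in types:

    #include <map>
    #include <utility>

    using Key = const void *;                  // stand-in for the AST base pointer
    std::map<Key, std::pair<int, int>> typeMap;

    bool apply(Key base, int & expr_id, int & formal_usage) {
        auto it = typeMap.find(base);          // one lookup instead of count() + []
        if (it == typeMap.end()) return false;
        expr_id      = it->second.first;
        formal_usage = it->second.second;
        return true;
    }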
src/InitTweak/InitTweak.cc
--- rc4c8571
+++ r3f95dab
@@ -27,5 +27,4 @@
 #include "AST/Stmt.hpp"
 #include "AST/Type.hpp"
-#include "CodeGen/OperatorTable.h"     // for isConstructor, isDestructor, isCto...
 #include "Common/PassVisitor.h"
 #include "Common/SemanticError.h"      // for SemanticError
@@ -771,4 +770,5 @@
     std::list< Expression * > callExprs;
     collectCtorDtorCalls( stmt, callExprs );
+    // if ( callExprs.empty() ) return false; // xxx - do I still need this check?
     return std::all_of( callExprs.begin(), callExprs.end(), pred);
 }
@@ -901,5 +901,5 @@
     } else if ( VariableExpr * varExpr = dynamic_cast< VariableExpr * >( func ) ) {
         return varExpr->get_var()->get_name();
-    } else if ( CastExpr * castExpr = dynamic_cast< CastExpr * >( func ) ) {
+    } else if ( CastExpr * castExpr = dynamic_cast< CastExpr * >( func ) ) {
         return funcName( castExpr->get_arg() );
     } else if ( MemberExpr * memberExpr = dynamic_cast< MemberExpr * >( func ) ) {
@@ -923,5 +923,5 @@
     } else if ( const ast::VariableExpr * varExpr = dynamic_cast< const ast::VariableExpr * >( func ) ) {
         return varExpr->var->name;
-    } else if ( const ast::CastExpr * castExpr = dynamic_cast< const ast::CastExpr * >( func ) ) {
+    } else if ( const ast::CastExpr * castExpr = dynamic_cast< const ast::CastExpr * >( func ) ) {
         return funcName( castExpr->arg );
     } else if ( const ast::MemberExpr * memberExpr = dynamic_cast< const ast::MemberExpr * >( func ) ) {
@@ -991,4 +991,5 @@
 
     Type * isPointerType( Type * type ) {
-        return getPointerBase( type ) ? type : nullptr;
+        if ( getPointerBase( type ) ) return type;
+        else return nullptr;
     }
@@ -1013,4 +1014,5 @@
             src = new AddressExpr( src );
         }
+        // src = new CastExpr( src, new ReferenceType( noQualifiers, src->result->stripReferences()->clone() ) );
     }
     return new ApplicationExpr( VariableExpr::functionPointer( assign ), { dst, src } );
@@ -1165,4 +1167,10 @@
     }
 
+    bool isConstructor( const std::string & str ) { return str == "?{}"; }
+    bool isDestructor( const std::string & str ) { return str == "^?{}"; }
+    bool isAssignment( const std::string & str ) { return str == "?=?"; }
+    bool isCtorDtor( const std::string & str ) { return isConstructor( str ) || isDestructor( str ); }
+    bool isCtorDtorAssign( const std::string & str ) { return isCtorDtor( str ) || isAssignment( str ); }
+
     const FunctionDecl * isCopyFunction( const Declaration * decl, const std::string & fname ) {
         const FunctionDecl * function = dynamic_cast< const FunctionDecl * >( decl );
@@ -1184,17 +1192,17 @@
 
     bool isAssignment( const ast::FunctionDecl * decl ) {
-        return CodeGen::isAssignment( decl->name ) && isCopyFunction( decl );
+        return isAssignment( decl->name ) && isCopyFunction( decl );
     }
 
     bool isDestructor( const ast::FunctionDecl * decl ) {
-        return CodeGen::isDestructor( decl->name );
+        return isDestructor( decl->name );
     }
 
     bool isDefaultConstructor( const ast::FunctionDecl * decl ) {
-        return CodeGen::isConstructor( decl->name ) && 1 == decl->params.size();
+        return isConstructor( decl->name ) && 1 == decl->params.size();
     }
 
     bool isCopyConstructor( const ast::FunctionDecl * decl ) {
-        return CodeGen::isConstructor( decl->name ) && 2 == decl->params.size();
+        return isConstructor( decl->name ) && 2 == decl->params.size();
     }
@@ -1214,5 +1222,5 @@
     }
     const FunctionDecl * isDestructor( const Declaration * decl ) {
-        if ( CodeGen::isDestructor( decl->name ) ) {
+        if ( isDestructor( decl->name ) ) {
             return dynamic_cast< const FunctionDecl * >( decl );
         }
@@ -1220,5 +1228,5 @@
     }
     const FunctionDecl * isDefaultConstructor( const Declaration * decl ) {
-        if ( CodeGen::isConstructor( decl->name ) ) {
+        if ( isConstructor( decl->name ) ) {
             if ( const FunctionDecl * func = dynamic_cast< const FunctionDecl * >( decl ) ) {
                 if ( func->type->parameters.size() == 1 ) {
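The core of these hunks re-introduces local string predicates for Cforall's operator names, dropping the CodeGen/OperatorTable.h include that previously supplied them: constructors are spelled ?{}, destructors ^?{}, and assignment ?=?. A usage sketch of those exact predicates:

    #include <cassert>
    #include <string>

    bool isConstructor(const std::string & s) { return s == "?{}";  }
    bool isDestructor (const std::string & s) { return s == "^?{}"; }
    bool isAssignment (const std::string & s) { return s == "?=?";  }
    bool isCtorDtor   (const std::string & s) { return isConstructor(s) || isDestructor(s); }

    int main() {
        assert(isConstructor("?{}"));     // default ctor:  void ?{}(T &)
        assert(isDestructor("^?{}"));     // dtor:          void ^?{}(T &)
        assert(isAssignment("?=?"));      // assignment operator
        assert(!isCtorDtor("?=?"));       // assignment is neither ctor nor dtor
    }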
src/SymTab/Autogen.cc
--- rc4c8571
+++ r3f95dab
@@ -258,8 +258,8 @@
     }
 
-    /// Given type T, generate type of default ctor/dtor, i.e. function type void (*) (T &).
+    ///
     ast::FunctionDecl * genDefaultFunc(const CodeLocation loc, const std::string fname, const ast::Type * paramType, bool maybePolymorphic) {
         std::vector<ast::ptr<ast::TypeDecl>> typeParams;
         if (maybePolymorphic) typeParams = getGenericParams(paramType);
         auto dstParam = new ast::ObjectDecl(loc, "_dst", new ast::ReferenceType(paramType), nullptr, {}, ast::Linkage::Cforall);
-        return new ast::FunctionDecl(loc, fname, std::move(typeParams), {dstParam}, {}, new ast::CompoundStmt(loc), {}, ast::Linkage::Cforall);
+        return new ast::FunctionDecl(loc, fname, std::move(typeParams), {dstParam}, {}, new ast::CompoundStmt(loc));