Changeset af4487d
- Timestamp: Aug 25, 2020, 10:10:12 PM
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: f238769d
- Parents: 29d618e (diff), f7fac4b (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Files: 5 added, 9 edited
libcfa/src/Makefile.am
--- libcfa/src/Makefile.am (r29d618e)
+++ libcfa/src/Makefile.am (raf4487d)
@@ -53,5 +53,5 @@
 
 thread_headers = concurrency/coroutine.hfa concurrency/thread.hfa concurrency/kernel.hfa \
-	concurrency/monitor.hfa concurrency/mutex.hfa
+	concurrency/monitor.hfa concurrency/mutex.hfa concurrency/exception.hfa
 
 thread_libsrc = concurrency/CtxSwitch-@ARCHITECTURE@.S concurrency/alarm.cfa \
libcfa/src/concurrency/coroutine.cfa
--- libcfa/src/concurrency/coroutine.cfa (r29d618e)
+++ libcfa/src/concurrency/coroutine.cfa (raf4487d)
@@ -215,8 +215,4 @@
 		return cor;
 	}
-
-	struct $coroutine * __cfactx_cor_active(void) {
-		return active_coroutine();
-	}
 }
 
libcfa/src/concurrency/invoke.c
--- libcfa/src/concurrency/invoke.c (r29d618e)
+++ libcfa/src/concurrency/invoke.c (raf4487d)
@@ -29,5 +29,4 @@
 // Called from the kernel when starting a coroutine or task so must switch back to user mode.
 
-extern struct $coroutine * __cfactx_cor_active(void);
 extern struct $coroutine * __cfactx_cor_finish(void);
 extern void __cfactx_cor_leave ( struct $coroutine * );
@@ -36,8 +35,4 @@
 extern void disable_interrupts() OPTIONAL_THREAD;
 extern void enable_interrupts( __cfaabi_dbg_ctx_param );
-
-struct exception_context_t * this_exception_context() {
-	return &__get_stack( __cfactx_cor_active() )->exception_context;
-}
 
 void __cfactx_invoke_coroutine(
libcfa/src/concurrency/invoke.h
--- libcfa/src/concurrency/invoke.h (r29d618e)
+++ libcfa/src/concurrency/invoke.h (raf4487d)
@@ -98,6 +98,4 @@
 }
 
-struct exception_context_t * this_exception_context();
-
 // struct which calls the monitor is accepting
 struct __waitfor_mask_t {
libcfa/src/concurrency/io/setup.cfa
--- libcfa/src/concurrency/io/setup.cfa (r29d618e)
+++ libcfa/src/concurrency/io/setup.cfa (raf4487d)
@@ -384,5 +384,5 @@
 	/* paranoid */ verify( is_pow2( params_in.num_ready ) || (params_in.num_ready < 8) );
 	sq.ready_cnt = max( params_in.num_ready, 8 );
-	sq.ready = alloc_align( 64, sq.ready_cnt );
+	sq.ready = alloc( sq.ready_cnt, 64`align );
 	for(i; sq.ready_cnt) {
 		sq.ready[i] = -1ul32;
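This call site is an early adopter of the reworked stdlib.hfa allocation interface (diffed below): the alignment moves from a leading positional parameter of the removed alloc_align to a postfix-tagged `align argument of the unified alloc. A minimal before/after sketch, with the element type assumed for illustration:

    // Illustrative only: both calls request sq.ready_cnt elements aligned
    // to 64 bytes; only the API spelling changes.
    unsigned * before = alloc_align( 64, sq.ready_cnt );  // old API: alignment first
    unsigned * after  = alloc( sq.ready_cnt, 64`align );  // new API: dimension first, postfix `align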
libcfa/src/concurrency/kernel/startup.cfa
--- libcfa/src/concurrency/kernel/startup.cfa (r29d618e)
+++ libcfa/src/concurrency/kernel/startup.cfa (raf4487d)
@@ -579,4 +579,5 @@
 
 	// Lock the RWlock so no-one pushes/pops while we are changing the queue
+	disable_interrupts();
 	uint_fast32_t last_size = ready_mutate_lock();
 
@@ -586,4 +587,6 @@
 	// Unlock the RWlock
 	ready_mutate_unlock( last_size );
+	enable_interrupts_noPoll(); // Don't poll, could be in main cluster
+
 
 	this.io.cnt = num_io;
@@ -601,4 +604,5 @@
 
 	// Lock the RWlock so no-one pushes/pops while we are changing the queue
+	disable_interrupts();
 	uint_fast32_t last_size = ready_mutate_lock();
 
@@ -608,4 +612,5 @@
 	// Unlock the RWlock
 	ready_mutate_unlock( last_size );
+	enable_interrupts_noPoll(); // Don't poll, could be in main cluster
 
 	#if !defined(__CFA_NO_STATISTICS__)
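Both hunks bracket the ready-queue mutation the same way, presumably so a preemption signal cannot interrupt the lock holder while other processors wait on the RW lock. The pattern, extracted from the diff with the surrounding registration code elided:

    disable_interrupts();                           // block preemption first
    uint_fast32_t last_size = ready_mutate_lock();  // then take the ready-queue RW lock exclusively
    // ... grow/shrink the cluster's lane array ...
    ready_mutate_unlock( last_size );
    enable_interrupts_noPoll();                     // don't poll, could be in main cluster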
libcfa/src/concurrency/ready_queue.cfa
--- libcfa/src/concurrency/ready_queue.cfa (r29d618e)
+++ libcfa/src/concurrency/ready_queue.cfa (raf4487d)
@@ -215,4 +215,29 @@
 }
 
+static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred) {
+	unsigned i;
+	bool local;
+	#if defined(BIAS)
+		unsigned rlow  = r % BIAS;
+		unsigned rhigh = r / BIAS;
+		if((0 != rlow) && preferred >= 0) {
+			// (BIAS - 1) out of BIAS chances
+			// Use preferred queues
+			i = preferred + (rhigh % 4);
+			local = true;
+		}
+		else {
+			// 1 out of BIAS chances
+			// Use all queues
+			i = rhigh;
+			local = false;
+		}
+	#else
+		i = r;
+		local = false;
+	#endif
+	return [i, local];
+}
+
 //-----------------------------------------------------------------------
 __attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
@@ -222,7 +247,8 @@
 	thrd->link.ts = rdtscl();
 
-	#if defined(BIAS) && !defined(__CFA_NO_STATISTICS__)
-		bool local = false;
-		int preferred =
+	__attribute__((unused)) bool local;
+	__attribute__((unused)) int preferred;
+	#if defined(BIAS)
+		preferred =
 			//*
 			kernelTLS.this_processor ? kernelTLS.this_processor->id * 4 : -1;
@@ -230,6 +256,4 @@
 			thrd->link.preferred * 4;
 			//*/
-
-
 	#endif
 
@@ -238,26 +262,12 @@
 		do {
 			// Pick the index of a lane
-			#if defined(BIAS)
-				unsigned r = __tls_rand();
-				unsigned rlow  = r % BIAS;
-				unsigned rhigh = r / BIAS;
-				if((0 != rlow) && preferred >= 0) {
-					// (BIAS - 1) out of BIAS chances
-					// Use preferred queues
-					i = preferred + (rhigh % 4);
-
-					#if !defined(__CFA_NO_STATISTICS__)
-						local = true;
-						__tls_stats()->ready.pick.push.local++;
-					#endif
-				}
-				else {
-					// 1 out of BIAS chances
-					// Use all queues
-					i = rhigh;
-					local = false;
-				}
-			#else
-				i = __tls_rand();
+			// unsigned r = __tls_rand();
+			unsigned r = __tls_rand_fwd();
+			[i, local] = idx_from_r(r, preferred);
+
+			#if !defined(__CFA_NO_STATISTICS__)
+				if(local) {
+					__tls_stats()->ready.pick.push.local++;
+				}
 			#endif
 
@@ -274,5 +284,9 @@
 
 	// Actually push it
-	bool lane_first = push(lanes.data[i], thrd);
+	#ifdef USE_SNZI
+		bool lane_first =
+	#endif
+
+	push(lanes.data[i], thrd);
 
 	#ifdef USE_SNZI
@@ -287,4 +301,6 @@
 	#endif
 
+	__tls_rand_advance_bck();
+
 	// Unlock and return
 	__atomic_unlock( &lanes.data[i].lock );
@@ -311,8 +327,11 @@
 	/* paranoid */ verify( lanes.count > 0 );
 	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
+	int preferred;
 	#if defined(BIAS)
 		// Don't bother trying locally too much
 		int local_tries = 8;
-	#endif
+		preferred = kernelTLS.this_processor->id * 4;
+	#endif
+
 
 	// As long as the list is not empty, try finding a lane that isn't empty and pop from it
@@ -323,36 +342,21 @@
 		#endif
 		// Pick two lists at random
-		unsigned i,j;
-		#if defined(BIAS)
-			#if !defined(__CFA_NO_STATISTICS__)
-				bool local = false;
-			#endif
-			uint64_t r = __tls_rand();
-			unsigned rlow  = r % BIAS;
-			uint64_t rhigh = r / BIAS;
-			if(local_tries && 0 != rlow) {
-				// (BIAS - 1) out of BIAS chances
-				// Use preferred queues
-				unsigned pid = kernelTLS.this_processor->id * 4;
-				i = pid + (rhigh % 4);
-				j = pid + ((rhigh >> 32ull) % 4);
-
-				// count the tries
-				local_tries--;
-
-				#if !defined(__CFA_NO_STATISTICS__)
-					local = true;
-					__tls_stats()->ready.pick.pop.local++;
-				#endif
-			}
-			else {
-				// 1 out of BIAS chances
-				// Use all queues
-				i = rhigh;
-				j = rhigh >> 32ull;
-			}
-		#else
-			i = __tls_rand();
-			j = __tls_rand();
+		// unsigned ri = __tls_rand();
+		// unsigned rj = __tls_rand();
+		unsigned ri = __tls_rand_bck();
+		unsigned rj = __tls_rand_bck();
+
+		unsigned i, j;
+		__attribute__((unused)) bool locali, localj;
+		[i, locali] = idx_from_r(ri, preferred);
+		[j, localj] = idx_from_r(rj, preferred);
+
+		#if !defined(__CFA_NO_STATISTICS__)
+			if(locali) {
+				__tls_stats()->ready.pick.pop.local++;
+			}
+			if(localj) {
+				__tls_stats()->ready.pick.pop.local++;
+			}
 		#endif
 
@@ -364,5 +368,5 @@
 			if(thrd) {
 				#if defined(BIAS) && !defined(__CFA_NO_STATISTICS__)
-					if( local ) __tls_stats()->ready.pick.pop.lsuccess++;
+					if( locali || localj ) __tls_stats()->ready.pick.pop.lsuccess++;
 				#endif
 				return thrd;
@@ -543,5 +547,5 @@
 
 	// Allocate new array (uses realloc and memcpies the data)
-	lanes.data = alloc( lanes.data, ncount );
+	lanes.data = alloc( ncount, lanes.data`realloc );
 
 	// Fix the moved data
@@ -634,5 +638,5 @@
 
 	// Allocate new array (uses realloc and memcpies the data)
-	lanes.data = alloc( lanes.data, lanes.count );
+	lanes.data = alloc( lanes.count, lanes.data`realloc );
 
 	// Fix the moved data
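In idx_from_r, the low bits of the random draw choose between the biased and uniform paths: rlow = r % BIAS is non-zero in (BIAS - 1) out of BIAS draws, and each processor owns a block of 4 lanes starting at preferred = id * 4. A minimal standalone sketch of the selection math, assuming BIAS is 16 (the real constant is defined elsewhere in ready_queue.cfa):

    #define BIAS 16   // assumed value, for illustration only

    // With probability (BIAS-1)/BIAS pick one of the caller's 4 preferred
    // lanes; with probability 1/BIAS fall back to a uniformly random lane.
    static unsigned pick_lane( unsigned r, unsigned preferred ) {
    	unsigned rlow  = r % BIAS;   // selects biased vs. uniform path
    	unsigned rhigh = r / BIAS;   // remaining randomness, reused for the index
    	if ( rlow != 0 ) return preferred + (rhigh % 4);   // local lane
    	return rhigh;                                      // any lane
    }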
libcfa/src/exception.c
--- libcfa/src/exception.c (r29d618e)
+++ libcfa/src/exception.c (raf4487d)
@@ -209,9 +209,11 @@
 		void * stop_param) {
 	// Verify actions follow the rules we expect.
-	verify((actions & _UA_CLEANUP_PHASE) && (actions & _UA_FORCE_UNWIND));
-	verify(!(actions & (_UA_SEARCH_PHASE | _UA_HANDLER_FRAME)));
+	verify(actions & _UA_CLEANUP_PHASE);
+	verify(actions & _UA_FORCE_UNWIND);
+	verify(!(actions & _UA_SEARCH_PHASE));
+	verify(!(actions & _UA_HANDLER_FRAME));
 
 	if ( actions & _UA_END_OF_STACK ) {
-		exit(1);
+		abort();
 	} else {
 		return _URC_NO_REASON;
@@ -219,11 +221,27 @@
 }
 
-static struct _Unwind_Exception cancel_exception_storage;
+__attribute__((weak)) _Unwind_Reason_Code
+__cfaehm_cancellation_unwind( struct _Unwind_Exception * exception ) {
+	return _Unwind_ForcedUnwind( exception, _Stop_Fn, (void*)0x22 );
+}
 
 // Cancel the current stack, performing appropriate clean-up and messaging.
 void __cfaehm_cancel_stack( exception_t * exception ) {
-	// TODO: Detect current stack and pick a particular stop-function.
+	__cfaehm_allocate_exception( exception );
+
+	struct exception_context_t * context = this_exception_context();
+	struct __cfaehm_node * node = EXCEPT_TO_NODE(context->current_exception);
+
+	// Perform clean-up of any extra active exceptions.
+	while ( node->next ) {
+		struct __cfaehm_node * to_free = node->next;
+		node->next = to_free->next;
+		exception_t * except = NODE_TO_EXCEPT( to_free );
+		except->virtual_table->free( except );
+		free( to_free );
+	}
+
 	_Unwind_Reason_Code ret;
-	ret = _Unwind_ForcedUnwind( &cancel_exception_storage, _Stop_Fn, (void*)0x22 );
+	ret = __cfaehm_cancellation_unwind( &node->unwind_exception );
 	printf("UNWIND ERROR %d after force unwind\n", ret);
 	abort();
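Because __cfaehm_cancellation_unwind is declared __attribute__((weak)), a later-linked runtime component can substitute its own forced-unwind entry point, for example to pick a stop function appropriate to the current stack, per the removed TODO. A hypothetical strong override (illustrative only, not part of this changeset) would simply redefine the symbol:

    // Hypothetical override in a threading layer.
    _Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * exception ) {
    	// choose a stack-appropriate stop function instead of the default _Stop_Fn
    	return _Unwind_ForcedUnwind( exception, _Stop_Fn, (void*)0x22 );
    }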
libcfa/src/stdlib.hfa
--- libcfa/src/stdlib.hfa (r29d618e)
+++ libcfa/src/stdlib.hfa (raf4487d)
@@ -114,22 +114,83 @@
 } // distribution
 
+/*
+FIX ME : fix alloc interface after Ticket Number 214 is resolved, define and add union to S_fill. Then, modify postfix-fill functions to support T * with nmemb, char, and T object of any size. Finally, change alloc_internal.
+Or, just follow the instructions below for that.
+
+1. Replace the current forall-block that contains definitions of S_fill and S_realloc with following:
+	forall( dtype T | sized(T) ) {
+		union U_fill { char c; T * a; T t; };
+		struct S_fill { char tag; char c; size_t size; T * at; char t[50]; };
+		struct S_realloc { inline T *; };
+	}
+
+2. Replace all current postfix-fill functions with following for updated S_fill:
+	S_fill(T) ?`fill( char a ) { S_fill(T) ret = {'c'}; ret.fill.c = a; return ret; }
+	S_fill(T) ?`fill( T a ) { S_fill(T) ret = {'t'}; memcpy(&ret.fill.t, &a, sizeof(T)); return ret; }
+	S_fill(T) ?`fill( T a[], size_t nmemb ) { S_fill(T) ret = {'a', nmemb}; ret.fill.a = a; return ret; }
+
+3. Replace the $alloc_internal function which is outside ttype forall-block with following function:
+	T * $alloc_internal( void * Resize, T * Realloc, size_t Align, size_t Dim, S_fill(T) Fill) {
+		T * ptr = NULL;
+		size_t size = sizeof(T);
+		size_t copy_end = 0;
+
+		if(Resize) {
+			ptr = (T*) (void *) resize( (int *)Resize, Align, Dim * size );
+		} else if (Realloc) {
+			if (Fill.tag != '0') copy_end = min(malloc_size( Realloc ), Dim * size);
+			ptr = (T*) (void *) realloc( (int *)Realloc, Align, Dim * size );
+		} else {
+			ptr = (T*) (void *) memalign( Align, Dim * size );
+		}
+
+		if(Fill.tag == 'c') {
+			memset( (char *)ptr + copy_end, (int)Fill.fill.c, Dim * size - copy_end );
+		} else if(Fill.tag == 't') {
+			for ( int i = copy_end; i <= Dim * size - size ; i += size ) {
+				memcpy( (char *)ptr + i, &Fill.fill.t, size );
+			}
+		} else if(Fill.tag == 'a') {
+			memcpy( (char *)ptr + copy_end, Fill.fill.a, min(Dim * size - copy_end, size * Fill.nmemb) );
+		}
+
+		return ptr;
+	} // $alloc_internal
+*/
+
+typedef struct S_align { inline size_t; } T_align;
+typedef struct S_resize { inline void *; } T_resize;
+
+forall( dtype T ) {
+	struct S_fill { char tag; char c; size_t size; T * at; char t[50]; };
+	struct S_realloc { inline T *; };
+}
+
+static inline T_align  ?`align  ( size_t a ) { return (T_align){a}; }
+static inline T_resize ?`resize ( void * a ) { return (T_resize){a}; }
+
 static inline forall( dtype T | sized(T) ) {
-	// Cforall safe general allocation, fill, resize, array
-
-	T * alloc( void ) {
-		return malloc();
-	} // alloc
-
-	T * alloc( size_t dim ) {
-		return aalloc( dim );
-	} // alloc
-
-	forall( dtype S | sized(S) )
-	T * alloc( S ptr[], size_t dim = 1 ) { // singleton/array resize
-		return resize( (T *)ptr, dim * sizeof(T) ); // CFA resize
-	} // alloc
-
-	T * alloc( T ptr[], size_t dim = 1, bool copy = true ) {
-		if ( copy ) {
-			return realloc( ptr, dim * sizeof(T) ); // CFA realloc
+
+	S_fill(T) ?`fill ( T t ) {
+		S_fill(T) ret = { 't' };
+		size_t size = sizeof(T);
+		if(size > sizeof(ret.t)) { printf("ERROR: const object of size greater than 50 bytes given for dynamic memory fill\n"); exit(1); }
+		memcpy( &ret.t, &t, size );
+		return ret;
+	}
+	S_fill(T) ?`fill ( char c ) { return (S_fill(T)){ 'c', c }; }
+	S_fill(T) ?`fill ( T * a ) { return (S_fill(T)){ 'T', '0', 0, a }; }
+	S_fill(T) ?`fill ( T a[], size_t nmemb ) { return (S_fill(T)){ 'a', '0', nmemb * sizeof(T), a }; }
+
+	S_realloc(T) ?`realloc ( T * a ) { return (S_realloc(T)){a}; }
+
+	T * $alloc_internal( void * Resize, T * Realloc, size_t Align, size_t Dim, S_fill(T) Fill) {
+		T * ptr = NULL;
+		size_t size = sizeof(T);
+		size_t copy_end = 0;
+
+		if(Resize) {
+			ptr = (T*) (void *) resize( (int *)Resize, Align, Dim * size );
+		} else if (Realloc) {
+			if (Fill.tag != '0') copy_end = min(malloc_size( Realloc ), Dim * size);
+			ptr = (T*) (void *) realloc( (int *)Realloc, Align, Dim * size );
 		} else {
@@ -136,119 +197,51 @@
-			return resize( ptr, dim * sizeof(T) ); // CFA resize
-		} // if
-	} // alloc
-
-	T * alloc_set( char fill ) {
-		return (T *)memset( (T *)alloc(), (int)fill, sizeof(T) ); // initialize with fill value
-	} // alloc_set
-
-	T * alloc_set( const T & fill ) {
-		return (T *)memcpy( (T *)alloc(), &fill, sizeof(T) ); // initialize with fill value
-	} // alloc_set
-
-	T * alloc_set( size_t dim, char fill ) {
-		return (T *)memset( (T *)alloc( dim ), (int)fill, dim * sizeof(T) ); // initialize with fill value
-	} // alloc_set
-
-	T * alloc_set( size_t dim, const T & fill ) {
-		T * r = (T *)alloc( dim );
-		for ( i; dim ) { memcpy( &r[i], &fill, sizeof(T) ); } // initialize with fill value
-		return r;
-	} // alloc_set
-
-	T * alloc_set( size_t dimNew, const T fill[], size_t dimOld ) {
-		return (T *)memcpy( (T *)alloc( dimNew ), fill, min( dimNew, dimOld ) * sizeof(T) ); // initialize with fill value
-	} // alloc_set
-
-	T * alloc_set( T ptr[], size_t dim, char fill ) { // realloc array with fill
-		size_t osize = malloc_size( ptr ); // current allocation
-		size_t nsize = dim * sizeof(T); // new allocation
-		T * nptr = realloc( ptr, nsize ); // CFA realloc
-		if ( nsize > osize ) { // larger ?
-			memset( (char *)nptr + osize, (int)fill, nsize - osize ); // initialize added storage
-		} // if
-		return nptr;
-	} // alloc_set
-
-	T * alloc_set( T ptr[], size_t dim, const T & fill ) { // realloc array with fill
-		size_t odim = malloc_size( ptr ) / sizeof(T); // current dimension
-		size_t nsize = dim * sizeof(T); // new allocation
-		size_t ndim = nsize / sizeof(T); // new dimension
-		T * nptr = realloc( ptr, nsize ); // CFA realloc
-		if ( ndim > odim ) { // larger ?
-			for ( i; odim ~ ndim ) {
-				memcpy( &nptr[i], &fill, sizeof(T) ); // initialize with fill value
-			} // for
-		} // if
-		return nptr;
-	} // alloc_set
-} // distribution
-
-static inline forall( dtype T | sized(T) ) {
-	T * alloc_align( size_t align ) {
-		return (T *)memalign( align, sizeof(T) );
-	} // alloc_align
-
-	T * alloc_align( size_t align, size_t dim ) {
-		return (T *)memalign( align, dim * sizeof(T) );
-	} // alloc_align
-
-	T * alloc_align( T * ptr, size_t align ) { // aligned realloc array
-		return (T *)(void *)realloc( (void *)ptr, align, sizeof(T) ); // CFA C realloc
-	} // alloc_align
-
-	forall( dtype S | sized(S) )
-	T * alloc_align( S ptr[], size_t align ) { // aligned reuse array
-		return (T *)(void *)resize( (void *)ptr, align, sizeof(T) ); // CFA realloc
-	} // alloc_align
-
-	T * alloc_align( T ptr[], size_t align, size_t dim ) { // aligned realloc array
-		return (T *)(void *)realloc( (void *)ptr, align, dim * sizeof(T) ); // CFA realloc
-	} // alloc_align
-
-	T * alloc_align_set( size_t align, char fill ) {
-		return (T *)memset( (T *)alloc_align( align ), (int)fill, sizeof(T) ); // initialize with fill value
-	} // alloc_align_set
-
-	T * alloc_align_set( size_t align, const T & fill ) {
-		return (T *)memcpy( (T *)alloc_align( align ), &fill, sizeof(T) ); // initialize with fill value
-	} // alloc_align_set
-
-	T * alloc_align_set( size_t align, size_t dim, char fill ) {
-		return (T *)memset( (T *)alloc_align( align, dim ), (int)fill, dim * sizeof(T) ); // initialize with fill value
-	} // alloc_align_set
-
-	T * alloc_align_set( size_t align, size_t dim, const T & fill ) {
-		T * r = (T *)alloc_align( align, dim );
-		for ( i; dim ) { memcpy( &r[i], &fill, sizeof(T) ); } // initialize with fill value
-		return r;
-	} // alloc_align_set
-
-	T * alloc_align_set( size_t align, size_t dimNew, const T fill[], size_t dimOld ) {
-		return (T *)memcpy( (T *)alloc_align( align, dimNew ), fill, min( dimNew, dimOld ) * sizeof(T) );
-	} // alloc_align_set
-
-	T * alloc_align_set( T ptr[], size_t align, size_t dim, char fill ) {
-		size_t osize = malloc_size( ptr ); // current allocation
-		size_t nsize = dim * sizeof(T); // new allocation
-		T * nptr = alloc_align( ptr, align, nsize );
-		if ( nsize > osize ) { // larger ?
-			memset( (char *)nptr + osize, (int)fill, nsize - osize ); // initialize added storage
-		} // if
-		return nptr;
-	} // alloc_align_set
-
-	T * alloc_align_set( T ptr[], size_t align, size_t dim, const T & fill ) {
-		size_t odim = malloc_size( ptr ) / sizeof(T); // current dimension
-		size_t nsize = dim * sizeof(T); // new allocation
-		size_t ndim = nsize / sizeof(T); // new dimension
-		T * nptr = alloc_align( ptr, align, nsize );
-		if ( ndim > odim ) { // larger ?
-			for ( i; odim ~ ndim ) {
-				memcpy( &nptr[i], &fill, sizeof(T) ); // initialize with fill value
-			} // for
-		} // if
-		return nptr;
-	} // alloc_align_set
-} // distribution
+			ptr = (T*) (void *) memalign( Align, Dim * size );
+		}
+
+		if(Fill.tag == 'c') {
+			memset( (char *)ptr + copy_end, (int)Fill.c, Dim * size - copy_end );
+		} else if(Fill.tag == 't') {
+			for ( int i = copy_end; i <= Dim * size - size ; i += size ) {
+				memcpy( (char *)ptr + i, &Fill.t, size );
+			}
+		} else if(Fill.tag == 'a') {
+			memcpy( (char *)ptr + copy_end, Fill.at, min(Dim * size - copy_end, Fill.size) );
+		} else if(Fill.tag == 'T') {
+			for ( int i = copy_end; i <= Dim * size - size ; i += size ) {
+				memcpy( (char *)ptr + i, Fill.at, size );
+			}
+		}
+
+		return ptr;
+	} // $alloc_internal
+
+	forall( ttype TT | { T * $alloc_internal( void *, T *, size_t, size_t, S_fill(T), TT ); } ) {
+
+		T * $alloc_internal( void *, T * Realloc, size_t Align, size_t Dim, S_fill(T) Fill, T_resize Resize, TT rest ) {
+			return $alloc_internal( Resize, (T*)0p, Align, Dim, Fill, rest );
+		}
+
+		T * $alloc_internal( void * Resize, T *, size_t Align, size_t Dim, S_fill(T) Fill, S_realloc(T) Realloc, TT rest ) {
+			return $alloc_internal( (void*)0p, Realloc, Align, Dim, Fill, rest );
+		}
+
+		T * $alloc_internal( void * Resize, T * Realloc, size_t, size_t Dim, S_fill(T) Fill, T_align Align, TT rest ) {
+			return $alloc_internal( Resize, Realloc, Align, Dim, Fill, rest );
+		}
+
+		T * $alloc_internal( void * Resize, T * Realloc, size_t Align, size_t Dim, S_fill(T), S_fill(T) Fill, TT rest ) {
+			return $alloc_internal( Resize, Realloc, Align, Dim, Fill, rest );
+		}
+
+		T * alloc( TT all ) {
+			return $alloc_internal( (void*)0p, (T*)0p, (_Alignof(T) > libAlign() ? _Alignof(T) : libAlign()), (size_t)1, (S_fill(T)){'0'}, all );
+		}
+
+		T * alloc( size_t dim, TT all ) {
+			return $alloc_internal( (void*)0p, (T*)0p, (_Alignof(T) > libAlign() ? _Alignof(T) : libAlign()), dim, (S_fill(T)){'0'}, all );
+		}
+
+	} // distribution TT
+
+} // distribution T
 
 static inline forall( dtype T | sized(T) ) {
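Under the ttype recursion above, each postfix-tagged argument peels off one $alloc_internal overload until only the base call remains, so the single name alloc replaces the old alloc/alloc_set/alloc_align/alloc_align_set matrix. A few plausible calls under this interface (variable names hypothetical; semantics as defined by $alloc_internal above):

    int * a = alloc( 10 );                  // 10 ints, default alignment, uninitialized
    int * b = alloc( 10, '\0'`fill );       // 10 ints, zero-filled
    int * c = alloc( 10, 64`align );        // 10 ints, 64-byte aligned
    b = alloc( 20, b`realloc, '\0'`fill );  // grow b to 20 ints, zero-filling the added tail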