Changeset 13d33a75 for libcfa/src
- Timestamp: Aug 18, 2020, 4:31:19 PM (5 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 8e9d567
- Parents: ef9988b (diff), f2384c9a (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Location: libcfa/src
- Files: 22 edited
libcfa/src/bits/locks.hfa
ref9988b → r13d33a75

@@ -219 +219 @@
 	}
 }
+
+// Semaphore which only supports a single thread and one post
+// Semaphore which only supports a single thread
+struct oneshot {
+	struct $thread * volatile ptr;
+};
+
+static inline {
+	void ?{}(oneshot & this) {
+		this.ptr = 0p;
+	}
+
+	void ^?{}(oneshot & this) {}
+
+	bool wait(oneshot & this) {
+		for() {
+			struct $thread * expected = this.ptr;
+			if(expected == 1p) return false;
+			/* paranoid */ verify( expected == 0p );
+			if(__atomic_compare_exchange_n(&this.ptr, &expected, active_thread(), false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+				park( __cfaabi_dbg_ctx );
+				/* paranoid */ verify( this.ptr == 1p );
+				return true;
+			}
+		}
+	}
+
+	bool post(oneshot & this) {
+		struct $thread * got = __atomic_exchange_n( &this.ptr, 1p, __ATOMIC_SEQ_CST);
+		if( got == 0p ) return false;
+		unpark( got __cfaabi_dbg_ctx2 );
+		return true;
+	}
+}
 #endif
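The oneshot above packs the whole semaphore into a single pointer with three states: 0p (no waiter yet), 1p (posted before any waiter), or the address of the parked thread. A minimal standalone C sketch of the same handshake, with a spin-wait and a dummy WAITING tag standing in for park()/unpark() and the $thread pointer (names and structure here are illustrative only, not part of the changeset):

    #include <pthread.h>
    #include <sched.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EMPTY   ((uintptr_t)0)  /* 0p: no waiter yet, not posted              */
    #define POSTED  ((uintptr_t)1)  /* 1p: post() arrived before wait()           */
    #define WAITING ((uintptr_t)2)  /* stand-in for the waiting $thread's address */

    static _Atomic uintptr_t slot = EMPTY;

    static void * waiter(void * arg) {
        (void)arg;
        uintptr_t expected = EMPTY;
        /* wait(): try to register as the waiter; if post() already ran, return immediately */
        if (!atomic_compare_exchange_strong(&slot, &expected, WAITING)) {
            if (expected == POSTED) { puts("fast path: post came first"); return NULL; }
        }
        /* "park": the CFA version blocks the thread here; this sketch just yields */
        while (atomic_load(&slot) != POSTED) sched_yield();
        puts("slow path: woken by post");
        return NULL;
    }

    int main(void) {
        pthread_t t;
        pthread_create(&t, NULL, waiter, NULL);
        /* post(): publish POSTED; a registered waiter then stops yielding (unpark in the real code) */
        uintptr_t got = atomic_exchange(&slot, POSTED);
        (void)got;  /* in the real code, a value other than 0p/1p is the thread to unpark */
        pthread_join(t, NULL);
        return 0;
    }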
libcfa/src/common.hfa
ref9988b → r13d33a75

@@ -10 +10 @@
 // Created On : Wed Jul 11 17:54:36 2018
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Thu Jul 12 08:02:18 2018
-// Update Count : 5
+// Last Modified On : Sat Aug 15 08:51:29 2020
+// Update Count : 14
 //

@@ -67 +67 @@

 static inline {
+	char min( char t1, char t2 ) { return t1 < t2 ? t1 : t2; } // optimization
+	intptr_t min( intptr_t t1, intptr_t t2 ) { return t1 < t2 ? t1 : t2; } // optimization
+	uintptr_t min( uintptr_t t1, uintptr_t t2 ) { return t1 < t2 ? t1 : t2; } // optimization
 	forall( otype T | { int ?<?( T, T ); } )
 	T min( T t1, T t2 ) { return t1 < t2 ? t1 : t2; }

+	char max( char t1, char t2 ) { return t1 > t2 ? t1 : t2; } // optimization
+	intptr_t max( intptr_t t1, intptr_t t2 ) { return t1 > t2 ? t1 : t2; } // optimization
+	uintptr_t max( uintptr_t t1, uintptr_t t2 ) { return t1 > t2 ? t1 : t2; } // optimization
 	forall( otype T | { int ?>?( T, T ); } )
 	T max( T t1, T t2 ) { return t1 > t2 ? t1 : t2; }
libcfa/src/concurrency/coroutine.cfa
ref9988b → r13d33a75

@@ -215 +215 @@
 	return cor;
 }
+
+struct $coroutine * __cfactx_cor_active(void) {
+	return active_coroutine();
+}
 }
libcfa/src/concurrency/invoke.c
ref9988b → r13d33a75

@@ -29 +29 @@
 // Called from the kernel when starting a coroutine or task so must switch back to user mode.

+extern struct $coroutine * __cfactx_cor_active(void);
 extern struct $coroutine * __cfactx_cor_finish(void);
 extern void __cfactx_cor_leave ( struct $coroutine * );

@@ -35 +36 @@
 extern void disable_interrupts() OPTIONAL_THREAD;
 extern void enable_interrupts( __cfaabi_dbg_ctx_param );
+
+struct exception_context_t * this_exception_context() {
+	return &__get_stack( __cfactx_cor_active() )->exception_context;
+}

 void __cfactx_invoke_coroutine(
libcfa/src/concurrency/invoke.h
ref9988b → r13d33a75

@@ -26 +26 @@
 #ifndef _INVOKE_H_
 #define _INVOKE_H_
+
+struct __cfaehm_try_resume_node;
+struct __cfaehm_base_exception_t;
+struct exception_context_t {
+	struct __cfaehm_try_resume_node * top_resume;
+	struct __cfaehm_base_exception_t * current_exception;
+};

 struct __stack_context_t {

@@ -51 +58 @@
 	// base of stack
 	void * base;
+
+	// Information for exception handling.
+	struct exception_context_t exception_context;
 };

@@ -84 +94 @@
 };

-static inline struct __stack_t * __get_stack( struct $coroutine * cor ) { return (struct __stack_t*)(((uintptr_t)cor->stack.storage) & ((uintptr_t)-2)); }
+static inline struct __stack_t * __get_stack( struct $coroutine * cor ) {
+	return (struct __stack_t*)(((uintptr_t)cor->stack.storage) & ((uintptr_t)-2));
+}
+
+struct exception_context_t * this_exception_context();

 // struct which calls the monitor is accepting
libcfa/src/concurrency/io.cfa
ref9988b r13d33a75 41 41 #include "kernel/fwd.hfa" 42 42 #include "io/types.hfa" 43 44 // returns true of acquired as leader or second leader 45 static inline bool try_lock( __leaderlock_t & this ) { 46 const uintptr_t thrd = 1z | (uintptr_t)active_thread(); 47 bool block; 48 disable_interrupts(); 49 for() { 50 struct $thread * expected = this.value; 51 if( 1p != expected && 0p != expected ) { 52 /* paranoid */ verify( thrd != (uintptr_t)expected ); // We better not already be the next leader 53 enable_interrupts( __cfaabi_dbg_ctx ); 54 return false; 55 } 56 struct $thread * desired; 57 if( 0p == expected ) { 58 // If the lock isn't locked acquire it, no need to block 59 desired = 1p; 60 block = false; 61 } 62 else { 63 // If the lock is already locked try becomming the next leader 64 desired = (struct $thread *)thrd; 65 block = true; 66 } 67 if( __atomic_compare_exchange_n(&this.value, &expected, desired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) break; 68 } 69 if( block ) { 70 enable_interrupts( __cfaabi_dbg_ctx ); 71 park( __cfaabi_dbg_ctx ); 72 disable_interrupts(); 73 } 74 return true; 75 } 76 77 static inline bool next( __leaderlock_t & this ) { 78 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 79 struct $thread * nextt; 80 for() { 81 struct $thread * expected = this.value; 82 /* paranoid */ verify( (1 & (uintptr_t)expected) == 1 ); // The lock better be locked 83 84 struct $thread * desired; 85 if( 1p == expected ) { 86 // No next leader, just unlock 87 desired = 0p; 88 nextt = 0p; 89 } 90 else { 91 // There is a next leader, remove but keep locked 92 desired = 1p; 93 nextt = (struct $thread *)(~1z & (uintptr_t)expected); 94 } 95 if( __atomic_compare_exchange_n(&this.value, &expected, desired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) break; 96 } 97 98 if(nextt) { 99 unpark( nextt __cfaabi_dbg_ctx2 ); 100 enable_interrupts( __cfaabi_dbg_ctx ); 101 return true; 102 } 103 enable_interrupts( __cfaabi_dbg_ctx ); 104 return false; 105 } 43 106 44 107 //============================================================================================= … … 93 156 //============================================================================================= 94 157 static unsigned __collect_submitions( struct __io_data & ring ); 95 static uint32_t__release_consumed_submission( struct __io_data & ring );158 static __u32 __release_consumed_submission( struct __io_data & ring ); 96 159 97 160 static inline void process(struct io_uring_cqe & cqe ) { … … 100 163 101 164 data->result = cqe.res; 102 unpark( data->thrd __cfaabi_dbg_ctx2);165 post( data->sem ); 103 166 } 104 167 … … 136 199 unsigned head = *ring.completion_q.head; 137 200 unsigned tail = *ring.completion_q.tail; 138 const uint32_tmask = *ring.completion_q.mask;201 const __u32 mask = *ring.completion_q.mask; 139 202 140 203 // Nothing was new return 0 … … 143 206 } 144 207 145 uint32_tcount = tail - head;208 __u32 count = tail - head; 146 209 /* paranoid */ verify( count != 0 ); 147 210 for(i; count) { … … 182 245 __STATS__( true, 183 246 io.complete_q.completed_avg.val += count; 184 io.complete_q.completed_avg. 
fast_cnt += 1;247 io.complete_q.completed_avg.cnt += 1; 185 248 ) 186 249 enable_interrupts( __cfaabi_dbg_ctx ); … … 192 255 // We didn't get anything baton pass to the slow poller 193 256 else { 257 __STATS__( false, 258 io.complete_q.blocks += 1; 259 ) 194 260 __cfadbg_print_safe(io_core, "Kernel I/O : Parking io poller %p\n", &this.self); 195 261 reset = 0; … … 224 290 // 225 291 226 [* struct io_uring_sqe, uint32_t] __submit_alloc( struct __io_data & ring, uint64_tdata ) {292 [* struct io_uring_sqe, __u32] __submit_alloc( struct __io_data & ring, __u64 data ) { 227 293 /* paranoid */ verify( data != 0 ); 228 294 … … 230 296 __attribute((unused)) int len = 0; 231 297 __attribute((unused)) int block = 0; 232 uint32_tcnt = *ring.submit_q.num;233 uint32_tmask = *ring.submit_q.mask;298 __u32 cnt = *ring.submit_q.num; 299 __u32 mask = *ring.submit_q.mask; 234 300 235 301 disable_interrupts(); 236 uint32_toff = __tls_rand();302 __u32 off = __tls_rand(); 237 303 enable_interrupts( __cfaabi_dbg_ctx ); 238 304 … … 241 307 // Look through the list starting at some offset 242 308 for(i; cnt) { 243 uint64_texpected = 0;244 uint32_tidx = (i + off) & mask;309 __u64 expected = 0; 310 __u32 idx = (i + off) & mask; 245 311 struct io_uring_sqe * sqe = &ring.submit_q.sqes[idx]; 246 volatile uint64_t * udata = (volatile uint64_t *)&sqe->user_data;312 volatile __u64 * udata = &sqe->user_data; 247 313 248 314 if( *udata == expected && … … 270 336 } 271 337 272 static inline uint32_t __submit_to_ready_array( struct __io_data & ring, uint32_t idx, const uint32_tmask ) {338 static inline __u32 __submit_to_ready_array( struct __io_data & ring, __u32 idx, const __u32 mask ) { 273 339 /* paranoid */ verify( idx <= mask ); 274 340 /* paranoid */ verify( idx != -1ul32 ); … … 277 343 __attribute((unused)) int len = 0; 278 344 __attribute((unused)) int block = 0; 279 uint32_tready_mask = ring.submit_q.ready_cnt - 1;345 __u32 ready_mask = ring.submit_q.ready_cnt - 1; 280 346 281 347 disable_interrupts(); 282 uint32_toff = __tls_rand();348 __u32 off = __tls_rand(); 283 349 enable_interrupts( __cfaabi_dbg_ctx ); 284 350 285 uint32_tpicked;351 __u32 picked; 286 352 LOOKING: for() { 287 353 for(i; ring.submit_q.ready_cnt) { 288 354 picked = (i + off) & ready_mask; 289 uint32_texpected = -1ul32;355 __u32 expected = -1ul32; 290 356 if( __atomic_compare_exchange_n( &ring.submit_q.ready[picked], &expected, idx, true, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED ) ) { 291 357 break LOOKING; … … 297 363 298 364 block++; 299 if( try_lock(ring.submit_q.lock __cfaabi_dbg_ctx2) ) { 300 __release_consumed_submission( ring ); 301 unlock( ring.submit_q.lock ); 302 } 303 else { 365 366 __u32 released = __release_consumed_submission( ring ); 367 if( released == 0 ) { 304 368 yield(); 305 369 } … … 316 380 } 317 381 318 void __submit( struct io_context * ctx, uint32_tidx ) __attribute__((nonnull (1))) {382 void __submit( struct io_context * ctx, __u32 idx ) __attribute__((nonnull (1))) { 319 383 __io_data & ring = *ctx->thrd.ring; 320 384 // Get now the data we definetely need 321 volatile uint32_t* const tail = ring.submit_q.tail;322 const uint32_tmask = *ring.submit_q.mask;385 volatile __u32 * const tail = ring.submit_q.tail; 386 const __u32 mask = *ring.submit_q.mask; 323 387 324 388 // There are 2 submission schemes, check which one we are using … … 332 396 } 333 397 else if( ring.eager_submits ) { 334 uint32_t picked = __submit_to_ready_array( ring, idx, mask ); 335 336 for() { 337 yield(); 338 339 // If some one else collected our index, we 
are done 340 #warning ABA problem 341 if( ring.submit_q.ready[picked] != idx ) { 398 __u32 picked = __submit_to_ready_array( ring, idx, mask ); 399 400 #if defined(LEADER_LOCK) 401 if( !try_lock(ring.submit_q.submit_lock) ) { 342 402 __STATS__( false, 343 403 io.submit_q.helped += 1; … … 345 405 return; 346 406 } 347 348 if( try_lock(ring.submit_q.lock __cfaabi_dbg_ctx2) ) { 407 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 408 __STATS__( true, 409 io.submit_q.leader += 1; 410 ) 411 #else 412 for() { 413 yield(); 414 415 if( try_lock(ring.submit_q.submit_lock __cfaabi_dbg_ctx2) ) { 416 __STATS__( false, 417 io.submit_q.leader += 1; 418 ) 419 break; 420 } 421 422 // If some one else collected our index, we are done 423 #warning ABA problem 424 if( ring.submit_q.ready[picked] != idx ) { 425 __STATS__( false, 426 io.submit_q.helped += 1; 427 ) 428 return; 429 } 430 349 431 __STATS__( false, 350 io.submit_q. leader+= 1;432 io.submit_q.busy += 1; 351 433 ) 352 break; 353 } 354 355 __STATS__( false, 356 io.submit_q.busy += 1; 357 ) 358 } 434 } 435 #endif 359 436 360 437 // We got the lock 438 // Collect the submissions 361 439 unsigned to_submit = __collect_submitions( ring ); 440 441 // Actually submit 362 442 int ret = __io_uring_enter( ring, to_submit, false ); 363 if( ret < 0 ) { 364 unlock(ring.submit_q.lock); 365 return; 366 } 367 368 /* paranoid */ verify( ret > 0 || to_submit == 0 || (ring.ring_flags & IORING_SETUP_SQPOLL) ); 443 444 #if defined(LEADER_LOCK) 445 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 446 next(ring.submit_q.submit_lock); 447 #else 448 unlock(ring.submit_q.submit_lock); 449 #endif 450 if( ret < 0 ) return; 369 451 370 452 // Release the consumed SQEs … … 372 454 373 455 // update statistics 374 __STATS__( true,456 __STATS__( false, 375 457 io.submit_q.submit_avg.rdy += to_submit; 376 458 io.submit_q.submit_avg.csm += ret; 377 459 io.submit_q.submit_avg.cnt += 1; 378 460 ) 379 380 unlock(ring.submit_q.lock);381 461 } 382 462 else { 383 463 // get mutual exclusion 384 lock(ring.submit_q.lock __cfaabi_dbg_ctx2); 464 #if defined(LEADER_LOCK) 465 while(!try_lock(ring.submit_q.submit_lock)); 466 #else 467 lock(ring.submit_q.submit_lock __cfaabi_dbg_ctx2); 468 #endif 385 469 386 470 /* paranoid */ verifyf( ring.submit_q.sqes[ idx ].user_data != 0, … … 420 504 __release_consumed_submission( ring ); 421 505 422 unlock(ring.submit_q.lock); 506 #if defined(LEADER_LOCK) 507 next(ring.submit_q.submit_lock); 508 #else 509 unlock(ring.submit_q.submit_lock); 510 #endif 423 511 424 512 __cfadbg_print_safe( io, "Kernel I/O : Performed io_submit for %p, returned %d\n", active_thread(), ret ); … … 426 514 } 427 515 516 // #define PARTIAL_SUBMIT 32 428 517 static unsigned __collect_submitions( struct __io_data & ring ) { 429 518 /* paranoid */ verify( ring.submit_q.ready != 0p ); … … 431 520 432 521 unsigned to_submit = 0; 433 uint32_t tail = *ring.submit_q.tail; 434 const uint32_t mask = *ring.submit_q.mask; 522 __u32 tail = *ring.submit_q.tail; 523 const __u32 mask = *ring.submit_q.mask; 524 #if defined(PARTIAL_SUBMIT) 525 #if defined(LEADER_LOCK) 526 #error PARTIAL_SUBMIT and LEADER_LOCK cannot co-exist 527 #endif 528 const __u32 cnt = ring.submit_q.ready_cnt > PARTIAL_SUBMIT ? 
PARTIAL_SUBMIT : ring.submit_q.ready_cnt; 529 const __u32 offset = ring.submit_q.prev_ready; 530 ring.submit_q.prev_ready += cnt; 531 #else 532 const __u32 cnt = ring.submit_q.ready_cnt; 533 const __u32 offset = 0; 534 #endif 435 535 436 536 // Go through the list of ready submissions 437 for( i; ring.submit_q.ready_cnt ) { 537 for( c; cnt ) { 538 __u32 i = (offset + c) % ring.submit_q.ready_cnt; 539 438 540 // replace any submission with the sentinel, to consume it. 439 uint32_tidx = __atomic_exchange_n( &ring.submit_q.ready[i], -1ul32, __ATOMIC_RELAXED);541 __u32 idx = __atomic_exchange_n( &ring.submit_q.ready[i], -1ul32, __ATOMIC_RELAXED); 440 542 441 543 // If it was already the sentinel, then we are done … … 453 555 } 454 556 455 static uint32_t__release_consumed_submission( struct __io_data & ring ) {456 const uint32_tsmask = *ring.submit_q.mask;557 static __u32 __release_consumed_submission( struct __io_data & ring ) { 558 const __u32 smask = *ring.submit_q.mask; 457 559 458 560 if( !try_lock(ring.submit_q.release_lock __cfaabi_dbg_ctx2) ) return 0; 459 uint32_tchead = *ring.submit_q.head;460 uint32_tphead = ring.submit_q.prev_head;561 __u32 chead = *ring.submit_q.head; 562 __u32 phead = ring.submit_q.prev_head; 461 563 ring.submit_q.prev_head = chead; 462 564 unlock(ring.submit_q.release_lock); 463 565 464 uint32_tcount = chead - phead;566 __u32 count = chead - phead; 465 567 for( i; count ) { 466 uint32_tidx = ring.submit_q.array[ (phead + i) & smask ];568 __u32 idx = ring.submit_q.array[ (phead + i) & smask ]; 467 569 ring.submit_q.sqes[ idx ].user_data = 0; 468 570 } -
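Both the new __leaderlock_t and the 1z | (uintptr_t)active_thread() expression in io.cfa above rely on pointer tagging: the low bit of the word records "locked" while the remaining bits hold the next leader, which works because $thread objects are aligned to more than one byte. A small C illustration of that encoding (fake_thread is a stand-in, not the real $thread):

    #include <assert.h>
    #include <stdint.h>

    struct fake_thread { int id; };  /* stand-in for $thread; alignment > 1 keeps bit 0 free */

    int main(void) {
        struct fake_thread waiter = { 42 };

        /* lock word: bit 0 = "is locked", remaining bits = next leader (as in try_lock above) */
        uintptr_t word = (uintptr_t)1 | (uintptr_t)&waiter;

        assert((word & (uintptr_t)1) == 1);  /* lock bit is set */

        /* next() recovers the pointer by masking the tag bit back off */
        struct fake_thread * next = (struct fake_thread *)(word & ~(uintptr_t)1);
        assert(next == &waiter && next->id == 42);
        return 0;
    }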
libcfa/src/concurrency/io/setup.cfa
ref9988b r13d33a75 298 298 if( params_in.poll_complete ) params.flags |= IORING_SETUP_IOPOLL; 299 299 300 uint32_t nentries = params_in.num_entries; 300 __u32 nentries = params_in.num_entries != 0 ? params_in.num_entries : 256; 301 if( !is_pow2(nentries) ) { 302 abort("ERROR: I/O setup 'num_entries' must be a power of 2\n"); 303 } 304 if( params_in.poller_submits && params_in.eager_submits ) { 305 abort("ERROR: I/O setup 'poller_submits' and 'eager_submits' cannot be used together\n"); 306 } 301 307 302 308 int fd = syscall(__NR_io_uring_setup, nentries, ¶ms ); … … 356 362 // Get the pointers from the kernel to fill the structure 357 363 // submit queue 358 sq.head = (volatile uint32_t*)(((intptr_t)sq.ring_ptr) + params.sq_off.head);359 sq.tail = (volatile uint32_t*)(((intptr_t)sq.ring_ptr) + params.sq_off.tail);360 sq.mask = ( const uint32_t*)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_mask);361 sq.num = ( const uint32_t*)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_entries);362 sq.flags = ( uint32_t*)(((intptr_t)sq.ring_ptr) + params.sq_off.flags);363 sq.dropped = ( uint32_t*)(((intptr_t)sq.ring_ptr) + params.sq_off.dropped);364 sq.array = ( uint32_t*)(((intptr_t)sq.ring_ptr) + params.sq_off.array);364 sq.head = (volatile __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.head); 365 sq.tail = (volatile __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.tail); 366 sq.mask = ( const __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_mask); 367 sq.num = ( const __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.ring_entries); 368 sq.flags = ( __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.flags); 369 sq.dropped = ( __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.dropped); 370 sq.array = ( __u32 *)(((intptr_t)sq.ring_ptr) + params.sq_off.array); 365 371 sq.prev_head = *sq.head; 366 372 367 373 { 368 const uint32_tnum = *sq.num;374 const __u32 num = *sq.num; 369 375 for( i; num ) { 370 376 sq.sqes[i].user_data = 0ul64; … … 372 378 } 373 379 374 (sq. lock){};380 (sq.submit_lock){}; 375 381 (sq.release_lock){}; 376 382 … … 382 388 sq.ready[i] = -1ul32; 383 389 } 390 sq.prev_ready = 0; 384 391 } 385 392 else { 386 393 sq.ready_cnt = 0; 387 394 sq.ready = 0p; 395 sq.prev_ready = 0; 388 396 } 389 397 390 398 // completion queue 391 cq.head = (volatile uint32_t*)(((intptr_t)cq.ring_ptr) + params.cq_off.head);392 cq.tail = (volatile uint32_t*)(((intptr_t)cq.ring_ptr) + params.cq_off.tail);393 cq.mask = ( const uint32_t*)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_mask);394 cq.num = ( const uint32_t*)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_entries);395 cq.overflow = ( uint32_t*)(((intptr_t)cq.ring_ptr) + params.cq_off.overflow);396 cq.cqes 399 cq.head = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.head); 400 cq.tail = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.tail); 401 cq.mask = ( const __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_mask); 402 cq.num = ( const __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.ring_entries); 403 cq.overflow = ( __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.overflow); 404 cq.cqes = (struct io_uring_cqe *)(((intptr_t)cq.ring_ptr) + params.cq_off.cqes); 397 405 398 406 // some paranoid checks … … 442 450 void __ioctx_register($io_ctx_thread & ctx, struct epoll_event & ev) { 443 451 ev.events = EPOLLIN | EPOLLONESHOT; 444 ev.data.u64 = ( uint64_t)&ctx;452 ev.data.u64 = (__u64)&ctx; 445 453 int ret = epoll_ctl(iopoll.epollfd, EPOLL_CTL_ADD, ctx.ring->fd, &ev); 446 454 if (ret < 0) { -
libcfa/src/concurrency/io/types.hfa
ref9988b r13d33a75 17 17 18 18 #if defined(CFA_HAVE_LINUX_IO_URING_H) 19 extern "C" { 20 #include <linux/types.h> 21 } 22 19 23 #include "bits/locks.hfa" 24 25 #define LEADER_LOCK 26 struct __leaderlock_t { 27 struct $thread * volatile value; // ($thread) next_leader | (bool:1) is_locked 28 }; 29 30 static inline void ?{}( __leaderlock_t & this ) { this.value = 0p; } 20 31 21 32 //----------------------------------------------------------------------- … … 23 34 struct __submition_data { 24 35 // Head and tail of the ring (associated with array) 25 volatile uint32_t* head;26 volatile uint32_t* tail;27 volatile uint32_tprev_head;36 volatile __u32 * head; 37 volatile __u32 * tail; 38 volatile __u32 prev_head; 28 39 29 40 // The actual kernel ring which uses head/tail 30 41 // indexes into the sqes arrays 31 uint32_t* array;42 __u32 * array; 32 43 33 44 // number of entries and mask to go with it 34 const uint32_t* num;35 const uint32_t* mask;45 const __u32 * num; 46 const __u32 * mask; 36 47 37 48 // Submission flags (Not sure what for) 38 uint32_t* flags;49 __u32 * flags; 39 50 40 51 // number of sqes not submitted (whatever that means) 41 uint32_t* dropped;52 __u32 * dropped; 42 53 43 54 // Like head/tail but not seen by the kernel 44 volatile uint32_t * ready; 45 uint32_t ready_cnt; 55 volatile __u32 * ready; 56 __u32 ready_cnt; 57 __u32 prev_ready; 46 58 47 __spinlock_t lock; 48 __spinlock_t release_lock; 59 #if defined(LEADER_LOCK) 60 __leaderlock_t submit_lock; 61 #else 62 __spinlock_t submit_lock; 63 #endif 64 __spinlock_t release_lock; 49 65 50 66 // A buffer of sqes (not the actual ring) … … 58 74 struct __completion_data { 59 75 // Head and tail of the ring 60 volatile uint32_t* head;61 volatile uint32_t* tail;76 volatile __u32 * head; 77 volatile __u32 * tail; 62 78 63 79 // number of entries and mask to go with it 64 const uint32_t* mask;65 const uint32_t* num;80 const __u32 * mask; 81 const __u32 * num; 66 82 67 83 // number of cqes not submitted (whatever that means) 68 uint32_t* overflow;84 __u32 * overflow; 69 85 70 86 // the kernel ring … … 79 95 struct __submition_data submit_q; 80 96 struct __completion_data completion_q; 81 uint32_tring_flags;97 __u32 ring_flags; 82 98 int fd; 83 99 bool eager_submits:1; … … 89 105 // IO user data 90 106 struct __io_user_data_t { 91 int32_tresult;92 $thread * thrd;107 __s32 result; 108 oneshot sem; 93 109 }; 94 110 -
libcfa/src/concurrency/iocall.cfa
ref9988b r13d33a75 32 32 #include "io/types.hfa" 33 33 34 extern [* struct io_uring_sqe, uint32_t] __submit_alloc( struct __io_data & ring, uint64_tdata );35 extern void __submit( struct io_context * ctx, uint32_tidx ) __attribute__((nonnull (1)));36 37 static inline void ?{}(struct io_uring_sqe & this, uint8_topcode, int fd) {34 extern [* struct io_uring_sqe, __u32] __submit_alloc( struct __io_data & ring, __u64 data ); 35 extern void __submit( struct io_context * ctx, __u32 idx ) __attribute__((nonnull (1))); 36 37 static inline void ?{}(struct io_uring_sqe & this, __u8 opcode, int fd) { 38 38 this.opcode = opcode; 39 39 #if !defined(IOSQE_ASYNC) … … 51 51 } 52 52 53 static inline void ?{}(struct io_uring_sqe & this, uint8_t opcode, int fd, void * addr, uint32_t len, uint64_toff ) {53 static inline void ?{}(struct io_uring_sqe & this, __u8 opcode, int fd, void * addr, __u32 len, __u64 off ) { 54 54 (this){ opcode, fd }; 55 55 this.off = off; 56 this.addr = ( uint64_t)(uintptr_t)addr;56 this.addr = (__u64)(uintptr_t)addr; 57 57 this.len = len; 58 58 } … … 101 101 #endif 102 102 103 104 103 #define __submit_prelude \ 105 104 if( 0 != (submit_flags & LINK_FLAGS) ) { errno = ENOTSUP; return -1; } \ 106 105 (void)timeout; (void)cancellation; \ 107 106 if( !context ) context = __get_io_context(); \ 108 __io_user_data_t data = { 0 , active_thread()}; \107 __io_user_data_t data = { 0 }; \ 109 108 struct __io_data & ring = *context->thrd.ring; \ 110 109 struct io_uring_sqe * sqe; \ 111 uint32_t idx; \ 112 [sqe, idx] = __submit_alloc( ring, (uint64_t)(uintptr_t)&data ); \ 113 sqe->flags = REGULAR_FLAGS & submit_flags; 110 __u32 idx; \ 111 __u8 sflags = REGULAR_FLAGS & submit_flags; \ 112 [sqe, idx] = __submit_alloc( ring, (__u64)(uintptr_t)&data ); \ 113 sqe->flags = sflags; 114 114 115 115 #define __submit_wait \ 116 116 /*__cfaabi_bits_print_safe( STDERR_FILENO, "Preparing user data %p for %p\n", &data, data.thrd );*/ \ 117 verify( sqe->user_data == ( uint64_t)(uintptr_t)&data ); \117 verify( sqe->user_data == (__u64)(uintptr_t)&data ); \ 118 118 __submit( context, idx ); \ 119 park( __cfaabi_dbg_ctx); \119 wait( data.sem ); \ 120 120 if( data.result < 0 ) { \ 121 121 errno = -data.result; \ … … 149 149 150 150 extern int fsync(int fd); 151 extern int sync_file_range(int fd, int64_t offset, int64_t nbytes, unsigned int flags); 151 152 typedef __off64_t off_t; 153 typedef __off64_t off64_t; 154 extern int sync_file_range(int fd, off64_t offset, off64_t nbytes, unsigned int flags); 152 155 153 156 struct msghdr; … … 160 163 extern int connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen); 161 164 162 extern int fallocate(int fd, int mode, uint64_t offset, uint64_t len);163 extern int posix_fadvise(int fd, uint64_t offset, uint64_t len, int advice);165 extern int fallocate(int fd, int mode, off_t offset, off_t len); 166 extern int posix_fadvise(int fd, off_t offset, off_t len, int advice); 164 167 extern int madvise(void *addr, size_t length, int advice); 165 168 … … 186 189 __submit_prelude 187 190 188 (*sqe){ IORING_OP_READV, fd, iov, iovcnt, offset }; 191 sqe->opcode = IORING_OP_READV; 192 sqe->ioprio = 0; 193 sqe->fd = fd; 194 sqe->off = offset; 195 sqe->addr = (__u64)iov; 196 sqe->len = iovcnt; 197 sqe->rw_flags = 0; 198 sqe->__pad2[0] = sqe->__pad2[1] = sqe->__pad2[2] = 0; 189 199 190 200 __submit_wait … … 200 210 __submit_prelude 201 211 202 (*sqe){ IORING_OP_WRITEV, fd, iov, iovcnt, offset }; 212 sqe->opcode = IORING_OP_WRITEV; 213 sqe->ioprio = 0; 214 sqe->fd = fd; 215 
sqe->off = offset; 216 sqe->addr = (__u64)iov; 217 sqe->len = iovcnt; 218 sqe->rw_flags = 0; 219 sqe->__pad2[0] = sqe->__pad2[1] = sqe->__pad2[2] = 0; 203 220 204 221 __submit_wait … … 213 230 __submit_prelude 214 231 215 (*sqe){ IORING_OP_FSYNC, fd }; 216 217 __submit_wait 218 #endif 219 } 220 221 int cfa_sync_file_range(int fd, int64_t offset, int64_t nbytes, unsigned int flags, int submit_flags, Duration timeout, io_cancellation * cancellation, io_context * context) { 232 sqe->opcode = IORING_OP_FSYNC; 233 sqe->ioprio = 0; 234 sqe->fd = fd; 235 sqe->off = 0; 236 sqe->addr = 0; 237 sqe->len = 0; 238 sqe->rw_flags = 0; 239 sqe->__pad2[0] = sqe->__pad2[1] = sqe->__pad2[2] = 0; 240 241 __submit_wait 242 #endif 243 } 244 245 int cfa_sync_file_range(int fd, off64_t offset, off64_t nbytes, unsigned int flags, int submit_flags, Duration timeout, io_cancellation * cancellation, io_context * context) { 222 246 #if !defined(CFA_HAVE_LINUX_IO_URING_H) || !defined(CFA_HAVE_IORING_OP_SYNC_FILE_RANGE) 223 247 return sync_file_range(fd, offset, nbytes, flags); … … 268 292 269 293 (*sqe){ IORING_OP_SEND, sockfd }; 270 sqe->addr = ( uint64_t)buf;294 sqe->addr = (__u64)buf; 271 295 sqe->len = len; 272 296 sqe->msg_flags = flags; … … 283 307 284 308 (*sqe){ IORING_OP_RECV, sockfd }; 285 sqe->addr = ( uint64_t)buf;309 sqe->addr = (__u64)buf; 286 310 sqe->len = len; 287 311 sqe->msg_flags = flags; … … 298 322 299 323 (*sqe){ IORING_OP_ACCEPT, sockfd }; 300 sqe->addr = (uint64_t)(uintptr_t)addr;301 sqe->addr2 = ( uint64_t)(uintptr_t)addrlen;324 sqe->addr = (__u64)addr; 325 sqe->addr2 = (__u64)addrlen; 302 326 sqe->accept_flags = flags; 303 327 … … 313 337 314 338 (*sqe){ IORING_OP_CONNECT, sockfd }; 315 sqe->addr = ( uint64_t)(uintptr_t)addr;316 sqe->off = ( uint64_t)(uintptr_t)addrlen;317 318 __submit_wait 319 #endif 320 } 321 322 int cfa_fallocate(int fd, int mode, uint64_t offset, uint64_t len, int submit_flags, Duration timeout, io_cancellation * cancellation, io_context * context) {339 sqe->addr = (__u64)addr; 340 sqe->off = (__u64)addrlen; 341 342 __submit_wait 343 #endif 344 } 345 346 int cfa_fallocate(int fd, int mode, off_t offset, off_t len, int submit_flags, Duration timeout, io_cancellation * cancellation, io_context * context) { 323 347 #if !defined(CFA_HAVE_LINUX_IO_URING_H) || !defined(CFA_HAVE_IORING_OP_FALLOCATE) 324 348 return fallocate( fd, mode, offset, len ); … … 337 361 } 338 362 339 int cfa_fadvise(int fd, uint64_t offset, uint64_t len, int advice, int submit_flags, Duration timeout, io_cancellation * cancellation, io_context * context) {363 int cfa_fadvise(int fd, off_t offset, off_t len, int advice, int submit_flags, Duration timeout, io_cancellation * cancellation, io_context * context) { 340 364 #if !defined(CFA_HAVE_LINUX_IO_URING_H) || !defined(CFA_HAVE_IORING_OP_FADVISE) 341 365 return posix_fadvise( fd, offset, len, advice ); … … 344 368 345 369 (*sqe){ IORING_OP_FADVISE, fd }; 346 sqe->off = ( uint64_t)offset;370 sqe->off = (__u64)offset; 347 371 sqe->len = len; 348 372 sqe->fadvise_advice = advice; … … 359 383 360 384 (*sqe){ IORING_OP_MADVISE, 0 }; 361 sqe->addr = ( uint64_t)addr;385 sqe->addr = (__u64)addr; 362 386 sqe->len = length; 363 387 sqe->fadvise_advice = advice; … … 374 398 375 399 (*sqe){ IORING_OP_OPENAT, dirfd }; 376 sqe->addr = ( uint64_t)pathname;400 sqe->addr = (__u64)pathname; 377 401 sqe->open_flags = flags; 378 402 sqe->len = mode; … … 407 431 __submit_prelude 408 432 409 (*sqe){ IORING_OP_STATX, dirfd, pathname, mask, ( uint64_t)statxbuf };433 (*sqe){ 
IORING_OP_STATX, dirfd, pathname, mask, (__u64)statxbuf }; 410 434 sqe->statx_flags = flags; 411 435 … … 449 473 } 450 474 else { 451 sqe->off = ( uint64_t)-1;475 sqe->off = (__u64)-1; 452 476 } 453 477 sqe->len = len; … … 457 481 } 458 482 else { 459 sqe->splice_off_in = ( uint64_t)-1;483 sqe->splice_off_in = (__u64)-1; 460 484 } 461 485 sqe->splice_flags = flags | (SPLICE_FLAGS & submit_flags); -
libcfa/src/concurrency/kernel.cfa
ref9988b → r13d33a75

@@ -103 +103 @@
 	// Do it here
 	kernelTLS.rand_seed ^= rdtscl();
+	kernelTLS.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
+	__tls_rand_advance_bck();

 	processor * this = runner.proc;

@@ -532 +534 @@
 	unsigned total = this.total;
 	processor * proc = &this.list`first;
-	// Threadfence is unnecessary, but gcc-8 and older incorrectly reorder code without it
-	__atomic_thread_fence(__ATOMIC_SEQ_CST);
+	// Compiler fence is unnecessary, but gcc-8 and older incorrectly reorder code without it
+	asm volatile("": : :"memory");
 	if(l != __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST)) { Pause(); continue; }
 	return [idle, total, proc];
libcfa/src/concurrency/kernel/fwd.hfa
ref9988b → r13d33a75

@@ -50 +50 @@
 		uint64_t rand_seed;
 		#endif
+		struct {
+			uint64_t fwd_seed;
+			uint64_t bck_seed;
+		} ready_rng;
 	} kernelTLS __attribute__ ((tls_model ( "initial-exec" )));
+
+

 	static inline uint64_t __tls_rand() {

@@ -58 +64 @@
 			return __xorshift64( kernelTLS.rand_seed );
 		#endif
+	}
+
+	#define M (1_l64u << 48_l64u)
+	#define A (25214903917_l64u)
+	#define AI (18446708753438544741_l64u)
+	#define C (11_l64u)
+	#define D (16_l64u)
+
+	static inline unsigned __tls_rand_fwd() {
+
+		kernelTLS.ready_rng.fwd_seed = (A * kernelTLS.ready_rng.fwd_seed + C) & (M - 1);
+		return kernelTLS.ready_rng.fwd_seed >> D;
+	}
+
+	static inline unsigned __tls_rand_bck() {
+		unsigned int r = kernelTLS.ready_rng.bck_seed >> D;
+		kernelTLS.ready_rng.bck_seed = AI * (kernelTLS.ready_rng.bck_seed - C) & (M - 1);
+		return r;
+	}
+
+	#undef M
+	#undef A
+	#undef AI
+	#undef C
+	#undef D
+
+	static inline void __tls_rand_advance_bck(void) {
+		kernelTLS.ready_rng.bck_seed = kernelTLS.ready_rng.fwd_seed;
 	}
 }
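The new backward generator works because AI is the multiplicative inverse of A modulo M = 2^48, so one __tls_rand_bck step exactly undoes one __tls_rand_fwd step once bck_seed has been synchronized by __tls_rand_advance_bck. A short C check of that round trip using the same constants (the seed value below is an arbitrary example):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define M  (UINT64_C(1) << 48)
    #define A  UINT64_C(25214903917)
    #define AI UINT64_C(18446708753438544741)  /* multiplicative inverse of A mod 2^48 */
    #define C  UINT64_C(11)

    int main(void) {
        uint64_t seed = UINT64_C(0x1234abcd);        /* any 48-bit state value */
        uint64_t fwd  = (A * seed + C) & (M - 1);    /* one forward LCG step   */
        uint64_t bck  = (AI * (fwd - C)) & (M - 1);  /* one backward LCG step  */
        assert(bck == seed);
        printf("round trip ok: %llx -> %llx -> %llx\n",
               (unsigned long long)seed, (unsigned long long)fwd, (unsigned long long)bck);
        return 0;
    }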
libcfa/src/concurrency/kernel/startup.cfa
ref9988b → r13d33a75

@@ -78 +78 @@
 static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info);

+#if defined(__CFA_WITH_VERIFY__)
+static bool verify_fwd_bck_rng(void);
+#endif
+
 //-----------------------------------------------------------------------------
 // Forward Declarations for other modules

@@ -158 +162 @@
 	__cfa_dbg_global_clusters.list{ __get };
 	__cfa_dbg_global_clusters.lock{};
+
+	/* paranoid */ verify( verify_fwd_bck_rng() );

 	// Initialize the global scheduler lock

@@ -516 +522 @@
 	( this.terminated ){ 0 };
 	( this.runner ){};
-	init( this, name, _cltr );
+
+	disable_interrupts();
+	init( this, name, _cltr );
+	enable_interrupts( __cfaabi_dbg_ctx );

 	__cfadbg_print_safe(runtime_core, "Kernel : Starting core %p\n", &this);

@@ -540 +549 @@
 	free( this.stack );

-	deinit( this );
+	disable_interrupts();
+	deinit( this );
+	enable_interrupts( __cfaabi_dbg_ctx );

@@ -672 +683 @@
 	return stack;
 }
+
+#if defined(__CFA_WITH_VERIFY__)
+static bool verify_fwd_bck_rng(void) {
+	kernelTLS.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&verify_fwd_bck_rng);
+
+	unsigned values[10];
+	for(i; 10) {
+		values[i] = __tls_rand_fwd();
+	}
+
+	__tls_rand_advance_bck();
+
+	for ( i; 9 -~= 0 ) {
+		if(values[i] != __tls_rand_bck()) {
+			return false;
+		}
+	}
+
+	return true;
+}
+#endif
libcfa/src/concurrency/ready_queue.cfa
ref9988b → r13d33a75

@@ -150 +150 @@
 // queues or removing them.
 uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
 	// Step 1 : lock global lock
 	// It is needed to avoid processors that register mid Critical-Section

@@ -164 +166 @@
 	}

+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	return s;
 }

 void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
 	// Step 1 : release local locks
 	// This must be done while the global lock is held to avoid

@@ -182 +187 @@
 	/*paranoid*/ assert(true == lock);
 	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);
+
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 }
libcfa/src/concurrency/stats.cfa
ref9988b r13d33a75 38 38 stats->io.submit_q.busy = 0; 39 39 stats->io.complete_q.completed_avg.val = 0; 40 stats->io.complete_q.completed_avg. slow_cnt = 0;41 stats->io.complete_q. completed_avg.fast_cnt= 0;40 stats->io.complete_q.completed_avg.cnt = 0; 41 stats->io.complete_q.blocks = 0; 42 42 #endif 43 43 } … … 60 60 61 61 #if defined(CFA_HAVE_LINUX_IO_URING_H) 62 __atomic_fetch_add( &cltr->io.submit_q.submit_avg.rdy , proc->io.submit_q.submit_avg.rdy, __ATOMIC_SEQ_CST );63 __atomic_fetch_add( &cltr->io.submit_q.submit_avg.csm , proc->io.submit_q.submit_avg.csm, __ATOMIC_SEQ_CST );64 __atomic_fetch_add( &cltr->io.submit_q.submit_avg.avl , proc->io.submit_q.submit_avg.avl, __ATOMIC_SEQ_CST );65 __atomic_fetch_add( &cltr->io.submit_q.submit_avg.cnt , proc->io.submit_q.submit_avg.cnt, __ATOMIC_SEQ_CST );66 __atomic_fetch_add( &cltr->io.submit_q.look_avg.val , proc->io.submit_q.look_avg.val, __ATOMIC_SEQ_CST );67 __atomic_fetch_add( &cltr->io.submit_q.look_avg.cnt , proc->io.submit_q.look_avg.cnt, __ATOMIC_SEQ_CST );68 __atomic_fetch_add( &cltr->io.submit_q.look_avg.block , proc->io.submit_q.look_avg.block, __ATOMIC_SEQ_CST );69 __atomic_fetch_add( &cltr->io.submit_q.alloc_avg.val , proc->io.submit_q.alloc_avg.val, __ATOMIC_SEQ_CST );70 __atomic_fetch_add( &cltr->io.submit_q.alloc_avg.cnt , proc->io.submit_q.alloc_avg.cnt, __ATOMIC_SEQ_CST );71 __atomic_fetch_add( &cltr->io.submit_q.alloc_avg.block , proc->io.submit_q.alloc_avg.block, __ATOMIC_SEQ_CST );72 __atomic_fetch_add( &cltr->io.submit_q.helped , proc->io.submit_q.helped, __ATOMIC_SEQ_CST );73 __atomic_fetch_add( &cltr->io.submit_q.leader , proc->io.submit_q.leader, __ATOMIC_SEQ_CST );74 __atomic_fetch_add( &cltr->io.submit_q.busy , proc->io.submit_q.busy, __ATOMIC_SEQ_CST );75 __atomic_fetch_add( &cltr->io.complete_q.completed_avg.val , proc->io.complete_q.completed_avg.val, __ATOMIC_SEQ_CST );76 __atomic_fetch_add( &cltr->io.complete_q.completed_avg. slow_cnt, proc->io.complete_q.completed_avg.slow_cnt, __ATOMIC_SEQ_CST );77 __atomic_fetch_add( &cltr->io.complete_q. 
completed_avg.fast_cnt, proc->io.complete_q.completed_avg.fast_cnt, __ATOMIC_SEQ_CST );62 __atomic_fetch_add( &cltr->io.submit_q.submit_avg.rdy , proc->io.submit_q.submit_avg.rdy , __ATOMIC_SEQ_CST ); 63 __atomic_fetch_add( &cltr->io.submit_q.submit_avg.csm , proc->io.submit_q.submit_avg.csm , __ATOMIC_SEQ_CST ); 64 __atomic_fetch_add( &cltr->io.submit_q.submit_avg.avl , proc->io.submit_q.submit_avg.avl , __ATOMIC_SEQ_CST ); 65 __atomic_fetch_add( &cltr->io.submit_q.submit_avg.cnt , proc->io.submit_q.submit_avg.cnt , __ATOMIC_SEQ_CST ); 66 __atomic_fetch_add( &cltr->io.submit_q.look_avg.val , proc->io.submit_q.look_avg.val , __ATOMIC_SEQ_CST ); 67 __atomic_fetch_add( &cltr->io.submit_q.look_avg.cnt , proc->io.submit_q.look_avg.cnt , __ATOMIC_SEQ_CST ); 68 __atomic_fetch_add( &cltr->io.submit_q.look_avg.block , proc->io.submit_q.look_avg.block , __ATOMIC_SEQ_CST ); 69 __atomic_fetch_add( &cltr->io.submit_q.alloc_avg.val , proc->io.submit_q.alloc_avg.val , __ATOMIC_SEQ_CST ); 70 __atomic_fetch_add( &cltr->io.submit_q.alloc_avg.cnt , proc->io.submit_q.alloc_avg.cnt , __ATOMIC_SEQ_CST ); 71 __atomic_fetch_add( &cltr->io.submit_q.alloc_avg.block , proc->io.submit_q.alloc_avg.block , __ATOMIC_SEQ_CST ); 72 __atomic_fetch_add( &cltr->io.submit_q.helped , proc->io.submit_q.helped , __ATOMIC_SEQ_CST ); 73 __atomic_fetch_add( &cltr->io.submit_q.leader , proc->io.submit_q.leader , __ATOMIC_SEQ_CST ); 74 __atomic_fetch_add( &cltr->io.submit_q.busy , proc->io.submit_q.busy , __ATOMIC_SEQ_CST ); 75 __atomic_fetch_add( &cltr->io.complete_q.completed_avg.val, proc->io.complete_q.completed_avg.val, __ATOMIC_SEQ_CST ); 76 __atomic_fetch_add( &cltr->io.complete_q.completed_avg.cnt, proc->io.complete_q.completed_avg.cnt, __ATOMIC_SEQ_CST ); 77 __atomic_fetch_add( &cltr->io.complete_q.blocks , proc->io.complete_q.blocks , __ATOMIC_SEQ_CST ); 78 78 #endif 79 79 } … … 154 154 "- avg alloc search len : %'18.2lf\n" 155 155 "- avg alloc search block : %'18.2lf\n" 156 "- total wait calls : %'15" PRIu64 " (%'" PRIu64 " slow, %'" PRIu64 " fast)\n"156 "- total wait calls : %'15" PRIu64 "\n" 157 157 "- avg completion/wait : %'18.2lf\n" 158 "- total completion blocks: %'15" PRIu64 "\n" 158 159 "\n" 159 160 , cluster ? "Cluster" : "Processor", name, id … … 165 166 , io.submit_q.alloc_avg.cnt 166 167 , aavgv, aavgb 167 , io.complete_q.completed_avg. slow_cnt + io.complete_q.completed_avg.fast_cnt168 , io.complete_q.completed_avg.slow_cnt, io.complete_q.completed_avg.fast_cnt169 , ((double)io.complete_q.completed_avg.val) / (io.complete_q.completed_avg.slow_cnt + io.complete_q.completed_avg.fast_cnt)168 , io.complete_q.completed_avg.cnt 169 , ((double)io.complete_q.completed_avg.val) / io.complete_q.completed_avg.cnt 170 , io.complete_q.blocks 170 171 ); 171 172 } -
libcfa/src/concurrency/stats.hfa
ref9988b → r13d33a75

@@ -90 +90 @@
 			struct {
 				volatile uint64_t val;
-				volatile uint64_t slow_cnt;
-				volatile uint64_t fast_cnt;
+				volatile uint64_t cnt;
 			} completed_avg;
+			volatile uint64_t blocks;
 		} complete_q;
 	};
libcfa/src/exception.c
ref9988b r13d33a75 10 10 // Created On : Mon Jun 26 15:13:00 2017 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Thr May 21 12:18:00 202013 // Update Count : 2 012 // Last Modified On : Wed Aug 12 13:55:00 2020 13 // Update Count : 21 14 14 // 15 15 … … 28 28 #include <unwind.h> 29 29 #include <bits/debug.hfa> 30 #include "concurrency/invoke.h" 30 31 #include "stdhdr/assert.h" 31 32 … … 58 59 59 60 60 // Temperary global exception context. Does not work with concurency.61 struct exception_context_t {62 struct __cfaehm_try_resume_node * top_resume;63 64 exception_t * current_exception;65 int current_handler_index;66 } static shared_stack = {NULL, NULL, 0};67 68 61 // Get the current exception context. 69 62 // There can be a single global until multithreading occurs, then each stack 70 // needs its own. It will have to be updated to handle that. 71 struct exception_context_t * this_exception_context() { 63 // needs its own. We get this from libcfathreads (no weak attribute). 64 __attribute__((weak)) struct exception_context_t * this_exception_context() { 65 static struct exception_context_t shared_stack = {NULL, NULL}; 72 66 return &shared_stack; 73 67 } … … 122 116 123 117 // MEMORY MANAGEMENT ========================================================= 118 119 struct __cfaehm_node { 120 struct _Unwind_Exception unwind_exception; 121 struct __cfaehm_node * next; 122 int handler_index; 123 }; 124 125 #define NODE_TO_EXCEPT(node) ((exception_t *)(1 + (node))) 126 #define EXCEPT_TO_NODE(except) ((struct __cfaehm_node *)(except) - 1) 127 #define UNWIND_TO_NODE(unwind) ((struct __cfaehm_node *)(unwind)) 128 #define NULL_MAP(map, ptr) ((ptr) ? (map(ptr)) : NULL) 124 129 125 130 // How to clean up an exception in various situations. … … 137 142 } 138 143 139 // We need a piece of storage to raise the exception, for now its a single140 // piece.141 static struct _Unwind_Exception this_exception_storage;142 143 struct __cfaehm_node {144 struct __cfaehm_node * next;145 };146 147 #define NODE_TO_EXCEPT(node) ((exception_t *)(1 + (node)))148 #define EXCEPT_TO_NODE(except) ((struct __cfaehm_node *)(except) - 1)149 150 144 // Creates a copy of the indicated exception and sets current_exception to it. 151 145 static void __cfaehm_allocate_exception( exception_t * except ) { … … 161 155 } 162 156 157 // Initialize the node: 158 exception_t * except_store = NODE_TO_EXCEPT(store); 159 store->unwind_exception.exception_class = __cfaehm_exception_class; 160 store->unwind_exception.exception_cleanup = __cfaehm_exception_cleanup; 161 store->handler_index = 0; 162 except->virtual_table->copy( except_store, except ); 163 163 164 // Add the node to the list: 164 store->next = EXCEPT_TO_NODE(context->current_exception); 165 context->current_exception = NODE_TO_EXCEPT(store); 166 167 // Copy the exception to storage. 168 except->virtual_table->copy( context->current_exception, except ); 169 170 // Set up the exception storage. 171 this_exception_storage.exception_class = __cfaehm_exception_class; 172 this_exception_storage.exception_cleanup = __cfaehm_exception_cleanup; 165 store->next = NULL_MAP(EXCEPT_TO_NODE, context->current_exception); 166 context->current_exception = except_store; 173 167 } 174 168 … … 185 179 if ( context->current_exception == except ) { 186 180 node = to_free->next; 187 context->current_exception = (node) ? 
NODE_TO_EXCEPT(node) : 0;181 context->current_exception = NULL_MAP(NODE_TO_EXCEPT, node); 188 182 } else { 189 183 node = EXCEPT_TO_NODE(context->current_exception); … … 213 207 // Verify actions follow the rules we expect. 214 208 verify((actions & _UA_CLEANUP_PHASE) && (actions & _UA_FORCE_UNWIND)); 215 verify(!(actions & (_UA_SEARCH_PHASE | _UA_HAND ER_FRAME)));209 verify(!(actions & (_UA_SEARCH_PHASE | _UA_HANDLER_FRAME))); 216 210 217 211 if ( actions & _UA_END_OF_STACK ) { … … 222 216 } 223 217 218 static struct _Unwind_Exception cancel_exception_storage; 219 224 220 // Cancel the current stack, prefroming approprate clean-up and messaging. 225 221 void __cfaehm_cancel_stack( exception_t * exception ) { 226 222 // TODO: Detect current stack and pick a particular stop-function. 227 223 _Unwind_Reason_Code ret; 228 ret = _Unwind_ForcedUnwind( & this_exception_storage, _Stop_Fn, (void*)0x22 );224 ret = _Unwind_ForcedUnwind( &cancel_exception_storage, _Stop_Fn, (void*)0x22 ); 229 225 printf("UNWIND ERROR %d after force unwind\n", ret); 230 226 abort(); … … 247 243 static void __cfaehm_begin_unwind(void(*defaultHandler)(exception_t *)) { 248 244 struct exception_context_t * context = this_exception_context(); 249 struct _Unwind_Exception * storage = &this_exception_storage;250 245 if ( NULL == context->current_exception ) { 251 246 printf("UNWIND ERROR missing exception in begin unwind\n"); 252 247 abort(); 253 248 } 249 struct _Unwind_Exception * storage = 250 &EXCEPT_TO_NODE(context->current_exception)->unwind_exception; 254 251 255 252 // Call stdlibc to raise the exception … … 419 416 _Unwind_Reason_Code ret = (0 == index) 420 417 ? _URC_CONTINUE_UNWIND : _URC_HANDLER_FOUND; 421 context->current_handler_index = index;418 UNWIND_TO_NODE(unwind_exception)->handler_index = index; 422 419 423 420 // Based on the return value, check if we matched the exception … … 425 422 __cfadbg_print_safe(exception, " handler found\n"); 426 423 } else { 424 // TODO: Continue the search if there is more in the table. 427 425 __cfadbg_print_safe(exception, " no handler\n"); 428 426 } … … 516 514 // Exception handler 517 515 // Note: Saving the exception context on the stack breaks termination exceptions. 518 catch_block( this_exception_context()->current_handler_index,516 catch_block( EXCEPT_TO_NODE( this_exception_context()->current_exception )->handler_index, 519 517 this_exception_context()->current_exception ); 520 518 } -
libcfa/src/heap.cfa
ref9988b → r13d33a75

@@ -10 +10 @@
 // Created On : Tue Dec 19 21:58:35 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Sun Aug 9 12:23:20 2020
-// Update Count : 894
+// Last Modified On : Wed Aug 12 16:43:38 2020
+// Update Count : 902
 //

@@ -650 +650 @@
 	for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {
 	#else
-	for ( HeapManager.Storage * p = top( freeLists[i].freeList ); p != 0p; p = (p)`next->top ) {
+	// for ( HeapManager.Storage * p = top( freeLists[i].freeList ); p != 0p; p = (p)`next->top ) {
+	for ( HeapManager.Storage * p = top( freeLists[i].freeList ); p != 0p; /* p = getNext( p )->top */) {
+		typeof(p) temp = (( p )`next)->top; // FIX ME: direct assignent fails, initialization works
+		p = temp;
 	#endif // BUCKETLOCK
 		total += size;

@@ -1162 +1165 @@
 	choose( option ) {
 	  case M_TOP_PAD:
-		heapExpand = ceiling( value, pageSize ); return 1;
+		heapExpand = ceiling2( value, pageSize ); return 1;
 	  case M_MMAP_THRESHOLD:
 		if ( setMmapStart( value ) ) return 1;
libcfa/src/iostream.cfa
ref9988b → r13d33a75

@@ -10 +10 @@
 // Created On : Wed May 27 17:56:53 2015
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Mon Aug 10 09:32:14 2020
-// Update Count : 1126
+// Last Modified On : Tue Aug 11 22:16:33 2020
+// Update Count : 1128
 //

@@ -37 +37 @@

 forall( dtype ostype | ostream( ostype ) ) {
-	ostype & ?|?( ostype & os, zero_t ) {
-		if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) );
-		fmt( os, "%d", 0n );
-		return os;
-	} // ?|?
-	void ?|?( ostype & os, zero_t z ) {
-		(ostype &)(os | z); ends( os );
-	} // ?|?
-
-	ostype & ?|?( ostype & os, one_t ) {
-		if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) );
-		fmt( os, "%d", 1n );
-		return os;
-	} // ?|?
-	void ?|?( ostype & os, one_t o ) {
-		(ostype &)(os | o); ends( os );
-	} // ?|?
-
 	ostype & ?|?( ostype & os, bool b ) {
 		if ( $sepPrt( os ) ) fmt( os, "%s", $sepGetCur( os ) );
libcfa/src/iostream.hfa
ref9988b → r13d33a75

@@ -10 +10 @@
 // Created On : Wed May 27 17:56:53 2015
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Thu Jul 16 07:43:32 2020
-// Update Count : 348
+// Last Modified On : Tue Aug 11 22:16:14 2020
+// Update Count : 350
 //

@@ -67 +67 @@

 forall( dtype ostype | ostream( ostype ) ) {
-	ostype & ?|?( ostype &, zero_t );
-	void ?|?( ostype &, zero_t );
-	ostype & ?|?( ostype &, one_t );
-	void ?|?( ostype &, one_t );
-
 	ostype & ?|?( ostype &, bool );
 	void ?|?( ostype &, bool );
libcfa/src/parseargs.cfa
ref9988b r13d33a75 19 19 extern long long int strtoll (const char* str, char** endptr, int base); 20 20 extern unsigned long long int strtoull(const char* str, char** endptr, int base); 21 extern double strtod (const char* str, char** endptr); 21 22 } 22 23 … … 28 29 extern char ** cfa_args_envp; 29 30 30 void printopt(FILE * out, int width, int max, char sn, const char * ln, const char * help) { 31 int hwidth = max - (11 + width); 32 if(hwidth <= 0) hwidth = max; 33 34 fprintf(out, " -%c, --%-*s %.*s\n", sn, width, ln, hwidth, help); 35 for() { 36 help += min(strlen(help), hwidth); 37 if('\0' == *help) break; 38 fprintf(out, "%*s%.*s\n", width + 11, "", hwidth, help); 39 } 40 } 31 static void usage(char * cmd, cfa_option options[], size_t opt_count, const char * usage, FILE * out) __attribute__ ((noreturn)); 41 32 42 33 void parse_args( cfa_option options[], size_t opt_count, const char * usage, char ** & left ) { … … 44 35 } 45 36 37 //----------------------------------------------------------------------------- 38 // getopt_long wrapping 46 39 void parse_args( 47 40 int argc, … … 53 46 ) { 54 47 struct option optarr[opt_count + 2]; 55 int width = 0;56 int max_width = 1_000_000;57 48 { 58 49 int idx = 0; … … 69 60 } 70 61 idx++; 71 72 int w = strlen(options[i].long_name);73 if(w > width) width = w;74 62 } 75 63 } … … 106 94 out = stdout; 107 95 case '?': 108 goto USAGE;96 usage(argv[0], options, opt_count, usage, out); 109 97 default: 110 98 for(i; opt_count) { … … 115 103 116 104 fprintf(out, "Argument '%s' for option %c could not be parsed\n\n", arg, (char)opt); 117 goto USAGE;105 usage(argv[0], options, opt_count, usage, out); 118 106 } 119 107 } … … 122 110 123 111 } 124 125 USAGE:; 112 } 113 114 //----------------------------------------------------------------------------- 115 // Print usage 116 static void printopt(FILE * out, int width, int max, char sn, const char * ln, const char * help) { 117 int hwidth = max - (11 + width); 118 if(hwidth <= 0) hwidth = max; 119 120 fprintf(out, " -%c, --%-*s %.*s\n", sn, width, ln, hwidth, help); 121 for() { 122 help += min(strlen(help), hwidth); 123 if('\0' == *help) break; 124 fprintf(out, "%*s%.*s\n", width + 11, "", hwidth, help); 125 } 126 } 127 128 void print_args_usage(cfa_option options[], size_t opt_count, const char * usage, bool error) __attribute__ ((noreturn)) { 129 usage(cfa_args_argv[0], options, opt_count, usage, error ? stderr : stdout); 130 } 131 132 void print_args_usage(int , char * argv[], cfa_option options[], size_t opt_count, const char * usage, bool error) __attribute__ ((noreturn)) { 133 usage(argv[0], options, opt_count, usage, error ? 
stderr : stdout); 134 } 135 136 static void usage(char * cmd, cfa_option options[], size_t opt_count, const char * help, FILE * out) __attribute__((noreturn)) { 137 int width = 0; 138 { 139 for(i; opt_count) { 140 if(options[i].long_name) { 141 int w = strlen(options[i].long_name); 142 if(w > width) width = w; 143 } 144 } 145 } 146 147 int max_width = 1_000_000; 126 148 int outfd = fileno(out); 127 149 if(isatty(outfd)) { … … 132 154 } 133 155 134 fprintf(out, "Usage:\n %s %s\n", argv[0], usage);156 fprintf(out, "Usage:\n %s %s\n", cmd, help); 135 157 136 158 for(i; opt_count) { … … 141 163 } 142 164 165 //----------------------------------------------------------------------------- 166 // Typed argument parsing 143 167 bool parse_yesno(const char * arg, bool & value ) { 144 168 if(strcmp(arg, "yes") == 0) { … … 167 191 bool parse(const char * arg, const char * & value ) { 168 192 value = arg; 193 return true; 194 } 195 196 bool parse(const char * arg, int & value) { 197 char * end; 198 int r = strtoll(arg, &end, 10); 199 if(*end != '\0') return false; 200 201 value = r; 169 202 return true; 170 203 } … … 200 233 } 201 234 202 bool parse(const char * arg, int& value) {203 char * end; 204 int r = strtoll(arg, &end, 10);205 if(*end != '\0') return false; 206 207 value = r; 208 return true; 209 } 235 bool parse(const char * arg, double & value) { 236 char * end; 237 double r = strtod(arg, &end); 238 if(*end != '\0') return false; 239 240 value = r; 241 return true; 242 } -
libcfa/src/parseargs.hfa
ref9988b → r13d33a75

@@ -34 +34 @@
 void parse_args( int argc, char * argv[], cfa_option options[], size_t opt_count, const char * usage, char ** & left );

+void print_args_usage(cfa_option options[], size_t opt_count, const char * usage, bool error) __attribute__ ((noreturn));
+void print_args_usage(int argc, char * argv[], cfa_option options[], size_t opt_count, const char * usage, bool error) __attribute__ ((noreturn));
+
 bool parse_yesno (const char *, bool & );
 bool parse_settrue (const char *, bool & );

@@ -39 +42 @@

 bool parse(const char *, const char * & );
+bool parse(const char *, int & );
 bool parse(const char *, unsigned & );
 bool parse(const char *, unsigned long & );
 bool parse(const char *, unsigned long long & );
-bool parse(const char *, int & );
+bool parse(const char *, double & );
libcfa/src/stdlib.hfa
ref9988b r13d33a75 10 10 // Created On : Thu Jan 28 17:12:35 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Jul 30 16:14:58202013 // Update Count : 49012 // Last Modified On : Fri Aug 14 23:38:50 2020 13 // Update Count : 504 14 14 // 15 15 … … 39 39 //--------------------------------------- 40 40 41 #include "common.hfa" 42 43 //--------------------------------------- 44 41 45 // Macro because of returns 42 46 #define $VAR_ALLOC( allocation, alignment ) \ … … 136 140 T * alloc_set( char fill ) { 137 141 return (T *)memset( (T *)alloc(), (int)fill, sizeof(T) ); // initialize with fill value 138 } // alloc 139 140 T * alloc_set( Tfill ) {142 } // alloc_set 143 144 T * alloc_set( const T & fill ) { 141 145 return (T *)memcpy( (T *)alloc(), &fill, sizeof(T) ); // initialize with fill value 142 } // alloc 146 } // alloc_set 143 147 144 148 T * alloc_set( size_t dim, char fill ) { 145 149 return (T *)memset( (T *)alloc( dim ), (int)fill, dim * sizeof(T) ); // initialize with fill value 146 } // alloc 147 148 T * alloc_set( size_t dim, Tfill ) {150 } // alloc_set 151 152 T * alloc_set( size_t dim, const T & fill ) { 149 153 T * r = (T *)alloc( dim ); 150 154 for ( i; dim ) { memcpy( &r[i], &fill, sizeof(T) ); } // initialize with fill value 151 155 return r; 152 } // alloc 153 154 T * alloc_set( size_t dim , const T fill[]) {155 return (T *)memcpy( (T *)alloc( dim ), fill, dim* sizeof(T) ); // initialize with fill value156 } // alloc 156 } // alloc_set 157 158 T * alloc_set( size_t dimNew, const T fill[], size_t dimOld ) { 159 return (T *)memcpy( (T *)alloc( dimNew ), fill, min( dimNew, dimOld ) * sizeof(T) ); // initialize with fill value 160 } // alloc_set 157 161 158 162 T * alloc_set( T ptr[], size_t dim, char fill ) { // realloc array with fill … … 166 170 } // alloc_set 167 171 168 T * alloc_set( T ptr[], size_t dim, T & fill ) { // realloc array with fill172 T * alloc_set( T ptr[], size_t dim, const T & fill ) { // realloc array with fill 169 173 size_t odim = malloc_size( ptr ) / sizeof(T); // current dimension 170 174 size_t nsize = dim * sizeof(T); // new allocation … … 177 181 } // if 178 182 return nptr; 179 } // alloc_ align_set183 } // alloc_set 180 184 } // distribution 181 185 … … 204 208 T * alloc_align_set( size_t align, char fill ) { 205 209 return (T *)memset( (T *)alloc_align( align ), (int)fill, sizeof(T) ); // initialize with fill value 206 } // alloc_align 207 208 T * alloc_align_set( size_t align, Tfill ) {210 } // alloc_align_set 211 212 T * alloc_align_set( size_t align, const T & fill ) { 209 213 return (T *)memcpy( (T *)alloc_align( align ), &fill, sizeof(T) ); // initialize with fill value 210 } // alloc_align 214 } // alloc_align_set 211 215 212 216 T * alloc_align_set( size_t align, size_t dim, char fill ) { 213 217 return (T *)memset( (T *)alloc_align( align, dim ), (int)fill, dim * sizeof(T) ); // initialize with fill value 214 } // alloc_align 215 216 T * alloc_align_set( size_t align, size_t dim, Tfill ) {218 } // alloc_align_set 219 220 T * alloc_align_set( size_t align, size_t dim, const T & fill ) { 217 221 T * r = (T *)alloc_align( align, dim ); 218 222 for ( i; dim ) { memcpy( &r[i], &fill, sizeof(T) ); } // initialize with fill value 219 223 return r; 220 } // alloc_align 221 222 T * alloc_align_set( size_t align, size_t dim , const T fill[]) {223 return (T *)memcpy( (T *)alloc_align( align, dim ), fill, dim* sizeof(T) );224 } // alloc_align 224 } // alloc_align_set 225 226 T * alloc_align_set( size_t align, size_t dimNew, 
const T fill[], size_t dimOld ) { 227 return (T *)memcpy( (T *)alloc_align( align, dimNew ), fill, min( dimNew, dimOld ) * sizeof(T) ); 228 } // alloc_align_set 225 229 226 230 T * alloc_align_set( T ptr[], size_t align, size_t dim, char fill ) { … … 234 238 } // alloc_align_set 235 239 236 T * alloc_align_set( T ptr[], size_t align, size_t dim, T & fill ) {240 T * alloc_align_set( T ptr[], size_t align, size_t dim, const T & fill ) { 237 241 size_t odim = malloc_size( ptr ) / sizeof(T); // current dimension 238 242 size_t nsize = dim * sizeof(T); // new allocation … … 374 378 //--------------------------------------- 375 379 376 #include "common.hfa"377 378 //---------------------------------------379 380 380 extern bool threading_enabled(void) OPTIONAL_THREAD; 381 381