Changeset 1f55a75 for libcfa/src
- Timestamp: Mar 23, 2021, 9:19:47 PM
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 98d9ce9
- Parents: f9c3100 (diff), e825c9d (diff)
- Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: libcfa/src
- Files: 1 added, 14 edited
libcfa/src/Makefile.am
  concurrency/iofwd.hfa \
  containers/list.hfa \
+ containers/queueLockFree.hfa \
  containers/stackLockFree.hfa \
  vec/vec.hfa \
libcfa/src/bits/queue.hfa
  };

- inline {
+ static inline {
      // wrappers to make Collection have T
      T & head( Queue(T) & q ) with( q ) {
  ...
  struct QueueIter {
      inline ColIter; // Plan 9 inheritance
  };

- inline {
+ static inline {
      void ?{}( QueueIter(T) & qi ) with( qi ) {
          ((ColIter &)qi){};
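The static-inline change above only affects linkage, but the intrusive Queue these wrappers expose is what the I/O rework later in this changeset builds on. A minimal usage sketch, not part of the changeset: the node type request_t and its field are hypothetical, while add/head/drop and the Next wrapper mirror the pattern used in io/types.hfa and io.cfa below.

    struct request_t {
        inline Colable;              // intrusive link via Plan 9 inheritance
        int payload;                 // user data carried by the node
    };
    // wrapper so Queue(request_t) can follow the intrusive link
    static inline request_t *& Next( request_t * n ) { return (request_t *)Next( (Colable *)n ); }

    Queue(request_t) q;
    request_t r;  r.payload = 42;
    add( q, r );                     // enqueue at the tail
    request_t & h = head( q );       // peek at the head
    drop( q );                       // dequeue the head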
libcfa/src/bits/weakso_locks.hfa
  static inline void ^?{}( multiple_acquisition_lock & this ) {}
  static inline void lock    ( multiple_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
- static inline void try_lock( multiple_acquisition_lock & this ) { try_lock( (blocking_lock &)this ); }
+ static inline bool try_lock( multiple_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
  static inline void unlock  ( multiple_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
  static inline void on_wait ( multiple_acquisition_lock & this ) { on_wait ( (blocking_lock &)this ); }
libcfa/src/concurrency/clib/cfathread.cfa
  #include "locks.hfa"
  #include "kernel.hfa"
+ #include "stats.hfa"
  #include "thread.hfa"
  #include "time.hfa"

  #include "cfathread.h"
+
+ extern void ?{}(processor &, const char[], cluster &, $thread *);
+ extern "C" {
+     extern void __cfactx_invoke_thread(void (*main)(void *), void * this);
+ }
+
+ //================================================================================
+ // Thread run y the C Interface

  struct cfathread_object {
  ...
  }

- processor * procs = 0p;
- int proc_cnt = 1;
-
+ //================================================================================
+ // Special Init Thread responsible for the initialization or processors
+ struct __cfainit {
+     $thread self;
+     void (*init)( void * );
+     void * arg;
+ };
+ void main(__cfainit & this);
+ void ^?{}(__cfainit & mutex this);
+
+ static inline $thread * get_thread( __cfainit & this ) { return &this.self; }
+
+ typedef ThreadCancelled(__cfainit) __cfainit_exception;
+ typedef ThreadCancelled_vtable(__cfainit) __cfainit_vtable;
+
+ void defaultResumptionHandler(ThreadCancelled(__cfainit) & except) {
+     abort | "The init thread was cancelled";
+ }
+
+ __cfainit_vtable ___cfainit_vtable_instance;
+
+ __cfainit_vtable const & get_exception_vtable(__cfainit_exception *) {
+     return ___cfainit_vtable_instance;
+ }
+
+ static void ?{}( __cfainit & this, void (*init)( void * ), void * arg ) {
+     this.init = init;
+     this.arg = arg;
+     ((thread&)this){"Processir Init"};
+
+     // Don't use __thrd_start! just prep the context manually
+     $thread * this_thrd = get_thread(this);
+     void (*main_p)(__cfainit &) = main;
+
+     disable_interrupts();
+     __cfactx_start(main_p, get_coroutine(this), this, __cfactx_invoke_thread);
+
+     this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
+     /* paranoid */ verify( this_thrd->context.SP );
+
+     this_thrd->state = Ready;
+     enable_interrupts( __cfaabi_dbg_ctx );
+ }
+
+ void ^?{}(__cfainit & mutex this) {
+     ^(this.self){};
+ }
+
+ void main( __cfainit & this ) {
+     __attribute__((unused)) void * const thrd_obj = (void*)&this;
+     __attribute__((unused)) void * const thrd_hdl = (void*)active_thread();
+     /* paranoid */ verify( thrd_obj == thrd_hdl );
+
+     this.init( this.arg );
+ }
+
+ //================================================================================
+ // Main Api
  extern "C" {
      int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) {
  ...
      }

+     int cfathread_cluster_print_stats( cfathread_cluster_t cl ) {
+         #if !defined(__CFA_NO_STATISTICS__)
+             print_stats_at_exit( *cl, CFA_STATS_READY_Q | CFA_STATS_IO );
+             print_stats_now( *cl, CFA_STATS_READY_Q | CFA_STATS_IO );
+         #endif
+         return 0;
+     }
+
      int cfathread_cluster_add_worker(cfathread_cluster_t cl, pthread_t* tid, void (*init_routine) (void *), void * arg) {
-         // processor * proc = new("C-processor", *cl, init_routine, arg);
+         __cfainit * it = 0p;
+         if(init_routine) {
+             it = alloc();
+             (*it){init_routine, arg};
+         }
          processor * proc = alloc();
-         (*proc){ "C-processor", *cl, init_routine, arg };
+         (*proc){ "C-processor", *cl, get_thread(*it) };
+
+         // Wait for the init thread to return before continuing
+         if(it) {
+             ^(*it){};
+             free(it);
+         }
+
          if(tid) *tid = proc->kernel_thread;
          return 0;
  ...
      int cfathread_mutex_init(cfathread_mutex_t *restrict mut, const cfathread_mutexattr_t *restrict) __attribute__((nonnull (1))) { *mut = new(); return 0; }
      int cfathread_mutex_destroy(cfathread_mutex_t *mut) __attribute__((nonnull (1))) { delete( *mut ); return 0; }
-     int cfathread_mutex_lock   (cfathread_mutex_t *mut) __attribute__((nonnull (1))) { lock    ( (*mut)->impl ); return 0; }
-     int cfathread_mutex_trylock(cfathread_mutex_t *mut) __attribute__((nonnull (1))) { try_lock( (*mut)->impl ); return 0; }
-     int cfathread_mutex_unlock (cfathread_mutex_t *mut) __attribute__((nonnull (1))) { unlock  ( (*mut)->impl ); return 0; }
+     int cfathread_mutex_lock   (cfathread_mutex_t *mut) __attribute__((nonnull (1))) { lock( (*mut)->impl ); return 0; }
+     int cfathread_mutex_unlock (cfathread_mutex_t *mut) __attribute__((nonnull (1))) { unlock( (*mut)->impl ); return 0; }
+     int cfathread_mutex_trylock(cfathread_mutex_t *mut) __attribute__((nonnull (1))) {
+         bool ret = try_lock( (*mut)->impl );
+         if( ret ) return 0;
+         else return EBUSY;
+     }

      //--------------------
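For context, a rough C-side usage sketch of the interface touched here. This is not part of the changeset: it assumes cfathread.h declares the mutex API implemented above and that the program is built against the cfathread shim.

    #include <errno.h>
    #include <stdio.h>
    #include <pthread.h>
    #include "cfathread.h"

    // runs once on the new worker (via the __cfainit thread) before it schedules user threads
    static void worker_init( void * arg ) {
        printf( "worker %s ready\n", (const char *)arg );
    }

    int main( void ) {
        cfathread_cluster_t cl;
        cfathread_cluster_create( &cl );

        pthread_t tid;
        cfathread_cluster_add_worker( cl, &tid, worker_init, "w0" );

        cfathread_mutex_t m;
        cfathread_mutex_init( &m, NULL );
        if( cfathread_mutex_trylock( &m ) == EBUSY ) {   // new: trylock reports contention
            cfathread_mutex_lock( &m );                  // fall back to a blocking acquire
        }
        cfathread_mutex_unlock( &m );
        cfathread_mutex_destroy( &m );
        return 0;
    }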
libcfa/src/concurrency/clib/cfathread.h
  // Update Count     :
  //

- #include "stddef.h"
- #include "invoke.h"

  #if defined(__cforall) || defined(__cplusplus)
  ...
  int cfathread_cluster_create(cfathread_cluster_t * cluster);
  cfathread_cluster_t cfathread_cluster_self(void);
+ int cfathread_cluster_print_stats(cfathread_cluster_t cluster);
  int cfathread_cluster_add_worker(cfathread_cluster_t cluster, pthread_t* tid, void (*init_routine) (void *), void * arg);
  int cfathread_cluster_pause (cfathread_cluster_t cluster);
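A small example of the new entry point; per the implementation above, the call is a no-op returning 0 when the runtime is built with __CFA_NO_STATISTICS__.

    // dump ready-queue and I/O statistics for the calling thread's cluster
    cfathread_cluster_t cl = cfathread_cluster_self();
    cfathread_cluster_print_stats( cl );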
libcfa/src/concurrency/invoke.h
  }

+ static inline $thread * volatile & ?`next ( $thread * this ) __attribute__((const)) {
+     return this->seqable.back;
+ }
+
  static inline $thread *& Back( $thread * this ) __attribute__((const)) {
      return this->seqable.back;
libcfa/src/concurrency/io.cfa
  };

- static $io_context * __ioarbiter_allocate( $io_arbiter & mutex this, processor *, __u32 idxs[], __u32 want );
- static void __ioarbiter_submit( $io_arbiter & mutex this, $io_context * , __u32 idxs[], __u32 have, bool lazy );
- static void __ioarbiter_flush ( $io_arbiter & mutex this, $io_context * );
+ static $io_context * __ioarbiter_allocate( $io_arbiter & this, __u32 idxs[], __u32 want );
+ static void __ioarbiter_submit( $io_context * , __u32 idxs[], __u32 have, bool lazy );
+ static void __ioarbiter_flush ( $io_context & );
  static inline void __ioarbiter_notify( $io_context & ctx );
  //=============================================================================================
  ...
  $io_context & ctx = *proc->io.ctx;

- if(!ctx.ext_sq.empty) {
-     __ioarbiter_flush( *ctx.arbiter, &ctx );
- }
+ __ioarbiter_flush( ctx );

  __STATS__( true, io.calls.flush++; )
  ...
  __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for allocation\n");

- struct $io_context * ret = __ioarbiter_allocate(*ioarb, proc, idxs, want);
+ struct $io_context * ret = __ioarbiter_allocate(*ioarb, idxs, want);

  __cfadbg_print_safe(io, "Kernel I/O : slow allocation completed from ring %d\n", ret->fd);
  ...
  __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");

- __ioarbiter_submit( *inctx->arbiter, inctx, idxs, have, lazy);
+ __ioarbiter_submit(inctx, idxs, have, lazy);
  ...
  //=============================================================================================
  // I/O Arbiter
  //=============================================================================================
- static $io_context * __ioarbiter_allocate( $io_arbiter & mutex this, processor * proc, __u32 idxs[], __u32 want ) {
+ static inline void block(__outstanding_io_queue & queue, __outstanding_io & item) {
+     // Lock the list, it's not thread safe
+     lock( queue.lock __cfaabi_dbg_ctx2 );
+     {
+         // Add our request to the list
+         add( queue.queue, item );
+
+         // Mark as pending
+         __atomic_store_n( &queue.empty, false, __ATOMIC_SEQ_CST );
+     }
+     unlock( queue.lock );
+
+     wait( item.sem );
+ }
+
+ static inline bool empty(__outstanding_io_queue & queue ) {
+     return __atomic_load_n( &queue.empty, __ATOMIC_SEQ_CST);
+ }
+
+ static $io_context * __ioarbiter_allocate( $io_arbiter & this, __u32 idxs[], __u32 want ) {
      __cfadbg_print_safe(io, "Kernel I/O : arbiter allocating\n");
  ...
      // No one has any resources left, wait for something to finish
-     // Mark as pending
-     __atomic_store_n( &this.pending.flag, true, __ATOMIC_SEQ_CST );
-
-     // Wait for our turn to submit
-     wait( this.pending.blocked, want );
-
-     __attribute((unused)) bool ret =
-         __alloc( this.pending.ctx, idxs, want);
-     /* paranoid */ verify( ret );
-
-     return this.pending.ctx;
- }
-
- static void __ioarbiter_notify( $io_arbiter & mutex this, $io_context * ctx ) {
-     /* paranoid */ verify( !is_empty(this.pending.blocked) );
-     this.pending.ctx = ctx;
-
-     while( !is_empty(this.pending.blocked) ) {
-         __cfadbg_print_safe(io, "Kernel I/O : notifying\n");
-         __u32 have = ctx->sq.free_ring.tail - ctx->sq.free_ring.head;
-         __u32 want = front( this.pending.blocked );
-
-         if( have > want ) return;
-
-         signal_block( this.pending.blocked );
-     }
-
-     this.pending.flag = false;
+     // We need to add ourself to a list of pending allocs and wait for an answer
+     __pending_alloc pa;
+     pa.idxs = idxs;
+     pa.want = want;
+
+     block(this.pending, (__outstanding_io&)pa);
+
+     return pa.ctx;
+ }
+
+ static void __ioarbiter_notify( $io_arbiter & this, $io_context * ctx ) {
+     /* paranoid */ verify( !empty(this.pending.queue) );
+
+     lock( this.pending.lock __cfaabi_dbg_ctx2 );
+     {
+         while( !empty(this.pending.queue) ) {
+             __cfadbg_print_safe(io, "Kernel I/O : notifying\n");
+             __u32 have = ctx->sq.free_ring.tail - ctx->sq.free_ring.head;
+             __pending_alloc & pa = (__pending_alloc&)head( this.pending.queue );
+
+             if( have > pa.want ) goto DONE;
+             drop( this.pending.queue );
+
+             /* paranoid */ __attribute__((unused)) bool ret =
+                 __alloc(ctx, pa.idxs, pa.want);
+             /* paranoid */ verify( ret );
+
+             pa.ctx = ctx;
+
+             post( pa.sem );
+         }
+
+         this.pending.empty = true;
+         DONE:;
+     }
+     unlock( this.pending.lock );
  }

  static void __ioarbiter_notify( $io_context & ctx ) {
-     if( __atomic_load_n( &ctx.arbiter->pending.flag, __ATOMIC_SEQ_CST)) {
+     if(!empty( ctx.arbiter->pending )) {
          __ioarbiter_notify( *ctx.arbiter, &ctx );
      }
  ...
  // Simply append to the pending
- static void __ioarbiter_submit( $io_arbiter & mutex this, $io_context * ctx, __u32 idxs[], __u32 have, bool lazy ) {
+ static void __ioarbiter_submit( $io_context * ctx, __u32 idxs[], __u32 have, bool lazy ) {
      __cfadbg_print_safe(io, "Kernel I/O : submitting %u from the arbiter to context %u\n", have, ctx->fd);

-     /* paranoid */ verify( &this == ctx->arbiter );
-
-     // Mark as pending
-     __atomic_store_n( &ctx->ext_sq.empty, false, __ATOMIC_SEQ_CST );
-
      __cfadbg_print_safe(io, "Kernel I/O : waiting to submit %u\n", have);

-     // Wait for our turn to submit
-     wait( ctx->ext_sq.blocked );
-
-     // Submit our indexes
-     __submit(ctx, idxs, have, lazy);
+     __external_io ei;
+     ei.idxs = idxs;
+     ei.have = have;
+     ei.lazy = lazy;
+
+     block(ctx->ext_sq, (__outstanding_io&)ei);

      __cfadbg_print_safe(io, "Kernel I/O : %u submitted from arbiter\n", have);
  }

- static void __ioarbiter_flush( $io_arbiter & mutex this, $io_context * ctx ) {
-     /* paranoid */ verify( &this == ctx->arbiter );
-
-     __STATS__( false, io.flush.external += 1; )
-
-     __cfadbg_print_safe(io, "Kernel I/O : arbiter flushing\n");
-
-     condition & blcked = ctx->ext_sq.blocked;
-     /* paranoid */ verify( ctx->ext_sq.empty == is_empty( blcked ) );
-     while(!is_empty( blcked )) {
-         signal_block( blcked );
-     }
-
-     ctx->ext_sq.empty = true;
+ static void __ioarbiter_flush( $io_context & ctx ) {
+     if(!empty( ctx.ext_sq )) {
+         __STATS__( false, io.flush.external += 1; )
+
+         __cfadbg_print_safe(io, "Kernel I/O : arbiter flushing\n");
+
+         lock( ctx.ext_sq.lock __cfaabi_dbg_ctx2 );
+         {
+             while( !empty(ctx.ext_sq.queue) ) {
+                 __external_io & ei = (__external_io&)drop( ctx.ext_sq.queue );
+
+                 __submit(&ctx, ei.idxs, ei.have, ei.lazy);
+
+                 post( ei.sem );
+             }
+
+             ctx.ext_sq.empty = true;
+         }
+         unlock(ctx.ext_sq.lock );
+     }
  }
  #endif
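The rewrite above replaces the monitor/condition arbiter with a spinlock-protected queue of outstanding requests, each carrying its own semaphore: waiters enqueue a request and sleep on its semaphore, while the servicing side drains the queue and posts each semaphore as it handles the request. The following is only a schematic analogy of that block/drain pattern, written with POSIX primitives standing in for __spinlock_t and single_sem; the names (outstanding, block_on, drain) are illustrative and not part of libcfa.

    #include <pthread.h>
    #include <semaphore.h>
    #include <stddef.h>

    // One outstanding request: an intrusive link plus a private semaphore.
    struct outstanding {
        struct outstanding * next;
        sem_t sem;
    };

    // A lock-protected FIFO of outstanding requests.
    struct outstanding_queue {
        pthread_mutex_t lock;
        struct outstanding * head, * tail;
    };

    // Caller side: publish the request and sleep on its private semaphore.
    static void block_on(struct outstanding_queue * q, struct outstanding * item) {
        sem_init(&item->sem, 0, 0);
        item->next = NULL;
        pthread_mutex_lock(&q->lock);
        if (q->tail) q->tail->next = item; else q->head = item;
        q->tail = item;
        pthread_mutex_unlock(&q->lock);
        sem_wait(&item->sem);              // woken once the servicer has handled it
    }

    // Servicer side: drain the queue, handle each request, then wake its owner.
    static void drain(struct outstanding_queue * q, void (*handle)(struct outstanding *)) {
        pthread_mutex_lock(&q->lock);
        while (q->head) {
            struct outstanding * item = q->head;
            q->head = item->next;
            if (!q->head) q->tail = NULL;
            handle(item);                  // e.g. submit the request's I/O indexes
            sem_post(&item->sem);
        }
        pthread_mutex_unlock(&q->lock);
    }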
libcfa/src/concurrency/io/setup.cfa
  this.arbiter = cl.io.arbiter;
  this.ext_sq.empty = true;
- (this.ext_sq.blocked){};
+ (this.ext_sq.queue){};
  __io_uring_setup( this, cl.io.params, proc->idle );
  __cfadbg_print_safe(io_core, "Kernel I/O : Created ring for io_context %u (%p)\n", this.fd, &this);
  ...
  //=============================================================================================
  void ?{}( $io_arbiter & this ) {
-     this.pending.flag = false;
- }
-
- void ^?{}( $io_arbiter & mutex this ) {
-     // /* paranoid */ verify( empty(this.assigned) );
-     // /* paranoid */ verify( empty(this.available) );
-     /* paranoid */ verify( is_empty(this.pending.blocked) );
- }
+     this.pending.empty = true;
+ }
+
+ void ^?{}( $io_arbiter & this ) {}

  $io_arbiter * create(void) {
libcfa/src/concurrency/io/types.hfa
  #include "bits/locks.hfa"
+ #include "bits/queue.hfa"
  #include "kernel/fwd.hfa"
  ...
  };

+ struct __outstanding_io {
+     inline Colable;
+     single_sem sem;
+ };
+ static inline __outstanding_io *& Next( __outstanding_io * n ) { return (__outstanding_io *)Next( (Colable *)n ); }
+
+ struct __outstanding_io_queue {
+     __spinlock_t lock;
+     Queue(__outstanding_io) queue;
+     volatile bool empty;
+ };
+
+ struct __external_io {
+     inline __outstanding_io;
+     __u32 * idxs;
+     __u32 have;
+     bool lazy;
+ };
+
  struct __attribute__((aligned(128))) $io_context {
      $io_arbiter * arbiter;
      processor * proc;

-     struct {
-         volatile bool empty;
-         condition blocked;
-     } ext_sq;
+     __outstanding_io_queue ext_sq;

      struct __sub_ring_t sq;
  ...
  };

- monitor __attribute__((aligned(128))) $io_arbiter {
-     struct {
-         condition blocked;
-         $io_context * ctx;
-         volatile bool flag;
-     } pending;
+ struct __pending_alloc {
+     inline __outstanding_io;
+     __u32 * idxs;
+     __u32 want;
+     $io_context * ctx;
+ };
+
+ struct __attribute__((aligned(128))) $io_arbiter {
+     __outstanding_io_queue pending;
  };
libcfa/src/concurrency/kernel.cfa
  #endif

- // if we need to run some special setup, now is the time to do it.
- if(this->init.fnc) {
-     this->init.fnc(this->init.arg);
- }
-
  {
      // Setup preemption data
  ...
  #endif

+ // if we need to run some special setup, now is the time to do it.
+ if(this->init.thrd) {
+     this->init.thrd->curr_cluster = this->cltr;
+     __run_thread(this, this->init.thrd);
+ }

  __cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);
libcfa/src/concurrency/kernel.hfa
  // it is not a particularly safe scheme as it can make processors less homogeneous
  struct {
-     void (*fnc) (void *);
-     void * arg;
+     $thread * thrd;
  } init;
  ...
  };

- void ?{}(processor & this, const char name[], struct cluster & cltr, void (*init) (void *), void * arg);
+ void ?{}(processor & this, const char name[], struct cluster & cltr);
  void ^?{}(processor & this);

- static inline void ?{}(processor & this) { this{ "Anonymous Processor", *mainCluster, 0p, 0p}; }
- static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr, 0p, 0p}; }
- static inline void ?{}(processor & this, const char name[]) { this{name, *mainCluster, 0p, 0p}; }
+ static inline void ?{}(processor & this) { this{ "Anonymous Processor", *mainCluster}; }
+ static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr}; }
+ static inline void ?{}(processor & this, const char name[]) { this{name, *mainCluster}; }

  DLISTED_MGD_IMPL_OUT(processor)
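With the init function pointer gone, the constructors shown above remain the way user code brings up processors; a minimal Cforall sketch (not from the changeset, and assuming the usual cluster convenience constructors):

    cluster cl;                          // a scheduling domain
    processor p0 = { "worker-0", cl };   // kernel thread executing user threads on cl
    processor p1 = { cl };               // anonymous processor on the same cluster
    // threads placed on cl are multiplexed over p0 and p1;
    // both processors shut down when they go out of scope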
libcfa/src/concurrency/kernel/startup.cfa
  static void __kernel_first_resume( processor * this );
  static void __kernel_last_resume ( processor * this );
- static void init(processor & this, const char name[], cluster & _cltr, void (*fnc) (void *), void * arg);
+ static void init(processor & this, const char name[], cluster & _cltr, $thread * initT);
  static void deinit(processor & this);
  static void doregister( struct cluster & cltr );
  ...
  ( this.terminated ){};
  ( this.runner ){};
- init( this, "Main Processor", *mainCluster, 0p, 0p );
+ init( this, "Main Processor", *mainCluster, 0p );
  kernel_thread = pthread_self();
  ...
- static void init(processor & this, const char name[], cluster & _cltr, void (*fnc) (void *), void * arg) with( this ) {
+ static void init(processor & this, const char name[], cluster & _cltr, $thread * initT) with( this ) {
      this.name = name;
      this.cltr = &_cltr;
  ...
      this.io.dirty = false;

-     this.init.fnc = fnc;
-     this.init.arg = arg;
+     this.init.thrd = initT;

      this.idle = eventfd(0, 0);
  ...
  }

- void ?{}(processor & this, const char name[], cluster & _cltr, void (*fnc) (void *), void * arg) {
+ void ?{}(processor & this, const char name[], cluster & _cltr, $thread * initT) {
      ( this.terminated ){};
      ( this.runner ){};

      disable_interrupts();
-     init( this, name, _cltr, fnc, arg );
+     init( this, name, _cltr, initT );
      enable_interrupts( __cfaabi_dbg_ctx );
  ...
      this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );
  }
+
+ void ?{}(processor & this, const char name[], cluster & _cltr) {
+     (this){name, _cltr, 0p};
  }
libcfa/src/concurrency/locks.hfa
  static inline void ^?{}( single_acquisition_lock & this ) {}
  static inline void lock    ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
- static inline void try_lock( single_acquisition_lock & this ) { try_lock( (blocking_lock &)this ); }
+ static inline bool try_lock( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
  static inline void unlock  ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
  static inline void on_wait ( single_acquisition_lock & this ) { on_wait ( (blocking_lock &)this ); }
  ...
  static inline void ^?{}( owner_lock & this ) {}
  static inline void lock    ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
- static inline void try_lock( owner_lock & this ) { try_lock( (blocking_lock &)this ); }
+ static inline bool try_lock( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
  static inline void unlock  ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
  static inline void on_wait ( owner_lock & this ) { on_wait ( (blocking_lock &)this ); }
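Since try_lock now reports whether the lock was acquired, callers can branch on the result; a small Cforall sketch using only the functions declared above:

    single_acquisition_lock l;
    if( try_lock( l ) ) {
        // fast path: acquired without blocking
        unlock( l );
    } else {
        lock( l );      // slow path: block until the lock is released
        unlock( l );
    }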
libcfa/src/concurrency/preemption.cfa
  #define __cforall_thread__
+ // #define __CFA_DEBUG_PRINT_PREEMPTION__

  #include "preemption.hfa"
  ...
  #include "kernel_private.hfa"

  #if !defined(__CFA_DEFAULT_PREEMPTION__)
  #define __CFA_DEFAULT_PREEMPTION__ 10`ms
  #endif

- Duration default_preemption() __attribute__((weak)) {
-     return __CFA_DEFAULT_PREEMPTION__;
+ __attribute__((weak)) Duration default_preemption() {
+     const char * preempt_rate_s = getenv("CFA_DEFAULT_PREEMPTION");
+     if(!preempt_rate_s) {
+         __cfadbg_print_safe(preemption, "No CFA_DEFAULT_PREEMPTION in ENV\n");
+         return __CFA_DEFAULT_PREEMPTION__;
+     }
+
+     char * endptr = 0p;
+     long int preempt_rate_l = strtol(preempt_rate_s, &endptr, 10);
+     if(preempt_rate_l < 0 || preempt_rate_l > 65535) {
+         __cfadbg_print_safe(preemption, "CFA_DEFAULT_PREEMPTION out of range : %ld\n", preempt_rate_l);
+         return __CFA_DEFAULT_PREEMPTION__;
+     }
+     if('\0' != *endptr) {
+         __cfadbg_print_safe(preemption, "CFA_DEFAULT_PREEMPTION not a decimal number : %s\n", preempt_rate_s);
+         return __CFA_DEFAULT_PREEMPTION__;
+     }
+
+     return preempt_rate_l`ms;
  }
  ...
  // If there are still alarms pending, reset the timer
  if( & (*alarms)`first ) {
-     __cfadbg_print_buffer_decl(preemption, " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
      Duration delta = (*alarms)`first.alarm - currtime;
      Duration capped = max(delta, 50`us);
-     // itimerval tim = { caped };
-     // __cfaabi_dbg_print_buffer_local( " Values are %lu, %lu, %lu %lu.\n", delta.tv, caped.tv, tim.it_value.tv_sec, tim.it_value.tv_usec);
-
      __kernel_set_timer( capped );
  }
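With this change the time slice can be tuned without recompiling: for example, starting a program with CFA_DEFAULT_PREEMPTION=5 in its environment selects a 5 ms preemption interval, while an unset, non-decimal, or out-of-range (outside 0-65535) value falls back to the compiled-in __CFA_DEFAULT_PREEMPTION__ of 10`ms.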