Changeset 4069faad
- Timestamp: May 1, 2020, 12:37:30 PM
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: d45ed83
- Parents: 9987d79
- Files: 5 edited
Legend: unprefixed lines are unchanged context, lines prefixed with + were added, lines prefixed with - were removed, and a lone … marks omitted code between hunks.
benchmark/io/readv.cfa
  unsigned long int buflen = 50;

+ cluster * the_cluster;
+
  thread Reader {};
+ void ?{}( Reader & this ) {
+     ((thread&)this){ "Reader Thread", *the_cluster };
+ }
+
+ struct my_processor {
+     processor p;
+ };
+
+ void ?{}( my_processor & this ) {
+     (this.p){ "I/O Processor", *the_cluster };
+ }
+
  void main( Reader & ) {
      while(!__atomic_load_n(&run, __ATOMIC_RELAXED)) yield();
…
      while(__atomic_load_n(&run, __ATOMIC_RELAXED)) {
-         cfa_preadv2(fd, &iov, 1, 0, 0);
+         int r = cfa_preadv2(fd, &iov, 1, 0, 0);
+         if(r < 0) abort(strerror(-r));
+
          __atomic_fetch_add( &count, 1, __ATOMIC_SEQ_CST );
      }
…
  int main(int argc, char * argv[]) {
-     #if !defined(__CFA_NO_STATISTICS__)
-         print_stats_at_exit( *active_cluster() );
-     #endif
-
      double duration = 5.0;
      unsigned long int nthreads = 2;
…
      }

-     int fd = open(__FILE__, 0);
+     fd = open(__FILE__, 0);
      if(fd < 0) {
          fprintf(stderr, "Could not open source file\n");
…
      printf("Running %lu threads over %lu processors for %lf seconds\n", nthreads, nprocs, duration);

-     Time start, end;
      {
-         processor procs[nprocs - 1];
+         Time start, end;
+         cluster cl = { "IO Cluster" };
+         the_cluster = &cl;
+         #if !defined(__CFA_NO_STATISTICS__)
+             print_stats_at_exit( cl );
+         #endif
          {
-             Reader threads[nthreads];
+             my_processor procs[nprocs];
+             {
+                 Reader threads[nthreads];

-             printf("Starting\n");
-             start = getTime();
-             run = true;
-             do {
-                 sleep(500`ms);
-                 end = getTime();
-             } while( (end - start) < duration`s );
-             run = false;
-             end = getTime();
+                 printf("Starting\n");
+                 start = getTime();
+                 run = true;
+                 do {
+                     sleep(500`ms);
+                     end = getTime();
+                 } while( (end - start) < duration`s );
+                 run = false;
+                 end = getTime();
+                 printf("Done\n");
+             }
          }
+         printf("Took %ld ms\n", (end - start)`ms);
+         printf("Total reads: %'zu\n", count);
+         printf("Reads per second: %'lf\n", ((double)count) / (end - start)`s);
      }
-     printf("Took %ld ms\n", (end - start)`ms);
-     printf("Total reads: %'zu\n", count);
-     printf("Reads per second: %'lf\n", ((double)count) / (end - start)`s);

      close(fd);
-     printf("Done\n");
  }
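One behavioural change worth noting: the read loop now checks the return value of cfa_preadv2, and the new abort(strerror(-r)) call suggests the wrapper reports failures as a negative errno value rather than through errno itself. A minimal sketch of the loop under that assumption (buffer setup abbreviated; names taken from the benchmark):

    char data[buflen];
    struct iovec iov = { data, buflen };

    while( __atomic_load_n(&run, __ATOMIC_RELAXED) ) {
        int r = cfa_preadv2( fd, &iov, 1, 0, 0 );           // negative result assumed to encode -errno
        if( r < 0 ) abort( strerror(-r) );                   // translate back before reporting
        __atomic_fetch_add( &count, 1, __ATOMIC_SEQ_CST );   // one completed read
    }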
libcfa/prelude/defines.hfa.in
  #undef HAVE_LINUX_IO_URING_H

- #define __CFA_IO_POLLING_USER__
+ // #define __CFA_IO_POLLING_USER__
  // #define __CFA_IO_POLLING_KERNEL__
libcfa/src/bits/locks.hfa
  struct __bin_sem_t {
-     bool signaled;
      pthread_mutex_t lock;
      pthread_cond_t cond;
+     int val;
  };

  static inline void ?{}(__bin_sem_t & this) with( this ) {
-     signaled = false;
      pthread_mutex_init(&lock, NULL);
      pthread_cond_init (&cond, NULL);
+     val = 0;
  }
…
      verify(__cfaabi_dbg_in_kernel());
      pthread_mutex_lock(&lock);
-     if(!signaled) {   // this must be a loop, not if!
+     while(val < 1) {
          pthread_cond_wait(&cond, &lock);
      }
-     signaled = false;
+     val -= 1;
      pthread_mutex_unlock(&lock);
  }

  static inline bool post(__bin_sem_t & this) with( this ) {
+     bool needs_signal = false;
+
      pthread_mutex_lock(&lock);
-     bool needs_signal = !signaled;
-     signaled = true;
+     if(val < 1) {
+         val += 1;
+         pthread_cond_signal(&cond);
+         needs_signal = true;
+     }
      pthread_mutex_unlock(&lock);
-
-     if (needs_signal) pthread_cond_signal(&cond);

      return needs_signal;
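Because the old and new lines are interleaved above, it may help to see the repaired semaphore in its final form. This is just the post-patch code from the hunks collected in one place with comments added; the wait signature is not visible in the diff and is assumed to mirror post's:

    static inline void wait(__bin_sem_t & this) with( this ) {
        verify(__cfaabi_dbg_in_kernel());
        pthread_mutex_lock(&lock);
        while(val < 1) {                    // must loop: spurious wakeups force a re-check of the token count
            pthread_cond_wait(&cond, &lock);
        }
        val -= 1;                           // consume the token
        pthread_mutex_unlock(&lock);
    }

    static inline bool post(__bin_sem_t & this) with( this ) {
        bool needs_signal = false;

        pthread_mutex_lock(&lock);
        if(val < 1) {                       // only deposit a token if none is pending
            val += 1;
            pthread_cond_signal(&cond);     // signal is now issued while the mutex is held
            needs_signal = true;
        }
        pthread_mutex_unlock(&lock);

        return needs_signal;                // true if a waiter may have been woken
    }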
libcfa/src/concurrency/io.cfa
+ // #define __CFA_DEBUG_PRINT_IO__
+
  #include "kernel.hfa"
…
  void __kernel_io_finish_start( cluster & this ) {
      #if defined(__CFA_IO_POLLING_USER__)
-         (this.io.poller.fast){ this };
+         __cfadbg_print_safe(io, "Kernel I/O : Creating fast poller for cluter %p\n", &this);
+         (this.io.poller.fast){ "Fast IO Poller", this };
          __thrd_start( this.io.poller.fast, main );
      #endif

      // Create the poller thread
+     __cfadbg_print_safe(io, "Kernel I/O : Creating slow poller for cluter %p\n", &this);
      this.io.poller.slow.stack = __create_pthread( &this.io.poller.slow.kthrd, __io_poller_slow, &this );
  }

  void __kernel_io_prepare_stop( cluster & this ) {
+     __cfadbg_print_safe(io, "Kernel I/O : Stopping pollers for cluster\n", &this);
      // Notify the poller thread of the shutdown
      __atomic_store_n(&this.io.done, true, __ATOMIC_SEQ_CST);
…
      free( this.io.poller.slow.stack );

+     __cfadbg_print_safe(io, "Kernel I/O : Slow poller stopped for cluster\n", &this);
+
      #if defined(__CFA_IO_POLLING_USER__)
          // unpark the fast io_poller
…
          ^(this.io.poller.fast){};
+
+         __cfadbg_print_safe(io, "Kernel I/O : Fast poller stopped for cluster\n", &this);
      #endif
  }
…
      struct io_user_data * data = (struct io_user_data *)cqe.user_data;
-     // __cfaabi_bits_print_safe( STDERR_FILENO, "Performed reading io cqe %p, result %d for %p\n", data, cqe.res, data->thrd );
+     __cfadbg_print_safe( io, "Kernel I/O : Performed reading io cqe %p, result %d for %p\n", data, cqe.res, data->thrd );

      data->result = cqe.res;
…
      int count = __drain_io( ring, &mask, 1, true );
      if(count > 0) {
+         __cfadbg_print_safe(io, "Kernel I/O : Moving to ring %p to fast poller\n", &ring);
          __unpark( &ring.poller.fast.thrd __cfaabi_dbg_ctx2 );
          wait( ring.poller.sem );
…
      else {
          // We didn't get anything baton pass to the slow poller
+         __cfadbg_print_safe(io, "Kernel I/O : Moving to ring %p to slow poller\n", &this.ring);
          post( this.ring->poller.sem );
          park( __cfaabi_dbg_ctx );
…
      // Submit however, many entries need to be submitted
      int ret = syscall( __NR_io_uring_enter, ring.fd, 1, 0, 0, 0p, 0);
-     // __cfaabi_bits_print_safe( STDERR_FILENO, "Performed io_submit, returned %d\n", ret );
      if( ret < 0 ) {
          switch((int)errno) {
…
      // Make sure that idx was submitted
      // Be careful to not get false positive if we cycled the entire list or that someone else submitted for us
+     __cfadbg_print_safe( io, "Kernel I/O : Performed io_submit for %p, returned %d\n", active_thread(), ret );
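All of the new traces in this file go through __cfadbg_print_safe with the io group, and the guard added at the top of the file is left commented out, so they are silent by default. A minimal sketch of how they would presumably be turned on, assuming (not confirmed by this diff) that defining the per-group macro before the kernel headers is what enables that group:

    #define __CFA_DEBUG_PRINT_IO__    // uncomment the guard at the top of io.cfa
    #include "kernel.hfa"

    // ... calls like this one then start producing output:
    __cfadbg_print_safe(io, "Kernel I/O : Stopping pollers for cluster\n", &this);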
libcfa/src/concurrency/kernel.cfa
  #define __cforall_thread__
+ // #define __CFA_DEBUG_PRINT_RUNTIME_CORE__

  //C Includes
…
  #include "invoke.h"

+
  //-----------------------------------------------------------------------------
  // Some assembly required
…
      idle{};

-     __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this);
+     __cfadbg_print_safe(runtime_core, "Kernel : Starting core %p\n", &this);

      this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );

-     __cfaabi_dbg_print_safe("Kernel : core %p started\n", &this);
+     __cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this);
  }

  void ^?{}(processor & this) with( this ){
      if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
-         __cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);
+         __cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this);

          __atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
…
      verify(this);

-     __cfaabi_dbg_print_safe("Kernel : core %p starting\n", this);
+     __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);

      doregister(this->cltr, this);
…
      preemption_scope scope = { this };

-     __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);
+     __cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);

      $thread * readyThread = 0p;
…
      }

-     __cfaabi_dbg_print_safe("Kernel : core %p stopping\n", this);
+     __cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
…
      V( this->terminated );

-     __cfaabi_dbg_print_safe("Kernel : core %p terminated\n", this);
+     __cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);

      // HACK : the coroutine context switch expects this_thread to be set
…
      //We now have a proper context from which to schedule threads
-     __cfaabi_dbg_print_safe("Kernel : core %p created (%p, %p)\n", proc, &proc->runner, &ctx);
+     __cfadbg_print_safe(runtime_core, "Kernel : core %p created (%p, %p)\n", proc, &proc->runner, &ctx);

      // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
…
      // Main routine of the core returned, the core is now fully terminated
-     __cfaabi_dbg_print_safe("Kernel : core %p main ended (%p)\n", proc, &proc->runner);
+     __cfadbg_print_safe(runtime_core, "Kernel : core %p main ended (%p)\n", proc, &proc->runner);

      return 0p;
…
  static void __kernel_startup(void) {
      verify( ! kernelTLS.preemption_state.enabled );
-     __cfaabi_dbg_print_safe("Kernel : Starting\n");
+     __cfadbg_print_safe(runtime_core, "Kernel : Starting\n");

      __page_size = sysconf( _SC_PAGESIZE );
…
      (*mainCluster){"Main Cluster"};

-     __cfaabi_dbg_print_safe("Kernel : Main cluster ready\n");
+     __cfadbg_print_safe(runtime_core, "Kernel : Main cluster ready\n");

      // Start by initializing the main thread
…
      (*mainThread){ &info };

-     __cfaabi_dbg_print_safe("Kernel : Main thread ready\n");
+     __cfadbg_print_safe(runtime_core, "Kernel : Main thread ready\n");
…
      runner{ &this };
-     __cfaabi_dbg_print_safe("Kernel : constructed main processor context %p\n", &runner);
+     __cfadbg_print_safe(runtime_core, "Kernel : constructed main processor context %p\n", &runner);
  }
…
      ^(__cfa_dbg_global_clusters.lock){};

-     __cfaabi_dbg_print_safe("Kernel : Shutdown complete\n");
+     __cfadbg_print_safe(runtime_core, "Kernel : Shutdown complete\n");
  }
…
      // We are ready to sleep
-     __cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);
+     __cfadbg_print_safe(runtime_core, "Kernel : Processor %p ready to sleep\n", this);
      wait( idle );

      // We have woken up
-     __cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);
+     __cfadbg_print_safe(runtime_core, "Kernel : Processor %p woke up and ready to run\n", this);

      // Get ourself off the idle list
…
  static bool __wake_one(cluster * this, __attribute__((unused)) bool force) {
      // if we don't want to force check if we know it's false
-     if( !this->idles.head && !force ) return false;
+     // if( !this->idles.head && !force ) return false;

      // First, lock the cluster idle
…
      // Wake them up
+     __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this->idles.head);
      post( this->idles.head->idle );
…
  // Unconditionnaly wake a thread
  static bool __wake_proc(processor * this) {
+     __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
      return post( this->idle );
  }