Changeset 7770cc8 for libcfa/src
- Timestamp:
- Nov 24, 2021, 9:47:56 PM
- Branches:
- ADT, ast-experimental, enum, master, pthread-emulation, qualifiedEnum
- Children:
- 5235d49
- Parents:
- 94647b0b (diff), 3cc1111 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location:
- libcfa/src
- Files:
- 18 edited
libcfa/src/concurrency/io.cfa
- __cfa_io_flush() changes signature from void __cfa_io_flush( processor * proc ) to bool __cfa_io_flush( processor * proc, bool wait ). The io_uring_enter syscall now passes wait ? 1 : 0 as its min_complete argument instead of 0, the EBUSY error path returns false instead of nothing, and the normal path returns the result of __cfa_io_drain( proc ) (still taken under ready_schedule_lock/unlock). The commented-out blocks that reset cltr->ready_queue.lanes.tscs[idx].tv around the syscall are deleted, along with some blank-line cleanup.
- In the submission path, sq.to_submit ++ becomes sq.to_submit += have, and the eager flush becomes __cfa_io_flush( ctx->proc, false ).
- A new function is added under #if defined(IO_URING_IDLE):

    bool __kernel_read(processor * proc, io_future_t & future, char buf[], int fd) {
        $io_context * ctx = proc->io.ctx;
        /* paranoid */ verify( ! __preemption_enabled() );
        /* paranoid */ verify( proc == __cfaabi_tls.this_processor );
        /* paranoid */ verify( ctx );

        __u32 idx;
        struct io_uring_sqe * sqe;

        // We can proceed to the fast path
        if( !__alloc(ctx, &idx, 1) ) return false;

        // Allocation was successful
        __fill( &sqe, 1, &idx, ctx );

        sqe->opcode = IORING_OP_READ;
        sqe->user_data = (uintptr_t)&future;
        sqe->flags = 0;
        sqe->ioprio = 0;
        sqe->fd = 0;
        sqe->off = 0;
        sqe->fsync_flags = 0;
        sqe->__pad2[0] = 0;
        sqe->__pad2[1] = 0;
        sqe->__pad2[2] = 0;
        sqe->addr = (uintptr_t)buf;
        sqe->len = sizeof(uint64_t);

        asm volatile("": : :"memory");

        /* paranoid */ verify( sqe->user_data == (uintptr_t)&future );
        __submit( ctx, &idx, 1, true );

        /* paranoid */ verify( proc == __cfaabi_tls.this_processor );
        /* paranoid */ verify( ! __preemption_enabled() );
    }
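A minimal plain-C sketch of the io_uring_enter call shape that the new wait flag maps onto; the helper name flush_ring and its ring_fd/to_submit parameters are illustrative rather than part of the changeset, and stock io_uring only waits for min_complete completions when IORING_ENTER_GETEVENTS is set:

    #include <linux/io_uring.h>
    #include <signal.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    // Submit whatever is queued and, when wait is set, ask the kernel for at
    // least one completion before returning (min_complete = 1).
    static int flush_ring( int ring_fd, unsigned to_submit, int wait ) {
        return syscall( __NR_io_uring_enter, ring_fd, to_submit,
                        wait ? 1 : 0,                         // min_complete
                        wait ? IORING_ENTER_GETEVENTS : 0,    // flags
                        (sigset_t *)0, _NSIG / 8 );
    }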
libcfa/src/concurrency/io/setup.cfa
- The no-io_uring stub void __cfa_io_flush( processor * proc ) {} becomes bool __cfa_io_flush( processor * proc, bool ) {}.
- __io_uring_setup( this, cl.io.params, proc->idle ) becomes __io_uring_setup( this, cl.io.params, proc->idle_fd ), following the rename of the processor field.
- Step 4 of ring setup (registering the processor eventfd with the ring via __NR_io_uring_register / IORING_REGISTER_EVENTFD, bracketed by __disable_interrupts_hard()/__enable_interrupts_hard() because io_uring_register is too slow to survive preemption) is now wrapped in #if !defined(IO_URING_IDLE) ... #endif; the block itself is unchanged apart from re-indentation.
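A minimal plain-C sketch of the eventfd registration that this step performs, assuming an already-created ring; the helper name register_completion_eventfd is illustrative:

    #include <linux/io_uring.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/eventfd.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    // After registration, every completion posted to the ring also signals the
    // eventfd, so a parked processor can be woken by a read on that fd.
    static int register_completion_eventfd( int ring_fd ) {
        int efd = eventfd( 0, 0 );
        if ( efd < 0 ) { perror( "eventfd" ); exit( 1 ); }
        if ( syscall( __NR_io_uring_register, ring_fd,
                      IORING_REGISTER_EVENTFD, &efd, 1 ) < 0 ) {
            perror( "io_uring_register(EVENTFD)" );
            exit( 1 );
        }
        return efd;
    }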
libcfa/src/concurrency/io/types.hfa
- The io_future_t wrapper for wait() is compressed onto one line and two new forwarding wrappers are added:

    // Wait for the future to be fulfilled
    bool wait     ( io_future_t & this ) { return wait     (this.self); }
    void reset    ( io_future_t & this ) { return reset    (this.self); }
    bool available( io_future_t & this ) { return available(this.self); }
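As a rough illustration of the one-shot future shape these wrappers expose (available, reset, fulfilled), a plain-C11 sketch with illustrative names; the real io_future_t lives in the CFA runtime and is not reproduced here:

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct { atomic_uintptr_t state; } future_t;    // 0 = pending, 1 = fulfilled

    static bool available( future_t * f ) { return atomic_load( &f->state ) == 1; }  // no request in flight
    static void reset    ( future_t * f ) { atomic_store( &f->state, 0 ); }          // re-arm before submitting
    static void fulfil   ( future_t * f ) { atomic_store( &f->state, 1 ); }          // done by the completion path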
libcfa/src/concurrency/kernel.cfa
- #include "io/types.hfa" is added to the includes.
- Forward declarations: mark_idle now returns bool, a declaration for static void idle_sleep(processor *, io_future_t &, char buf[]) is added, the query_idles declaration is removed, the extern __cfa_io_flush becomes bool __cfa_io_flush( processor *, bool wait ), and extern bool __kernel_read(processor *, io_future_t &, char buf[], int fd) is declared under #if defined(IO_URING_IDLE) && defined(CFA_HAVE_LINUX_IO_URING_H).
- In the processor main loop, an io_future_t future (pre-marked fulfilled with future.self.ptr = 1p so a pending request can be detected) and a char buf[sizeof(uint64_t)] are created for idle sleep, and the main-loop flush calls become __cfa_io_flush( this, false ).
- Old main loop: mark_idle(this->cltr->procs, * this) becomes if(!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;. The inline print_halts statistics and the blocking eventfd read on this->idle are replaced by a call to idle_sleep( this, future, buf ) followed by mark_awake and continue MAIN_LOOP.
- New main loop (the #warning new kernel loop SEARCH block): the speculative flush becomes __cfa_io_flush( this, false ); the termination check now releases ready_schedule_unlock() before break MAIN_LOOP; pushing to the idle stack becomes if(!mark_idle(this->cltr->procs, * this)) goto SEARCH;; the print_halts prints are wrapped in __STATS(...); and the eventfd read uses the renamed this->idle_fd.
- __wake_one no longer calls query_idles; it loads the cached wake fd with __atomic_load_n(&this->procs.fd, __ATOMIC_SEQ_CST), returns if it is 0, and otherwise does eventfd_write( fd, val ). __wake_proc writes to this->idle_fd.
- A new static void idle_sleep(processor *, io_future_t &, char buf[]) holds the sleep logic: without IO_URING_IDLE (or without CFA_HAVE_LINUX_IO_URING_H) it performs the former print-halts statistics and the blocking eventfd read on this->idle_fd; with IO_URING_IDLE it requires CFA_HAVE_IORING_OP_READ, re-arms the future with reset() and __kernel_read(this, future, buf, this->idle_fd) when no read is pending, then calls __cfa_io_flush( this, true ).
- mark_idle now returns bool: it uses try_lock( this ) and returns false on contention; on success it also publishes the parked processor's eventfd with __atomic_store_n(&this.fd, proc.idle_fd, __ATOMIC_SEQ_CST). mark_awake republishes the first remaining idle processor's idle_fd, or 0 if none are left.
- query_idles( & __cluster_proc_list ) is removed entirely.
- __maybe_io_drain is re-indented, a commented-out __print_stats call is added next to __tally_stats, and the final ready_mutate_lock/ready_mutate_unlock section is now bracketed by disable_interrupts()/enable_interrupts().
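A plain-C sketch of the eventfd park/unpark protocol the non-io_uring idle path uses, with illustrative helper names; EINTR and EAGAIN are treated as spurious wake-ups, matching the kernel loop above:

    #include <errno.h>
    #include <stdlib.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    // Park: block until another thread writes to the eventfd.
    static void park( int idle_fd ) {
        eventfd_t val;
        if ( read( idle_fd, &val, sizeof(val) ) < 0
             && errno != EINTR && errno != EAGAIN ) {
            abort();                        // anything else is a real error
        }
    }

    // Unpark: add 1 to the eventfd counter, releasing a blocked read().
    static void unpark( int idle_fd ) {
        eventfd_write( idle_fd, (eventfd_t)1 );
    }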
libcfa/src/concurrency/kernel.hfa
- In struct processor, the idle eventfd field int idle is renamed to int idle_fd.
- In struct __cluster_proc_list, the volatile uint64_t lock becomes a __spinlock_t lock, and a new volatile int fd ("FD to use to wake a processor") is added ahead of the processor counts.
libcfa/src/concurrency/kernel/startup.cfa
- extern int cfa_main_returned; // from interpose.cfa is declared alongside the other forward declarations.
- __kernel_shutdown() now returns immediately when cfa_main_returned is false.
- Processor init/deinit follow the idle_fd rename: this.idle_fd = eventfd(0, 0), the error check tests idle_fd, and deinit closes this.idle_fd.
- The __cluster_proc_list constructor initializes this.fd = 0 instead of this.lock = 0 (the lock is now a __spinlock_t with its own constructor).
libcfa/src/concurrency/kernel_private.hfa
- A commented-out // #define IO_URING_IDLE switch is added above the scheduler declarations.
- lock(__cluster_proc_list &) no longer implements a counting lock by spinning until it can bump the counter to an odd value with __atomic_compare_exchange_n; it simply calls lock( this.lock __cfaabi_dbg_ctx2 ) on the new __spinlock_t.
- A new static inline bool try_lock(__cluster_proc_list &) is added: it acquires the global ready_schedule_lock, then attempts try_lock( this.lock ); on failure it releases the ready_schedule_lock and returns false.
- unlock(__cluster_proc_list &) correspondingly drops the even/odd counter increment and calls unlock(this.lock) before releasing the global lock.
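A plain-C11 sketch of the try-lock shape that mark_idle now relies on, with illustrative names; the point is that contention makes the caller back off immediately instead of spinning while the scheduler read-write lock is held:

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct { atomic_flag flag; } spin_t;

    static bool spin_trylock( spin_t * l ) {
        // false means someone else already holds the lock: back off and retry later
        return !atomic_flag_test_and_set_explicit( &l->flag, memory_order_acquire );
    }

    static void spin_unlock( spin_t * l ) {
        atomic_flag_clear_explicit( &l->flag, memory_order_release );
    }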
libcfa/src/concurrency/monitor.hfa
- A new array-delete helper is added after the existing delete():

    static inline forall( T & | sized(T) | { void ^?{}( T & mutex ); } )
    void adelete( T arr[] ) {
        if ( arr ) {                                        // ignore null
            size_t dim = malloc_size( arr ) / sizeof( T );
            for ( int i = dim - 1; i >= 0; i -= 1 ) {       // reverse allocation order, must be unsigned
                ^(arr[i]){};                                // run destructor
            } // for
            free( arr );
        } // if
    } // adelete
libcfa/src/concurrency/mutex_stmt.hfa
- The includes are trimmed: <assert.h>, "invoke.h", "stdlib.hfa", and <stdio.h> are dropped; "bits/algorithm.hfa" remains and "bits/defs.hfa" is added.
libcfa/src/device/cpu.cfa
- count_cpus() now reads /sys/devices/system/cpu/online instead of /sys/devices/system/cpu/present.
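A plain-C sketch of counting CPUs from the online file, which holds comma-separated ranges such as 0-7 or 0,2-5; the helper name is illustrative and this is not the parsing code used by cpu.cfa:

    #include <stdio.h>

    static int count_online_cpus( void ) {
        FILE * f = fopen( "/sys/devices/system/cpu/online", "r" );
        if ( ! f ) return -1;
        int count = 0, lo, hi;
        while ( fscanf( f, "%d", &lo ) == 1 ) {
            hi = lo;
            fscanf( f, "-%d", &hi );            // extends lo to a lo-hi range when present
            count += hi - lo + 1;
            if ( fgetc( f ) != ',' ) break;     // groups are separated by commas
        }
        fclose( f );
        return count;
    }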
libcfa/src/fstream.cfa
- Header: Last Modified On becomes Sun Oct 10 11:23:05 2021, Update Count 460 → 512.
- The acquired$ flag is gone: the ofstream and ifstream constructors no longer initialize it, and ends() no longer conditionally releases the stream lock.
- The private separator/newline accessors (sepPrt$, sepReset$, sepGetCur$, sepSetCur$, getNL$, setNL$, getANL$, getPrt$, setPrt$) are declared inline, and lock()/unlock() wrappers over the stream's lock$ are added for both ofstream and ifstream.
- The acquire()/release() pair and the osacquire/isacquire RAII helpers are removed from both stream types.
- Constructors, destructors, fail(), clear(), flush(), and eof() are collapsed onto single lines; ifstream ends() becomes a no-op, ifstream eof() returns feof(...) != 0 as a bool, and ifstream getANL is renamed getANL$.
- open() and close() for both stream types now retry fopen/fclose up to 10 times when errno == EINTR (a timer interrupt), aborting with an "EINTR spinning exceeded" message if the limit is hit; ofstream fmt() applies the same bounded retry to vfprintf, while ifstream fmt() retries vfscanf without a limit since it may legitimately wait on keyboard input.
- close() comments the file$ = 0p reset as "safety after close", and the commented-out old body of ofstream nl() is deleted.
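A plain-C sketch of the bounded EINTR retry pattern applied here, with an illustrative helper name; the limit of 10 mirrors the changeset:

    #include <errno.h>
    #include <stdio.h>

    static FILE * fopen_retry( const char name[], const char mode[] ) {
        FILE * file = NULL;
        for ( int cnt = 0; cnt < 10; cnt += 1 ) {
            errno = 0;
            file = fopen( name, mode );
            if ( file != NULL || errno != EINTR ) break;   // success, or a real error
        }
        return file;                                       // NULL after 10 interrupted tries
    }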
libcfa/src/fstream.hfa
- Header: Last Modified On becomes Sun Oct 10 09:37:32 2021, Update Count 234 → 243.
- The bool acquired$ field is removed from both ofstream and ifstream.
- lock( ofstream & )/unlock( ofstream & ) and lock( ifstream & )/unlock( ifstream & ) are declared in the private sections, while the acquire()/release() declarations and the osacquire/isacquire types are removed.
- For ifstream, the public bool getANL( ifstream & ) declaration is replaced by a private bool getANL$( ifstream & ).
libcfa/src/heap.cfa
- The comment on extern int cfa_main_returned is corrected: it comes from interpose.cfa, not bootloader.cf.
libcfa/src/interpose.cfa
- int cfa_main_returned; is defined here, and __cfaabi_interpose_startup() initializes it to 0.
libcfa/src/iostream.cfa
- Header: Last Modified On becomes Sun Oct 10 09:28:17 2021, Update Count 1342 → 1345.
- The acquire( ostype & ) and acquire( istype & ) manipulator definitions are removed.
- The character-read loop now calls getANL$( is ) instead of getANL( is ).
libcfa/src/iostream.hfa
- Header: Last Modified On becomes Sun Oct 10 10:02:07 2021, Update Count 401 → 407.
- The ostream and istream traits drop their void acquire( ... ); // concurrent access member, and the corresponding acquire manipulator declarations are removed.
- In basic_istream, getANL is renamed to the private getANL$, with // private and // public markers separating it from nlOn/nlOff.
libcfa/src/strstream.cfa
- Header: Last Modified On becomes Sun Oct 10 16:13:20 2021, Update Count 78 → 101; #include "fstream.hfa" is added for abort.
- The private ostrstream separator/newline accessors are declared inline.
- istrstream: getANL is renamed to the private getANL$; ends() becomes an empty one-liner; eof() now returns bool false instead of int 0.
- istrstream fmt() is rewritten: vsscanf is still used, with a comment noting it does not work as intended because vsscanf returns the number of values read rather than the buffer position scanned; on EOF it now aborts with "invalid read"; and a SKULLDUGGERY hack advances cursor$ past the scanned text by skipping to the next whitespace (with a caveat about C reads with wdi manipulators that leave the cursor at a non-whitespace character). The old version, which advanced cursor$ by the vsscanf return value and printed a debug message on EOF, is removed.
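For contrast, a plain-C sketch of how a scan position can be recovered with the standard %n conversion when the format is fixed; this is illustrative only and not what istrstream fmt() does, since it must forward an arbitrary caller-supplied format:

    #include <stdio.h>

    // sscanf returns the number of conversions (2 here); %n is not counted but
    // stores how many characters have been consumed so far.
    static int scan_two_ints( const char buf[], int * x, int * y ) {
        int pos = 0;
        if ( sscanf( buf, "%d %d%n", x, y, &pos ) == 2 ) {
            return pos;                  // index of the first unread character
        }
        return -1;
    }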
libcfa/src/strstream.hfa
- Header: Last Modified On becomes Sun Oct 10 10:14:22 2021, Update Count 41 → 47.
- The public bool getANL( istrstream & ) declaration is replaced by a private bool getANL$( istrstream & ).
- eof( istrstream & ) now returns bool instead of int, and the ungetc/eof/constructor prototypes drop their parameter names.