Changeset 26544f9 for libcfa/src
- Timestamp: Dec 15, 2022, 12:08:44 PM
- Branches: ADT, ast-experimental, master
- Children: e716aec
- Parents: 1ab773e0
- Location: libcfa/src/concurrency
- Files: 4 edited
Legend:
- unchanged lines are prefixed with a space
- added lines are prefixed with `+`
- removed lines are prefixed with `-`
- `…` marks elided, unchanged regions
libcfa/src/concurrency/io.cfa
r1ab773e0 → r26544f9

  static io_context$ * __ioarbiter_allocate( io_arbiter$ & this, __u32 idxs[], __u32 want );
  static void __ioarbiter_submit( io_context$ *, __u32 idxs[], __u32 have, bool lazy );
- static void __ioarbiter_flush ( io_context$ & );
+ static void __ioarbiter_flush ( io_context$ &, bool kernel );
  static inline void __ioarbiter_notify( io_context$ & ctx );
  //=============================================================================================
…
  extern void __kernel_unpark( thread$ * thrd, unpark_hint );

+ static inline void __post(oneshot & this, bool kernel, unpark_hint hint) {
+     thread$ * t = post( this, false );
+     if(kernel) __kernel_unpark( t, hint );
+     else unpark( t, hint );
+ }
+
+ // actual system call of io_uring
+ // wrap so everything that needs to happen around it is always done
+ // i.e., stats, bookkeeping, sqe reclamation, etc.
  static void ioring_syscsll( struct io_context$ & ctx, unsigned int min_comp, unsigned int flags ) {
      __STATS__( true, io.calls.flush++; )
      int ret;
      for() {
+         // do the system call in a loop, repeat on interrupts
          ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, flags, (sigset_t *)0p, _NSIG / 8);
          if( ret < 0 ) {
…
      /* paranoid */ verify( ctx.sq.to_submit >= ret );

-     ctx.sq.to_submit -= ret;
+     // keep track of how many still need submitting
+     __atomic_fetch_sub(&ctx.sq.to_submit, ret, __ATOMIC_SEQ_CST);

      /* paranoid */ verify( ctx.sq.to_submit <= *ctx.sq.num );
…
      /* paranoid */ verify( ! __preemption_enabled() );

+     // mark that there is no pending io left
      __atomic_store_n(&ctx.proc->io.pending, false, __ATOMIC_RELAXED);
  }

+ // try to acquire an io context for draining; helping means we never *need* to drain, we can always do it later
  static bool try_acquire( io_context$ * ctx ) __attribute__((nonnull(1))) {
      /* paranoid */ verify( ! __preemption_enabled() );
…
      {
+         // if there is nothing to drain there is no point in acquiring anything
          const __u32 head = *ctx->cq.head;
          const __u32 tail = *ctx->cq.tail;
…
      }

-     // Drain the queue
-     if(!__atomic_try_acquire(&ctx->cq.lock)) {
+     // try a simple spinlock acquire, it's likely there are completions to drain
+     if(!__atomic_try_acquire(&ctx->cq.try_lock)) {
+         // some other processor already has it
          __STATS__( false, io.calls.locked++; )
          return false;
      }

+     // acquired!!
      return true;
  }

+ // actually drain the completions
  static bool __cfa_do_drain( io_context$ * ctx, cluster * cltr ) __attribute__((nonnull(1, 2))) {
      /* paranoid */ verify( ! __preemption_enabled() );
      /* paranoid */ verify( ready_schedule_islocked() );
-     /* paranoid */ verify( ctx->cq.lock == true );
-
+     /* paranoid */ verify( ctx->cq.try_lock == true );
+
+     // get all the invariants and initial state
      const __u32 mask = *ctx->cq.mask;
      const __u32 num  = *ctx->cq.num;
…
      for() {
          // re-read the head and tail in case it already changed.
+         // count the difference between the two
          const __u32 head = *ctx->cq.head;
          const __u32 tail = *ctx->cq.tail;
…
          __STATS__( false, io.calls.drain++; io.calls.completed += count; )

+         // for everything between head and tail, drain it
          for(i; count) {
              unsigned idx = (head + i) & mask;
…
              /* paranoid */ verify(&cqe);

+             // find the future in the completion
              struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
              // __cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );

+             // don't directly fulfill the future, preemption is disabled so we need to use kernel_unpark
              __kernel_unpark( fulfil( *future, cqe.res, false ), UNPARK_LOCAL );
          }

+         // update the timestamps accordingly
+         // keep a local copy so we can update the relaxed copy
          ts_next = ctx->cq.ts = rdtscl();
…
          ctx->proc->idle_wctx.drain_time = ts_next;

+         // we finished draining the completions... unless the ring buffer was full and there are more secret completions in the kernel.
          if(likely(count < num)) break;

+         // the ring buffer was full, there could be more stuff in the kernel.
          ioring_syscsll( *ctx, 0, IORING_ENTER_GETEVENTS);
      }
…
      /* paranoid */ verify( ! __preemption_enabled() );

-     __atomic_unlock(&ctx->cq.lock);
-
+     // everything is drained, we can release the lock
+     __atomic_unlock(&ctx->cq.try_lock);
+
+     // update the relaxed timestamp
      touch_tsc( cltr->sched.io.tscs, ctx->cq.id, ts_prev, ts_next, false );
…
  }

+ // call from a processor to flush
+ // contains all the bookkeeping a proc must do, not just the barebones flushing logic
+ void __cfa_do_flush( io_context$ & ctx, bool kernel ) {
+     /* paranoid */ verify( ! __preemption_enabled() );
+
+     // flush any external requests
+     ctx.sq.last_external = false; // clear the external bit, the arbiter will reset it if needed
+     __ioarbiter_flush( ctx, kernel );
+
+     // if something must be submitted, do the system call
+     if(ctx.sq.to_submit != 0) {
+         ioring_syscsll(ctx, 0, 0);
+     }
+ }
+
+ // call from a processor to drain
+ // contains all the bookkeeping a proc must do, not just the barebones draining logic
  bool __cfa_io_drain( struct processor * proc ) {
      bool local  = false;
      bool remote = false;

+     // make sure no one creates/destroys io contexts
      ready_schedule_lock();
…
      /* paranoid */ verify( ctx );

+     // Help if needed
      with(cltr->sched) {
          const size_t ctxs_count = io.count;
…
          const unsigned long long ctsc = rdtscl();

+         // only help once every other time
+         // pick a target when not helping
          if(proc->io.target == UINT_MAX) {
              uint64_t chaos = __tls_rand();
+             // choose who to help and whether to accept helping far processors
              unsigned ext   = chaos & 0xff;
              unsigned other = (chaos >> 8) % (ctxs_count);

+             // if the processor is on the same cache or is lucky ( 3 out of 256 odds ) help it
              if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.io].id, __ATOMIC_RELAXED) == this_cache) {
                  proc->io.target = other;
…
          }
          else {
+             // a target was picked last time, help it
              const unsigned target = proc->io.target;
              /* paranoid */ verify( io.tscs[target].t.tv != ULLONG_MAX );
+             // make sure the target hasn't stopped existing since last time
              HELP: if(target < ctxs_count) {
+                 // calculate its age and how young it can be before we give up on helping
                  const __readyQ_avg_t cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io, false);
                  const __readyQ_avg_t age = moving_average(ctsc, io.tscs[target].t.tv, io.tscs[target].t.ma, false);
                  __cfadbg_print_safe(io, "Kernel I/O: Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, ctx->cq.id, age, cutoff, age > cutoff ? "yes" : "no");
+                 // is the target older than the cutoff, recall 0 is oldest and bigger ints are younger
                  if(age <= cutoff) break HELP;

-                 if(!try_acquire(io.data[target])) break HELP;
+                 // attempt to help the submission side
+                 __cfa_do_flush( *io.data[target], true );
+
+                 // attempt to help the completion side
+                 if(!try_acquire(io.data[target])) break HELP; // already acquired, no help needed
+
+                 // actually help
                  if(!__cfa_do_drain( io.data[target], cltr )) break HELP;

+                 // track that we did help someone
                  remote = true;
                  __STATS__( true, io.calls.helped++; )
              }
+
+             // reset the target
              proc->io.target = UINT_MAX;
          }
      }
-

      // Drain the local queue
…
      ready_schedule_unlock();
+
+     // return true if some completion entry, local or remote, was drained
      return local || remote;
  }

+
+
+ // call from a processor to flush
+ // contains all the bookkeeping a proc must do, not just the barebones flushing logic
  bool __cfa_io_flush( struct processor * proc ) {
      /* paranoid */ verify( ! __preemption_enabled() );
…
      /* paranoid */ verify( proc->io.ctx );

-     io_context$ & ctx = *proc->io.ctx;
-
-     __ioarbiter_flush( ctx );
-
-     if(ctx.sq.to_submit != 0) {
-         ioring_syscsll(ctx, 0, 0);
-
-     }
-
+     __cfa_do_flush( *proc->io.ctx, false );
+
+     // also drain since some stuff will immediately complete
      return __cfa_io_drain( proc );
  }
…
  //=============================================================================================
  // submission
- static inline void __submit_only( struct io_context$ * ctx, __u32 idxs[], __u32 have) {
+ // barebones logic to submit a group of sqes
+ static inline void __submit_only( struct io_context$ * ctx, __u32 idxs[], __u32 have, bool lock) {
+     if(!lock)
+         lock( ctx->ext_sq.lock __cfaabi_dbg_ctx2 );
      // We can proceed to the fast path
      // Get the right objects
…
      // Make the sqes visible to the submitter
      __atomic_store_n(sq.kring.tail, tail + have, __ATOMIC_RELEASE);
-     sq.to_submit += have;
-
+     __atomic_fetch_add(&sq.to_submit, have, __ATOMIC_SEQ_CST);
+
+     // set the bit to mark things need to be flushed
      __atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_RELAXED);
      __atomic_store_n(&ctx->proc->io.dirty , true, __ATOMIC_RELAXED);
- }
-
+
+     if(!lock)
+         unlock( ctx->ext_sq.lock );
+ }
+
+ // submission logic + maybe flushing
  static inline void __submit( struct io_context$ * ctx, __u32 idxs[], __u32 have, bool lazy) {
      __sub_ring_t & sq = ctx->sq;
-     __submit_only(ctx, idxs, have);
+     __submit_only(ctx, idxs, have, false);

      if(sq.to_submit > 30) {
…
  }

+ // call from a processor to submit
+ // might require arbitration if the thread was migrated after the allocation
  void cfa_io_submit( struct io_context$ * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) libcfa_public {
      // __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");
…
      if( ctx == inctx ) // We have the right instance?
      {
+         // yes! fast submit
          __submit(ctx, idxs, have, lazy);
…
      __atomic_store_n(&ctx.sq.free_ring.tail, ftail + count, __ATOMIC_SEQ_CST);

+     // notify the allocator that new allocations can be made
      __ioarbiter_notify(ctx);
…
  }

+ // notify the arbiter that new allocations are available
  static void __ioarbiter_notify( io_arbiter$ & this, io_context$ * ctx ) {
      /* paranoid */ verify( !empty(this.pending.queue) );
      /* paranoid */ verify( __preemption_enabled() );
+
+     // mutual exclusion is needed
      lock( this.pending.lock __cfaabi_dbg_ctx2 );
      {
+         __cfadbg_print_safe(io, "Kernel I/O : notifying\n");
+
+         // as long as there are pending allocations try to satisfy them
+         // for simplicity do it in FIFO order
          while( !empty(this.pending.queue) ) {
-             __cfadbg_print_safe(io, "Kernel I/O : notifying\n");
+             // get the first pending alloc
              __u32 have = ctx->sq.free_ring.tail - ctx->sq.free_ring.head;
              __pending_alloc & pa = (__pending_alloc&)head( this.pending.queue );

+             // check if we have enough to satisfy the request
              if( have > pa.want ) goto DONE;
+
+             // if there are enough allocations it means we can drop the request
              drop( this.pending.queue );

              /* paranoid */__attribute__((unused)) bool ret =

+             // actually do the alloc
              __alloc(ctx, pa.idxs, pa.want);

              /* paranoid */ verify( ret );

+             // write out which context satisfied the request and post
+             // this
              pa.ctx = ctx;
-
              post( pa.waitctx );
          }
…
      }
      unlock( this.pending.lock );
- }
-
+
+     /* paranoid */ verify( __preemption_enabled() );
+ }
+
+ // shorthand that avoids the mutual exclusion when the pending queue is empty
  static void __ioarbiter_notify( io_context$ & ctx ) {
-     if(!empty( ctx.arbiter->pending )) {
-         __ioarbiter_notify( *ctx.arbiter, &ctx );
-     }
- }
-
- // Simply append to the pending
+     if(empty( ctx.arbiter->pending )) return;
+     __ioarbiter_notify( *ctx.arbiter, &ctx );
+ }
+
+ // Submit from outside the local processor: append to the outstanding list
  static void __ioarbiter_submit( io_context$ * ctx, __u32 idxs[], __u32 have, bool lazy ) {
      __cfadbg_print_safe(io, "Kernel I/O : submitting %u from the arbiter to context %u\n", have, ctx->fd);
…
      __cfadbg_print_safe(io, "Kernel I/O : waiting to submit %u\n", have);

+     // create the intrusive object to append
      __external_io ei;
      ei.idxs = idxs;
…
      ei.lazy = lazy;

+     // enqueue the io
      bool we = enqueue(ctx->ext_sq, (__outstanding_io&)ei);

+     // mark pending
      __atomic_store_n(&ctx->proc->io.pending, true, __ATOMIC_SEQ_CST);

+     // if this is the first to be enqueued, signal the processor in an attempt to speed up flushing
+     // if it's not the first enqueue, a signal is already in transit
      if( we ) {
          sigval_t value = { PREEMPT_IO };
          __cfaabi_pthread_sigqueue(ctx->proc->kernel_thread, SIGUSR1, value);
- }
-
+         __STATS__( false, io.flush.signal += 1; )
+     }
+     __STATS__( false, io.submit.extr += 1; )
+
+     // to avoid dynamic allocation/memory reclamation headaches, wait for it to have been submitted
      wait( ei.waitctx );
…
  }

- static void __ioarbiter_flush( io_context$ & ctx ) {
-     if(!empty( ctx.ext_sq )) {
-         __STATS__( false, io.flush.external += 1; )
-
-         __cfadbg_print_safe(io, "Kernel I/O : arbiter flushing\n");
-
-         lock( ctx.ext_sq.lock __cfaabi_dbg_ctx2 );
-         {
-             while( !empty(ctx.ext_sq.queue) ) {
-                 __external_io & ei = (__external_io&)drop( ctx.ext_sq.queue );
-
-                 __submit_only(&ctx, ei.idxs, ei.have);
-
-                 post( ei.waitctx );
-             }
-
-             ctx.ext_sq.empty = true;
+ // flush the io arbiter: move all external io operations to the submission ring
+ static void __ioarbiter_flush( io_context$ & ctx, bool kernel ) {
+     // if there are no external operations just return
+     if(empty( ctx.ext_sq )) return;
+
+     // stats and logs
+     __STATS__( false, io.flush.external += 1; )
+     __cfadbg_print_safe(io, "Kernel I/O : arbiter flushing\n");
+
+     // this can happen from multiple processors, mutual exclusion is needed
+     lock( ctx.ext_sq.lock __cfaabi_dbg_ctx2 );
+     {
+         // pop each operation one at a time.
+         // There is no wait morphing because of the io sq ring
+         while( !empty(ctx.ext_sq.queue) ) {
+             // drop the element from the queue
+             __external_io & ei = (__external_io&)drop( ctx.ext_sq.queue );
+
+             // submit it
+             __submit_only(&ctx, ei.idxs, ei.have, true);
+
+             // wake the thread that was waiting on it
+             // since this can be called both from kernel and user, check the flag before posting
+             __post( ei.waitctx, kernel, UNPARK_LOCAL );
          }
-         unlock(ctx.ext_sq.lock );
+
+         // mark the queue as empty
+         ctx.ext_sq.empty = true;
+         ctx.sq.last_external = true;
+     }
+     unlock(ctx.ext_sq.lock );
+ }
+
+ extern "C" {
+     // debug functions used for gdb
+     // io_uring doesn't yet support gdb so the kernel-shared data structures aren't viewable in gdb
+     // these functions read the data that gdb can't and should be removed once the support is added
+     static __u32 __cfagdb_cq_head( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->cq.head; }
+     static __u32 __cfagdb_cq_tail( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->cq.tail; }
+     static __u32 __cfagdb_cq_mask( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->cq.mask; }
+     static __u32 __cfagdb_sq_head( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->sq.kring.head; }
+     static __u32 __cfagdb_sq_tail( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->sq.kring.tail; }
+     static __u32 __cfagdb_sq_mask( io_context$ * ctx ) __attribute__((nonnull(1),used,noinline)) { return *ctx->sq.mask; }
+
+     // fancier version that reads an sqe and copies it out.
+     static struct io_uring_sqe __cfagdb_sq_at( io_context$ * ctx, __u32 at ) __attribute__((nonnull(1),used,noinline)) {
+         __u32 ax = at & *ctx->sq.mask;
+         __u32 ix = ctx->sq.kring.array[ax];
+         return ctx->sq.sqes[ix];
      }
  }
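The io.cfa changes above funnel both the local flush path and the arbiter path through `ioring_syscsll`, which wraps the raw `io_uring_enter` system call with stats, bookkeeping, and a retry loop on interrupts. As a reference point, here is a minimal plain-C sketch of just that retry loop, written outside the CFA runtime; the function name `enter_ring` and the EAGAIN/EBUSY policy are illustrative assumptions, not part of this changeset.

```c
// Hypothetical stand-alone sketch of the retry loop that ioring_syscsll wraps
// around io_uring_enter: retry on EINTR, treat a busy ring as "nothing submitted",
// and surface any other errno as a failure.
#include <errno.h>
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static long enter_ring(int ring_fd, unsigned to_submit, unsigned min_complete, unsigned flags) {
    for (;;) {
        long ret = syscall(__NR_io_uring_enter, ring_fd, to_submit, min_complete,
                           flags, (sigset_t *)0, _NSIG / 8);
        if (ret >= 0) return ret;                         // number of sqes the kernel consumed
        if (errno == EINTR) continue;                     // interrupted by a signal: just retry
        if (errno == EAGAIN || errno == EBUSY) return 0;  // ring busy: try again later (assumed policy)
        return -errno;                                    // genuine failure, report it
    }
}
```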
libcfa/src/concurrency/io/setup.cfa
r1ab773e0 → r26544f9

      // completion queue
-     cq.lock     = false;
+     cq.try_lock = false;
      cq.id       = MAX;
      cq.ts       = rdtscl();
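The field initialized here is renamed from `cq.lock` to `cq.try_lock` to match its actual discipline: it is only ever try-acquired, never waited on, so a processor either drains the completion queue now or walks away and lets a later pass (or a helping processor) do it. Below is a minimal C11 sketch of that discipline, with hypothetical names (`cq_t`, `try_drain`) standing in for the runtime's types.

```c
#include <stdatomic.h>
#include <stdbool.h>

// Simplified completion queue: only the fields needed to show the try-lock pattern.
typedef struct {
    atomic_bool try_lock;   // never blocked on, only try-acquired
    unsigned head, tail;    // completion indices (heavily simplified)
} cq_t;

// Either acquire the queue and drain it, or give up immediately.
static bool try_drain(cq_t *cq) {
    if (cq->head == cq->tail) return false;   // nothing to drain, do not bother locking
    if (atomic_exchange_explicit(&cq->try_lock, true, memory_order_acquire))
        return false;                         // another processor is already draining
    while (cq->head != cq->tail) {
        // ... process the completion at cq->head ...
        cq->head++;
    }
    atomic_store_explicit(&cq->try_lock, false, memory_order_release);
    return true;
}
```

Because nobody ever spins on the lock, a failed acquire costs one atomic exchange and the caller simply reports "nothing drained".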
libcfa/src/concurrency/io/types.hfa
r1ab773e0 → r26544f9

  //-----------------------------------------------------------------------
  // Ring Data structure
- struct __sub_ring_t {
+ // represents the io_uring submission ring, which contains operations that will be sent to io_uring for processing
+ struct __sub_ring_t {
+     // lock needed because remote processors might need to flush the instance
+     __spinlock_t lock;
+
      struct {
          // Head and tail of the ring (associated with array)
…
      // number of sqes to submit on next system call.
-     __u32 to_submit;
+     volatile __u32 to_submit;

      // number of entries and mask to go with it
…
      void * ring_ptr;
      size_t ring_sz;
- };
-
+
+     // for debug purposes, whether or not the last flush was due to an arbiter flush
+     bool last_external;
+ };
+
+ // represents the io_uring completion ring, which contains operations that have completed
  struct __cmp_ring_t {
-     volatile bool lock;
-
+     // needed because remote processors can help drain the buffer
+     volatile bool try_lock;
+
+     // id of the ring, used for the helping/topology algorithms
      unsigned id;

+     // timestamp from the last time it was drained
      unsigned long long ts;

…
  };

+ // struct representing an io operation that still needs processing
+ // actual operations are expected to inherit from this
  struct __outstanding_io {
+     // intrusive link fields
      inline Colable;
+
+     // primitive on which to block until the io is processed
      oneshot waitctx;
  };
  static inline __outstanding_io *& Next( __outstanding_io * n ) { return (__outstanding_io *)Next( (Colable *)n ); }

+ // queue of operations that are outstanding
  struct __outstanding_io_queue {
+     // spinlock for protection
+     // TODO: consider changing to a lock that blocks; whether it should be a kernel or user lock has not been examined
      __spinlock_t lock;
+
+     // the actual queue
      Queue(__outstanding_io) queue;
+
+     // volatile used to avoid the need for taking the lock if it's empty
      volatile bool empty;
  };

+ // struct representing an operation that was submitted
  struct __external_io {
+     // inherits from outstanding io
      inline __outstanding_io;
+
+     // pointer and count to an array of ids to be submitted
      __u32 * idxs;
      __u32 have;
+
+     // whether or not these can be accumulated before flushing the buffer
      bool lazy;
  };

  // complete io_context, contains all the data for io submission and completion
  struct __attribute__((aligned(64))) io_context$ {
+     // arbiter, used in cases where threads are migrated at unfortunate moments
      io_arbiter$ * arbiter;
+
+     // which processor the context is tied to
      struct processor * proc;

+     // queue of io submissions that haven't been processed.
      __outstanding_io_queue ext_sq;

+     // io_uring ring data structures
      struct __sub_ring_t sq;
      struct __cmp_ring_t cq;
+
+     // flags the io_uring rings were created with
      __u32 ring_flags;
+
+     // file descriptor that identifies the io_uring instance
      int fd;
  };

+ // shorthand to check when the io_context was last processed (io drained)
  static inline unsigned long long ts(io_context$ *& this) {
      const __u32 head = *this->cq.head;
      const __u32 tail = *this->cq.tail;

+     // if there are no pending completions, just pretend it's infinitely recent
      if(head == tail) return ULLONG_MAX;

…
  }

+ // structure representing allocations that couldn't succeed locally
  struct __pending_alloc {
+     // inherit from outstanding io
      inline __outstanding_io;
+
+     // array and size of the desired allocation
      __u32 * idxs;
      __u32 want;
+
+     // output param, the context the io was allocated from
      io_context$ * ctx;
  };

+ // arbiter that handles cases where the context tied to the local processor is unable to satisfy the io
  monitor __attribute__((aligned(64))) io_arbiter$ {
+     // contains a queue of io for pending allocations
      __outstanding_io_queue pending;
  };
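The newly documented `ts()` helper above drives the helping heuristic in io.cfa: a ring with no pending completions reports `ULLONG_MAX` (infinitely recent) so it is never chosen as a help target, while a ring with work reports the timestamp of its last drain, which makes the stalest ring the most attractive. The following plain-C sketch shows that idea with simplified, hypothetical field names; it is not the runtime's data layout.

```c
#include <limits.h>

// Stand-in for the parts of the completion ring the heuristic looks at.
struct cq_view {
    const unsigned *head, *tail;        // indices shared with the kernel
    unsigned long long last_drain_ts;   // recorded by the drainer (ctx->cq.ts in the changeset)
};

// "When was this ring last taken care of?"
static unsigned long long freshness(const struct cq_view *cq) {
    if (*cq->head == *cq->tail)
        return ULLONG_MAX;              // empty ring: pretend it was drained an instant ago
    return cq->last_drain_ts;           // otherwise: the older the timestamp, the more it needs help
}
```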
libcfa/src/concurrency/kernel.cfa
r1ab773e0 → r26544f9

          __cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
      }
+
+     __cfa_io_flush( this );
+     __cfa_io_drain( this );

      post( this->terminated );