source: libcfa/src/concurrency/ready_queue.cfa @ b71b6df

Last change on this file since b71b6df was 7a2972b9, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

The ready queue can now toggle between:

  • lock-based queue
  • mpsc_queue, a.k.a. nemesis queue

Slightly messy implementation; some clean-up needed.
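
For context, below is a minimal sketch of the "nemesis" MPSC idea (a Vyukov-style intrusive MPSC queue). This is illustrative only: the actual toggle is the USE_MPSC define in this file, and the real queue lives in ready_subqueue.hfa. Producers atomically swap the tail pointer, the single consumer chases next pointers, and a stub node keeps the list non-empty:

#include <stddef.h>

struct node {
	struct node * next;
};

struct mpsc {
	struct node * tail;   // shared : last node pushed
	struct node * head;   // consumer-only : next node to pop
	struct node stub;     // sentinel so the list is never truly empty
};

static void mpsc_init(struct mpsc * q) {
	q->stub.next = NULL;
	q->head = &q->stub;
	__atomic_store_n(&q->tail, &q->stub, __ATOMIC_RELAXED);
}

// any thread may push : one atomic swap, no locks, no failure path
static void mpsc_push(struct mpsc * q, struct node * n) {
	n->next = NULL;
	struct node * prev = __atomic_exchange_n(&q->tail, n, __ATOMIC_SEQ_CST);
	__atomic_store_n(&prev->next, n, __ATOMIC_RELEASE);  // link predecessor to us
}

// single consumer only : returns NULL when (momentarily) empty
static struct node * mpsc_pop(struct mpsc * q) {
	struct node * head = q->head;
	struct node * next = __atomic_load_n(&head->next, __ATOMIC_ACQUIRE);
	if(head == &q->stub) {                // skip the sentinel
		if(!next) return NULL;
		q->head = head = next;
		next = __atomic_load_n(&head->next, __ATOMIC_ACQUIRE);
	}
	if(next) { q->head = next; return head; }
	// head is the last visible node : if a producer is mid-push, back off
	if(head != __atomic_load_n(&q->tail, __ATOMIC_SEQ_CST)) return NULL;
	mpsc_push(q, &q->stub);               // re-insert the sentinel behind head
	next = __atomic_load_n(&head->next, __ATOMIC_ACQUIRE);
	if(next) { q->head = next; return head; }
	return NULL;
}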

//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author : Thierry Delisle
// Created On : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count :
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_READY_QUEUE__

// #define USE_SNZI
// #define USE_MPSC

#include "bits/defs.hfa"
#include "kernel_private.hfa"

#define _GNU_SOURCE
#include "stdlib.hfa"
#include "math.hfa"

#include <unistd.h>

#include "snzi.hfa"
#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;
// No overridden function, no environment variable, no define;
// fall back to a magic number.
#ifndef __CFA_MAX_PROCESSORS__
	#define __CFA_MAX_PROCESSORS__ 1024
#endif

#define BIAS 4
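// With BIAS == 4, roughly (BIAS - 1) out of every BIAS push/pop draws favour
// the processor's own "preferred" lanes (see idx_from_r below).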

// Returns the maximum number of processors the RWLock supports.
__attribute__((weak)) unsigned __max_processors() {
	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
	if(!max_cores_s) {
		__cfadbg_print_nolock(ready_queue, "No CFA_MAX_PROCESSORS in ENV\n");
		return __CFA_MAX_PROCESSORS__;
	}

	char * endptr = 0p;
	long int max_cores_l = strtol(max_cores_s, &endptr, 10);
	if(max_cores_l < 1 || max_cores_l > 65535) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS out of range : %ld\n", max_cores_l);
		return __CFA_MAX_PROCESSORS__;
	}
	if('\0' != *endptr) {
		__cfadbg_print_nolock(ready_queue, "CFA_MAX_PROCESSORS not a decimal number : %s\n", max_cores_s);
		return __CFA_MAX_PROCESSORS__;
	}

	return max_cores_l;
}
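// For example, to cap the runtime at 128 processors for one run
// (illustrative shell usage; the program name is a placeholder):
//   $ CFA_MAX_PROCESSORS=128 ./my_program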

//=======================================================================
// Cluster wide reader-writer lock
//=======================================================================
void ?{}(__scheduler_RWLock_t & this) {
	this.max   = __max_processors();
	this.alloc = 0;
	this.ready = 0;
	this.lock  = false;
	this.data  = alloc(this.max);

	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));

}
void ^?{}(__scheduler_RWLock_t & this) {
	free(this.data);
}

void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
	this.handle = proc;
	this.lock   = false;
	#ifdef __CFA_WITH_VERIFY__
		this.owned = false;
	#endif
}
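
// Note: each __scheduler_lock_id_t is padded out to its own cache line(s), so
// a reader spinning on its own lock never false-shares with its neighbours;
// the paranoid alignment checks above and in doregister verify this layout.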

//=======================================================================
// Lock-free registering/unregistering of threads
unsigned doregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

	// Step - 1 : check if there is already space in the data
	uint_fast32_t s = ready;

	// Check among all the ready slots for a free one
	for(uint_fast32_t i = 0; i < s; i++) {
		__processor_id_t * null = 0p; // Re-write every loop since a failed compare-exchange overwrites it
		if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
			&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
			/*paranoid*/ verify(i < ready);
			/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
			/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
			return i;
		}
	}

	if(max <= alloc) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 2 : F&A to get a new spot in the array.
	uint_fast32_t n = __atomic_fetch_add(&alloc, 1, __ATOMIC_SEQ_CST);
	if(max <= n) abort("Trying to create more than %u processors", __scheduler_lock->max);

	// Step - 3 : Mark space as used and then publish it.
	__scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
	(*storage){ proc };
	while() {
		unsigned copy = n;
		if( __atomic_load_n(&ready, __ATOMIC_RELAXED) == n
			&& __atomic_compare_exchange_n(&ready, &copy, n + 1, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			break;
		Pause();
	}

	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p done, id %lu\n", proc, n);

	// Return new spot.
	/*paranoid*/ verify(n < ready);
	/*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
	return n;
}

void unregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
	unsigned id = proc->id;
	/*paranoid*/ verify(id < ready);
	/*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
	__atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);

	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
}

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
// queues or removing them.
uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : lock global lock
	// It is needed to prevent processors that register mid critical-section
	// from simply locking their own lock and entering.
	__atomic_acquire( &lock );

	// Step 2 : lock per-proc lock
	// Processors that are currently being registered aren't counted
	// but can't be in read_lock or in the critical section.
	// All other processors are counted
	uint_fast32_t s = ready;
	for(uint_fast32_t i = 0; i < s; i++) {
		__atomic_acquire( &data[i].lock );
	}

	/* paranoid */ verify( ! __preemption_enabled() );
	return s;
}

void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Step 1 : release local locks
	// This must be done while the global lock is held to avoid
	// threads that were created mid critical-section
	// racing to lock their local locks and having the writer
	// immediately unlock them
	// Alternative solution : return s in write_lock and pass it to write_unlock
	for(uint_fast32_t i = 0; i < last_s; i++) {
		verify(data[i].lock);
		__atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
	}

	// Step 2 : release global lock
	/*paranoid*/ assert(true == lock);
	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);

	/* paranoid */ verify( ! __preemption_enabled() );
}
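
// For context, the matching reader side (ready_schedule_lock/unlock) lives in
// kernel_private.hfa, not in this file. Conceptually it looks like the sketch
// below: each reader only flips its own per-processor flag and backs off while
// a writer holds the global lock, so concurrent readers never contend with one
// another (the usual "big-reader" lock pattern). Illustrative only.
#if 0
void ready_schedule_lock_sketch( unsigned id ) with(*__scheduler_lock) {
	while() {
		// announce intent to read
		__atomic_store_n(&data[id].lock, (bool)true, __ATOMIC_SEQ_CST);
		// no writer active : we hold our read lock
		if(!__atomic_load_n(&lock, __ATOMIC_SEQ_CST)) break;
		// a writer is active : retract and wait
		__atomic_store_n(&data[id].lock, (bool)false, __ATOMIC_RELAXED);
		while(__atomic_load_n(&lock, __ATOMIC_RELAXED)) Pause();
	}
}
#endif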

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
void ?{}(__ready_queue_t & this) with (this) {
	lanes.data = 0p;
	lanes.count = 0;
}

void ^?{}(__ready_queue_t & this) with (this) {
	verify( 1 == lanes.count );
	#ifdef USE_SNZI
		verify( !query( snzi ) );
	#endif
	free(lanes.data);
}

//-----------------------------------------------------------------------
__attribute__((hot)) bool query(struct cluster * cltr) {
	#ifdef USE_SNZI
		return query(cltr->ready_queue.snzi);
	#endif
	return true;
}

// Note : preferred is an int so the -1 sentinel passed by the callers
// actually fails the >= 0 test below (as an unsigned it never would).
static inline [unsigned, bool] idx_from_r(unsigned r, int preferred) {
	unsigned i;
	bool local;
	#if defined(BIAS)
		unsigned rlow  = r % BIAS;
		unsigned rhigh = r / BIAS;
		if((0 != rlow) && preferred >= 0) {
			// (BIAS - 1) out of BIAS chances
			// Use preferred queues
			i = preferred + (rhigh % 4);
			local = true;
		}
		else {
			// 1 out of BIAS chances
			// Use all queues
			i = rhigh;
			local = false;
		}
	#else
		i = r;
		local = false;
	#endif
	return [i, local];
}
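
// Worked example (illustrative): with BIAS == 4 and preferred == 8, a draw
// r == 13 gives rlow == 1 and rhigh == 3, so i == 8 + (3 % 4) == 11, one of
// the four lanes owned by the preferred processor. A draw with rlow == 0
// (1 chance in BIAS) instead spreads over all lanes with i == rhigh.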

//-----------------------------------------------------------------------
__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);

	#if !defined(__CFA_NO_STATISTICS__)
		const bool external = (!kernelTLS().this_proc_id->full_proc) || (cltr != kernelTLS().this_processor->cltr);
	#endif

	// write timestamp
	thrd->link.ts = rdtscl();

	bool first = false;
	__attribute__((unused)) bool local;
	__attribute__((unused)) int preferred;
	#if defined(BIAS)
		preferred =
			//*
			kernelTLS().this_processor ? kernelTLS().this_processor->cltr_id : -1;
			/*/
			thrd->link.preferred * 4;
			//*/
	#endif

	// Try to pick a lane and lock it
	unsigned i;
	do {
		// Pick the index of a lane
		// unsigned r = __tls_rand();
		unsigned r = __tls_rand_fwd();
		[i, local] = idx_from_r(r, preferred);

		i %= __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );

		#if !defined(__CFA_NO_STATISTICS__)
			if(external) {
				if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.local, 1, __ATOMIC_RELAXED);
				__atomic_fetch_add(&cltr->stats->ready.pick.ext.attempt, 1, __ATOMIC_RELAXED);
			}
			else {
				if(local) __tls_stats()->ready.pick.push.local++;
				__tls_stats()->ready.pick.push.attempt++;
			}
		#endif

	#if defined(USE_MPSC)
		// mpsc always succeeds
	} while( false );
	#else
		// If we can't lock it retry
	} while( !__atomic_try_acquire( &lanes.data[i].lock ) );
	#endif

	// Actually push it
	#ifdef USE_SNZI
		bool lane_first =
	#endif

	push(lanes.data[i], thrd);

	#ifdef USE_SNZI
		// If this lane used to be empty we need to do more
		if(lane_first) {
			// Check if the entire queue used to be empty
			first = !query(snzi);

			// Update the snzi
			arrive( snzi, i );
		}
	#endif

	#if !defined(USE_MPSC)
		// Unlock and return
		__atomic_unlock( &lanes.data[i].lock );
	#endif

	// Mark the current index in the tls rng instance as having an item,
	// so that pop, which replays the random sequence backwards, looks
	// first in the lanes most recently pushed to
	__tls_rand_advance_bck();

	__cfadbg_print_safe(ready_queue, "Kernel : Pushed %p on cluster %p (idx: %u, mask %llu, first %d)\n", thrd, cltr, i, used.mask[0], lane_first);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		if(external) {
			if(local) __atomic_fetch_add(&cltr->stats->ready.pick.ext.lsuccess, 1, __ATOMIC_RELAXED);
			__atomic_fetch_add(&cltr->stats->ready.pick.ext.success, 1, __ATOMIC_RELAXED);
		}
		else {
			if(local) __tls_stats()->ready.pick.push.lsuccess++;
			__tls_stats()->ready.pick.push.success++;
		}
	#endif

	// return whether or not the list was empty before this push
	return first;
}

static struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j);
static struct $thread * try_pop(struct cluster * cltr, unsigned i);

// Pop from the ready queue from a given cluster
__attribute__((hot)) $thread * pop(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	int preferred;
	#if defined(BIAS)
		// Don't bother trying locally too much
		preferred = kernelTLS().this_processor->cltr_id;
	#endif


	// As long as the list is not empty, try finding a lane that isn't empty and pop from it
	#ifdef USE_SNZI
		while( query(snzi) ) {
	#else
		for(25) {
	#endif
		// Pick two lists at random
		// unsigned ri = __tls_rand();
		// unsigned rj = __tls_rand();
		unsigned ri = __tls_rand_bck();
		unsigned rj = __tls_rand_bck();

		unsigned i, j;
		__attribute__((unused)) bool locali, localj;
		[i, locali] = idx_from_r(ri, preferred);
		[j, localj] = idx_from_r(rj, preferred);

		#if !defined(__CFA_NO_STATISTICS__)
			if(locali && localj) {
				__tls_stats()->ready.pick.pop.local++;
			}
		#endif

		i %= count;
		j %= count;

		// try popping from the 2 picked lists
		struct $thread * thrd = try_pop(cltr, i, j);
		if(thrd) {
			#if defined(BIAS) && !defined(__CFA_NO_STATISTICS__)
				if( locali || localj ) __tls_stats()->ready.pick.pop.lsuccess++;
			#endif
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}

__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
	/* paranoid */ verify( lanes.count > 0 );
	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct $thread * thrd = try_pop(cltr, idx);
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it.
// This is the classic "power of two choices" trick: comparing two randomly
// picked lanes balances load far better than always picking a single one.
static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j) with (cltr->ready_queue) {
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.attempt++;
	#endif

	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(lanes.data[j]), true) ) {
		w = (ts(lanes.data[i]) < ts(lanes.data[j])) ? i : j;
	}

	return try_pop(cltr, w);
}

static inline struct $thread * try_pop(struct cluster * cltr, unsigned w) with (cltr->ready_queue) {
	// Get relevant elements locally
	__intrusive_lane_t & lane = lanes.data[w];

	// If list looks empty retry
	if( is_empty(lane) ) return 0p;

	// If we can't get the lock retry
	if( !__atomic_try_acquire(&lane.lock) ) return 0p;

	// If list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		return 0p;
	}

	// Actually pop the list
	struct $thread * thrd;
	thrd = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(lane.lock);

	#ifdef USE_SNZI
		// If this was the last element in the lane
		bool emptied = is_empty(lane);
		if(emptied) {
			depart( snzi, w );
		}
	#endif

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.pick.pop.success++;
	#endif

	// Update the thread bias : each processor is assigned 4 consecutive
	// lanes, so lane w belongs to processor w / 4
	thrd->link.preferred = w / 4;

	// return the popped thread
	return thrd;
}
//-----------------------------------------------------------------------

bool remove_head(struct cluster * cltr, struct $thread * thrd) with (cltr->ready_queue) {
	for(i; lanes.count) {
		__intrusive_lane_t & lane = lanes.data[i];

		bool removed = false;

		__atomic_acquire(&lane.lock);
		if(head(lane)->link.next == thrd) {
			$thread * pthrd;
			pthrd = pop(lane);

			/* paranoid */ verify( pthrd == thrd );

			removed = true;
			#ifdef USE_SNZI
				if(is_empty(lane)) {
					depart( snzi, i );
				}
			#endif
		}
		__atomic_unlock(&lane.lock);

		if( removed ) return true;
	}
	return false;
}

//-----------------------------------------------------------------------

static void check( __ready_queue_t & q ) with (q) {
	#if defined(__CFA_WITH_VERIFY__) && !defined(USE_MPSC)
		{
			for( idx ; lanes.count ) {
				__intrusive_lane_t & sl = lanes.data[idx];
				assert(!lanes.data[idx].lock);

				assert(head(sl)->link.prev == 0p );
				assert(head(sl)->link.next->link.prev == head(sl) );
				assert(tail(sl)->link.next == 0p );
				assert(tail(sl)->link.prev->link.next == tail(sl) );

				if(is_empty(sl)) {
					assert(tail(sl)->link.prev == head(sl));
					assert(head(sl)->link.next == tail(sl));
				} else {
					assert(tail(sl)->link.prev != head(sl));
					assert(head(sl)->link.next != tail(sl));
				}
			}
		}
	#endif
}

// Call this function if the intrusive list was moved using memcpy;
// it fixes the list so that the pointers back to the anchors aren't left
// dangling. The head and tail anchors are embedded in the lane itself, so
// moving the lane invalidates the first element's prev pointer and the
// last element's next pointer.
static inline void fix(__intrusive_lane_t & ll) {
	#if !defined(USE_MPSC)
		// if the list is not empty then follow the pointers and fix their reverse
		if(!is_empty(ll)) {
			head(ll)->link.next->link.prev = head(ll);
			tail(ll)->link.prev->link.next = tail(ll);
		}
		// Otherwise just reset the list
		else {
			verify(tail(ll)->link.next == 0p);
			tail(ll)->link.prev = head(ll);
			head(ll)->link.next = tail(ll);
			verify(head(ll)->link.prev == 0p);
		}
	#endif
}

// Grow the ready queue
unsigned ready_queue_grow(struct cluster * cltr, int target) {
	unsigned preferred;
	size_t ncount;

	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	// grow the ready queue
	with( cltr->ready_queue ) {
		#ifdef USE_SNZI
			^(snzi){};
		#endif

		// Find new count
		// Make sure we always have at least 1 list
		if(target >= 2) {
			ncount = target * 4;
			preferred = ncount - 4;
		} else {
			ncount = 1;
			preferred = 0;
		}

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( ncount, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		// Construct new data
		for( idx; (size_t)lanes.count ~ ncount) {
			(lanes.data[idx]){};
		}

		// Update original
		lanes.count = ncount;

		#ifdef USE_SNZI
			// Re-create the snzi
			snzi{ log2( lanes.count / 8 ) };
			for( idx; (size_t)lanes.count ) {
				if( !is_empty(lanes.data[idx]) ) {
					arrive(snzi, idx);
				}
			}
		#endif
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Growing ready queue done\n");

	/* paranoid */ verify( ready_mutate_islocked() );
	return preferred;
}
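
// Worked example (illustrative): growing to target == 3 processors gives
// ncount == 12 lanes and preferred == 8, i.e. the newest processor is handed
// lanes 8..11 as its 4 "preferred" lanes.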

// Shrink the ready queue
void ready_queue_shrink(struct cluster * cltr, int target) {
	/* paranoid */ verify( ready_mutate_islocked() );
	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue\n");

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	with( cltr->ready_queue ) {
		#ifdef USE_SNZI
			^(snzi){};
		#endif

		// Remember old count
		size_t ocount = lanes.count;

		// Find new count
		// Make sure we always have at least 1 list
		lanes.count = target >= 2 ? target * 4 : 1;
		/* paranoid */ verify( ocount >= lanes.count );
		/* paranoid */ verify( lanes.count == target * 4 || target < 2 );

		// for printing count the number of displaced threads
		#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
			__attribute__((unused)) size_t displaced = 0;
		#endif

		// redistribute old data
		for( idx; (size_t)lanes.count ~ ocount) {
			// Lock is not strictly needed but makes checking invariants much easier
			__attribute__((unused)) bool locked = __atomic_try_acquire(&lanes.data[idx].lock);
			verify(locked);

			// As long as we can pop from this lane, push the threads somewhere else in the queue
			while(!is_empty(lanes.data[idx])) {
				struct $thread * thrd;
				thrd = pop(lanes.data[idx]);

				push(cltr, thrd);

				// for printing count the number of displaced threads
				#if defined(__CFA_DEBUG_PRINT__) || defined(__CFA_DEBUG_PRINT_READY_QUEUE__)
					displaced++;
				#endif
			}

			// Unlock the lane
			__atomic_unlock(&lanes.data[idx].lock);

			// TODO print the queue statistics here

			^(lanes.data[idx]){};
		}

		__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue displaced %zu threads\n", displaced);

		// Allocate new array (uses realloc and memcpies the data)
		lanes.data = alloc( lanes.count, lanes.data`realloc );

		// Fix the moved data
		for( idx; (size_t)lanes.count ) {
			fix(lanes.data[idx]);
		}

		#ifdef USE_SNZI
			// Re-create the snzi
			snzi{ log2( lanes.count / 8 ) };
			for( idx; (size_t)lanes.count ) {
				if( !is_empty(lanes.data[idx]) ) {
					arrive(snzi, idx);
				}
			}
		#endif
	}

	// Make sure that everything is consistent
	/* paranoid */ check( cltr->ready_queue );

	__cfadbg_print_safe(ready_queue, "Kernel : Shrinking ready queue done\n");
	/* paranoid */ verify( ready_mutate_islocked() );
}