//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
#define _GNU_SOURCE

// #define __CFA_DEBUG_PRINT_READY_QUEUE__


#define USE_AWARE_STEALING

#include "bits/defs.hfa"
#include "device/cpu.hfa"
#include "kernel/cluster.hfa"
#include "kernel/private.hfa"

// #include <errno.h>
// #include <unistd.h>

#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;

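// __STATS(...) expands to its arguments when statistics are enabled, and to
// nothing when __CFA_NO_STATISTICS__ is defined; this is why it can appear
// both inside parameter lists and around statements below.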
#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS(...) __VA_ARGS__
#else
	#define __STATS(...)
#endif

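// Forward declarations of the helpers defined below.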
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
static inline struct thread$ * search(struct cluster * cltr);

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
// void ?{}(__ready_queue_t & this) with (this) {
// 	lanes.data   = 0p;
// 	lanes.tscs   = 0p;
// 	lanes.caches = 0p;
// 	lanes.count  = 0;
// }

// void ^?{}(__ready_queue_t & this) with (this) {
// 	free(lanes.data);
// 	free(lanes.tscs);
// 	free(lanes.caches);
// }

//-----------------------------------------------------------------------
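// Push thrd onto one of the cluster's ready-queue lanes. Each processor owns
// a contiguous block of __shard_factor.readyq lanes starting at proc->rdq.id:
// local pushes round-robin within that block, while external/remote pushes
// target the block recorded in thrd->preferred, falling back to a random lane
// if that block is out of range. For example, assuming 2 processors and a
// shard factor of 2, processor 0 would cycle over lanes [0,1] and processor 1
// over lanes [2,3].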
__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->sched) {
	processor * const proc = kernelTLS().this_processor;
	const bool external = (!proc) || (cltr != proc->cltr);
	const bool remote   = hint == UNPARK_REMOTE;
	const size_t lanes_count = readyQ.count;

	/* paranoid */ verify( __shard_factor.readyq > 0 );
	/* paranoid */ verify( lanes_count > 0 );

	unsigned i;
	if( external || remote ) {
		// Figure out where the thread was last popped and check that the
		// resulting lane block is still valid
		/* paranoid */ verify(thrd->preferred >= 0);
		unsigned start = thrd->preferred * __shard_factor.readyq;
		if(start < lanes_count) {
			do {
				unsigned r = __tls_rand();
				i = start + (r % __shard_factor.readyq);
				/* paranoid */ verify( i < lanes_count );
				// If we can't lock it, retry
			} while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
		} else {
			do {
				i = __tls_rand() % lanes_count;
			} while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
		}
	} else {
		do {
			unsigned r = proc->rdq.its++;
			i = proc->rdq.id + (r % __shard_factor.readyq);
			/* paranoid */ verify( i < lanes_count );
			// If we can't lock it, retry
		} while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
	}

	// Actually push it
	push(readyQ.data[i], thrd);

	// Unlock and return
	__atomic_unlock( &readyQ.data[i].lock );

	#if !defined(__CFA_NO_STATISTICS__)
		if(unlikely(external || remote)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
		else __tls_stats()->ready.push.local.success++;
	#endif
}

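// Pop a thread from this processor's own block of lanes, occasionally
// "helping" another lane. proc->rdq.target == MAX means no help target is
// currently armed; in that case a random lane is armed with probability
// 3/256 per call, or whenever the candidate lane sits on the same CPU cache
// as this processor. An armed target is helped only if the moving average of
// its head timestamp (its "age") exceeds a cutoff computed from this
// processor's own lanes; either way the target is disarmed afterwards.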
__attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr) with (cltr->sched) {
	const size_t lanes_count = readyQ.count;

	/* paranoid */ verify( __shard_factor.readyq > 0 );
	/* paranoid */ verify( lanes_count > 0 );
	/* paranoid */ verify( kernelTLS().this_processor );
	/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes_count );

	processor * const proc = kernelTLS().this_processor;
	unsigned this = proc->rdq.id;
	/* paranoid */ verify( this < lanes_count );
	__cfadbg_print_safe(ready_queue, "Kernel : pop from %u\n", this);

	// Figure out which cache this processor's lanes belong to
	const unsigned this_cache = cache_id(cltr, this / __shard_factor.readyq);
	const unsigned long long ctsc = rdtscl();

	if(proc->rdq.target == MAX) {
		// No help target armed: maybe arm one
		uint64_t chaos  = __tls_rand();
		unsigned ext    = chaos & 0xff;
		unsigned other  = (chaos >> 8) % (lanes_count);

		if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.readyq].id, __ATOMIC_RELAXED) == this_cache) {
			proc->rdq.target = other;
		}
	}
	else {
		const unsigned target = proc->rdq.target;
		__cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tsc %llu\n", this, target, readyQ.tscs[target].tv);
		/* paranoid */ verify( readyQ.tscs[target].tv != MAX );
		if(target < lanes_count) {
			const unsigned long long cutoff = calc_cutoff(ctsc, proc->rdq.id, lanes_count, cltr->sched.readyQ.data, cltr->sched.readyQ.tscs, __shard_factor.readyq);
			const unsigned long long age    = moving_average(ctsc, readyQ.tscs[target].tv, readyQ.tscs[target].ma);
			__cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? "yes" : "no");
			if(age > cutoff) {
				thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
				if(t) return t;
			}
		}
		proc->rdq.target = MAX;
	}

	// Try each lane in this processor's block, starting after the last one used
	for(__shard_factor.readyq) {
		unsigned i = this + (proc->rdq.itr++ % __shard_factor.readyq);
		if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
	}

	// All lanes were empty, return 0p
	return 0p;
}
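// Fallback pops, presumably tried when pop_fast finds nothing: pop_slow
// steals from a single random lane anywhere in the cluster, while pop_search
// scans every lane.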
__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) {
	unsigned i = __tls_rand() % (cltr->sched.readyQ.count);
	return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
}
__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
	return search(cltr);
}

//=======================================================================
// Various Ready Queue utilities
//=======================================================================
// These functions work the same, or almost the same,
// whether the scheduler uses work stealing or relaxed-FIFO scheduling.

//-----------------------------------------------------------------------
// try to pop from a lane given by index w
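// The lane is checked for emptiness both before and after acquiring its lock:
// the first check skips the lock entirely when the lane looks empty, and the
// second re-checks under the lock, since the lane may have been drained
// between the two.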
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) {
	/* paranoid */ verify( w < readyQ.count );
	__STATS( stats.attempt++; )

	// Get relevant elements locally
	__intrusive_lane_t & lane = readyQ.data[w];

	// If the list looks empty, bail out and let the caller retry elsewhere
	if( is_empty(lane) ) {
		return 0p;
	}

	// If we can't get the lock, bail out and let the caller retry elsewhere
	if( !__atomic_try_acquire(&lane.lock) ) {
		return 0p;
	}

	// If the list is actually empty, unlock and bail out
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		return 0p;
	}

	// Actually pop the list
	struct thread$ * thrd;
	unsigned long long ts_prev = ts(lane);
	unsigned long long ts_next;
	[thrd, ts_next] = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(ts_next);
	/* paranoid */ verify(lane.lock);

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	__STATS( stats.success++; )

	// Update the lane's timestamp bookkeeping
	touch_tsc(readyQ.tscs, w, ts_prev, ts_next);

	// Remember which lane block this thread came from
	thrd->preferred = w / __shard_factor.readyq;

	// Return the popped thread
	return thrd;
}

//-----------------------------------------------------------------------
// try to pop from all lanes, making sure no thread pushed before the
// start of the function is missed
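// The scan starts at a random offset so that concurrent searchers spread out
// over the lanes instead of all contending on lane 0 first.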
static inline struct thread$ * search(struct cluster * cltr) {
	const size_t lanes_count = cltr->sched.readyQ.count;
	/* paranoid */ verify( lanes_count > 0 );
	const unsigned count = lanes_count;
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct thread$ * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// get the preferred ready-queue lane block for a new thread
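// A new thread inherits the preferred lane block of the thread creating it
// (if any); MAX means no preference.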
unsigned ready_queue_new_preferred() {
	unsigned pref = MAX;
	if(struct thread$ * thrd = publicTLS_get( this_thread )) {
		pref = thrd->preferred;
	}

	return pref;
}

//-----------------------------------------------------------------------
// given 2 indexes, pick the lane with the oldest push and try to pop from it
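// This is the classic "power of two choices" trick: sampling two lanes and
// popping from the one with the older head approximates global FIFO ordering
// without scanning every lane.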
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) {
	// Pick the best lane: the one with the oldest timestamp, unless j is empty
	int w = i;
	if( __builtin_expect(!is_empty(readyQ.data[j]), true) ) {
		w = (ts(readyQ.data[i]) < ts(readyQ.data[j])) ? i : j;
	}

	return try_pop(cltr, w __STATS(, stats));
}