source: libcfa/src/concurrency/ready_queue.cfa@ dbe2533

Last change on this file since dbe2533 was 708ae38, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Some more cleanup and grow/shrink now readjusts io timestamps.
(They are still unused).

//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__
#define _GNU_SOURCE

// #define __CFA_DEBUG_PRINT_READY_QUEUE__


#define USE_AWARE_STEALING

#include "bits/defs.hfa"
#include "device/cpu.hfa"
#include "kernel/cluster.hfa"
#include "kernel/private.hfa"

#include "limits.hfa"

// #include <errno.h>
// #include <unistd.h>

#include "ready_subqueue.hfa"

static const size_t cache_line_size = 64;

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS(...) __VA_ARGS__
#else
	#define __STATS(...)
#endif
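
// Illustration (not part of the original source): __STATS(...) splices its
// arguments into the surrounding code only when statistics are compiled in,
// avoiding #ifdef noise at every call site. A sketch of how a call site
// expands, assuming a hypothetical counter field `attempt`:
//
//   __STATS( stats.attempt++; )
//     => stats.attempt++;     // statistics enabled
//     => /* nothing */        // __CFA_NO_STATISTICS__ defined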

static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
static inline struct thread$ * search(struct cluster * cltr);

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
// void ?{}(__ready_queue_t & this) with (this) {
// 	lanes.data   = 0p;
// 	lanes.tscs   = 0p;
// 	lanes.caches = 0p;
// 	lanes.count  = 0;
// }

// void ^?{}(__ready_queue_t & this) with (this) {
// 	free(lanes.data);
// 	free(lanes.tscs);
// 	free(lanes.caches);
// }

//-----------------------------------------------------------------------
__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->sched) {
	processor * const proc = kernelTLS().this_processor;
	const bool external = (!proc) || (cltr != proc->cltr);
	const bool remote   = hint == UNPARK_REMOTE;
	const size_t lanes_count = readyQ.count;

	/* paranoid */ verify( __shard_factor.readyq > 0 );
	/* paranoid */ verify( lanes_count > 0 );

	unsigned i;
	if( external || remote ) {
		// Figure out where the thread was last time and make sure the lane is still valid
		/* paranoid */ verify(thrd->preferred >= 0);
		unsigned start = thrd->preferred * __shard_factor.readyq;
		if(start < lanes_count) {
			do {
				unsigned r = __tls_rand();
				i = start + (r % __shard_factor.readyq);
				/* paranoid */ verify( i < lanes_count );
				// If we can't lock it retry
			} while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
		} else {
			do {
				i = __tls_rand() % lanes_count;
			} while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
		}
	} else {
		do {
			unsigned r = proc->rdq.its++;
			i = proc->rdq.id + (r % __shard_factor.readyq);
			/* paranoid */ verify( i < lanes_count );
			// If we can't lock it retry
		} while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
	}

	// Actually push it
	push(readyQ.data[i], thrd);

	// Unlock and return
	__atomic_unlock( &readyQ.data[i].lock );

	#if !defined(__CFA_NO_STATISTICS__)
		if(unlikely(external || remote)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
		else __tls_stats()->ready.push.local.success++;
	#endif
}
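
// Illustrative sketch (not part of the original file): the push path above is
// an instance of "randomize within a shard, then try-lock until success".
// Reduced to stand-alone C with GCC atomics, where `locks`, `start`, `shard`,
// and `seed` are hypothetical stand-ins for the lane locks, the preferred
// shard base, __shard_factor.readyq, and a per-thread RNG state:
//
//   #include <stdlib.h>
//   // pick a random lane in [start, start + shard) and spin until one is locked
//   static unsigned lock_one_of( int * locks, unsigned start, unsigned shard, unsigned * seed ) {
//   	for (;;) {
//   		unsigned i = start + (rand_r(seed) % shard);
//   		int expected = 0;
//   		if ( __atomic_compare_exchange_n( &locks[i], &expected, 1, false,
//   		                                  __ATOMIC_ACQUIRE, __ATOMIC_RELAXED ) )
//   			return i; // lock held: caller pushes, then releases with an atomic store of 0
//   	}
//   }
//
// Randomizing within the shard spreads contention across lanes, and the
// try-lock-and-retry loop never blocks behind a busy lane.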

__attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr) with (cltr->sched) {
	const size_t lanes_count = readyQ.count;

	/* paranoid */ verify( __shard_factor.readyq > 0 );
	/* paranoid */ verify( lanes_count > 0 );
	/* paranoid */ verify( kernelTLS().this_processor );
	/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes_count );

	processor * const proc = kernelTLS().this_processor;
	unsigned this = proc->rdq.id;
	/* paranoid */ verify( this < lanes_count );
	__cfadbg_print_safe(ready_queue, "Kernel : pop from %u\n", this);
	// Figure out which cache we are currently on
	const unsigned this_cache = cache_id(cltr, this / __shard_factor.readyq);
	const unsigned long long ctsc = rdtscl();

	if(proc->rdq.target == MAX) {
		uint64_t chaos = __tls_rand();
		unsigned ext = chaos & 0xff;
		unsigned other = (chaos >> 8) % (lanes_count);

		if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.readyq].id, __ATOMIC_RELAXED) == this_cache) {
			proc->rdq.target = other;
		}
	}
	else {
		const unsigned target = proc->rdq.target;
		__cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tsc %llu\n", this, target, readyQ.tscs[target].tv);
		/* paranoid */ verify( readyQ.tscs[target].tv != MAX );
		if(target < lanes_count) {
			const unsigned long long cutoff = calc_cutoff(ctsc, proc, lanes_count, cltr->sched.readyQ.data, cltr->sched.readyQ.tscs, __shard_factor.readyq);
			const unsigned long long age = moving_average(ctsc, readyQ.tscs[target].tv, readyQ.tscs[target].ma);
			__cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? "yes" : "no");
			if(age > cutoff) {
				thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
				if(t) return t;
			}
		}
		proc->rdq.target = MAX;
	}

	for(__shard_factor.readyq) {
		unsigned i = this + (proc->rdq.itr++ % __shard_factor.readyq);
		if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
	}

	// All lanes were empty; return 0p
	return 0p;
}
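
// Illustrative sketch (not part of the original file): the helping logic above
// steals from a victim lane only when that lane looks neglected, i.e. when the
// (smoothed) age of its oldest element exceeds a cutoff computed from the
// cluster-wide state. The decision reduces to the following shape, where
// `victim_tv` and `victim_ma` stand in for readyQ.tscs[target].tv/.ma:
//
//   unsigned long long age = moving_average( now, victim_tv, victim_ma );
//   if ( age > cutoff ) try_pop( victim );   // neglected: worth taking its lock
//   else /* skip */;                         // serviced normally: do not interfere
//
// Note also the randomized target selection: `ext < 3` gives roughly a 3-in-256
// chance of accepting a target on a different cache, so cross-cache helping
// still happens occasionally, while same-cache targets are always accepted.
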
__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) {
	unsigned i = __tls_rand() % (cltr->sched.readyQ.count);
	return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
}
__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
	return search(cltr);
}

//=======================================================================
// Various Ready Queue utilities
//=======================================================================
// these functions work the same, or almost the same,
// whether they are using work-stealing or relaxed-FIFO scheduling

//-----------------------------------------------------------------------
// try to pop from a lane given by index w
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) {
	/* paranoid */ verify( w < readyQ.count );
	__STATS( stats.attempt++; )

	// Get relevant elements locally
	__intrusive_lane_t & lane = readyQ.data[w];

	// If list looks empty retry
	if( is_empty(lane) ) {
		return 0p;
	}

	// If we can't get the lock retry
	if( !__atomic_try_acquire(&lane.lock) ) {
		return 0p;
	}

	// If list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		return 0p;
	}

	// Actually pop the list
	struct thread$ * thrd;
	unsigned long long tsc_before = ts(lane);
	unsigned long long tsv;
	[thrd, tsv] = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(tsv);
	/* paranoid */ verify(lane.lock);

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	__STATS( stats.success++; )

	if (tsv != MAX) {
		unsigned long long now = rdtscl();
		unsigned long long pma = __atomic_load_n(&readyQ.tscs[w].ma, __ATOMIC_RELAXED);
		__atomic_store_n(&readyQ.tscs[w].tv, tsv, __ATOMIC_RELAXED);
		__atomic_store_n(&readyQ.tscs[w].ma, moving_average(now, tsc_before, pma), __ATOMIC_RELAXED);
	}

	thrd->preferred = w / __shard_factor.readyq;

	// return the popped thread
	return thrd;
}
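
// Illustrative sketch (not part of the original file): the update above keeps,
// per lane, the timestamp `tv` of the oldest remaining element and a moving
// average `ma` of how long popped elements sat in the lane. A plausible
// reading of moving_average as an exponential moving average, with a
// hypothetical smoothing weight `w` in (0, 1):
//
//   instant = now - tsc_before;               // how long the popped element waited
//   new_ma  = w * instant + (1 - w) * old_ma; // smooth out one-off spikes
//
// For example, w = 0.5 with old_ma = 1000 and instant = 3000 yields
// new_ma = 2000: a single slow pop only moves the average halfway, while a
// sustained slowdown converges to the new service time.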

//-----------------------------------------------------------------------
// try to pop from any lane, making sure not to miss any thread pushed
// before the start of the function
static inline struct thread$ * search(struct cluster * cltr) {
	const size_t lanes_count = cltr->sched.readyQ.count;
	/* paranoid */ verify( lanes_count > 0 );
	unsigned count = __atomic_load_n( &lanes_count, __ATOMIC_RELAXED );
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct thread$ * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty; return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// get the preferred ready queue for a new thread
unsigned ready_queue_new_preferred() {
	unsigned pref = MAX;
	if(struct thread$ * thrd = publicTLS_get( this_thread )) {
		pref = thrd->preferred;
	}

	return pref;
}

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) {
	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(readyQ.data[j]), true) ) {
		w = (ts(readyQ.data[i]) < ts(readyQ.data[j])) ? i : j;
	}

	return try_pop(cltr, w __STATS(, stats));
}
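
// Illustrative sketch (not part of the original file): preferring the older of
// two candidate lanes is the classic "power of two choices" heuristic. Reduced
// to plain C over an array of per-lane push timestamps (hypothetical names):
//
//   static unsigned pick_older( const unsigned long long * ts, unsigned i, unsigned j ) {
//   	return (ts[i] < ts[j]) ? i : j; // smaller timestamp == older push
//   }
//
// Sampling two lanes instead of one sharply reduces the odds of landing on a
// recently-drained lane while keeping the pop path O(1).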