source: libcfa/src/concurrency/ready_queue.cfa@ 33b7d49

Last change on this file since 33b7d49 was bfb9bf5, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Fixed some warnings

//
// Cforall Version 1.0.0 Copyright (C) 2019 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// ready_queue.cfa --
//
// Author : Thierry Delisle
// Created On : Mon Nov dd 16:29:18 2019
// Last Modified By :
// Last Modified On :
// Update Count :
//

#define __cforall_thread__
#define _GNU_SOURCE

// #define __CFA_DEBUG_PRINT_READY_QUEUE__


#define USE_AWARE_STEALING

#include "bits/defs.hfa"
#include "device/cpu.hfa"
#include "kernel_private.hfa"

#include "limits.hfa"

// #include <errno.h>
// #include <unistd.h>

#include "ready_subqueue.hfa"

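// Editorial note (assumption): 64 bytes matches the cache-line size of typical x86-64 parts;
// presumably this constant is used elsewhere to pad/align per-lane data and avoid false sharing.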
static const size_t cache_line_size = 64;

#if !defined(__CFA_NO_STATISTICS__)
	#define __STATS(...) __VA_ARGS__
#else
	#define __STATS(...)
#endif
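// Descriptive note: __STATS(...) expands to its arguments only when statistics are compiled in,
// so the stats parameters and counter updates below disappear entirely in __CFA_NO_STATISTICS__ builds.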

static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
static inline struct thread$ * search(struct cluster * cltr);

//=======================================================================
// Cforall Ready Queue used for scheduling
//=======================================================================
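// Descriptive overview: the ready queue is sharded into readyQ.count lanes, each an
// individually locked intrusive list.  Each processor nominally owns a group of
// __shard_factor.readyq consecutive lanes starting at proc->rdq.id; push and pop_fast
// mostly stay within that group and only reach across shards to help or to steal.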
// void ?{}(__ready_queue_t & this) with (this) {
//	lanes.data = 0p;
//	lanes.tscs = 0p;
//	lanes.caches = 0p;
//	lanes.count = 0;
// }

// void ^?{}(__ready_queue_t & this) with (this) {
//	free(lanes.data);
//	free(lanes.tscs);
//	free(lanes.caches);
// }

//-----------------------------------------------------------------------
__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint) with (cltr->sched) {
	processor * const proc = kernelTLS().this_processor;
	const bool external = (!proc) || (cltr != proc->cltr);
	const bool remote = hint == UNPARK_REMOTE;
	const size_t lanes_count = readyQ.count;

	/* paranoid */ verify( __shard_factor.readyq > 0 );
	/* paranoid */ verify( lanes_count > 0 );

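	// Lane selection, summarized: external or UNPARK_REMOTE pushes pick a random lane inside
	// the thread's preferred shard group (or any random lane if that group is out of range),
	// while local pushes round-robin over this processor's own __shard_factor.readyq lanes.
	// In every case the loop simply re-rolls until the try-lock on the chosen lane succeeds.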
	unsigned i;
	if( external || remote ) {
		// Figure out where the thread was last time and make sure it's valid
		/* paranoid */ verify(thrd->preferred >= 0);
		unsigned start = thrd->preferred * __shard_factor.readyq;
		if(start < lanes_count) {
			do {
				unsigned r = __tls_rand();
				i = start + (r % __shard_factor.readyq);
				/* paranoid */ verify( i < lanes_count );
				// If we can't lock it, retry
			} while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
		} else {
			do {
				i = __tls_rand() % lanes_count;
			} while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
		}
	} else {
		do {
			unsigned r = proc->rdq.its++;
			i = proc->rdq.id + (r % __shard_factor.readyq);
			/* paranoid */ verify( i < lanes_count );
			// If we can't lock it, retry
		} while( !__atomic_try_acquire( &readyQ.data[i].lock ) );
	}

	// Actually push it
	push(readyQ.data[i], thrd);

	// Unlock and return
	__atomic_unlock( &readyQ.data[i].lock );

	#if !defined(__CFA_NO_STATISTICS__)
		if(unlikely(external || remote)) __atomic_fetch_add(&cltr->stats->ready.push.extrn.success, 1, __ATOMIC_RELAXED);
		else __tls_stats()->ready.push.local.success++;
	#endif
}

__attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr) with (cltr->sched) {
	const size_t lanes_count = readyQ.count;

	/* paranoid */ verify( __shard_factor.readyq > 0 );
	/* paranoid */ verify( lanes_count > 0 );
	/* paranoid */ verify( kernelTLS().this_processor );
	/* paranoid */ verify( kernelTLS().this_processor->rdq.id < lanes_count );

	processor * const proc = kernelTLS().this_processor;
	unsigned this = proc->rdq.id;
	/* paranoid */ verify( this < lanes_count );
	__cfadbg_print_safe(ready_queue, "Kernel : pop from %u\n", this);

	// Figure out the current cpu and make sure it is valid
	const int cpu = __kernel_getcpu();
	/* paranoid */ verify(cpu >= 0);
	/* paranoid */ verify(cpu < cpu_info.hthrd_count);
	unsigned this_cache = cpu_info.llc_map[cpu].cache;

	// Super important: don't write the same value over and over again
	// We want to maximise our chances that this particular value stays in cache
	if(caches[this / __shard_factor.readyq].id != this_cache)
		__atomic_store_n(&caches[this / __shard_factor.readyq].id, this_cache, __ATOMIC_RELAXED);

	const unsigned long long ctsc = rdtscl();

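	// Helping heuristic, summarized: when no target is armed (rdq.target == MAX), arm one with
	// low probability (~3/256) for an arbitrary lane, or unconditionally when the candidate lane
	// maps to this processor's LLC.  On a later pass the armed target is only helped if the age
	// of its oldest element, estimated from its timestamp and moving average, exceeds the cutoff
	// computed by calc_cutoff.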
	if(proc->rdq.target == MAX) {
		uint64_t chaos = __tls_rand();
		unsigned ext = chaos & 0xff;
		unsigned other = (chaos >> 8) % (lanes_count);

		if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.readyq].id, __ATOMIC_RELAXED) == this_cache) {
			proc->rdq.target = other;
		}
	}
	else {
		const unsigned target = proc->rdq.target;
		__cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tcsc %llu\n", this, target, readyQ.tscs[target].tv);
		/* paranoid */ verify( readyQ.tscs[target].tv != MAX );
		if(target < lanes_count) {
			const unsigned long long cutoff = calc_cutoff(ctsc, proc, lanes_count, cltr->sched.readyQ.data, cltr->sched.readyQ.tscs, __shard_factor.readyq);
			const unsigned long long age = moving_average(ctsc, readyQ.tscs[target].tv, readyQ.tscs[target].ma);
			__cfadbg_print_safe(ready_queue, "Kernel : Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, this, age, cutoff, age > cutoff ? "yes" : "no");
			if(age > cutoff) {
				thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
				if(t) return t;
			}
		}
		proc->rdq.target = MAX;
	}

	for(__shard_factor.readyq) {
		unsigned i = this + (proc->rdq.itr++ % __shard_factor.readyq);
		if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
	}

	// All lanes were empty, return 0p
	return 0p;

}
__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) {
	unsigned i = __tls_rand() % (cltr->sched.readyQ.count);
	return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
}
__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
	return search(cltr);
}

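// Hypothetical caller-side sketch (editorial, not part of the original file): the scheduler's
// idle path is expected to prefer the cheap, sharded pop and only fall back to the heavier
// variants, roughly:
//   thread$ * t = pop_fast(cltr);    // own shard, with occasional helping
//   if( !t ) t = pop_slow(cltr);     // single steal attempt on a random lane
//   if( !t ) t = pop_search(cltr);   // exhaustive sweep over every lane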
//=======================================================================
// Various Ready Queue utilities
//=======================================================================
// these functions work the same, or almost the same,
// whether they are using work-stealing or relaxed-FIFO scheduling

//-----------------------------------------------------------------------
// try to pop from a lane given by index w
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) {
	/* paranoid */ verify( w < readyQ.count );
	__STATS( stats.attempt++; )

	// Get relevant elements locally
	__intrusive_lane_t & lane = readyQ.data[w];

	// If the list looks empty, retry
	if( is_empty(lane) ) {
		return 0p;
	}

	// If we can't get the lock, retry
	if( !__atomic_try_acquire(&lane.lock) ) {
		return 0p;
	}

	// If the list is empty, unlock and retry
	if( is_empty(lane) ) {
		__atomic_unlock(&lane.lock);
		return 0p;
	}

	// Actually pop the list
	struct thread$ * thrd;
	unsigned long long tsc_before = ts(lane);
	unsigned long long tsv;
	[thrd, tsv] = pop(lane);

	/* paranoid */ verify(thrd);
	/* paranoid */ verify(tsv);
	/* paranoid */ verify(lane.lock);

	// Unlock and return
	__atomic_unlock(&lane.lock);

	// Update statistics
	__STATS( stats.success++; )

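	// Per-lane timing, summarized: tscs[w].tv holds the timestamp of the lane's current head and
	// tscs[w].ma a moving average of how long elements sit in the lane.  The exact weighting lives
	// in moving_average (defined elsewhere); here it is presumably fed the current time, the
	// timestamp read before the pop, and the previous average.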
	if (tsv != MAX) {
		unsigned long long now = rdtscl();
		unsigned long long pma = __atomic_load_n(&readyQ.tscs[w].ma, __ATOMIC_RELAXED);
		__atomic_store_n(&readyQ.tscs[w].tv, tsv, __ATOMIC_RELAXED);
		__atomic_store_n(&readyQ.tscs[w].ma, moving_average(now, tsc_before, pma), __ATOMIC_RELAXED);
	}

	thrd->preferred = w / __shard_factor.readyq;

	// return the popped thread
	return thrd;
}

//-----------------------------------------------------------------------
// try to pop from any lane, making sure you don't miss any thread pushed
// before the start of the function
static inline struct thread$ * search(struct cluster * cltr) {
	const size_t lanes_count = cltr->sched.readyQ.count;
	/* paranoid */ verify( lanes_count > 0 );
	unsigned count = __atomic_load_n( &lanes_count, __ATOMIC_RELAXED );
	unsigned offset = __tls_rand();
	for(i; count) {
		unsigned idx = (offset + i) % count;
		struct thread$ * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));
		if(thrd) {
			return thrd;
		}
	}

	// All lanes were empty, return 0p
	return 0p;
}

//-----------------------------------------------------------------------
// get the preferred ready-queue shard for a new thread
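// Descriptive note: a new thread inherits its creator's preferred shard, so related threads
// tend to be pushed to nearby lanes; MAX means "no preference yet".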
unsigned ready_queue_new_preferred() {
	unsigned pref = MAX;
	if(struct thread$ * thrd = publicTLS_get( this_thread )) {
		pref = thrd->preferred;
	}

	return pref;
}

//-----------------------------------------------------------------------
// Given 2 indexes, pick the list with the oldest push and try to pop from it
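// Descriptive note: essentially the power-of-two-choices idea, using the oldest head timestamp
// rather than queue length to decide which of the two lanes to pop from.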
static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->sched) {
	// Pick the best list
	int w = i;
	if( __builtin_expect(!is_empty(readyQ.data[j]), true) ) {
		w = (ts(readyQ.data[i]) < ts(readyQ.data[j])) ? i : j;
	}

	return try_pop(cltr, w __STATS(, stats));
}