//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include <stdbool.h>

#include "bits/weakso_locks.hfa"
#include "containers/queueLockFree.hfa"
#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

//-----------------------------------------------------------------------------
// Semaphores

// '0-nary' semaphore
// Similar to a counting semaphore except the value of one is never reached;
// as a consequence, a V() that would bring the value to 1 *spins* until
// a P() consumes it.
struct Semaphore0nary {
	__spinlock_t lock; // needed to protect the single-consumer pop in V()
	mpsc_queue($thread) queue;
};

static inline bool P(Semaphore0nary & this, $thread * thrd) {
	/* paranoid */ verify(!(thrd->seqable.next));
	/* paranoid */ verify(!(thrd`next));

	push(this.queue, thrd);
	return true;
}

static inline bool P(Semaphore0nary & this) {
	$thread * thrd = active_thread();
	P(this, thrd);
	park();
	return true;
}

static inline $thread * V(Semaphore0nary & this, bool doUnpark = true) {
	$thread * next;
	lock(this.lock __cfaabi_dbg_ctx2);
	for (;;) {
		next = pop(this.queue);
		if (next) break;
		Pause();
	}
	unlock(this.lock);

	if (doUnpark) unpark(next);
	return next;
}
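
// Editorial usage sketch (not part of the API): a minimal producer/consumer
// hand-off over the 0-nary semaphore; it assumes every V() is matched by a P(),
// otherwise the pop-loop in V() spins indefinitely.
//
//     Semaphore0nary handoff;
//
//     // consumer thread: enqueue itself and park until handed off
//     P( handoff );
//
//     // producer thread: spin until a parked consumer is available, then wake it
//     V( handoff );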

// Wrapper used on top of any semaphore to avoid unnecessary locking on the fast path
struct BinaryBenaphore {
	volatile ssize_t counter;
};

static inline {
	void ?{}(BinaryBenaphore & this) { this.counter = 0; }
	void ?{}(BinaryBenaphore & this, zero_t) { this.counter = 0; }
	void ?{}(BinaryBenaphore & this, one_t ) { this.counter = 1; }

	// returns true if no blocking needed
	bool P(BinaryBenaphore & this) {
		return __atomic_fetch_sub(&this.counter, 1, __ATOMIC_SEQ_CST) > 0;
	}

	bool tryP(BinaryBenaphore & this) {
		ssize_t c = this.counter;
		/* paranoid */ verify( c > MIN );
		return (c >= 1) && __atomic_compare_exchange_n(&this.counter, &c, c-1, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
	}

	// returns true if no notification is needed (no waiter must be woken)
	bool V(BinaryBenaphore & this) {
		ssize_t c = 0;
		for () {
			/* paranoid */ verify( this.counter < MAX );
			if (__atomic_compare_exchange_n(&this.counter, &c, c+1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
				if (c == 0) return true;
				/* paranoid */ verify(c < 0);
				return false;
			} else {
				if (c == 1) return true;
				/* paranoid */ verify(c < 1);
				Pause();
			}
		}
	}
}
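
// Editorial note (a sketch of the counter protocol, as read from the code above):
// 1 = signalled, 0 = consumed, and negative values count P()ers that must fall
// back to blocking on a backing semaphore.
//
//     counter = 1 (one_t constructor)
//     P()  : 1 -> 0,  returns true   (fast path, no blocking needed)
//     P()  : 0 -> -1, returns false  (caller must block on the backing semaphore)
//     V()  : -1 -> 0, returns false  (a blocked waiter must be woken)
//     V()  : 0 -> 1,  returns true   (no waiter, nothing to wake)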

// Binary Semaphore based on the BinaryBenaphore on top of the 0-nary Semaphore
struct ThreadBenaphore {
	BinaryBenaphore ben;
	Semaphore0nary  sem;
};

static inline void ?{}(ThreadBenaphore & this) {}
static inline void ?{}(ThreadBenaphore & this, zero_t) { (this.ben){ 0 }; }
static inline void ?{}(ThreadBenaphore & this, one_t ) { (this.ben){ 1 }; }

static inline bool P(ThreadBenaphore & this)              { return P(this.ben) ? false : P(this.sem); }
static inline bool tryP(ThreadBenaphore & this)           { return tryP(this.ben); }
static inline bool P(ThreadBenaphore & this, bool wait)   { return wait ? P(this) : tryP(this); }

static inline $thread * V(ThreadBenaphore & this, bool doUnpark = true) {
	if (V(this.ben)) return 0p;
	return V(this.sem, doUnpark);
}
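
// Editorial usage sketch (not part of the API): the benaphore fast path avoids
// touching the 0-nary semaphore entirely, and V() hands back the thread to
// unpark when asked to defer the wake-up. Assumes the benaphore starts signalled.
//
//     ThreadBenaphore b = 1;            // one_t constructor: start signalled
//     P( b );                           // fast path, no parking
//     // ...
//     $thread * t = V( b, false );      // defer the unpark; 0p when nobody waits
//     if ( t ) unpark( t );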

//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
	__spinlock_t lock;
	int count;
	__queue_t($thread) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);
$thread * V (semaphore & this, bool );
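
// Editorial usage sketch (not part of the API): a classic counting-semaphore
// hand-off; the constructor's default count is 1, so an explicit 0 makes the
// first P() block until a V() arrives.
//
//     semaphore s = { 0 };
//
//     // consumer thread:
//     P( s );                // blocks while the count is zero
//
//     // producer thread:
//     V( s );                // wakes one blocked consumer, or banks the count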

//----------
struct single_acquisition_lock {
	inline blocking_lock;
};

static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( single_acquisition_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }
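
// Editorial usage sketch (not part of the API): judging by its name and its
// { false, false } construction of the underlying blocking_lock, this is a
// blocking mutual-exclusion lock that its holder should not re-acquire.
//
//     single_acquisition_lock m;
//     lock( m );
//     // ... critical section; do not lock( m ) again from this thread ...
//     unlock( m );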

//----------
struct owner_lock {
	inline blocking_lock;
};

static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }
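
// Editorial usage sketch (not part of the API): judging by its name and its
// { true, true } construction, an owner_lock tracks its owner and appears to
// allow nested acquisition by that owner, releasing on the matching final unlock.
//
//     owner_lock m;
//     lock( m );
//     lock( m );        // same thread re-acquires
//     unlock( m );
//     unlock( m );      // lock actually released here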

struct fast_lock {
	$thread * volatile owner;
	ThreadBenaphore sem;
};

static inline bool $try_lock(fast_lock & this, $thread * thrd) {
	$thread * exp = 0p;
	return __atomic_compare_exchange_n(&this.owner, &exp, thrd, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
}

static inline void lock( fast_lock & this ) __attribute__((artificial));
static inline void lock( fast_lock & this ) {
	$thread * thrd = active_thread();
	/* paranoid */ verify(thrd != this.owner);

	for (;;) {
		if ($try_lock(this, thrd)) return;
		P(this.sem);
	}
}

static inline bool try_lock( fast_lock & this ) __attribute__((artificial));
static inline bool try_lock ( fast_lock & this ) {
	$thread * thrd = active_thread();
	/* paranoid */ verify(thrd != this.owner);
	return $try_lock(this, thrd);
}

static inline $thread * unlock( fast_lock & this ) __attribute__((artificial));
static inline $thread * unlock( fast_lock & this ) {
	/* paranoid */ verify(active_thread() == this.owner);

	// Clear 'owner' before unparking anyone so that newly arriving and newly
	// unparked threads do not park incorrectly.
	// This may require additional fencing on ARM.
	this.owner = 0p;

	return V(this.sem);
}

static inline size_t on_wait( fast_lock & this ) { unlock(this); return 0; }
static inline void on_wakeup( fast_lock & this, size_t ) { lock(this); }
static inline void on_notify( fast_lock &, struct $thread * t ) { unpark(t); }
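
// Editorial usage sketch (not part of the API): the fast path is a single CAS
// on 'owner'; contended acquisitions block on the embedded ThreadBenaphore.
//
//     fast_lock m;
//     lock( m );         // CAS owner 0p -> this thread, else park on the benaphore
//     // ... critical section ...
//     unlock( m );       // clear owner and wake one blocked thread, if any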

struct mcs_node {
	mcs_node * volatile next;
	single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
	return node->next;
}

struct mcs_lock {
	mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
	if(push(l.queue, &n))
		wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
	mcs_node * next = advance(l.queue, &n);
	if(next) post(next->sem);
}
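
// Editorial usage sketch (not part of the API): MCS acquisition enqueues a
// caller-supplied node, so each acquisition needs its own mcs_node, and that
// node must stay alive (e.g. on the caller's stack) until the matching unlock.
//
//     mcs_lock l;
//
//     mcs_node n;
//     lock( l, n );        // blocks on n.sem if another node is queued ahead
//     // ... critical section ...
//     unlock( l, n );      // wakes the successor queued behind n, if any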

//-----------------------------------------------------------------------------
// is_blocking_lock
trait is_blocking_lock(L & | sized(L)) {
	// For synchronization locks to use when acquiring
	void on_notify( L &, struct $thread * );

	// For synchronization locks to use when releasing
	size_t on_wait( L & );

	// For synchronization locks to use when waking up, to restore the recursion count after being signalled
	void on_wakeup( L &, size_t recursion );
};
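
// Editorial sketch (not part of the API): any lock that provides the three
// hooks satisfies the trait and can serve as the L parameter of
// condition_variable(L) below. fast_lock above illustrates the shape of the hooks:
//
//     static inline size_t on_wait  ( fast_lock & this ) { unlock(this); return 0; }   // release before blocking
//     static inline void   on_wakeup( fast_lock & this, size_t ) { lock(this); }       // re-acquire after being signalled
//     static inline void   on_notify( fast_lock &, struct $thread * t ) { unpark(t); } // wake the signalled thread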

//-----------------------------------------------------------------------------
// info_thread
// info_thread is a wrapper around a thread, used to store
// extra data for use by the condition variable
forall(L & | is_blocking_lock(L)) {
	struct info_thread;

	// for use by sequence
	info_thread(L) *& Back( info_thread(L) * this );
	info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {
	struct condition_variable {
		// Spin lock used for mutual exclusion
		__spinlock_t lock;

		// List of blocked threads
		Sequence( info_thread(L) ) blocked_threads;

		// Count of current blocked threads
		int count;
	};

	void  ?{}( condition_variable(L) & this );
	void ^?{}( condition_variable(L) & this );

	bool notify_one( condition_variable(L) & this );
	bool notify_all( condition_variable(L) & this );

	uintptr_t front( condition_variable(L) & this );

	bool empty  ( condition_variable(L) & this );
	int  counter( condition_variable(L) & this );

	void wait( condition_variable(L) & this );
	void wait( condition_variable(L) & this, uintptr_t info );
	bool wait( condition_variable(L) & this, Duration duration );
	bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

	void wait( condition_variable(L) & this, L & l );
	void wait( condition_variable(L) & this, L & l, uintptr_t info );
	bool wait( condition_variable(L) & this, L & l, Duration duration );
	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
}
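
// Editorial usage sketch (not part of the API): pairing a condition variable
// with a lock that satisfies is_blocking_lock; the wait( cv, lock ) forms are
// assumed to release the lock via on_wait() and restore it via on_wakeup().
//
//     single_acquisition_lock m;
//     condition_variable( single_acquisition_lock ) cv;
//
//     // waiting thread:
//     lock( m );
//     wait( cv, m );          // blocks; m is released while waiting and re-held on return
//     unlock( m );
//
//     // signalling thread:
//     notify_one( cv );       // wakes one blocked waiter, if any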