//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/queueLockFree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

//-----------------------------------------------------------------------------
// Semaphores

// '0-nary' semaphore
// Similar to a counting semaphore, except the value 1 is never reached;
// as a consequence, a V() that would bring the value to 1 *spins* until
// a P() consumes it.
struct Semaphore0nary {
	__spinlock_t lock;                              // protects the consumer side (the pop loop in V())
	mpsc_queue(thread$) queue;
};

static inline bool P(Semaphore0nary & this, thread$ * thrd) {
	/* paranoid */ verify(!thrd`next);
	/* paranoid */ verify(!(&(*thrd)`next));

	push(this.queue, thrd);
	return true;
}

static inline bool P(Semaphore0nary & this) {
	thread$ * thrd = active_thread();
	P(this, thrd);
	park();
	return true;
}

static inline thread$ * V(Semaphore0nary & this, bool doUnpark = true) {
	thread$ * next;
	lock(this.lock __cfaabi_dbg_ctx2);
		for (;;) {
			next = pop(this.queue);
			if (next) break;
			Pause();
		}
	unlock(this.lock);

	if (doUnpark) unpark(next);
	return next;
}
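
// Illustrative use (a sketch, not part of the interface): one thread parks
// itself via P() while another hands out wake-ups via V(); V() spins until a
// P() has pushed a thread for it to consume.
//   Semaphore0nary s;
//   // waiting thread:    P(s);   // enqueue self, then park()
//   // signalling thread: V(s);   // pop one waiter and unpark it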

// Wrapper used on top of any semaphore to avoid potential locking
struct BinaryBenaphore {
	volatile ssize_t counter;
};

static inline {
	void ?{}(BinaryBenaphore & this) { this.counter = 0; }
	void ?{}(BinaryBenaphore & this, zero_t) { this.counter = 0; }
	void ?{}(BinaryBenaphore & this, one_t ) { this.counter = 1; }

	// returns true if no blocking needed
	bool P(BinaryBenaphore & this) {
		return __atomic_fetch_sub(&this.counter, 1, __ATOMIC_SEQ_CST) > 0;
	}

	bool tryP(BinaryBenaphore & this) {
		ssize_t c = this.counter;
		/* paranoid */ verify( c > MIN );
		return (c >= 1) && __atomic_compare_exchange_n(&this.counter, &c, c-1, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
	}

	// returns true if no notify is needed, i.e. no waiter has to be woken
	bool V(BinaryBenaphore & this) {
		ssize_t c = 0;
		for () {
			/* paranoid */ verify( this.counter < MAX );
			if (__atomic_compare_exchange_n(&this.counter, &c, c+1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
				if (c == 0) return true;
				/* paranoid */ verify(c < 0);
				return false;
			} else {
				if (c == 1) return true;
				/* paranoid */ verify(c < 1);
				Pause();
			}
		}
	}
}
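
// Note on the intended pairing (a sketch; ThreadBenaphore below is the real
// user): the benaphore keeps the count in user space so the underlying
// semaphore is only touched under contention.
//   if (!P(ben)) P(sem);   // park only when the fast path fails
//   if (!V(ben)) V(sem);   // unpark only when a waiter is actually parked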

// Binary semaphore based on the BinaryBenaphore, layered on top of the
// 0-nary semaphore
struct ThreadBenaphore {
	BinaryBenaphore ben;
	Semaphore0nary  sem;
};

static inline void ?{}(ThreadBenaphore & this) {}
static inline void ?{}(ThreadBenaphore & this, zero_t) { (this.ben){ 0 }; }
static inline void ?{}(ThreadBenaphore & this, one_t ) { (this.ben){ 1 }; }

static inline bool P(ThreadBenaphore & this)              { return P(this.ben) ? false : P(this.sem); }
static inline bool tryP(ThreadBenaphore & this)           { return tryP(this.ben); }
static inline bool P(ThreadBenaphore & this, bool wait)   { return wait ? P(this) : tryP(this); }

static inline thread$ * V(ThreadBenaphore & this, bool doUnpark = true) {
	if (V(this.ben)) return 0p;
	return V(this.sem, doUnpark);
}

//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
	__spinlock_t lock;
	int count;
	__queue_t(thread$) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

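// Illustrative usage (a sketch): a classic counting semaphore guarding a
// fixed pool of resources.
//   semaphore s{ 3 };          // up to three concurrent holders
//   P( s );                    // acquire, blocks once the count reaches 0
//   /* use one of the resources */
//   V( s );                    // release, possibly unparking a waiter
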
//----------
struct single_acquisition_lock {
	inline blocking_lock;
};

static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//----------
struct owner_lock {
	inline blocking_lock;
};

static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

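// Note: both wrappers delegate to blocking_lock (bits/weakso_locks.hfa) and
// differ only in the two constructor flags ({ false, false } vs.
// { true, true }), which select the multi-acquisition/owner semantics
// defined there.
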
struct fast_lock {
	thread$ * volatile owner;
	ThreadBenaphore sem;
};

static inline void ?{}(fast_lock & this) { this.owner = 0p; }

static inline bool $try_lock(fast_lock & this, thread$ * thrd) {
	thread$ * exp = 0p;
	return __atomic_compare_exchange_n(&this.owner, &exp, thrd, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
}

static inline void lock( fast_lock & this ) __attribute__((artificial));
static inline void lock( fast_lock & this ) {
	thread$ * thrd = active_thread();
	/* paranoid */ verify(thrd != this.owner);

	for (;;) {
		if ($try_lock(this, thrd)) return;
		P(this.sem);
	}
}

static inline bool try_lock( fast_lock & this ) __attribute__((artificial));
static inline bool try_lock( fast_lock & this ) {
	thread$ * thrd = active_thread();
	/* paranoid */ verify(thrd != this.owner);
	return $try_lock(this, thrd);
}

static inline thread$ * unlock( fast_lock & this ) __attribute__((artificial));
static inline thread$ * unlock( fast_lock & this ) {
	/* paranoid */ verify(active_thread() == this.owner);

	// clear 'owner' before unparking anyone, so that newly arriving and
	// unparked threads do not park incorrectly.
	// This may require additional fencing on ARM.
	this.owner = 0p;

	return V(this.sem);
}

static inline size_t on_wait( fast_lock & this ) { unlock(this); return 0; }
static inline void on_wakeup( fast_lock & this, size_t ) { lock(this); }
static inline void on_notify( fast_lock &, struct thread$ * t ) { unpark(t); }

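// Design note: the uncontended path is a single CAS on 'owner'; contended
// threads park on the ThreadBenaphore. Wake-up is not a hand-off: an
// unparked thread re-runs the CAS and may lose to a newly arriving thread
// (barging), which is why lock() retries in a loop.
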
struct mcs_node {
	mcs_node * volatile next;
	single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
	return node->next;
}

struct mcs_lock {
	mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
	if (push(l.queue, &n))
		wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
	mcs_node * next = advance(l.queue, &n);
	if (next) post(next->sem);
}

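// Illustrative usage of the MCS lock (a sketch): each acquisition supplies
// its own queue node, which must stay alive until the matching unlock().
//   mcs_lock l;
//   void critical() {
//       mcs_node n;              // per-acquisition node; the stack is fine
//       lock( l, n );
//       /* critical section */
//       unlock( l, n );
//   }
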
struct linear_backoff_then_block_lock {
	// Spin lock used for mutual exclusion
	__spinlock_t spinlock;

	// Current thread owning the lock
	struct thread$ * owner;

	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Used for comparing and exchanging
	volatile size_t lock_value;

	// Used for linear backoff spinning
	int spin_start;
	int spin_end;
	int spin_count;

	// After unsuccessful linear backoff, yield this many times
	int yield_count;
};

static inline void  ?{}( linear_backoff_then_block_lock & this, int spin_start, int spin_end, int spin_count, int yield_count ) {
	this.spinlock{};
	this.blocked_threads{};
	this.lock_value = 0;
	this.spin_start = spin_start;
	this.spin_end = spin_end;
	this.spin_count = spin_count;
	this.yield_count = yield_count;
}
static inline void  ?{}( linear_backoff_then_block_lock & this ) { this{4, 1024, 16, 0}; }
static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;

static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
	if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		owner = active_thread();
		return true;
	}
	return false;
}

static inline bool try_lock(linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }

static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
	if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
		owner = active_thread();
		return true;
	}
	return false;
}

static inline bool block(linear_backoff_then_block_lock & this) with(this) {
	lock( spinlock __cfaabi_dbg_ctx2 );
	if (lock_value != 2) {
		unlock( spinlock );
		return true;
	}
	insert_last( blocked_threads, *active_thread() );
	unlock( spinlock );
	park( );
	return true;
}

static inline bool lock(linear_backoff_then_block_lock & this) with(this) {
	// if already the owner, just return
	if (active_thread() == owner) return true;
	size_t compare_val = 0;
	int spin = spin_start;
	// linear backoff
	for ( ;; ) {
		compare_val = 0;
		if (internal_try_lock(this, compare_val)) return true;
		if (2 == compare_val) break;
		for (int i = 0; i < spin; i++) Pause();
		if (spin >= spin_end) break;
		spin += spin;
	}

	if (2 != compare_val && try_lock_contention(this)) return true;
	// block until signalled
	while (block(this)) if (try_lock_contention(this)) return true;

	// this point is never reached, since block(this) always returns true
	return false;
}

static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
	verify(lock_value > 0);
	owner = 0p;
	if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
	lock( spinlock __cfaabi_dbg_ctx2 );
	thread$ * t = &try_pop_front( blocked_threads );
	unlock( spinlock );
	unpark( t );
}

static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock(this); }
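
// Note: lock_value encodes three states: 0 => unlocked, 1 => locked and
// uncontended, 2 => locked with possible waiters. An uncontended unlock()
// is a single atomic exchange (old value 1); only when the exchange
// returns 2 is the spinlock taken to pop and unpark a blocked thread.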

//-----------------------------------------------------------------------------
// is_blocking_lock
trait is_blocking_lock(L & | sized(L)) {
	// For synchronization locks to use when acquiring
	void on_notify( L &, struct thread$ * );

	// For synchronization locks to use when releasing
	size_t on_wait( L & );

	// To set the recursion count after being signalled
	void on_wakeup( L &, size_t recursion );
};
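
// Note: these hooks are what the condition variable below invokes on the
// user's lock: on_wait() releases the lock before the waiter parks and
// returns a recursion count, on_wakeup() restores that count after wake-up,
// and on_notify() schedules the signalled thread. See fast_lock and
// linear_backoff_then_block_lock above for concrete implementations.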

//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
	struct info_thread;

	// for use by sequence
	// info_thread(L) *& Back( info_thread(L) * this );
	// info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {
	struct condition_variable {
		// Spin lock used for mutual exclusion
		__spinlock_t lock;

		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;

		// Count of current blocked threads
		int count;
	};

	void  ?{}( condition_variable(L) & this );
	void ^?{}( condition_variable(L) & this );

	bool notify_one( condition_variable(L) & this );
	bool notify_all( condition_variable(L) & this );

	uintptr_t front( condition_variable(L) & this );

	bool empty  ( condition_variable(L) & this );
	int  counter( condition_variable(L) & this );

	void wait( condition_variable(L) & this );
	void wait( condition_variable(L) & this, uintptr_t info );
	bool wait( condition_variable(L) & this, Duration duration );
	bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

	void wait( condition_variable(L) & this, L & l );
	void wait( condition_variable(L) & this, L & l, uintptr_t info );
	bool wait( condition_variable(L) & this, L & l, Duration duration );
	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
}
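
// Illustrative usage (a sketch; 'ready' is a hypothetical flag guarded by l):
//   single_acquisition_lock l;
//   condition_variable( single_acquisition_lock ) c;
//   // waiter:
//   lock( l );
//   while ( !ready ) wait( c, l );   // releases l while parked, reacquires after
//   unlock( l );
//   // signaller:
//   lock( l ); ready = true; unlock( l );
//   notify_one( c );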