source: src/libcfa/concurrency/monitor.c @ b751c8e

Last change on this file since b751c8e was b227f68, checked in by Thierry Delisle <tdelisle@…>, 8 years ago

Commented some debug messages.
Monitors now yield when spinning.
Debug mode saves more information about previous locks and interrupts

// -*- Mode: CFA -*-
//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// monitor_desc.c --
//
// Author           : Thierry Delisle
// Created On       : Thu Feb 23 12:27:26 2017
// Last Modified By : Thierry Delisle
// Last Modified On : --
// Update Count     : 0
//

#include "monitor"

#include <stdlib>

#include "libhdr.h"
#include "kernel_private.h"

//-----------------------------------------------------------------------------
// Forward declarations
static inline void set_owner( monitor_desc * this, thread_desc * owner );
static inline thread_desc * next_thread( monitor_desc * this );

static inline void lock_all( spinlock ** locks, unsigned short count );
static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count );
static inline void unlock_all( spinlock ** locks, unsigned short count );
static inline void unlock_all( monitor_desc ** locks, unsigned short count );

static inline void save_recursion   ( monitor_desc ** ctx, unsigned int * /*out*/ recursions, unsigned short count );
static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count );

static inline thread_desc * check_condition( __condition_criterion_t * );
static inline void brand_condition( condition * );
static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val );

//-----------------------------------------------------------------------------
// Enter/Leave routines

extern "C" {
	void __enter_monitor_desc( monitor_desc * this ) {
		lock_yield( &this->lock DEBUG_CTX2 );
		thread_desc * thrd = this_thread;

		// LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);

		if( !this->owner ) {
			//No one has the monitor, just take it
			set_owner( this, thrd );
		}
		else if( this->owner == thrd ) {
			//We already have the monitor, just note how many times we took it
			verify( this->recursion > 0 );
			this->recursion += 1;
		}
		else {
			//Someone else has the monitor, wait in line for it
			append( &this->entry_queue, thrd );
			// LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
			BlockInternal( &this->lock );

			//BlockInternal will unlock the spinlock, no need to unlock ourselves
			return;
		}

		unlock( &this->lock );
		return;
	}

	// leave pseudo code (a sketch of the routine that follows):
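	//   lock the monitor spinlock
	//   decrement the recursion count; if still inside a nested call, unlock and return
	//   otherwise pick the next owner (signal stack first, then entry queue),
	//   release the spinlock and wake that thread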
	void __leave_monitor_desc( monitor_desc * this ) {
		lock_yield( &this->lock DEBUG_CTX2 );

		// LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i). ", this_thread, this, this->owner, this->recursion);
		verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion );

		//Leaving a recursion level, decrement the counter
		this->recursion -= 1;

		//If we haven't left the last level of recursion
		//it means we don't need to do anything
		if( this->recursion != 0 ) {
			unlock( &this->lock );
			return;
		}

		thread_desc * new_owner = next_thread( this );

		//We can now let other threads in safely
		unlock( &this->lock );

		// LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);

		//We need to wake up the thread
		WakeThread( new_owner );
	}

	void __leave_thread_monitor( thread_desc * thrd ) {
		monitor_desc * this = &thrd->mon;
		lock_yield( &this->lock DEBUG_CTX2 );

		disable_interrupts();

		thrd->cor.state = Halted;

		verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );

		//Leaving a recursion level, decrement the counter
		this->recursion -= 1;

		//If we haven't left the last level of recursion
		//it means we don't need to do anything
		if( this->recursion != 0 ) {
			unlock( &this->lock );
			return;
		}

		thread_desc * new_owner = next_thread( this );

		//We can now let other threads in safely
		unlock( &this->lock );

		//We need to wake up the thread
		if( new_owner ) ScheduleThread( new_owner );
	}
}

static inline void enter(monitor_desc ** monitors, int count) {
	for(int i = 0; i < count; i++) {
		__enter_monitor_desc( monitors[i] );
	}
}

static inline void leave(monitor_desc ** monitors, int count) {
	for(int i = count - 1; i >= 0; i--) {
		__leave_monitor_desc( monitors[i] );
	}
}

void ?{}( monitor_guard_t * this, monitor_desc ** m, int count ) {
	this->m = m;
	this->count = count;
	qsort(this->m, count);
	enter( this->m, this->count );

	this->prev_mntrs = this_thread->current_monitors;
	this->prev_count = this_thread->current_monitor_count;

	this_thread->current_monitors = m;
	this_thread->current_monitor_count = count;
}

void ^?{}( monitor_guard_t * this ) {
	leave( this->m, this->count );

	this_thread->current_monitors = this->prev_mntrs;
	this_thread->current_monitor_count = this->prev_count;
}

void ?{}(__condition_node_t * this, thread_desc * waiting_thread, unsigned short count, uintptr_t user_info ) {
	this->waiting_thread = waiting_thread;
	this->count = count;
	this->next = NULL;
	this->user_info = user_info;
}

void ?{}(__condition_criterion_t * this ) {
	this->ready = false;
	this->target = NULL;
	this->owner = NULL;
	this->next = NULL;
}

void ?{}(__condition_criterion_t * this, monitor_desc * target, __condition_node_t * owner ) {
	this->ready = false;
	this->target = target;
	this->owner = owner;
	this->next = NULL;
}

//-----------------------------------------------------------------------------
// Internal scheduling
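// wait() overview, a sketch of the routine defined just below:
//   1. brand the condition with the monitors held by this thread (first use only)
//   2. build one criterion per monitor and queue the waiter node on the condition
//   3. lock every monitor, save each recursion count, and pick the next owner of each monitor
//   4. BlockInternal atomically releases the spinlocks, wakes the chosen owners, and parks this thread
//   5. once signalled (all criteria ready), re-acquire the locks and restore the recursion counts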
void wait( condition * this, uintptr_t user_info = 0 ) {
	// LIB_DEBUG_PRINT_SAFE("Waiting\n");

	brand_condition( this );

	//Check that everything is as expected
	assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
	verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
	verifyf( this->monitor_count < 32u, "Excessive monitor count (%i)", this->monitor_count );

	unsigned short count = this->monitor_count;
	unsigned int recursions[ count ];	//Save the current recursion levels to restore them later
	spinlock * locks [ count ];		//We need to pass-in an array of locks to BlockInternal

	// LIB_DEBUG_PRINT_SAFE("count %i\n", count);

	__condition_node_t waiter = { (thread_desc*)this_thread, count, user_info };

	__condition_criterion_t criteria[count];
	for(int i = 0; i < count; i++) {
		(&criteria[i]){ this->monitors[i], &waiter };
		// LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
	}

	waiter.criteria = criteria;
	append( &this->blocked, &waiter );

	lock_all( this->monitors, locks, count );
	save_recursion( this->monitors, recursions, count );
	//DON'T unlock, ask the kernel to do it

	//Find the next thread(s) to run
	unsigned short thread_count = 0;
	thread_desc * threads[ count ];
	for(int i = 0; i < count; i++) {
		threads[i] = 0;
	}

	for( int i = 0; i < count; i++) {
		thread_desc * new_owner = next_thread( this->monitors[i] );
		thread_count = insert_unique( threads, thread_count, new_owner );
	}

	// LIB_DEBUG_PRINT_SAFE("Will unblock: ");
	for(int i = 0; i < thread_count; i++) {
		// LIB_DEBUG_PRINT_SAFE("%p ", threads[i]);
	}
	// LIB_DEBUG_PRINT_SAFE("\n");

	// Everything is ready to go to sleep
	BlockInternal( locks, count, threads, thread_count );


	//WE WOKE UP


	//We are back, restore the owners and recursions
	lock_all( locks, count );
	restore_recursion( this->monitors, recursions, count );
	unlock_all( locks, count );
}

bool signal( condition * this ) {
	if( is_empty( this ) ) {
		// LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
		return false;
	}

	//Check that everything is as expected
	verify( this->monitors );
	verify( this->monitor_count != 0 );

	unsigned short count = this->monitor_count;

	//Some more checking in debug
	LIB_DEBUG_DO(
		thread_desc * this_thrd = this_thread;
		if ( this->monitor_count != this_thrd->current_monitor_count ) {
			abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count );
		} // if

		for(int i = 0; i < this->monitor_count; i++) {
			if ( this->monitors[i] != this_thrd->current_monitors[i] ) {
				abortf( "Signal on condition %p made with different monitor, expected %p got %p", this, this->monitors[i], this_thrd->current_monitors[i] );
			} // if
		}
	);

	//Lock all the monitors
	lock_all( this->monitors, NULL, count );
	// LIB_DEBUG_PRINT_SAFE("Signalling");

	//Pop the head of the waiting queue
	__condition_node_t * node = pop_head( &this->blocked );

	//Add the criteria to the proper AS stacks
	for(int i = 0; i < count; i++) {
		__condition_criterion_t * crit = &node->criteria[i];
		// LIB_DEBUG_PRINT_SAFE(" %p", crit->target);
		assert( !crit->ready );
		push( &crit->target->signal_stack, crit );
	}

	// LIB_DEBUG_PRINT_SAFE("\n");

	//Release
	unlock_all( this->monitors, count );

	return true;
}

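// signal_block(), a summary of the routine below: unlike signal(), the signaller hands
// its monitors directly to the signalled thread and blocks immediately; the signaller's
// own criteria go on the signal stacks, so it resumes once the signallee releases the monitors.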
bool signal_block( condition * this ) {
	if( !this->blocked.head ) {
		LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
		return false;
	}

	//Check that everything is as expected
	verifyf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
	verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );

	unsigned short count = this->monitor_count;
	unsigned int recursions[ count ];	//Save the current recursion levels to restore them later
	spinlock * locks [ count ];		//We need to pass-in an array of locks to BlockInternal

	lock_all( this->monitors, locks, count );

	//create criteria
	__condition_node_t waiter = { (thread_desc*)this_thread, count, 0 };

	__condition_criterion_t criteria[count];
	for(int i = 0; i < count; i++) {
		(&criteria[i]){ this->monitors[i], &waiter };
		// LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
		push( &criteria[i].target->signal_stack, &criteria[i] );
	}

	waiter.criteria = criteria;

	//save contexts
	save_recursion( this->monitors, recursions, count );

	//Find the thread to run
	thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
	for(int i = 0; i < count; i++) {
		set_owner( this->monitors[i], signallee );
	}

	LIB_DEBUG_PRINT_SAFE( "Waiting on signal block\n" );

	//Everything is ready to go to sleep
	BlockInternal( locks, count, &signallee, 1 );


	LIB_DEBUG_PRINT_SAFE( "Back from signal block\n" );

	//We are back, restore the owners and recursions
	lock_all( locks, count );
	restore_recursion( this->monitors, recursions, count );
	unlock_all( locks, count );

	return true;
}

uintptr_t front( condition * this ) {
	verifyf( !is_empty(this),
		"Attempt to access user data on an empty condition.\n"
		"Possible cause is not checking if the condition is empty before reading stored data."
	);
	return this->blocked.head->user_info;
}

//-----------------------------------------------------------------------------
// External scheduling (accept) - stub, body still commented out
void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) ) {
	// thread_desc * this = this_thread;

	// unsigned short count = this->current_monitor_count;
	// unsigned int recursions[ count ];	//Save the current recursion levels to restore them later
	// spinlock * locks [ count ];		//We need to pass-in an array of locks to BlockInternal

	// lock_all( this->current_monitors, locks, count );


	// // // Everything is ready to go to sleep
	// // BlockInternal( locks, count, threads, thread_count );


	// //WE WOKE UP


	// //We are back, restore the owners and recursions
	// lock_all( locks, count );
	// restore_recursion( this->monitors, recursions, count );
	// unlock_all( locks, count );
}

//-----------------------------------------------------------------------------
// Utilities

static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
	//Pass the monitor appropriately
	this->owner = owner;

	//We are passing the monitor to someone else, which means the recursion level resets to 1 (or 0 when there is no new owner)
	this->recursion = owner ? 1 : 0;
}

static inline thread_desc * next_thread( monitor_desc * this ) {
	//Check the signaller stack
	__condition_criterion_t * urgent = pop( &this->signal_stack );
	if( urgent ) {
		//The signaller stack is not empty,
		//regardless of whether we are ready to baton pass,
		//we need to mark the monitor as in use
		set_owner( this, urgent->owner->waiting_thread );

		return check_condition( urgent );
	}

	// No signaller thread
	// Get the next thread in the entry_queue
	thread_desc * new_owner = pop_head( &this->entry_queue );
	set_owner( this, new_owner );

	return new_owner;
}

static inline void lock_all( spinlock ** locks, unsigned short count ) {
	for( int i = 0; i < count; i++ ) {
		lock_yield( locks[i] DEBUG_CTX2 );
	}
}

static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count ) {
	for( int i = 0; i < count; i++ ) {
		spinlock * l = &source[i]->lock;
		lock_yield( l DEBUG_CTX2 );
		if(locks) locks[i] = l;
	}
}

static inline void unlock_all( spinlock ** locks, unsigned short count ) {
	for( int i = 0; i < count; i++ ) {
		unlock( locks[i] );
	}
}

static inline void unlock_all( monitor_desc ** locks, unsigned short count ) {
	for( int i = 0; i < count; i++ ) {
		unlock( &locks[i]->lock );
	}
}


static inline void save_recursion   ( monitor_desc ** ctx, unsigned int * /*out*/ recursions, unsigned short count ) {
	for( int i = 0; i < count; i++ ) {
		recursions[i] = ctx[i]->recursion;
	}
}

static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count ) {
	for( int i = 0; i < count; i++ ) {
		ctx[i]->recursion = recursions[i];
	}
}

// This function has 2 different behaviors
// 1 - Marks a monitor as being ready to run
// 2 - Checks if all the monitors are ready to run
//     if so, returns the thread to run
static inline thread_desc * check_condition( __condition_criterion_t * target ) {
	__condition_node_t * node = target->owner;
	unsigned short count = node->count;
	__condition_criterion_t * criteria = node->criteria;

	bool ready2run = true;

	for( int i = 0; i < count; i++ ) {

		// LIB_DEBUG_PRINT_SAFE( "Checking %p for %p\n", &criteria[i], target );
		if( &criteria[i] == target ) {
			criteria[i].ready = true;
			// LIB_DEBUG_PRINT_SAFE( "True\n" );
		}

		ready2run = criteria[i].ready && ready2run;
	}

	// LIB_DEBUG_PRINT_SAFE( "Running %i\n", ready2run );
	return ready2run ? node->waiting_thread : NULL;
}

static inline void brand_condition( condition * this ) {
	thread_desc * thrd = this_thread;
	if( !this->monitors ) {
		// LIB_DEBUG_PRINT_SAFE("Branding\n");
		assertf( thrd->current_monitors != NULL, "No current monitor to brand condition %p", thrd->current_monitors );
		this->monitor_count = thrd->current_monitor_count;

		this->monitors = malloc( this->monitor_count * sizeof( *this->monitors ) );
		for( int i = 0; i < this->monitor_count; i++ ) {
			this->monitors[i] = thrd->current_monitors[i];
		}
	}
}

static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val ) {
	if( !val ) return end;

	for(int i = 0; i <= end; i++) {
		if( thrds[i] == val ) return end;
	}

	thrds[end] = val;
	return end + 1;
}

void ?{}( __condition_blocked_queue_t * this ) {
	this->head = NULL;
	this->tail = &this->head;
}

void append( __condition_blocked_queue_t * this, __condition_node_t * c ) {
	verify(this->tail != NULL);
	*this->tail = c;
	this->tail = &c->next;
}

__condition_node_t * pop_head( __condition_blocked_queue_t * this ) {
	__condition_node_t * head = this->head;
	if( head ) {
		this->head = head->next;
		if( !head->next ) {
			this->tail = &this->head;
		}
		head->next = NULL;
	}
	return head;
}