source: src/libcfa/concurrency/monitor.c@6ff4507

Last change on this file was 6ff4507, checked in by Thierry Delisle <tdelisle@…>, 8 years ago:

Disable Werror since new warnings appeared
Aesthetic refactoring in monitor.c
Monitors are now properly aggregated in waitfor
Monitor masks are now always saved and restored (TODO: check if this is too much)
Insert_unique is now generic

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// monitor.c --
//
// Author           : Thierry Delisle
// Created On       : Thu Feb 23 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Mon Jul 31 14:59:05 2017
// Update Count     : 3
//

#include "monitor"

#include <stdlib>

#include "libhdr.h"
#include "kernel_private.h"

//-----------------------------------------------------------------------------
// Forward declarations
static inline void set_owner( monitor_desc * this, thread_desc * owner );
static inline void set_owner( monitor_desc ** storage, short count, thread_desc * owner );
static inline void set_mask ( monitor_desc ** storage, short count, const __waitfor_mask_t & mask );

static inline thread_desc * next_thread( monitor_desc * this );
static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );

static inline void lock_all( spinlock ** locks, unsigned short count );
static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count );
static inline void unlock_all( spinlock ** locks, unsigned short count );
static inline void unlock_all( monitor_desc ** locks, unsigned short count );

static inline void save   ( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks );
static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*in */ recursions, __waitfor_mask_t * /*in */ masks );

static inline void init     ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );

static inline thread_desc * check_condition( __condition_criterion_t * );
static inline void brand_condition( condition * );
static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc ** monitors, int count );

forall(dtype T | sized( T ))
static inline short insert_unique( T ** array, short & size, T * val );
static inline short count_max( const __waitfor_mask_t & mask );
static inline short aggregate( monitor_desc ** storage, const __waitfor_mask_t & mask );

//-----------------------------------------------------------------------------
// Useful defines
#define wait_ctx(thrd, user_info)                         /* Create the necessary information to use the signaller stack       */ \
	__condition_node_t waiter = { thrd, count, user_info }; /* Create the node specific to this wait operation                    */ \
	__condition_criterion_t criteria[count];                /* Create the criteria this wait operation needs to wake up           */ \
	init( count, monitors, &waiter, criteria );             /* Link everything together                                           */ \

#define wait_ctx_primed(thrd, user_info)                  /* Create the necessary information to use the signaller stack       */ \
	__condition_node_t waiter = { thrd, count, user_info }; /* Create the node specific to this wait operation                    */ \
	__condition_criterion_t criteria[count];                /* Create the criteria this wait operation needs to wake up           */ \
	init_push( count, monitors, &waiter, criteria );        /* Link everything together and push it to the AS-Stack               */ \

#define monitor_ctx( mons, cnt )                          /* Creates the structures needed for internal/external scheduling    */ \
	monitor_desc ** monitors = mons;                        /* Save the targeted monitors                                         */ \
	unsigned short count = cnt;                             /* Save the count to a local variable                                 */ \
	unsigned int recursions[ count ];                       /* Save the current recursion levels to restore them later            */ \
	__waitfor_mask_t masks[ count ];                        /* Save the current waitfor masks to restore them later               */ \
	spinlock * locks[ count ];                              /* We need to pass-in an array of locks to BlockInternal              */ \

#define monitor_save    save   ( monitors, count, locks, recursions, masks )
#define monitor_restore restore( monitors, count, locks, recursions, masks )

#define blockAndWake( thrd, cnt )                         /* Block the current thread and wake-up the given ones                */ \
	monitor_save;                                           /* Save monitor states                                                */ \
	BlockInternal( locks, count, thrd, cnt );               /* Everything is ready to go to sleep                                 */ \
	monitor_restore;                                        /* We are back, restore the owners and recursions                     */ \

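// Editorial sketch, not part of the original file: a blocking operation built from
// the macros above expands to roughly the following sequence (threads/thread_count
// are hypothetical caller locals):
//
//	monitor_ctx( this->monitors, this->monitor_count );	// declares monitors, count, recursions, masks, locks
//	wait_ctx( this_thread, 0 );				// declares waiter + criteria, linked together by init()
//	lock_all( monitors, locks, count );			// acquire every monitor spinlock
//	blockAndWake( threads, thread_count );			// monitor_save; BlockInternal(...); monitor_restore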

//-----------------------------------------------------------------------------
// Enter/Leave routines


extern "C" {
	// Enter single monitor
	static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
		// Lock the monitor spinlock, lock_yield to reduce contention
		lock_yield( &this->lock DEBUG_CTX2 );
		thread_desc * thrd = this_thread;

		LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);

		if( !this->owner ) {
			// No one has the monitor, just take it
			set_owner( this, thrd );

			LIB_DEBUG_PRINT_SAFE("Kernel : mon is free \n");
		}
		else if( this->owner == thrd) {
			// We already have the monitor, just note how many times we took it
			verify( this->recursion > 0 );
			this->recursion += 1;

			LIB_DEBUG_PRINT_SAFE("Kernel : mon already owned \n");
		}
		else if( is_accepted( this, group) ) {
			// Someone was waiting for us, enter
			set_owner( this, thrd );

			LIB_DEBUG_PRINT_SAFE("Kernel : mon accepts \n");
		}
		else {
			LIB_DEBUG_PRINT_SAFE("Kernel : blocking \n");

			// Someone else has the monitor, wait in line for it
			append( &this->entry_queue, thrd );
			BlockInternal( &this->lock );

			LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entered mon %p\n", thrd, this);

			// BlockInternal will unlock the spinlock, no need to unlock ourselves
			return;
		}

		LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entered mon %p\n", thrd, this);

		// Release the lock and leave
		unlock( &this->lock );
		return;
	}

	// Leave single monitor
	void __leave_monitor_desc( monitor_desc * this ) {
		// Lock the monitor spinlock, lock_yield to reduce contention
		lock_yield( &this->lock DEBUG_CTX2 );

		verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion );

		// Leaving a recursion level, decrement the counter
		this->recursion -= 1;

		// If we haven't left the last level of recursion
		// it means we don't need to do anything
		if( this->recursion != 0) {
			unlock( &this->lock );
			return;
		}

		// Get the next thread; will be null on a low-contention monitor
		thread_desc * new_owner = next_thread( this );

		// We can now let other threads in safely
		unlock( &this->lock );

		// We need to wake-up the thread
		WakeThread( new_owner );
	}

	// Leave the thread monitor
	// last routine called by a thread.
	// Should never return
	void __leave_thread_monitor( thread_desc * thrd ) {
		monitor_desc * this = &thrd->self_mon;

		// Lock the monitor now
		lock_yield( &this->lock DEBUG_CTX2 );

		disable_interrupts();

		thrd->self_cor.state = Halted;

		verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );

		// Leaving a recursion level, decrement the counter
		this->recursion -= 1;

		// If we haven't left the last level of recursion
		// it must mean there is an error
		if( this->recursion != 0) { abortf("Thread internal monitor has unbalanced recursion"); }

		// Fetch the next thread, can be null
		thread_desc * new_owner = next_thread( this );

		// Leave the thread, this will unlock the spinlock
		// Use LeaveThread instead of BlockInternal, which is
		// specialized for this case and supports null new_owner
		LeaveThread( &this->lock, new_owner );

		// Control flow should never reach here!
	}
}

// Enter multiple monitor
// relies on the monitor array being sorted
static inline void enter( __monitor_group_t monitors ) {
	for(int i = 0; i < monitors.size; i++) {
		__enter_monitor_desc( monitors.list[i], monitors );
	}
}

// Leave multiple monitor
// relies on the monitor array being sorted
static inline void leave(monitor_desc ** monitors, int count) {
	for(int i = count - 1; i >= 0; i--) {
		__leave_monitor_desc( monitors[i] );
	}
}

// Ctor for monitor guard
// Sorts monitors before entering
void ?{}( monitor_guard_t & this, monitor_desc ** m, int count, void (*func)() ) {
	// Store current array
	this.m = m;
	this.count = count;

	// Sort monitors based on address -> TODO use a sort specialized for small numbers
	qsort(this.m, count);

	// Save previous thread context
	this.prev_mntrs = this_thread->monitors.list;
	this.prev_count = this_thread->monitors.size;
	this.prev_func  = this_thread->monitors.func;

	// Update thread context (needed for conditions)
	this_thread->monitors.list = m;
	this_thread->monitors.size = count;
	this_thread->monitors.func = func;

	// Enter the monitors in order
	__monitor_group_t group = {this.m, this.count, func};
	enter( group );
}


// Dtor for monitor guard
void ^?{}( monitor_guard_t & this ) {
	// Leave the monitors in order
	leave( this.m, this.count );

	// Restore thread context
	this_thread->monitors.list = this.prev_mntrs;
	this_thread->monitors.size = this.prev_count;
	this_thread->monitors.func = this.prev_func;
}
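
// Editorial sketch, not part of the original file: the compiler wraps the body of a
// mutex routine in a monitor_guard_t, so a hypothetical routine such as
//
//	void deposit( bank_account & mutex acc, int amount ) { ... }
//
// behaves roughly as if written
//
//	void deposit( bank_account & acc, int amount ) {
//		monitor_desc * mons[] = { /* monitor embedded in acc */ };
//		monitor_guard_t guard = { mons, 1, (void (*)())deposit };	// ctor sorts and enters
//		/* critical section */
//	}	// ^?{}(guard) leaves the monitors in reverse order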

//-----------------------------------------------------------------------------
// Internal scheduling types
void ?{}(__condition_node_t & this, thread_desc * waiting_thread, unsigned short count, uintptr_t user_info ) {
	this.waiting_thread = waiting_thread;
	this.count = count;
	this.next = NULL;
	this.user_info = user_info;
}

void ?{}(__condition_criterion_t & this ) {
	this.ready  = false;
	this.target = NULL;
	this.owner  = NULL;
	this.next   = NULL;
}

void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t * owner ) {
	this.ready  = false;
	this.target = target;
	this.owner  = owner;
	this.next   = NULL;
}

//-----------------------------------------------------------------------------
// Internal scheduling
void wait( condition * this, uintptr_t user_info = 0 ) {
	brand_condition( this );

	// Check that everything is as expected
	assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
	verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
	verifyf( this->monitor_count < 32u, "Excessive monitor count (%i)", this->monitor_count );

	// Create storage for monitor context
	monitor_ctx( this->monitors, this->monitor_count );

	// Create the node specific to this wait operation
	wait_ctx( this_thread, user_info );

	// Append the current wait operation to the ones already queued on the condition
	// We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
	append( &this->blocked, &waiter );

	// Lock all monitors (aggregates the locks as well)
	lock_all( monitors, locks, count );

	// Find the next thread(s) to run
	short thread_count = 0;
	thread_desc * threads[ count ];
	for(int i = 0; i < count; i++) {
		threads[i] = 0;
	}

	// Remove any duplicate threads
	for( int i = 0; i < count; i++) {
		thread_desc * new_owner = next_thread( monitors[i] );
		insert_unique( threads, thread_count, new_owner );
	}

	blockAndWake( threads, thread_count );
}
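
// Editorial sketch, not part of the original file: wait must be called from inside
// monitor mutual exclusion, e.g. a hypothetical bounded buffer
//
//	condition not_empty;
//	int remove( buffer & mutex b ) {
//		if( b.count == 0 ) wait( &not_empty );	// atomically blocks and releases all of b's monitors,
//		...					// which are re-acquired before wait returns
//	}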

bool signal( condition * this ) {
	if( is_empty( this ) ) { return false; }

	// Check that everything is as expected
	verify( this->monitors );
	verify( this->monitor_count != 0 );

	// Some more checking in debug
	LIB_DEBUG_DO(
		thread_desc * this_thrd = this_thread;
		if ( this->monitor_count != this_thrd->monitors.size ) {
			abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->monitors.size );
		}

		for(int i = 0; i < this->monitor_count; i++) {
			if ( this->monitors[i] != this_thrd->monitors.list[i] ) {
				abortf( "Signal on condition %p made with different monitor, expected %p got %p", this, this->monitors[i], this_thrd->monitors.list[i] );
			}
		}
	);

	unsigned short count = this->monitor_count;

	// Lock all monitors
	lock_all( this->monitors, NULL, count );

	// Pop the head of the waiting queue
	__condition_node_t * node = pop_head( &this->blocked );

	// Add the thread to the proper AS stack
	for(int i = 0; i < count; i++) {
		__condition_criterion_t * crit = &node->criteria[i];
		assert( !crit->ready );
		push( &crit->target->signal_stack, crit );
	}

	// Release
	unlock_all( this->monitors, count );

	return true;
}
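
// Editorial note: signal only pushes the waiter's criteria onto the AS-stacks; the
// signalled thread runs once the signaller leaves the monitors (baton pass), unlike
// signal_block below, which blocks the signaller immediately. Hypothetical sketch:
//
//	void insert( buffer & mutex b, int elem ) {
//		...
//		signal( &not_empty );	// waiter resumes only after insert returns
//	}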

bool signal_block( condition * this ) {
	if( !this->blocked.head ) { return false; }

	// Check that everything is as expected
	verifyf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
	verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );

	// Create storage for monitor context
	monitor_ctx( this->monitors, this->monitor_count );

	// Lock all monitors (aggregates the locks as well)
	lock_all( monitors, locks, count );

	// Create the node specific to this wait operation
	wait_ctx_primed( this_thread, 0 )

	// Save contexts
	monitor_save;

	// Find the thread to run
	thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
	set_owner( monitors, count, signallee );

	// Everything is ready to go to sleep
	BlockInternal( locks, count, &signallee, 1 );

	// WE WOKE UP

	// We are back, restore the masks and recursions
	monitor_restore;

	return true;
}

// Access the user_info of the thread waiting at the front of the queue
uintptr_t front( condition * this ) {
	verifyf( !is_empty(this),
		"Attempt to access user data on an empty condition.\n"
		"Possible cause is not checking if the condition is empty before reading stored data."
	);
	return this->blocked.head->user_info;
}

//-----------------------------------------------------------------------------
// External scheduling
// cases to handle :
//   - target already there :
//       block and wake
//   - dtor already there :
//       put thread on signaller stack
//   - non-blocking :
//       return immediately
//   - timeout :
//       return on timeout
//   - block :
//       setup mask
//       block
void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {
	// This statement doesn't have a contiguous list of monitors...
	// Create one!
	short max = count_max( mask );
	monitor_desc * mon_storage[max];
	short actual_count = aggregate( mon_storage, mask );

	// Create storage for monitor context
	monitor_ctx( mon_storage, actual_count );

	// Lock all monitors (aggregates the locks as well)
	lock_all( monitors, locks, count );

	{
		// Check the entry queue for a thread that already matches one of the accepted clauses
		thread_desc * next; int index;
		[next, index] = search_entry_queue( mask, monitors, count );

		if( next ) {
			if( mask.clauses[index].is_dtor ) {
				#warning case not implemented
			}
			else {
				blockAndWake( &next, 1 );
			}

			return;
		}
	}

	// Non-blocking waitfor with no match, return immediately
	if( duration == 0 ) return;

	verifyf( duration < 0, "Timeout on waitfor statements not supported yet.");

	monitor_save;
	set_mask( monitors, count, mask );

	BlockInternal( locks, count );  // Everything is ready to go to sleep
	// WE WOKE UP
	monitor_restore;                // We are back, restore the masks and recursions
}
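
// Editorial sketch, not part of the original file: __waitfor_internal is the runtime
// entry point for the waitfor statement; a hypothetical use such as
//
//	void remove( buffer & mutex b ) {
//		waitfor( insert, b );
//	}
//
// compiles down to building a __waitfor_mask_t with a single clause for insert over
// b's monitor and calling __waitfor_internal( mask, -1 ) to block with no timeout.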

//-----------------------------------------------------------------------------
// Utilities

static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
	// Pass the monitor appropriately
	this->owner = owner;

	// We are passing the monitor to someone else, which means the recursion level is 1, not 0
	this->recursion = owner ? 1 : 0;
}

static inline void set_owner( monitor_desc ** monitors, short count, thread_desc * owner ) {
	for( int i = 0; i < count; i++ ) {
		set_owner( monitors[i], owner );
	}
}

static inline void set_mask( monitor_desc ** storage, short count, const __waitfor_mask_t & mask ) {
	for(int i = 0; i < count; i++) {
		storage[i]->mask = mask;
	}
}

static inline thread_desc * next_thread( monitor_desc * this ) {
	// Check the signaller stack
	__condition_criterion_t * urgent = pop( &this->signal_stack );
	if( urgent ) {
		// The signaller stack is not empty;
		// regardless of whether we are ready to baton pass,
		// we need to set the monitor as in use
		set_owner( this, urgent->owner->waiting_thread );

		return check_condition( urgent );
	}

	// No signaller thread
	// Get the next thread in the entry_queue
	thread_desc * new_owner = pop_head( &this->entry_queue );
	set_owner( this, new_owner );

	return new_owner;
}

static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
	__acceptable_t * it = this->mask.clauses; // Optim
	int count = this->mask.size;

	// Check if there are any acceptable functions
	if( !it ) return false;

	// If this isn't the first monitor to test this, there is no reason to repeat the test.
	if( this != group[0] ) return group[0]->mask.accepted >= 0;

	// For all acceptable functions check if this is the current function.
	for( short i = 0; i < count; i++, it++ ) {
		if( *it == group ) {
			*this->mask.accepted = i;
			return true;
		}
	}

	// No function matched
	return false;
}

static inline void init( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
	for(int i = 0; i < count; i++) {
		(criteria[i]){ monitors[i], waiter };
	}

	waiter->criteria = criteria;
}

static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
	for(int i = 0; i < count; i++) {
		(criteria[i]){ monitors[i], waiter };
		push( &criteria[i].target->signal_stack, &criteria[i] );
	}

	waiter->criteria = criteria;
}

static inline void lock_all( spinlock ** locks, unsigned short count ) {
	for( int i = 0; i < count; i++ ) {
		lock_yield( locks[i] DEBUG_CTX2 );
	}
}

static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count ) {
	for( int i = 0; i < count; i++ ) {
		spinlock * l = &source[i]->lock;
		lock_yield( l DEBUG_CTX2 );
		if(locks) locks[i] = l;
	}
}

static inline void unlock_all( spinlock ** locks, unsigned short count ) {
	for( int i = 0; i < count; i++ ) {
		unlock( locks[i] );
	}
}

static inline void unlock_all( monitor_desc ** locks, unsigned short count ) {
	for( int i = 0; i < count; i++ ) {
		unlock( &locks[i]->lock );
	}
}

static inline void save( monitor_desc ** ctx, short count, __attribute((unused)) spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
	for( int i = 0; i < count; i++ ) {
		recursions[i] = ctx[i]->recursion;
		masks     [i] = ctx[i]->mask;
	}
}

static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*in */ recursions, __waitfor_mask_t * /*in */ masks ) {
	lock_all( locks, count );
	for( int i = 0; i < count; i++ ) {
		ctx[i]->recursion = recursions[i];
		ctx[i]->mask      = masks[i];
	}
	unlock_all( locks, count );
}

// This function has two different behaviors:
// 1 - Marks a monitor as ready to run
// 2 - Checks if all the monitors are ready to run
//     if so, returns the thread to run
static inline thread_desc * check_condition( __condition_criterion_t * target ) {
	__condition_node_t * node = target->owner;
	unsigned short count = node->count;
	__condition_criterion_t * criteria = node->criteria;

	bool ready2run = true;

	for( int i = 0; i < count; i++ ) {

		// LIB_DEBUG_PRINT_SAFE( "Checking %p for %p\n", &criteria[i], target );
		if( &criteria[i] == target ) {
			criteria[i].ready = true;
			// LIB_DEBUG_PRINT_SAFE( "True\n" );
		}

		ready2run = criteria[i].ready && ready2run;
	}

	// LIB_DEBUG_PRINT_SAFE( "Running %i\n", ready2run );
	return ready2run ? node->waiting_thread : NULL;
}

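// Editorial example: for a wait over two monitors A and B, the waiter node owns two
// criteria and check_condition returns the thread only once both are marked ready:
//
//	check_condition( &criteria[0] );	// marks A's criterion ready, returns NULL
//	check_condition( &criteria[1] );	// marks B's criterion ready, returns node->waiting_thread
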
static inline void brand_condition( condition * this ) {
	thread_desc * thrd = this_thread;
	if( !this->monitors ) {
		// LIB_DEBUG_PRINT_SAFE("Branding\n");
		assertf( thrd->monitors.list != NULL, "No current monitor to brand condition %p", thrd->monitors.list );
		this->monitor_count = thrd->monitors.size;

		this->monitors = malloc( this->monitor_count * sizeof( *this->monitors ) );
		for( int i = 0; i < this->monitor_count; i++ ) {
			this->monitors[i] = thrd->monitors.list[i];
		}
	}
}

static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc ** monitors, int count ) {

	__thread_queue_t * entry_queue = &monitors[0]->entry_queue;

	// For each thread in the entry-queue
	for(	thread_desc ** thrd_it = &entry_queue->head;
		*thrd_it;
		thrd_it = &(*thrd_it)->next)
	{
		// For each acceptable clause, check if it matches
		int i = 0;
		__acceptable_t * end = mask.clauses + mask.size;
		for( __acceptable_t * it = mask.clauses; it != end; it++, i++ ) {
			// Check if we have a match
			if( *it == (*thrd_it)->monitors ) {

				// If we have a match return it
				// after removing it from the entry queue
				return [remove( entry_queue, thrd_it ), i];
			}
		}
	}

	return [0, -1];
}

forall(dtype T | sized( T ))
static inline short insert_unique( T ** array, short & size, T * val ) {
	if( !val ) return size;

	for(int i = 0; i < size; i++) {
		if( array[i] == val ) return size;
	}

	array[size] = val;
	size = size + 1;
	return size;
}

static inline short count_max( const __waitfor_mask_t & mask ) {
	short max = 0;
	for( int i = 0; i < mask.size; i++ ) {
		max += mask.clauses[i].size;
	}
	return max;
}

static inline short aggregate( monitor_desc ** storage, const __waitfor_mask_t & mask ) {
	short size = 0;
	for( int i = 0; i < mask.size; i++ ) {
		for( int j = 0; j < mask.clauses[i].size; j++) {
			insert_unique( storage, size, mask.clauses[i].list[j] );
		}
	}
	qsort( storage, size );
	return size;
}

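// Editorial example: for a mask with clauses {A,B} and {B,C}, count_max returns 4
// (the worst case), while aggregate stores the sorted unique set {A,B,C} and returns 3.
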
void ?{}( __condition_blocked_queue_t & this ) {
	this.head = NULL;
	this.tail = &this.head;
}

void append( __condition_blocked_queue_t * this, __condition_node_t * c ) {
	verify(this->tail != NULL);
	*this->tail = c;
	this->tail = &c->next;
}

__condition_node_t * pop_head( __condition_blocked_queue_t * this ) {
	__condition_node_t * head = this->head;
	if( head ) {
		this->head = head->next;
		if( !head->next ) {
			this->tail = &this->head;
		}
		head->next = NULL;
	}
	return head;
}

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //