source: src/libcfa/concurrency/monitor.c @ 6ff4507

Last change on this file since 6ff4507 was 6ff4507, checked in by Thierry Delisle <tdelisle@…>, 8 years ago

Disable Werror since new warnings appeared
Aesthetic refactoring in monitor.c
Monitors are now properly aggregated in waitfor
Monitor masks are now always saved and restored (TODO: check if this is too much)
Insert_unique is now generic

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// monitor_desc.c --
//
// Author           : Thierry Delisle
// Created On       : Thu Feb 23 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Mon Jul 31 14:59:05 2017
// Update Count     : 3
//

#include "monitor"

#include <stdlib>

#include "libhdr.h"
#include "kernel_private.h"
22
23//-----------------------------------------------------------------------------
24// Forward declarations
25static inline void set_owner( monitor_desc * this, thread_desc * owner );
26static inline void set_owner( monitor_desc ** storage, short count, thread_desc * owner );
27static inline void set_mask ( monitor_desc ** storage, short count, const __waitfor_mask_t & mask );
28
29static inline thread_desc * next_thread( monitor_desc * this );
30static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );
31
32static inline void lock_all( spinlock ** locks, unsigned short count );
33static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count );
34static inline void unlock_all( spinlock ** locks, unsigned short count );
35static inline void unlock_all( monitor_desc ** locks, unsigned short count );
36
37static inline void save   ( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks );
38static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*in */ recursions, __waitfor_mask_t * /*in */ masks );
39
40static inline void init     ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
41static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
42
43static inline thread_desc *        check_condition   ( __condition_criterion_t * );
44static inline void                 brand_condition   ( condition * );
45static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc ** monitors, int count );
46
47forall(dtype T | sized( T ))
48static inline short insert_unique( T ** array, short & size, T * val );
49static inline short count_max    ( const __waitfor_mask_t & mask );
50static inline short aggregate    ( monitor_desc ** storage, const __waitfor_mask_t & mask );
51
//-----------------------------------------------------------------------------
// Useful defines
#define wait_ctx(thrd, user_info)                               /* Create the necessary information to use the signaller stack                         */ \
        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
        __condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up                            */ \
        init( count, monitors, &waiter, criteria );               /* Link everything together                                                            */ \

#define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack                         */ \
        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
        __condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up                            */ \
        init_push( count, monitors, &waiter, criteria );          /* Link everything together and push it to the AS-Stack                                */ \

#define monitor_ctx( mons, cnt )                                /* Macro that creates the necessary structs for internal/external scheduling operations */ \
        monitor_desc ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
        unsigned short count = cnt;                               /* Save the count to a local variable                                                  */ \
        unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
        __waitfor_mask_t masks[ count ];                          /* Save the current waitfor masks to restore them later                                */ \
        spinlock *   locks     [ count ];                         /* We need to pass-in an array of locks to BlockInternal                               */ \

#define monitor_save    save   ( monitors, count, locks, recursions, masks )
#define monitor_restore restore( monitors, count, locks, recursions, masks )

#define blockAndWake( thrd, cnt )                               /* Save monitor states, block, then restore the states upon wake-up                    */ \
        monitor_save;                                             /* Save monitor states                                                                 */ \
        BlockInternal( locks, count, thrd, cnt );                 /* Everything is ready to go to sleep                                                  */ \
        monitor_restore;                                          /* We are back, restore the owners and recursions                                      */ \

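// Illustrative composition of these macros in wait() below:
//     monitor_ctx( this->monitors, this->monitor_count );  // declares monitors, count, recursions, masks and locks
//     wait_ctx( this_thread, user_info );                  // declares waiter and criteria, links them to the monitors
//     lock_all( monitors, locks, count );                  // acquire the spinlocks named by monitor_ctx
//     blockAndWake( threads, thread_count );               // save state, block, restore state on wake-up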

//-----------------------------------------------------------------------------
// Enter/Leave routines


extern "C" {
        // Enter single monitor
        static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
                // Lock the monitor spinlock, lock_yield to reduce contention
                lock_yield( &this->lock DEBUG_CTX2 );
                thread_desc * thrd = this_thread;

                LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);

                if( !this->owner ) {
                        // No one has the monitor, just take it
                        set_owner( this, thrd );

                        LIB_DEBUG_PRINT_SAFE("Kernel :  mon is free \n");
                }
                else if( this->owner == thrd) {
                        // We already have the monitor, just note how many times we took it
                        verify( this->recursion > 0 );
                        this->recursion += 1;

                        LIB_DEBUG_PRINT_SAFE("Kernel :  mon already owned \n");
                }
                else if( is_accepted( this, group) ) {
                        // Someone was waiting for us, enter
                        set_owner( this, thrd );

                        LIB_DEBUG_PRINT_SAFE("Kernel :  mon accepts \n");
                }
                else {
                        LIB_DEBUG_PRINT_SAFE("Kernel :  blocking \n");

                        // Someone else has the monitor, wait in line for it
                        append( &this->entry_queue, thrd );
                        BlockInternal( &this->lock );

                        LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entered  mon %p\n", thrd, this);

                        // BlockInternal will unlock spinlock, no need to unlock ourselves
                        return;
                }

                LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entered  mon %p\n", thrd, this);

                // Release the lock and leave
                unlock( &this->lock );
                return;
        }

        // Leave single monitor
        void __leave_monitor_desc( monitor_desc * this ) {
                // Lock the monitor spinlock, lock_yield to reduce contention
                lock_yield( &this->lock DEBUG_CTX2 );

                verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion );

                // Leaving a recursion level, decrement the counter
                this->recursion -= 1;

                // If we haven't left the last level of recursion
                // it means we don't need to do anything
                if( this->recursion != 0) {
                        unlock( &this->lock );
                        return;
                }

                // Get the next thread; it is null when no thread is waiting (low contention)
                thread_desc * new_owner = next_thread( this );

                // We can now let other threads in safely
                unlock( &this->lock );

                // Wake up the new owner thread, if any
                WakeThread( new_owner );
        }
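
        // Note: next_thread() transfers ownership (via set_owner) while the spinlock
        // is still held, so between unlock() and WakeThread() the monitor is never
        // observable as free; late arrivals simply queue behind the designated owner.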

        // Leave the thread monitor
        // last routine called by a thread.
        // Should never return
        void __leave_thread_monitor( thread_desc * thrd ) {
                monitor_desc * this = &thrd->self_mon;

                // Lock the monitor now
                lock_yield( &this->lock DEBUG_CTX2 );

                disable_interrupts();

                thrd->self_cor.state = Halted;

                verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );

                // Leaving a recursion level, decrement the counter
                this->recursion -= 1;

                // If we haven't left the last level of recursion
                // it must mean there is an error
                if( this->recursion != 0) { abortf("Thread internal monitor has unbalanced recursion"); }

                // Fetch the next thread, can be null
                thread_desc * new_owner = next_thread( this );

                // Leave the thread, this will unlock the spinlock
                // Use LeaveThread instead of BlockInternal, which is
                // specialized for this case and supports null new_owner
                LeaveThread( &this->lock, new_owner );

                // Control flow should never reach here!
        }
}

// Enter multiple monitors
// relies on the monitor array being sorted
static inline void enter( __monitor_group_t monitors ) {
        for(int i = 0; i < monitors.size; i++) {
                __enter_monitor_desc( monitors.list[i], monitors );
        }
}

// Leave multiple monitors
// relies on the monitor array being sorted
static inline void leave(monitor_desc ** monitors, int count) {
        for(int i = count - 1; i >= 0; i--) {
                __leave_monitor_desc( monitors[i] );
        }
}

// Ctor for monitor guard
// Sorts monitors before entering
void ?{}( monitor_guard_t & this, monitor_desc ** m, int count, void (*func)() ) {
        // Store current array
        this.m = m;
        this.count = count;

        // Sort monitors based on address -> TODO use a sort specialized for small numbers
        qsort(this.m, count);

        // Save previous thread context
        this.prev_mntrs = this_thread->monitors.list;
        this.prev_count = this_thread->monitors.size;
        this.prev_func  = this_thread->monitors.func;

        // Update thread context (needed for conditions)
        this_thread->monitors.list = m;
        this_thread->monitors.size = count;
        this_thread->monitors.func = func;

        // Enter the monitors in order
        __monitor_group_t group = {this.m, this.count, func};
        enter( group );
}


// Dtor for monitor guard
void ^?{}( monitor_guard_t & this ) {
        // Leave the monitors in order
        leave( this.m, this.count );

        // Restore thread context
        this_thread->monitors.list = this.prev_mntrs;
        this_thread->monitors.size = this.prev_count;
        this_thread->monitors.func = this.prev_func;
}
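
// Usage sketch (hypothetical user code, not part of this file): the compiler
// brackets the body of a mutex routine such as `void f( M & mutex m )` with a
// guard, roughly:
//     monitor_desc * mons[] = { get_monitor( m ) };     // get_monitor: name assumed
//     monitor_guard_t guard = { mons, 1, (void(*)())f };
//     ... body of f ...                                 // dtor releases the monitors on scope exit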

//-----------------------------------------------------------------------------
// Internal scheduling types
void ?{}(__condition_node_t & this, thread_desc * waiting_thread, unsigned short count, uintptr_t user_info ) {
        this.waiting_thread = waiting_thread;
        this.count = count;
        this.next = NULL;
        this.user_info = user_info;
}

void ?{}(__condition_criterion_t & this ) {
        this.ready  = false;
        this.target = NULL;
        this.owner  = NULL;
        this.next   = NULL;
}

void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t * owner ) {
        this.ready  = false;
        this.target = target;
        this.owner  = owner;
        this.next   = NULL;
}

//-----------------------------------------------------------------------------
// Internal scheduling
void wait( condition * this, uintptr_t user_info = 0 ) {
        brand_condition( this );

        // Check that everything is as expected
        assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
        verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
        verifyf( this->monitor_count < 32u, "Excessive monitor count (%i)", this->monitor_count );

        // Create storage for monitor context
        monitor_ctx( this->monitors, this->monitor_count );

        // Create the node specific to this wait operation
        wait_ctx( this_thread, user_info );

        // Append the current wait operation to the ones already queued on the condition
        // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
        append( &this->blocked, &waiter );

        // Lock all monitors (and gather the underlying spinlocks)
        lock_all( monitors, locks, count );

        // Find the next thread(s) to run
        short thread_count = 0;
        thread_desc * threads[ count ];
        for(int i = 0; i < count; i++) {
                threads[i] = 0;
        }

        // Remove any duplicate threads
        for( int i = 0; i < count; i++) {
                thread_desc * new_owner = next_thread( monitors[i] );
                insert_unique( threads, thread_count, new_owner );
        }

        blockAndWake( threads, thread_count );
}
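
// Note on the deduplication above: next_thread() is called once per monitor, so
// the same thread could be designated by more than one of them; insert_unique()
// ensures each thread is passed to blockAndWake at most once.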

bool signal( condition * this ) {
        if( is_empty( this ) ) { return false; }

        // Check that everything is as expected
        verify( this->monitors );
        verify( this->monitor_count != 0 );

        // Some more checking in debug
        LIB_DEBUG_DO(
                thread_desc * this_thrd = this_thread;
                if ( this->monitor_count != this_thrd->monitors.size ) {
                        abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->monitors.size );
                }

                for(int i = 0; i < this->monitor_count; i++) {
                        if ( this->monitors[i] != this_thrd->monitors.list[i] ) {
                                abortf( "Signal on condition %p made with different monitor, expected %p got %p", this, this->monitors[i], this_thrd->monitors.list[i] );
                        }
                }
        );

        unsigned short count = this->monitor_count;

        // Lock all monitors
        lock_all( this->monitors, NULL, count );

        // Pop the head of the waiting queue
        __condition_node_t * node = pop_head( &this->blocked );

        // Add the criteria to the proper AS stacks
        for(int i = 0; i < count; i++) {
                __condition_criterion_t * crit = &node->criteria[i];
                assert( !crit->ready );
                push( &crit->target->signal_stack, crit );
        }

        // Release
        unlock_all( this->monitors, count );

        return true;
}
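
// Note: signal() only pushes the waiter's criteria onto each monitor's signal
// stack; the signalled thread actually runs once the signaller releases the
// monitors and next_thread() finds the criteria ready. signal_block() below
// instead hands the monitors over immediately and blocks the signaller.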

bool signal_block( condition * this ) {
        if( !this->blocked.head ) { return false; }

        // Check that everything is as expected
        verifyf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
        verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );

        // Create storage for monitor context
        monitor_ctx( this->monitors, this->monitor_count );

        // Lock all monitors (and gather the underlying spinlocks)
        lock_all( monitors, locks, count );

        // Create the node specific to this wait operation
        wait_ctx_primed( this_thread, 0 )

        // Save contexts
        monitor_save;

        // Find the thread to run
        thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
        set_owner( monitors, count, signallee );

        // Everything is ready to go to sleep
        BlockInternal( locks, count, &signallee, 1 );


        // WE WOKE UP


        // We are back, restore the masks and recursions
        monitor_restore;

        return true;
}

// Access the user_info of the thread waiting at the front of the queue
uintptr_t front( condition * this ) {
        verifyf( !is_empty(this),
                "Attempt to access user data on an empty condition.\n"
                "Possible cause is not checking if the condition is empty before reading stored data."
        );
        return this->blocked.head->user_info;
}
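
// Typical guarded use (illustrative):
//     if( !is_empty( &c ) ) {
//             uintptr_t info = front( &c );
//             ...
//     }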

//-----------------------------------------------------------------------------
// External scheduling
// cases to handle :
//      - target already there :
//              block and wake
//      - dtor already there
//              put thread on signaller stack
//      - non-blocking
//              return immediately
//      - timeout
//              return on timeout
//      - block
//              setup mask
//              block
void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {
        // This statement doesn't have a contiguous list of monitors...
        // Create one!
        short max = count_max( mask );
        monitor_desc * mon_storage[max];
        short actual_count = aggregate( mon_storage, mask );

        // Create storage for monitor context
        monitor_ctx( mon_storage, actual_count );

        // Lock all monitors (and gather the underlying spinlocks)
        lock_all( monitors, locks, count );

        {
                // Check if an accepted thread is already waiting in an entry queue
                thread_desc * next; int index;
                [next, index] = search_entry_queue( mask, monitors, count );

                if( next ) {
                        *mask.accepted = index;
                        if( mask.clauses[index].is_dtor ) {
                                #warning case not implemented
                        }
                        else {
                                blockAndWake( &next, 1 );
                        }

                        return;
                }
        }


        if( duration == 0 ) {
                // Non-blocking, release the monitors and return
                unlock_all( locks, count );
                return;
        }


        verifyf( duration < 0, "Timeout on waitfor statements not supported yet.");


        monitor_save;
        set_mask( monitors, count, mask );

        BlockInternal( locks, count );       // Everything is ready to go to sleep
        // WE WOKE UP
        monitor_restore;                     // We are back, restore the masks and recursions
}

//-----------------------------------------------------------------------------
// Utilities

static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
        // Pass the monitor to the new owner
        this->owner = owner;

        // Handing the monitor over (re)starts it at recursion level 1; clearing the owner resets it to 0
        this->recursion = owner ? 1 : 0;
}

static inline void set_owner( monitor_desc ** monitors, short count, thread_desc * owner ) {
        for( int i = 0; i < count; i++ ) {
                set_owner( monitors[i], owner );
        }
}

static inline void set_mask( monitor_desc ** storage, short count, const __waitfor_mask_t & mask ) {
        for(int i = 0; i < count; i++) {
                storage[i]->mask = mask;
        }
}

static inline thread_desc * next_thread( monitor_desc * this ) {
        // Check the signaller stack
        __condition_criterion_t * urgent = pop( &this->signal_stack );
        if( urgent ) {
                // The signaller stack is not empty,
                // regardless of if we are ready to baton pass,
                // we need to set the monitor as in use
                set_owner( this, urgent->owner->waiting_thread );

                return check_condition( urgent );
        }

        // No signaller thread
        // Get the next thread in the entry_queue
        thread_desc * new_owner = pop_head( &this->entry_queue );
        set_owner( this, new_owner );

        return new_owner;
}

static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
        __acceptable_t * it = this->mask.clauses; // Optim
        int count = this->mask.size;

        // Check if there are any acceptable functions
        if( !it ) return false;

        // If this isn't the first monitor to test this, there is no reason to repeat the test.
        if( this != group[0] ) return *group[0]->mask.accepted >= 0;

        // For all acceptable functions check if this is the current function.
        for( short i = 0; i < count; i++, it++ ) {
                if( *it == group ) {
                        *this->mask.accepted = i;
                        return true;
                }
        }

        // No function matched
        return false;
}

static inline void init( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
        for(int i = 0; i < count; i++) {
                (criteria[i]){ monitors[i], waiter };
        }

        waiter->criteria = criteria;
}

static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
        for(int i = 0; i < count; i++) {
                (criteria[i]){ monitors[i], waiter };
                push( &criteria[i].target->signal_stack, &criteria[i] );
        }

        waiter->criteria = criteria;
}

static inline void lock_all( spinlock ** locks, unsigned short count ) {
        for( int i = 0; i < count; i++ ) {
                lock_yield( locks[i] DEBUG_CTX2 );
        }
}

static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count ) {
        for( int i = 0; i < count; i++ ) {
                spinlock * l = &source[i]->lock;
                lock_yield( l DEBUG_CTX2 );
                if(locks) locks[i] = l;
        }
}

static inline void unlock_all( spinlock ** locks, unsigned short count ) {
        for( int i = 0; i < count; i++ ) {
                unlock( locks[i] );
        }
}

static inline void unlock_all( monitor_desc ** locks, unsigned short count ) {
        for( int i = 0; i < count; i++ ) {
                unlock( &locks[i]->lock );
        }
}

static inline void save   ( monitor_desc ** ctx, short count, __attribute((unused)) spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
        for( int i = 0; i < count; i++ ) {
                recursions[i] = ctx[i]->recursion;
                masks[i]      = ctx[i]->mask;
        }
}

static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*in */ recursions, __waitfor_mask_t * /*in */ masks ) {
        lock_all( locks, count );
        for( int i = 0; i < count; i++ ) {
                ctx[i]->recursion = recursions[i];
                ctx[i]->mask      = masks[i];
        }
        unlock_all( locks, count );
}
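
// Note: restore() re-acquires the spinlocks because the woken thread owns the
// monitors but not their locks; writing the recursion levels and masks under
// the lock keeps concurrent readers such as __enter_monitor_desc consistent.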

// This function has two behaviours:
// 1 - Marks the target criterion as ready
// 2 - Checks if all the criteria of its owner node are ready;
//     if so, returns the thread to wake, otherwise NULL
static inline thread_desc * check_condition( __condition_criterion_t * target ) {
        __condition_node_t * node = target->owner;
        unsigned short count = node->count;
        __condition_criterion_t * criteria = node->criteria;

        bool ready2run = true;

        for( int i = 0; i < count; i++ ) {

                // LIB_DEBUG_PRINT_SAFE( "Checking %p for %p\n", &criteria[i], target );
                if( &criteria[i] == target ) {
                        criteria[i].ready = true;
                        // LIB_DEBUG_PRINT_SAFE( "True\n" );
                }

                ready2run = criteria[i].ready && ready2run;
        }

        // LIB_DEBUG_PRINT_SAFE( "Running %i\n", ready2run );
        return ready2run ? node->waiting_thread : NULL;
}
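
// Example: a wait() on two monitors creates one node with two criteria; signal()
// pushes one criterion onto each monitor's signal stack, and the waiting thread
// is only returned (and woken) once check_condition has marked both ready.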

static inline void brand_condition( condition * this ) {
        thread_desc * thrd = this_thread;
        if( !this->monitors ) {
                // LIB_DEBUG_PRINT_SAFE("Branding\n");
                assertf( thrd->monitors.list != NULL, "No current monitor to brand condition %p", thrd->monitors.list );
                this->monitor_count = thrd->monitors.size;

                this->monitors = malloc( this->monitor_count * sizeof( *this->monitors ) );
                for( int i = 0; i < this->monitor_count; i++ ) {
                        this->monitors[i] = thrd->monitors.list[i];
                }
        }
}
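
// Note: a condition is branded with the monitor set of the first thread that
// waits on it; the debug checks in signal() verify that later operations are
// made with exactly the same set.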

static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc ** monitors, int count ) {

        __thread_queue_t * entry_queue = &monitors[0]->entry_queue;

        // For each thread in the entry-queue
        for(    thread_desc ** thrd_it = &entry_queue->head;
                *thrd_it;
                thrd_it = &(*thrd_it)->next)
        {
                // For each acceptable function, check if it matches
                int i = 0;
                __acceptable_t * end = mask.clauses + mask.size;
                for( __acceptable_t * it = mask.clauses; it != end; it++, i++ ) {
                        // Check if we have a match
                        if( *it == (*thrd_it)->monitors ) {

                                // If we have a match return it
                                // after removing it from the entry queue
                                return [remove( entry_queue, thrd_it ), i];
                        }
                }
        }

        return [0, -1];
}

forall(dtype T | sized( T ))
static inline short insert_unique( T ** array, short & size, T * val ) {
        if( !val ) return size;

        // Check if the value is already present
        for(int i = 0; i < size; i++) {
                if( array[i] == val ) return size;
        }

        array[size] = val;
        size = size + 1;
        return size;
}

static inline short count_max( const __waitfor_mask_t & mask ) {
        short max = 0;
        for( int i = 0; i < mask.size; i++ ) {
                max += mask.clauses[i].size;
        }
        return max;
}

static inline short aggregate( monitor_desc ** storage, const __waitfor_mask_t & mask ) {
        short size = 0;
        for( int i = 0; i < mask.size; i++ ) {
                for( int j = 0; j < mask.clauses[i].size; j++) {
                        insert_unique( storage, size, mask.clauses[i].list[j] );
                }
        }
        qsort( storage, size );
        return size;
}
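
// Example: waitfor clauses over {A,B} and {B,C} make count_max() reserve 4
// slots; aggregate() then deduplicates B and sorts, yielding {A,B,C} with a
// returned size of 3.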

void ?{}( __condition_blocked_queue_t & this ) {
        this.head = NULL;
        this.tail = &this.head;
}

void append( __condition_blocked_queue_t * this, __condition_node_t * c ) {
        verify(this->tail != NULL);
        *this->tail = c;
        this->tail = &c->next;
}

__condition_node_t * pop_head( __condition_blocked_queue_t * this ) {
        __condition_node_t * head = this->head;
        if( head ) {
                this->head = head->next;
                if( !head->next ) {
                        this->tail = &this->head;
                }
                head->next = NULL;
        }
        return head;
}

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //