source: src/libcfa/concurrency/monitor.c @ 6a5be52

Last change on this file was commit 6a5be52, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Modified waitfor to sort monitors

1//
2// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// monitor_desc.c --
8//
9// Author           : Thierry Delisle
10// Created On       : Thu Feb 23 12:27:26 2017
11// Last Modified By : Peter A. Buhr
12// Last Modified On : Mon Jul 31 14:59:05 2017
13// Update Count     : 3
14//
15
16#include "monitor"
17
18#include <stdlib>
19
20#include "libhdr.h"
21#include "kernel_private.h"
22
23//-----------------------------------------------------------------------------
24// Forward declarations
25static inline void set_owner ( monitor_desc * this, thread_desc * owner );
26static inline void set_owner ( monitor_desc ** storage, short count, thread_desc * owner );
27static inline void set_mask  ( monitor_desc ** storage, short count, const __waitfor_mask_t & mask );
28static inline void reset_mask( monitor_desc * this );
29
30static inline thread_desc * next_thread( monitor_desc * this );
31static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );
32
33static inline void lock_all( spinlock ** locks, unsigned short count );
34static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count );
35static inline void unlock_all( spinlock ** locks, unsigned short count );
36static inline void unlock_all( monitor_desc ** locks, unsigned short count );
37
38static inline void save   ( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks );
39static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*in */ recursions, __waitfor_mask_t * /*in */ masks );
40
41static inline void init     ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
42static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
43
44static inline thread_desc *        check_condition   ( __condition_criterion_t * );
45static inline void                 brand_condition   ( condition * );
46static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc ** monitors, int count );
47
48forall(dtype T | sized( T ))
49static inline short insert_unique( T ** array, short & size, T * val );
50static inline short count_max    ( const __waitfor_mask_t & mask );
51static inline short aggregate    ( monitor_desc ** storage, const __waitfor_mask_t & mask );
52
53//-----------------------------------------------------------------------------
54// Useful defines
55#define wait_ctx(thrd, user_info)                               /* Create the necessary information to use the signaller stack                         */ \
56        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
57        __condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up                           */ \
58        init( count, monitors, &waiter, criteria );               /* Link everything together                                                            */ \
59
60#define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack                         */ \
61        __condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
62        __condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up                           */ \
63        init_push( count, monitors, &waiter, criteria );          /* Link everything together and push it to the AS-Stack                                */ \
64
65#define monitor_ctx( mons, cnt )                                /* Defines the locals needed for internal/external scheduling operations               */ \
66        monitor_desc ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
67        unsigned short count = cnt;                               /* Save the count to a local variable                                                  */ \
68        unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
69        __waitfor_mask_t masks[ count ];                          /* Save the current waitfor masks to restore them later                                */ \
70        spinlock *   locks     [ count ];                         /* We need to pass-in an array of locks to BlockInternal                               */ \
71
72#define monitor_save    save   ( monitors, count, locks, recursions, masks )
73#define monitor_restore restore( monitors, count, locks, recursions, masks )
74
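// Note on the macros above: wait_ctx and wait_ctx_primed assume that 'monitors' and
// 'count' are already in scope (normally introduced by monitor_ctx), and
// monitor_save/monitor_restore use the locals declared by monitor_ctx. The three
// groups are therefore only meaningful when used together, in that order.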
75
76//-----------------------------------------------------------------------------
77// Enter/Leave routines
78
79
80extern "C" {
81        // Enter single monitor
82        static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
83                // Lock the monitor spinlock, lock_yield to reduce contention
84                lock_yield( &this->lock DEBUG_CTX2 );
85                thread_desc * thrd = this_thread;
86
87                LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
88
89                if( !this->owner ) {
90                        // No one has the monitor, just take it
91                        set_owner( this, thrd );
92
93                        LIB_DEBUG_PRINT_SAFE("Kernel :  mon is free \n");
94                }
95                else if( this->owner == thrd) {
96                        // We already have the monitor, just note how many times we took it
97                        this->recursion += 1;
98
99                        LIB_DEBUG_PRINT_SAFE("Kernel :  mon already owned \n");
100                }
101                else if( is_accepted( this, group) ) {
102                        // Someone was waiting for us, enter
103                        set_owner( this, thrd );
104
105                        // Reset mask
106                        reset_mask( this );
107
108                        LIB_DEBUG_PRINT_SAFE("Kernel :  mon accepts \n");
109                }
110                else {
111                        LIB_DEBUG_PRINT_SAFE("Kernel :  blocking \n");
112
113                        // Someone else has the monitor, wait in line for it
114                        append( &this->entry_queue, thrd );
115                        BlockInternal( &this->lock );
116
117                        LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entered  mon %p\n", thrd, this);
118
119                        // BlockInternal will unlock spinlock, no need to unlock ourselves
120                        return;
121                }
122
123                LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entered  mon %p\n", thrd, this);
124
125                // Release the lock and leave
126                unlock( &this->lock );
127                return;
128        }
129
130        static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) {
131                // Lock the monitor spinlock, lock_yield to reduce contention
132                lock_yield( &this->lock DEBUG_CTX2 );
133                thread_desc * thrd = this_thread;
134
135                LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
136
137
138                if( !this->owner ) {
139                        LIB_DEBUG_PRINT_SAFE("Kernel : Destroying free mon %p\n", this);
140
141                        // No one has the monitor, just take it
142                        set_owner( this, thrd );
143
144                        unlock( &this->lock );
145                        return;
146                }
147                else if( this->owner == thrd) {
148                        // We already have the monitor... but we're about to destroy it, so the nesting will fail
149                        // Abort!
150                        abortf("Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd);
151                }
152
153                int count = 1;
154                monitor_desc ** monitors = &this;
155                __monitor_group_t group = { &this, 1, func };
156                if( is_accepted( this, group) ) {
157                        LIB_DEBUG_PRINT_SAFE("Kernel :  mon accepts dtor, block and signal it \n");
158
159                        // Wake the thread that is waiting for this
160                        __condition_criterion_t * urgent = pop( &this->signal_stack );
161                        verify( urgent );
162
163                        // Reset mask
164                        reset_mask( this );
165
166                        // Create the node specific to this wait operation
167                        wait_ctx_primed( this_thread, 0 )
168
169                        // Someone else has the monitor, wait for it to finish and then run
170                        BlockInternal( &this->lock, urgent->owner->waiting_thread );
171
172                        // Someone was waiting for us, enter
173                        set_owner( this, thrd );
174                }
175                else {
176                        LIB_DEBUG_PRINT_SAFE("Kernel :  blocking \n");
177
178                        wait_ctx( this_thread, 0 )
179                        this->dtor_node = &waiter;
180
181                        // Someone else has the monitor, wait in line for it
182                        append( &this->entry_queue, thrd );
183                        BlockInternal( &this->lock );
184
185                        // BlockInternal will unlock spinlock, no need to unlock ourselves
186                        return;
187                }
188
189                LIB_DEBUG_PRINT_SAFE("Kernel : Destroying %p\n", this);
190
191        }
192
193        // Leave single monitor
194        void __leave_monitor_desc( monitor_desc * this ) {
195                // Lock the monitor spinlock, lock_yield to reduce contention
196                lock_yield( &this->lock DEBUG_CTX2 );
197
198                LIB_DEBUG_PRINT_SAFE("Kernel : %10p Leaving mon %p (%p)\n", this_thread, this, this->owner);
199
200                verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", this_thread, this->owner, this->recursion, this );
201
202                // Leaving a recursion level, decrement the counter
203                this->recursion -= 1;
204
205                // If we haven't left the last level of recursion
206                // it means we don't need to do anything
207                if( this->recursion != 0) {
208                        LIB_DEBUG_PRINT_SAFE("Kernel :  recursion still %d\n", this->recursion);
209                        unlock( &this->lock );
210                        return;
211                }
212
213                // Get the next thread; will be null if no thread is waiting (low contention)
214                thread_desc * new_owner = next_thread( this );
215
216                // We can now let other threads in safely
217                unlock( &this->lock );
218
219                //We need to wake-up the thread
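                // Note: new_owner may be NULL here (low contention case above);
                // WakeThread is therefore expected to tolerate a NULL argument.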
220                WakeThread( new_owner );
221        }
222
223        // Leave single monitor for the last time
224        void __leave_dtor_monitor_desc( monitor_desc * this ) {
225                LIB_DEBUG_DO(
226                        if( this_thread != this->owner ) {
227                                abortf("Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, this_thread, this->owner);
228                        }
229                        if( this->recursion != 1 ) {
230                                abortf("Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
231                        }
232                )
233        }
234
235        // Leave the thread monitor
236        // last routine called by a thread.
237        // Should never return
238        void __leave_thread_monitor( thread_desc * thrd ) {
239                monitor_desc * this = &thrd->self_mon;
240
241                // Lock the monitor now
242                lock_yield( &this->lock DEBUG_CTX2 );
243
244                disable_interrupts();
245
246                thrd->self_cor.state = Halted;
247
248                verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
249
250                // Leaving a recursion level, decrement the counter
251                this->recursion -= 1;
252
253                // If we haven't left the last level of recursion
254                // it must mean there is an error
255                if( this->recursion != 0) { abortf("Thread internal monitor has unbalanced recursion"); }
256
257                // Fetch the next thread, can be null
258                thread_desc * new_owner = next_thread( this );
259
260                // Leave the thread, this will unlock the spinlock
261                // Use LeaveThread instead of BlockInternal; it is
262                // specialized for this case and supports a null new_owner
263                LeaveThread( &this->lock, new_owner );
264
265                // Control flow should never reach here!
266        }
267}
268
269// Enter multiple monitor
270// relies on the monitor array being sorted: acquiring in a consistent global order avoids deadlock between concurrent bulk acquires
271static inline void enter( __monitor_group_t monitors ) {
272        for(int i = 0; i < monitors.size; i++) {
273                __enter_monitor_desc( monitors.list[i], monitors );
274        }
275}
276
277// Leave multiple monitor
278// relies on the monitor array being sorted
279static inline void leave(monitor_desc ** monitors, int count) {
280        for(int i = count - 1; i >= 0; i--) {
281                __leave_monitor_desc( monitors[i] );
282        }
283}
284
285// Ctor for monitor guard
286// Sorts monitors before entering
287void ?{}( monitor_guard_t & this, monitor_desc ** m, int count, fptr_t func ) {
288        // Store current array
289        this.m = m;
290        this.count = count;
291
292        // Sort monitors based on address -> TODO use a sort specialized for small numbers
293        qsort(this.m, count);
294
295        // Save previous thread context
296        this.prev_mntrs = this_thread->monitors.list;
297        this.prev_count = this_thread->monitors.size;
298        this.prev_func  = this_thread->monitors.func;
299
300        // Update thread context (needed for conditions)
301        this_thread->monitors.list = m;
302        this_thread->monitors.size = count;
303        this_thread->monitors.func = func;
304
305        // LIB_DEBUG_PRINT_SAFE("MGUARD : enter %d\n", count);
306
307        // Enter the monitors in order
308        __monitor_group_t group = {this.m, this.count, func};
309        enter( group );
310
311        // LIB_DEBUG_PRINT_SAFE("MGUARD : entered\n");
312}
313
314
315// Dtor for monitor guard
316void ^?{}( monitor_guard_t & this ) {
317        // LIB_DEBUG_PRINT_SAFE("MGUARD : leaving %d\n", this.count);
318
319        // Leave the monitors in order
320        leave( this.m, this.count );
321
322        // LIB_DEBUG_PRINT_SAFE("MGUARD : left\n");
323
324        // Restore thread context
325        this_thread->monitors.list = this.prev_mntrs;
326        this_thread->monitors.size = this.prev_count;
327        this_thread->monitors.func = this.prev_func;
328}
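// Illustrative sketch (not part of this file): a routine with mutex parameters is
// expected to be translated into something roughly like the following, where
// 'get_monitor' stands in for however the compiler obtains the monitor_desc of a
// mutex argument (hypothetical helper name used only for this sketch):
//
//    void f( M & mutex a, M & mutex b ) {                   // user code
//        monitor_desc * mons[] = { get_monitor(a), get_monitor(b) };
//        monitor_guard_t guard = { mons, 2, (fptr_t)f };    // sorts, then enters both
//        /* ... body ... */
//    }                                                      // guard dtor leaves in reverse order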
329
330
331// Ctor for dtor monitor guard
332// Takes a single monitor; no sorting needed
333void ?{}( monitor_dtor_guard_t & this, monitor_desc ** m, fptr_t func ) {
334        // Store current array
335        this.m = *m;
336
337        // Save previous thread context
338        this.prev_mntrs = this_thread->monitors.list;
339        this.prev_count = this_thread->monitors.size;
340        this.prev_func  = this_thread->monitors.func;
341
342        // Update thread context (needed for conditions)
343        this_thread->monitors.list = m;
344        this_thread->monitors.size = 1;
345        this_thread->monitors.func = func;
346
347        __enter_monitor_dtor( this.m, func );
348}
349
350
351// Dtor for dtor monitor guard
352void ^?{}( monitor_dtor_guard_t & this ) {
353        // Leave the monitor
354        __leave_dtor_monitor_desc( this.m );
355
356        // Restore thread context
357        this_thread->monitors.list = this.prev_mntrs;
358        this_thread->monitors.size = this.prev_count;
359        this_thread->monitors.func = this.prev_func;
360}
361
362//-----------------------------------------------------------------------------
363// Internal scheduling types
364void ?{}(__condition_node_t & this, thread_desc * waiting_thread, unsigned short count, uintptr_t user_info ) {
365        this.waiting_thread = waiting_thread;
366        this.count = count;
367        this.next = NULL;
368        this.user_info = user_info;
369}
370
371void ?{}(__condition_criterion_t & this ) {
372        this.ready  = false;
373        this.target = NULL;
374        this.owner  = NULL;
375        this.next   = NULL;
376}
377
378void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t * owner ) {
379        this.ready  = false;
380        this.target = target;
381        this.owner  = owner;
382        this.next   = NULL;
383}
384
385//-----------------------------------------------------------------------------
386// Internal scheduling
387void wait( condition * this, uintptr_t user_info = 0 ) {
388        brand_condition( this );
389
390        // Check that everything is as expected
391        assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
392        verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
393        verifyf( this->monitor_count < 32u, "Excessive monitor count (%i)", this->monitor_count );
394
395        // Create storage for monitor context
396        monitor_ctx( this->monitors, this->monitor_count );
397
398        // Create the node specific to this wait operation
399        wait_ctx( this_thread, user_info );
400
401        // Append the current wait operation to the ones already queued on the condition
402        // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
403        append( &this->blocked, &waiter );
404
405        // Lock all monitors (aggregates the locks as well)
406        lock_all( monitors, locks, count );
407
408        // Find the next thread(s) to run
409        short thread_count = 0;
410        thread_desc * threads[ count ];
411        __builtin_memset( threads, 0, sizeof( threads ) );
412
413        // Save monitor states
414        monitor_save;
415
416        // Remove any duplicate threads
417        for( int i = 0; i < count; i++) {
418                thread_desc * new_owner = next_thread( monitors[i] );
419                insert_unique( threads, thread_count, new_owner );
420        }
421
422        // Everything is ready to go to sleep
423        BlockInternal( locks, count, threads, thread_count );
424
425        // We are back, restore the owners and recursions
426        monitor_restore;
427}
428
429bool signal( condition * this ) {
430        if( is_empty( this ) ) { return false; }
431
432        //Check that everything is as expected
433        verify( this->monitors );
434        verify( this->monitor_count != 0 );
435
436        //Some more checking in debug
437        LIB_DEBUG_DO(
438                thread_desc * this_thrd = this_thread;
439                if ( this->monitor_count != this_thrd->monitors.size ) {
440                        abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->monitors.size );
441                }
442
443                for(int i = 0; i < this->monitor_count; i++) {
444                        if ( this->monitors[i] != this_thrd->monitors.list[i] ) {
445                                abortf( "Signal on condition %p made with different monitor, expected %p got %p", this, this->monitors[i], this_thrd->monitors.list[i] );
446                        }
447                }
448        );
449
450        unsigned short count = this->monitor_count;
451
452        // Lock all monitors
453        lock_all( this->monitors, NULL, count );
454
455        //Pop the head of the waiting queue
456        __condition_node_t * node = pop_head( &this->blocked );
457
458        //Add the condition criteria to each monitor's AS (signal) stack
459        for(int i = 0; i < count; i++) {
460                __condition_criterion_t * crit = &node->criteria[i];
461                assert( !crit->ready );
462                push( &crit->target->signal_stack, crit );
463        }
464
465        //Release
466        unlock_all( this->monitors, count );
467
468        return true;
469}
470
471bool signal_block( condition * this ) {
472        if( !this->blocked.head ) { return false; }
473
474        //Check that everything is as expected
475        verifyf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
476        verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
477
478        // Create storage for monitor context
479        monitor_ctx( this->monitors, this->monitor_count );
480
481        // Lock all monitors (aggregates the locks as well)
482        lock_all( monitors, locks, count );
483
484        // Create the node specific to this wait operation
485        wait_ctx_primed( this_thread, 0 )
486
487        //save contexts
488        monitor_save;
489
490        //Find the thread to run
491        thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
492        set_owner( monitors, count, signallee );
493
494        //Everything is ready to go to sleep
495        BlockInternal( locks, count, &signallee, 1 );
496
497
498        // WE WOKE UP
499
500
501        //We are back, restore the masks and recursions
502        monitor_restore;
503
504        return true;
505}
506
507// Access the user_info of the thread waiting at the front of the queue
508uintptr_t front( condition * this ) {
509        verifyf( !is_empty(this),
510                "Attempt to access user data on an empty condition.\n"
511                "Possible cause is not checking if the condition is empty before reading stored data."
512        );
513        return this->blocked.head->user_info;
514}
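// Illustrative sketch (not part of this file): internal scheduling as seen from user
// code, assuming the usual Cforall monitor/condition syntax. A thread holding the
// monitor waits on a condition and is later signalled from another mutex routine:
//
//    monitor buffer { condition not_empty; int elem; bool full; };
//
//    int take( buffer & mutex b ) {
//        if( !b.full ) wait( &b.not_empty );   // releases b, blocks, reacquires b
//        b.full = false;
//        return b.elem;
//    }
//
//    void put( buffer & mutex b, int e ) {
//        b.elem = e; b.full = true;
//        signal( &b.not_empty );               // waiter resumes once b is released
//    }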
515
516//-----------------------------------------------------------------------------
517// External scheduling
518// cases to handle :
519//      - target already there :
520//              block and wake
521//      - dtor already there
522//              put thread on signaller stack
523//      - non-blocking
524//              return immediately
525//      - timeout
526//              return timeout
527//      - block
528//              setup mask
529//              block
530void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {
531        // This statement doesn't have a contiguous list of monitors...
532        // Create one!
533        short max = count_max( mask );
534        monitor_desc * mon_storage[max];
535        __builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
536        short actual_count = aggregate( mon_storage, mask );
537
538        LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : waitfor %d (s: %d, m: %d)\n", actual_count, mask.size, (short)max);
539
540        if(actual_count == 0) return;
541
542        LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : waitfor internal proceeding\n");
543
544        // Create storage for monitor context
545        monitor_ctx( mon_storage, actual_count );
546
547        // Lock all monitors (aggregates the locks as well)
548        lock_all( monitors, locks, count );
549
550        {
551                // Check if a thread already waiting in the entry queue matches the mask
552                thread_desc * next; int index;
553                [next, index] = search_entry_queue( mask, monitors, count );
554
555                if( next ) {
556                        *mask.accepted = index;
557                        if( mask.clauses[index].is_dtor ) {
558                                LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : dtor already there\n");
559                                verifyf( mask.clauses[index].size == 1        , "ERROR: Accepted dtor has more than 1 mutex parameter." );
560
561                                monitor_desc * mon2dtor = mask.clauses[index].list[0];
562                                verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." );
563
564                                __condition_criterion_t * dtor_crit = mon2dtor->dtor_node->criteria;
565                                push( &mon2dtor->signal_stack, dtor_crit );
566
567                                unlock_all( locks, count );
568                        }
569                        else {
570                                LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : thread present, baton-passing\n");
571
572                                // Create the node specific to this wait operation
573                                wait_ctx_primed( this_thread, 0 );
574
575                                // Save monitor states
576                                monitor_save;
577
578                                LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel :  baton of %d monitors : ", count );
579                                #ifdef __CFA_DEBUG_PRINT__
580                                        for( int i = 0; i < count; i++) {
581                                                LIB_DEBUG_PRINT_BUFFER_LOCAL( "%p %p ", monitors[i], monitors[i]->signal_stack.top );
582                                        }
583                                #endif
584                                LIB_DEBUG_PRINT_BUFFER_LOCAL( "\n");
585
586                                // Set the owners to be the next thread
587                                set_owner( monitors, count, next );
588
589                                // Everything is ready to go to sleep
590                                BlockInternal( locks, count, &next, 1 );
591
592                                // We are back, restore the owners and recursions
593                                monitor_restore;
594
595                                LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : thread present, returned\n");
596                        }
597
598                        LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : accepted %d\n", *mask.accepted);
599
600                        return;
601                }
602        }
603
604
605        if( duration == 0 ) {
606                LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : non-blocking, exiting\n");
607
608                unlock_all( locks, count );
609
610                LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : accepted %d\n", *mask.accepted);
611                return;
612        }
613
614
615        verifyf( duration < 0, "Timeout on waitfor statements not supported yet.");
616
617        LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : blocking waitfor\n");
618
619        // Create the node specific to this wait operation
620        wait_ctx_primed( this_thread, 0 );
621
622        monitor_save;
623        set_mask( monitors, count, mask );
624
625        for(int i = 0; i < count; i++) {
626                verify( monitors[i]->owner == this_thread );
627        }
628
629        //Everything is ready to go to sleep
630        BlockInternal( locks, count );
631
632
633        // WE WOKE UP
634
635
636        //We are back, restore the masks and recursions
637        monitor_restore;
638
639        LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : exiting\n");
640
641        LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : accepted %d\n", *mask.accepted);
642}
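// Illustrative sketch (not part of this file): a statement such as
//
//    waitfor( put, b );
//
// is expected to be lowered by the compiler into the construction of a
// __waitfor_mask_t (one __acceptable_t clause per accepted routine, listing the
// monitors that routine needs) followed by a call to __waitfor_internal( mask, duration ),
// where duration < 0 means block until an accepted call arrives. The exact lowering is
// compiler-generated; the shape shown here is inferred from the parameters above.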
643
644//-----------------------------------------------------------------------------
645// Utilities
646
647static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
648        // LIB_DEBUG_PRINT_SAFE("Kernel :   Setting owner of %p to %p ( was %p)\n", this, owner, this->owner );
649
650        //Pass the monitor appropriately
651        this->owner = owner;
652
653        //If there is a new owner, the recursion level restarts at 1; otherwise the monitor is free
654        this->recursion = owner ? 1 : 0;
655}
656
657static inline void set_owner( monitor_desc ** monitors, short count, thread_desc * owner ) {
658        monitors[0]->owner     = owner;
659        monitors[0]->recursion = 1;
660        for( int i = 1; i < count; i++ ) {
661                monitors[i]->owner     = owner;
662                monitors[i]->recursion = 0;
663        }
664}
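// Baton-passing convention for a monitor group: the first monitor carries recursion
// count 1 and the remaining monitors are marked owned with recursion 0; callers that
// need the exact recursion counts restore them afterwards (see monitor_restore).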
665
666static inline void set_mask( monitor_desc ** storage, short count, const __waitfor_mask_t & mask ) {
667        for(int i = 0; i < count; i++) {
668                storage[i]->mask = mask;
669        }
670}
671
672static inline void reset_mask( monitor_desc * this ) {
673        this->mask.accepted = NULL;
674        this->mask.clauses = NULL;
675        this->mask.size = 0;
676}
677
678static inline thread_desc * next_thread( monitor_desc * this ) {
679        //Check the signaller stack
680        LIB_DEBUG_PRINT_SAFE("Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
681        __condition_criterion_t * urgent = pop( &this->signal_stack );
682        if( urgent ) {
683                //The signaller stack is not empty,
684        //regardless of whether we are ready to baton-pass,
685                //we need to set the monitor as in use
686                set_owner( this,  urgent->owner->waiting_thread );
687
688                return check_condition( urgent );
689        }
690
691        // No signaller thread
692        // Get the next thread in the entry_queue
693        thread_desc * new_owner = pop_head( &this->entry_queue );
694        set_owner( this, new_owner );
695
696        return new_owner;
697}
698
699static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
700        __acceptable_t * it = this->mask.clauses; // Optim
701        int count = this->mask.size;
702
703        // Check if there are any acceptable functions
704        if( !it ) return false;
705
706        // If this isn't the first monitor to test this, there is no reason to repeat the test.
707        if( this != group[0] ) return group[0]->mask.accepted >= 0;
708
709        // For all acceptable functions check if this is the current function.
710        for( short i = 0; i < count; i++, it++ ) {
711                if( *it == group ) {
712                        *this->mask.accepted = i;
713                        return true;
714                }
715        }
716
717        // No function matched
718        return false;
719}
720
721static inline void init( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
722        for(int i = 0; i < count; i++) {
723                (criteria[i]){ monitors[i], waiter };
724        }
725
726        waiter->criteria = criteria;
727}
728
729static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
730        for(int i = 0; i < count; i++) {
731                (criteria[i]){ monitors[i], waiter };
732                LIB_DEBUG_PRINT_SAFE( "Kernel :  target %p = %p\n", criteria[i].target, &criteria[i] );
733                push( &criteria[i].target->signal_stack, &criteria[i] );
734        }
735
736        waiter->criteria = criteria;
737}
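// init() only links the criteria to the waiter; init_push() additionally primes the
// operation by pushing each criterion onto its target monitor's signaller (AS) stack,
// which is what wait_ctx_primed relies on for signal_block, dtor entry and waitfor.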
738
739static inline void lock_all( spinlock ** locks, unsigned short count ) {
740        for( int i = 0; i < count; i++ ) {
741                lock_yield( locks[i] DEBUG_CTX2 );
742        }
743}
744
745static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count ) {
746        for( int i = 0; i < count; i++ ) {
747                spinlock * l = &source[i]->lock;
748                lock_yield( l DEBUG_CTX2 );
749                if(locks) locks[i] = l;
750        }
751}
752
753static inline void unlock_all( spinlock ** locks, unsigned short count ) {
754        for( int i = 0; i < count; i++ ) {
755                unlock( locks[i] );
756        }
757}
758
759static inline void unlock_all( monitor_desc ** locks, unsigned short count ) {
760        for( int i = 0; i < count; i++ ) {
761                unlock( &locks[i]->lock );
762        }
763}
764
765static inline void save( monitor_desc ** ctx, short count, __attribute((unused)) spinlock ** locks, unsigned int * /*out*/ recursions, __waitfor_mask_t * /*out*/ masks ) {
766        for( int i = 0; i < count; i++ ) {
767                recursions[i] = ctx[i]->recursion;
768                masks[i]      = ctx[i]->mask;
769        }
770}
771
772static inline void restore( monitor_desc ** ctx, short count, spinlock ** locks, unsigned int * /*in */ recursions, __waitfor_mask_t * /*in */ masks ) {
773        lock_all( locks, count );
774        for( int i = 0; i < count; i++ ) {
775                ctx[i]->recursion = recursions[i];
776                ctx[i]->mask      = masks[i];
777        }
778        unlock_all( locks, count );
779}
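// save/restore are used as a pair around a blocking point: monitor_save runs while the
// monitor locks are held, BlockInternal releases them and blocks, and monitor_restore
// reacquires the locks (lock_all above), writes the saved recursion counts and masks
// back, then releases them again.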
780
781// Function has 2 different behaviours
782// 1 - Marks a monitor as being ready to run
783// 2 - Checks if all the monitors are ready to run
784//     if so, returns the thread to run
785static inline thread_desc * check_condition( __condition_criterion_t * target ) {
786        __condition_node_t * node = target->owner;
787        unsigned short count = node->count;
788        __condition_criterion_t * criteria = node->criteria;
789
790        bool ready2run = true;
791
792        for(    int i = 0; i < count; i++ ) {
793
794                // LIB_DEBUG_PRINT_SAFE( "Checking %p for %p\n", &criteria[i], target );
795                if( &criteria[i] == target ) {
796                        criteria[i].ready = true;
797                        // LIB_DEBUG_PRINT_SAFE( "True\n" );
798                }
799
800                ready2run = criteria[i].ready && ready2run;
801        }
802
803        LIB_DEBUG_PRINT_SAFE( "Kernel :  Running %i (%p)\n", ready2run, ready2run ? node->waiting_thread : NULL );
804        return ready2run ? node->waiting_thread : NULL;
805}
806
807static inline void brand_condition( condition * this ) {
808        thread_desc * thrd = this_thread;
809        if( !this->monitors ) {
810                // LIB_DEBUG_PRINT_SAFE("Branding\n");
811                assertf( thrd->monitors.list != NULL, "No current monitor to brand condition %p", thrd->monitors.list );
812                this->monitor_count = thrd->monitors.size;
813
814                this->monitors = malloc( this->monitor_count * sizeof( *this->monitors ) );
815                for( int i = 0; i < this->monitor_count; i++ ) {
816                        this->monitors[i] = thrd->monitors.list[i];
817                }
818        }
819}
820
821static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc ** monitors, int count ) {
822
823        __thread_queue_t * entry_queue = &monitors[0]->entry_queue;
824
825        // For each thread in the entry-queue
826        for(    thread_desc ** thrd_it = &entry_queue->head;
827                *thrd_it;
828                thrd_it = &(*thrd_it)->next
829        ) {
830                // For each acceptable function, check if it matches
831                int i = 0;
832                __acceptable_t * end = mask.clauses + mask.size;
833                for( __acceptable_t * it = mask.clauses; it != end; it++, i++ ) {
834                        // Check if we have a match
835                        if( *it == (*thrd_it)->monitors ) {
836
837                                // If we have a match, return it
838                                // after removing it from the entry queue
839                                return [remove( entry_queue, thrd_it ), i];
840                        }
841                }
842        }
843
844        return [0, -1];
845}
846
847forall(dtype T | sized( T ))
848static inline short insert_unique( T ** array, short & size, T * val ) {
849        if( !val ) return size;
850
851        for(int i = 0; i < size; i++) {
852                if( array[i] == val ) return size;
853        }
854
855        array[size] = val;
856        size = size + 1;
857        return size;
858}
859
860static inline short count_max( const __waitfor_mask_t & mask ) {
861        short max = 0;
862        for( int i = 0; i < mask.size; i++ ) {
863                max += mask.clauses[i].size;
864        }
865        return max;
866}
867
868static inline short aggregate( monitor_desc ** storage, const __waitfor_mask_t & mask ) {
869        short size = 0;
870        for( int i = 0; i < mask.size; i++ ) {
871                qsort( mask.clauses[i].list, mask.clauses[i].size );
872                for( int j = 0; j < mask.clauses[i].size; j++) {
873                        insert_unique( storage, size, mask.clauses[i].list[j] );
874                }
875        }
876        // TODO insertion sort instead of this
877        qsort( storage, size );
878        return size;
879}
880
881void ?{}( __condition_blocked_queue_t & this ) {
882        this.head = NULL;
883        this.tail = &this.head;
884}
885
886void append( __condition_blocked_queue_t * this, __condition_node_t * c ) {
887        verify(this->tail != NULL);
888        *this->tail = c;
889        this->tail = &c->next;
890}
891
892__condition_node_t * pop_head( __condition_blocked_queue_t * this ) {
893        __condition_node_t * head = this->head;
894        if( head ) {
895                this->head = head->next;
896                if( !head->next ) {
897                        this->tail = &this->head;
898                }
899                head->next = NULL;
900        }
901        return head;
902}
903
904// Local Variables: //
905// mode: c //
906// tab-width: 4 //
907// End: //