source: src/libcfa/concurrency/monitor.c @ ea7d2b0

Last change on this file since ea7d2b0 was ea7d2b0, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Moved spinlocks to bits/locks.h

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// monitor.c --
//
// Author           : Thierry Delisle
// Created On       : Thu Feb 23 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Mon Jul 31 14:59:05 2017
// Update Count     : 3
//

#include "monitor"

#include <stdlib>
#include <inttypes.h>

#include "libhdr.h"
#include "kernel_private.h"

#include "bits/algorithms.h"

//-----------------------------------------------------------------------------
// Forward declarations
static inline void set_owner ( monitor_desc * this, thread_desc * owner );
static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
static inline void set_mask  ( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
static inline void reset_mask( monitor_desc * this );

static inline thread_desc * next_thread( monitor_desc * this );
static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );

static inline void lock_all  ( __spinlock_t * locks [], __lock_size_t count );
static inline void lock_all  ( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count );
static inline void unlock_all( monitor_desc * locks [], __lock_size_t count );

static inline void save   ( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
static inline void restore( monitor_desc * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );

static inline void init     ( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );

static inline thread_desc *        check_condition   ( __condition_criterion_t * );
static inline void                 brand_condition   ( condition & );
static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc * monitors [], __lock_size_t count );

forall(dtype T | sized( T ))
static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
static inline __lock_size_t count_max    ( const __waitfor_mask_t & mask );
static inline __lock_size_t aggregate    ( monitor_desc * storage [], const __waitfor_mask_t & mask );

//-----------------------------------------------------------------------------
// Useful defines
#define wait_ctx(thrd, user_info)                               /* Create the necessary information to use the signaller stack                         */ \
	__condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
	__condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up                            */ \
	init( count, monitors, waiter, criteria );                /* Link everything together                                                            */ \

#define wait_ctx_primed(thrd, user_info)                        /* Create the necessary information to use the signaller stack                         */ \
	__condition_node_t waiter = { thrd, count, user_info };   /* Create the node specific to this wait operation                                     */ \
	__condition_criterion_t criteria[count];                  /* Create the criteria this wait operation needs to wake up                            */ \
	init_push( count, monitors, waiter, criteria );           /* Link everything together and push it to the AS-Stack                                */ \

#define monitor_ctx( mons, cnt )                                /* Defines the necessary state for internal/external scheduling operations             */ \
	monitor_desc ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
	__lock_size_t count = cnt;                                /* Save the count to a local variable                                                  */ \
	unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
	__waitfor_mask_t masks [ count ];                         /* Save the current waitfor masks to restore them later                                */ \
	__spinlock_t *   locks [ count ];                         /* We need to pass-in an array of locks to BlockInternal                               */ \

#define monitor_save    save   ( monitors, count, locks, recursions, masks )
#define monitor_restore restore( monitors, count, locks, recursions, masks )

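// Usage sketch: the three macro groups above are designed to compose inside
// the blocking routines below (see wait() for the real thing), e.g.
//
//     monitor_ctx( this.monitors, this.monitor_count );  // declare monitors/count/recursions/masks/locks
//     wait_ctx( this_thread, user_info );                // build waiter node + per-monitor criteria
//     monitor_save;                                      // capture recursion levels and masks
//     /* ... BlockInternal( ... ) ... */
//     monitor_restore;                                   // reinstate them after wake-up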

//-----------------------------------------------------------------------------
// Enter/Leave routines


extern "C" {
	// Enter single monitor
	static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) {
		// Lock the monitor spinlock, lock_yield to reduce contention
		lock_yield( this->lock DEBUG_CTX2 );
		thread_desc * thrd = this_thread;

		LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);

		if( !this->owner ) {
			// No one has the monitor, just take it
			set_owner( this, thrd );

			LIB_DEBUG_PRINT_SAFE("Kernel :  mon is free \n");
		}
		else if( this->owner == thrd) {
			// We already have the monitor, just note how many times we took it
			this->recursion += 1;

			LIB_DEBUG_PRINT_SAFE("Kernel :  mon already owned \n");
		}
		else if( is_accepted( this, group) ) {
			// Someone was waiting for us, enter
			set_owner( this, thrd );

			// Reset mask
			reset_mask( this );

			LIB_DEBUG_PRINT_SAFE("Kernel :  mon accepts \n");
		}
		else {
			LIB_DEBUG_PRINT_SAFE("Kernel :  blocking \n");

			// Someone else has the monitor, wait in line for it
			append( this->entry_queue, thrd );
			BlockInternal( &this->lock );

			LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entered  mon %p\n", thrd, this);

			// BlockInternal will unlock the spinlock, no need to unlock ourselves
			return;
		}

		LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entered  mon %p\n", thrd, this);

		// Release the lock and leave
		unlock( this->lock );
		return;
	}

	static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) {
		// Lock the monitor spinlock, lock_yield to reduce contention
		lock_yield( this->lock DEBUG_CTX2 );
		thread_desc * thrd = this_thread;

		LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);


		if( !this->owner ) {
			LIB_DEBUG_PRINT_SAFE("Kernel : Destroying free mon %p\n", this);

			// No one has the monitor, just take it
			set_owner( this, thrd );

			unlock( this->lock );
			return;
		}
		else if( this->owner == thrd) {
			// We already have the monitor... but we are about to destroy it, so nested calls will fail
			// Abort!
			abortf("Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd);
		}

		__lock_size_t count = 1;
		monitor_desc ** monitors = &this;
		__monitor_group_t group = { &this, 1, func };
		if( is_accepted( this, group) ) {
			LIB_DEBUG_PRINT_SAFE("Kernel :  mon accepts dtor, block and signal it \n");

			// Wake the thread that is waiting for this
			__condition_criterion_t * urgent = pop( this->signal_stack );
			verify( urgent );

			// Reset mask
			reset_mask( this );

			// Create the node specific to this wait operation
			wait_ctx_primed( this_thread, 0 )

			// Someone else has the monitor, wait for it to finish and then run
			BlockInternal( &this->lock, urgent->owner->waiting_thread );

			// Someone was waiting for us, enter
			set_owner( this, thrd );
		}
		else {
			LIB_DEBUG_PRINT_SAFE("Kernel :  blocking \n");

			wait_ctx( this_thread, 0 )
			this->dtor_node = &waiter;

			// Someone else has the monitor, wait in line for it
			append( this->entry_queue, thrd );
			BlockInternal( &this->lock );

			// BlockInternal will unlock the spinlock, no need to unlock ourselves
			return;
		}

		LIB_DEBUG_PRINT_SAFE("Kernel : Destroying %p\n", this);

	}

	// Leave single monitor
	void __leave_monitor_desc( monitor_desc * this ) {
		// Lock the monitor spinlock, lock_yield to reduce contention
		lock_yield( this->lock DEBUG_CTX2 );

		LIB_DEBUG_PRINT_SAFE("Kernel : %10p Leaving mon %p (%p)\n", this_thread, this, this->owner);

		verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", this_thread, this->owner, this->recursion, this );

		// Leaving a recursion level, decrement the counter
		this->recursion -= 1;

		// If we haven't left the last level of recursion
		// it means we don't need to do anything
		if( this->recursion != 0) {
			LIB_DEBUG_PRINT_SAFE("Kernel :  recursion still %d\n", this->recursion);
			unlock( this->lock );
			return;
		}

		// Get the next thread; may be null when there is no contention
		thread_desc * new_owner = next_thread( this );

		// We need to wake up the thread
		unlock( this->lock );

		// We need to wake up the thread
		WakeThread( new_owner );
	}

	// Leave single monitor for the last time
	void __leave_dtor_monitor_desc( monitor_desc * this ) {
		LIB_DEBUG_DO(
			if( this_thread != this->owner ) {
				abortf("Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, this_thread, this->owner);
			}
			if( this->recursion != 1 ) {
				abortf("Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
			}
		)
	}

	// Leave the thread monitor
	// last routine called by a thread.
	// Should never return
	void __leave_thread_monitor( thread_desc * thrd ) {
		monitor_desc * this = &thrd->self_mon;

		// Lock the monitor now
		lock_yield( this->lock DEBUG_CTX2 );

		disable_interrupts();

		thrd->self_cor.state = Halted;

		verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );

		// Leaving a recursion level, decrement the counter
		this->recursion -= 1;

		// If we haven't left the last level of recursion
		// it must mean there is an error
		if( this->recursion != 0) { abortf("Thread internal monitor has unbalanced recursion"); }

		// Fetch the next thread, can be null
		thread_desc * new_owner = next_thread( this );

		// Leave the thread, this will unlock the spinlock
		// Use LeaveThread instead of BlockInternal: it is
		// specialized for this case and supports a null new_owner
		LeaveThread( &this->lock, new_owner );

		// Control flow should never reach here!
	}
}

// Enter multiple monitors
// relies on the monitor array being sorted
static inline void enter( __monitor_group_t monitors ) {
	for( __lock_size_t i = 0; i < monitors.size; i++) {
		__enter_monitor_desc( monitors.list[i], monitors );
	}
}

// Leave multiple monitors
// relies on the monitor array being sorted
static inline void leave(monitor_desc * monitors [], __lock_size_t count) {
	// Count down to avoid underflowing the unsigned index once i reaches 0
	for( __lock_size_t i = count; i > 0; i--) {
		__leave_monitor_desc( monitors[i - 1] );
	}
}

// Ctor for monitor guard
// Sorts monitors before entering
void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) {
	// Store current array
	this.m = m;
	this.count = count;

	// Sort monitors based on address
	__libcfa_small_sort(this.m, count);

	// Save previous thread context
	this.[prev_mntrs, prev_count, prev_func] = this_thread->monitors.[list, size, func];

	// Update thread context (needed for conditions)
	this_thread->monitors.[list, size, func] = [m, count, func];

	// LIB_DEBUG_PRINT_SAFE("MGUARD : enter %d\n", count);

	// Enter the monitors in order
	__monitor_group_t group = {this.m, this.count, func};
	enter( group );

	// LIB_DEBUG_PRINT_SAFE("MGUARD : entered\n");
}
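
// Illustrative sketch: a routine declared with mutex parameters acquires its
// monitors through this guard. The shape below is only an approximation of
// the generated code; get_monitor is a stand-in for however the compiler
// extracts the underlying monitor_desc.
//
//     monitor M {};
//     void f( M & mutex a, M & mutex b ) {
//             // monitor_desc * mons[] = { get_monitor(a), get_monitor(b) };
//             // monitor_guard_t guard = { mons, 2, (fptr_t)f };
//             ... // body runs with both monitors held
//     }       // ^guard{} leaves in reverse order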


// Dtor for monitor guard
void ^?{}( monitor_guard_t & this ) {
	// LIB_DEBUG_PRINT_SAFE("MGUARD : leaving %d\n", this.count);

	// Leave the monitors in order
	leave( this.m, this.count );

	// LIB_DEBUG_PRINT_SAFE("MGUARD : left\n");

	// Restore thread context
	this_thread->monitors.[list, size, func] = this.[prev_mntrs, prev_count, prev_func];
}

// Ctor for monitor dtor guard
// Takes a single monitor, so no sorting is needed
void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) {
	// Store the single monitor
	this.m = *m;

	// Save previous thread context
	this.[prev_mntrs, prev_count, prev_func] = this_thread->monitors.[list, size, func];

	// Update thread context (needed for conditions)
	this_thread->monitors.[list, size, func] = [m, 1, func];

	__enter_monitor_dtor( this.m, func );
}

// Dtor for monitor dtor guard
void ^?{}( monitor_dtor_guard_t & this ) {
	// Leave the monitor for the last time
	__leave_dtor_monitor_desc( this.m );

	// Restore thread context
	this_thread->monitors.[list, size, func] = this.[prev_mntrs, prev_count, prev_func];
}

//-----------------------------------------------------------------------------
// Internal scheduling types
void ?{}(__condition_node_t & this, thread_desc * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
	this.waiting_thread = waiting_thread;
	this.count = count;
	this.next = NULL;
	this.user_info = user_info;
}

void ?{}(__condition_criterion_t & this ) {
	this.ready  = false;
	this.target = NULL;
	this.owner  = NULL;
	this.next   = NULL;
}

void ?{}(__condition_criterion_t & this, monitor_desc * target, __condition_node_t & owner ) {
	this.ready  = false;
	this.target = target;
	this.owner  = &owner;
	this.next   = NULL;
}

//-----------------------------------------------------------------------------
// Internal scheduling
void wait( condition & this, uintptr_t user_info = 0 ) {
	brand_condition( this );

	// Check that everything is as expected
	assertf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );
	verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count );
	verifyf( this.monitor_count < 32u, "Excessive monitor count (%"PRIiFAST16")", this.monitor_count );

	// Create storage for monitor context
	monitor_ctx( this.monitors, this.monitor_count );

	// Create the node specific to this wait operation
	wait_ctx( this_thread, user_info );

	// Append the current wait operation to the ones already queued on the condition
	// We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
	append( this.blocked, &waiter );

	// Lock all monitors (aggregates the locks as well)
	lock_all( monitors, locks, count );

	// Find the next thread(s) to run
	__lock_size_t thread_count = 0;
	thread_desc * threads[ count ];
	__builtin_memset( threads, 0, sizeof( threads ) );

	// Save monitor states
	monitor_save;

	// Remove any duplicate threads
	for( __lock_size_t i = 0; i < count; i++) {
		thread_desc * new_owner = next_thread( monitors[i] );
		insert_unique( threads, thread_count, new_owner );
	}

	// Everything is ready to go to sleep
	BlockInternal( locks, count, threads, thread_count );

	// We are back, restore the owners and recursions
	monitor_restore;
}
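
// Usage sketch (illustrative CFA; the buffer type is an assumption): wait must
// be called with the monitor held, and atomically queues the waiter, releases
// the monitor(s) and blocks, then reacquires them before returning.
//
//     monitor buffer { condition not_empty; int count; };
//     void take( buffer & mutex b ) {
//             if( b.count == 0 ) wait( b.not_empty );  // atomically release b and block
//             b.count -= 1;
//     }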

bool signal( condition & this ) {
	if( is_empty( this ) ) { return false; }

	// Check that everything is as expected
	verify( this.monitors );
	verify( this.monitor_count != 0 );

	// Some more checking in debug
	LIB_DEBUG_DO(
		thread_desc * this_thrd = this_thread;
		if ( this.monitor_count != this_thrd->monitors.size ) {
			abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", &this, this.monitor_count, this_thrd->monitors.size );
		}

		for(int i = 0; i < this.monitor_count; i++) {
			if ( this.monitors[i] != this_thrd->monitors.list[i] ) {
				abortf( "Signal on condition %p made with different monitor, expected %p got %p", &this, this.monitors[i], this_thrd->monitors.list[i] );
			}
		}
	);

	__lock_size_t count = this.monitor_count;

	// Lock all monitors
	lock_all( this.monitors, NULL, count );

	// Pop the head of the waiting queue
	__condition_node_t * node = pop_head( this.blocked );

	// Add the thread to the proper AS stack
	for(int i = 0; i < count; i++) {
		__condition_criterion_t * crit = &node->criteria[i];
		assert( !crit->ready );
		push( crit->target->signal_stack, crit );
	}

	// Release
	unlock_all( this.monitors, count );

	return true;
}
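
// Note that signal does not transfer the monitor immediately: it moves the
// waiter's criteria onto each monitor's signal stack, so the signalled thread
// runs only once the signaller releases the monitor(s). Continuing the sketch
// above (buffer/not_empty are assumptions):
//
//     void put( buffer & mutex b ) {
//             b.count += 1;
//             signal( b.not_empty );  // signaller keeps the monitor until put returns
//     }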

bool signal_block( condition & this ) {
	if( !this.blocked.head ) { return false; }

	// Check that everything is as expected
	verifyf( this.monitors != NULL, "Signalling with no monitors (%p)", this.monitors );
	verifyf( this.monitor_count != 0, "Signalling with 0 monitors (%"PRIiFAST16")", this.monitor_count );

	// Create storage for monitor context
	monitor_ctx( this.monitors, this.monitor_count );

	// Lock all monitors (aggregates the locks as well)
	lock_all( monitors, locks, count );

	// Create the node specific to this wait operation
	wait_ctx_primed( this_thread, 0 )

	// Save contexts
	monitor_save;

	// Find the thread to run
	thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
	set_owner( monitors, count, signallee );

	LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee );

	// Everything is ready to go to sleep
	BlockInternal( locks, count, &signallee, 1 );


	// WE WOKE UP


	LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel :   signal_block returned\n" );

	// We are back, restore the masks and recursions
	monitor_restore;

	return true;
}
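
// By contrast with signal, signal_block hands the monitor(s) to the signalled
// thread right away (set_owner above) and blocks the signaller on the
// AS-stack until the signallee is done; use it when the waiter must run
// before the signaller continues. Sketch, reusing the assumed buffer type:
//
//     void flush( buffer & mutex b ) {
//             signal_block( b.not_empty );  // waiter runs now; we resume afterwards
//     }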

// Access the user_info of the thread waiting at the front of the queue
uintptr_t front( condition & this ) {
	verifyf( !is_empty(this),
		"Attempt to access user data on an empty condition.\n"
		"Possible cause is not checking if the condition is empty before reading stored data."
	);
	return this.blocked.head->user_info;
}
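
// Usage sketch (illustrative; the server type and fields are assumptions):
// user_info lets a waiter publish one word of data that a potential signaller
// can inspect through front before deciding whether to signal.
//
//     monitor server { condition pending; uintptr_t next_ticket; };
//     void request( server & mutex s, uintptr_t ticket ) {
//             wait( s.pending, ticket );                  // store the ticket with the wait
//     }
//     void serve( server & mutex s ) {
//             if( !is_empty( s.pending ) && front( s.pending ) == s.next_ticket ) {
//                     signal( s.pending );
//             }
//     }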

//-----------------------------------------------------------------------------
// External scheduling
// cases to handle :
//      - target already there :
//              block and wake
//      - dtor already there :
//              put thread on signaller stack
//      - non-blocking :
//              return immediately
//      - timeout :
//              return on timeout
//      - block :
//              setup mask
//              block
void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {
	// This statement doesn't have a contiguous list of monitors...
	// Create one!
	__lock_size_t max = count_max( mask );
	monitor_desc * mon_storage[max];
	__builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
	__lock_size_t actual_count = aggregate( mon_storage, mask );

	LIB_DEBUG_PRINT_BUFFER_DECL( "Kernel : waitfor %d (s: %d, m: %d)\n", actual_count, mask.size, (__lock_size_t)max);

	if(actual_count == 0) return;

	LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : waitfor internal proceeding\n");

	// Create storage for monitor context
	monitor_ctx( mon_storage, actual_count );

	// Lock all monitors (aggregates the locks as well)
	lock_all( monitors, locks, count );

	{
		// Check if a thread matching the mask is already waiting in the entry queue
		thread_desc * next; int index;
		[next, index] = search_entry_queue( mask, monitors, count );

		if( next ) {
			*mask.accepted = index;
			if( mask.clauses[index].is_dtor ) {
				LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : dtor already there\n");
				verifyf( mask.clauses[index].size == 1, "ERROR: Accepted dtor has more than 1 mutex parameter." );

				monitor_desc * mon2dtor = mask.clauses[index].list[0];
				verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." );

				__condition_criterion_t * dtor_crit = mon2dtor->dtor_node->criteria;
				push( mon2dtor->signal_stack, dtor_crit );

				unlock_all( locks, count );
			}
			else {
				LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : thread present, baton-passing\n");

				// Create the node specific to this wait operation
				wait_ctx_primed( this_thread, 0 );

				// Save monitor states
				monitor_save;

				LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel :  baton of %d monitors : ", count );
				#ifdef __CFA_DEBUG_PRINT__
					for( int i = 0; i < count; i++) {
						LIB_DEBUG_PRINT_BUFFER_LOCAL( "%p %p ", monitors[i], monitors[i]->signal_stack.top );
					}
				#endif
				LIB_DEBUG_PRINT_BUFFER_LOCAL( "\n");

				// Set the owners to be the next thread
				set_owner( monitors, count, next );

				// Everything is ready to go to sleep
				BlockInternal( locks, count, &next, 1 );

				// We are back, restore the owners and recursions
				monitor_restore;

				LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : thread present, returned\n");
			}

			LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : accepted %d\n", *mask.accepted);

			return;
		}
	}


	if( duration == 0 ) {
		LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : non-blocking, exiting\n");

		unlock_all( locks, count );

		LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : accepted %d\n", *mask.accepted);
		return;
	}


	verifyf( duration < 0, "Timeout on waitfor statements not supported yet.");

	LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : blocking waitfor\n");

	// Create the node specific to this wait operation
	wait_ctx_primed( this_thread, 0 );

	monitor_save;
	set_mask( monitors, count, mask );

	for( __lock_size_t i = 0; i < count; i++) {
		verify( monitors[i]->owner == this_thread );
	}

	// Everything is ready to go to sleep
	BlockInternal( locks, count );


	// WE WOKE UP


	// We are back, restore the masks and recursions
	monitor_restore;

	LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : exiting\n");

	LIB_DEBUG_PRINT_BUFFER_LOCAL( "Kernel : accepted %d\n", *mask.accepted);
}
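
// Usage sketch (illustrative CFA of this era; buffer/put are assumptions): a
// waitfor statement is compiled into a __waitfor_mask_t with one clause per
// alternative, then passed to __waitfor_internal.
//
//     void drain( buffer & mutex b ) {
//             waitfor( put, b );  // block until some thread calls put( b )
//             // waitfor( put, b ) { ... } or waitfor( ^?{}, b ) { ... } or else { ... }
//     }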

//-----------------------------------------------------------------------------
// Utilities

static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
	// LIB_DEBUG_PRINT_SAFE("Kernel :   Setting owner of %p to %p ( was %p)\n", this, owner, this->owner );

	// Pass the monitor appropriately
	this->owner = owner;

	// If there is a new owner the recursion level starts at 1, otherwise the monitor is free
	this->recursion = owner ? 1 : 0;
}

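// Set a whole group of monitors to a single owner at once (used for
// signal_block and waitfor baton-passing). The recursion levels written here
// are only placeholders: the woken thread's monitor_restore reinstates the
// counts it saved before blocking.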
static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
	monitors[0]->owner     = owner;
	monitors[0]->recursion = 1;
	for( __lock_size_t i = 1; i < count; i++ ) {
		monitors[i]->owner     = owner;
		monitors[i]->recursion = 0;
	}
}

static inline void set_mask( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
	for( __lock_size_t i = 0; i < count; i++) {
		storage[i]->mask = mask;
	}
}

static inline void reset_mask( monitor_desc * this ) {
	this->mask.accepted = NULL;
	this->mask.clauses = NULL;
	this->mask.size = 0;
}

static inline thread_desc * next_thread( monitor_desc * this ) {
	// Check the signaller stack
	LIB_DEBUG_PRINT_SAFE("Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
	__condition_criterion_t * urgent = pop( this->signal_stack );
	if( urgent ) {
		// The signaller stack is not empty,
		// regardless of whether we are ready to baton pass,
		// we need to set the monitor as in use
		set_owner( this, urgent->owner->waiting_thread );

		return check_condition( urgent );
	}

	// No signaller thread
	// Get the next thread in the entry_queue
	thread_desc * new_owner = pop_head( this->entry_queue );
	set_owner( this, new_owner );

	return new_owner;
}

static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & group ) {
	__acceptable_t * it = this->mask.clauses; // Optimization: cache the mask clauses
	__lock_size_t count = this->mask.size;

	// Check if there are any acceptable functions
	if( !it ) return false;

	// If this isn't the first monitor to test this, there is no reason to repeat the test.
	if( this != group[0] ) return group[0]->mask.accepted >= 0;

	// For all acceptable functions check if this is the current function.
	for( __lock_size_t i = 0; i < count; i++, it++ ) {
		if( *it == group ) {
			*this->mask.accepted = i;
			return true;
		}
	}

	// No function matched
	return false;
}

static inline void init( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
	for( __lock_size_t i = 0; i < count; i++) {
		(criteria[i]){ monitors[i], waiter };
	}

	waiter.criteria = criteria;
}

static inline void init_push( __lock_size_t count, monitor_desc * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
	for( __lock_size_t i = 0; i < count; i++) {
		(criteria[i]){ monitors[i], waiter };
		LIB_DEBUG_PRINT_SAFE( "Kernel :  target %p = %p\n", criteria[i].target, &criteria[i] );
		push( criteria[i].target->signal_stack, &criteria[i] );
	}

	waiter.criteria = criteria;
}

static inline void lock_all( __spinlock_t * locks [], __lock_size_t count ) {
	for( __lock_size_t i = 0; i < count; i++ ) {
		lock_yield( *locks[i] DEBUG_CTX2 );
	}
}

static inline void lock_all( monitor_desc * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
	for( __lock_size_t i = 0; i < count; i++ ) {
		__spinlock_t * l = &source[i]->lock;
		lock_yield( *l DEBUG_CTX2 );
		if(locks) locks[i] = l;
	}
}

static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count ) {
	for( __lock_size_t i = 0; i < count; i++ ) {
		unlock( *locks[i] );
	}
}

static inline void unlock_all( monitor_desc * locks [], __lock_size_t count ) {
	for( __lock_size_t i = 0; i < count; i++ ) {
		unlock( locks[i]->lock );
	}
}

static inline void save(
	monitor_desc * ctx [],
	__lock_size_t count,
	__attribute((unused)) __spinlock_t * locks [],
	unsigned int /*out*/ recursions [],
	__waitfor_mask_t /*out*/ masks []
) {
	for( __lock_size_t i = 0; i < count; i++ ) {
		recursions[i] = ctx[i]->recursion;
		masks[i]      = ctx[i]->mask;
	}
}

static inline void restore(
	monitor_desc * ctx [],
	__lock_size_t count,
	__spinlock_t * locks [],
	unsigned int /*in */ recursions [],
	__waitfor_mask_t /*in */ masks []
) {
	lock_all( locks, count );
	for( __lock_size_t i = 0; i < count; i++ ) {
		ctx[i]->recursion = recursions[i];
		ctx[i]->mask      = masks[i];
	}
	unlock_all( locks, count );
}

// This function has two behaviours:
// 1 - Marks a monitor as being ready to run
// 2 - Checks if all the monitors are ready to run;
//     if so, returns the thread to run
static inline thread_desc * check_condition( __condition_criterion_t * target ) {
	__condition_node_t * node = target->owner;
	unsigned short count = node->count;
	__condition_criterion_t * criteria = node->criteria;

	bool ready2run = true;

	for( int i = 0; i < count; i++ ) {

		// LIB_DEBUG_PRINT_SAFE( "Checking %p for %p\n", &criteria[i], target );
		if( &criteria[i] == target ) {
			criteria[i].ready = true;
			// LIB_DEBUG_PRINT_SAFE( "True\n" );
		}

		ready2run = criteria[i].ready && ready2run;
	}

	LIB_DEBUG_PRINT_SAFE( "Kernel :  Running %i (%p)\n", ready2run, ready2run ? node->waiting_thread : NULL );
	return ready2run ? node->waiting_thread : NULL;
}
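
// Worked example: a thread waiting on a condition inside a routine mutex on
// monitors a and b creates one __condition_node_t with two criteria, one
// pushed on each monitor's signal stack. When a is released, check_condition
// marks a's criterion ready but returns NULL because b's criterion is not
// ready yet; when b is released as well, the second call finds every
// criterion ready and returns the waiting thread so it can be rescheduled.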

static inline void brand_condition( condition & this ) {
	thread_desc * thrd = this_thread;
	if( !this.monitors ) {
		// LIB_DEBUG_PRINT_SAFE("Branding\n");
		assertf( thrd->monitors.list != NULL, "No current monitor to brand condition %p", &this );
		this.monitor_count = thrd->monitors.size;

		this.monitors = malloc( this.monitor_count * sizeof( *this.monitors ) );
		for( int i = 0; i < this.monitor_count; i++ ) {
			this.monitors[i] = thrd->monitors.list[i];
		}
	}
}

static inline [thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc * monitors [], __lock_size_t count ) {

	__thread_queue_t & entry_queue = monitors[0]->entry_queue;

	// For each thread in the entry-queue
	for( thread_desc ** thrd_it = &entry_queue.head;
		*thrd_it;
		thrd_it = &(*thrd_it)->next
	) {
		// For each acceptable function, check if it matches
		int i = 0;
		__acceptable_t * end = mask.clauses + mask.size;
		for( __acceptable_t * it = mask.clauses; it != end; it++, i++ ) {
			// Check if we have a match
			if( *it == (*thrd_it)->monitors ) {

				// If we have a match return it
				// after removing it from the entry queue
				return [remove( entry_queue, thrd_it ), i];
			}
		}
	}

	return [0, -1];
}

forall(dtype T | sized( T ))
static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val ) {
	if( !val ) return size;

	// Only the first 'size' slots hold valid entries
	for( __lock_size_t i = 0; i < size; i++) {
		if( array[i] == val ) return size;
	}

	array[size] = val;
	size = size + 1;
	return size;
}

static inline __lock_size_t count_max( const __waitfor_mask_t & mask ) {
	__lock_size_t max = 0;
	for( __lock_size_t i = 0; i < mask.size; i++ ) {
		max += mask.clauses[i].size;
	}
	return max;
}

static inline __lock_size_t aggregate( monitor_desc * storage [], const __waitfor_mask_t & mask ) {
	__lock_size_t size = 0;
	for( __lock_size_t i = 0; i < mask.size; i++ ) {
		__libcfa_small_sort( mask.clauses[i].list, mask.clauses[i].size );
		for( __lock_size_t j = 0; j < mask.clauses[i].size; j++) {
			insert_unique( storage, size, mask.clauses[i].list[j] );
		}
	}
	// TODO insertion sort instead of this
	__libcfa_small_sort( storage, size );
	return size;
}
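
// Worked example (illustrative): for "waitfor( f, a, b ) or waitfor( g, b, c )"
// the mask holds two clauses {a,b} and {b,c}; count_max returns 4 (an upper
// bound), while aggregate deduplicates and sorts to produce {a,b,c}, so each
// monitor lock is taken exactly once and always in the same global order.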

void ?{}( __condition_blocked_queue_t & this ) {
	this.head = NULL;
	this.tail = &this.head;
}

void append( __condition_blocked_queue_t & this, __condition_node_t * c ) {
	verify(this.tail != NULL);
	*this.tail = c;
	this.tail = &c->next;
}

__condition_node_t * pop_head( __condition_blocked_queue_t & this ) {
	__condition_node_t * head = this.head;
	if( head ) {
		this.head = head->next;
		if( !head->next ) {
			this.tail = &this.head;
		}
		head->next = NULL;
	}
	return head;
}

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //