source: src/libcfa/concurrency/monitor.c @ c81ebf9

aaron-thesis, arm-eh, cleanup-dtors, deferred_resn, demangler, jacob/cs343-translation, jenkins-sandbox, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, resolv-new, with_gc
Last change on this file since c81ebf9 was c81ebf9, checked in by Thierry Delisle <tdelisle@…>, 5 years ago

More work done on preemption in cforall, next step disabling interrupts at the correct places

  • Property mode set to 100644
File size: 14.6 KB
Line 
1//                              -*- Mode: CFA -*-
2//
3// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
4//
5// The contents of this file are covered under the licence agreement in the
6// file "LICENCE" distributed with Cforall.
7//
8// monitor_desc.c --
9//
10// Author           : Thierry Delisle
11// Created On       : Thd Feb 23 12:27:26 2017
12// Last Modified By : Thierry Delisle
13// Last Modified On : --
14// Update Count     : 0
15//
16
17#include "monitor"
18
19#include <stdlib>
20
21#include "kernel_private.h"
22#include "libhdr.h"
23
24//-----------------------------------------------------------------------------
25// Forward declarations
26static inline void set_owner( monitor_desc * this, thread_desc * owner );
27static inline thread_desc * next_thread( monitor_desc * this );
28
29static inline void lock_all( spinlock ** locks, unsigned short count );
30static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count );
31static inline void unlock_all( spinlock ** locks, unsigned short count );
32static inline void unlock_all( monitor_desc ** locks, unsigned short count );
33
34static inline void save_recursion   ( monitor_desc ** ctx, unsigned int * /*out*/ recursions, unsigned short count );
35static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count );
36
37static inline thread_desc * check_condition( __condition_criterion_t * );
38static inline void brand_condition( condition * );
39static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val );
40
41//-----------------------------------------------------------------------------
42// Enter/Leave routines
43
44
extern "C" {
	// Acquire the monitor for the current thread.
	// Three cases: the monitor is free (take it), we already own it
	// (bump the recursion count), or another thread owns it (queue up
	// and block until ownership is passed to us).
	void __enter_monitor_desc(monitor_desc * this) {
		lock( &this->lock );
		thread_desc * thrd = this_thread();

		LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);

		if( !this->owner ) {
			//No one has the monitor, just take it
			set_owner( this, thrd );
		}
		else if( this->owner == thrd) {
			//We already have the monitor, just note how many times we took it
			assert( this->recursion > 0 );
			this->recursion += 1;
		}
		else {
			//Some one else has the monitor, wait in line for it
			append( &this->entry_queue, thrd );
			LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
			ScheduleInternal( &this->lock );

			//ScheduleInternal will unlock spinlock, no need to unlock ourselves
			return;
		}

		unlock( &this->lock );
		return;
	}

	// Release one recursion level of the monitor; when the last level is
	// released, pass ownership to the next waiting thread (signal stack
	// first, then entry queue) and wake it.
	// leave pseudo code :
	//	TODO
	void __leave_monitor_desc(monitor_desc * this) {
		lock( &this->lock );

		thread_desc * thrd = this_thread();

		LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
		assertf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );

		//Leaving a recursion level, decrement the counter
		this->recursion -= 1;

		//If we haven't left the last level of recursion
		//it means we don't need to do anything
		if( this->recursion != 0) {
			unlock( &this->lock );
			return;
		}

		// next_thread() may return NULL when no thread is waiting and no
		// signalled waiter is ready.
		thread_desc * new_owner = next_thread( this );

		//We can now let other threads in safely
		unlock( &this->lock );

		LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);

		//We need to wake-up the thread
		// NOTE(review): new_owner may be NULL here; assumes ScheduleThread
		// tolerates a NULL argument — TODO confirm against kernel code.
		ScheduleThread( new_owner );
	}
}
106
// Acquire every monitor of the set, front to back (the caller is expected
// to have put the set in a consistent global order beforehand).
static inline void enter(monitor_desc ** monitors, int count) {
	for( int idx = 0; idx < count; idx += 1 ) {
		__enter_monitor_desc( monitors[idx] );
	}
}
112
// Release every monitor of the set, back to front — the exact reverse of
// the acquisition order used by enter().
static inline void leave(monitor_desc ** monitors, int count) {
	int idx = count;
	while( idx > 0 ) {
		idx -= 1;
		__leave_monitor_desc( monitors[idx] );
	}
}
118
// RAII-style guard constructor: sorts the monitor set (so every thread
// acquires monitors in the same global order, avoiding deadlock), enters
// all of them, and records the set as the thread's current monitors so
// conditions and signals can validate against it.
void ?{}( monitor_guard_t * this, monitor_desc ** m, int count ) {
	this->m = m;
	this->count = count;
	qsort(this->m, count);
	enter( this->m, this->count );

	// Save the previously active monitor set so nested mutex blocks
	// restore correctly in the destructor.
	this->prev_mntrs = this_thread()->current_monitors;
	this->prev_count = this_thread()->current_monitor_count;

	this_thread()->current_monitors      = m;
	this_thread()->current_monitor_count = count;
}
131
// Guard destructor: leaves all monitors (reverse order) and restores the
// thread's previously active monitor set saved by the constructor.
void ^?{}( monitor_guard_t * this ) {
	leave( this->m, this->count );

	this_thread()->current_monitors      = this->prev_mntrs;
	this_thread()->current_monitor_count = this->prev_count;
}
138
// Initialize a waiter node for internal scheduling; the criteria array is
// attached separately by the caller.
void ?{}(__condition_node_t * this, thread_desc * waiting_thread, unsigned short count, uintptr_t user_info ) {
	this->next           = NULL;
	this->waiting_thread = waiting_thread;
	this->user_info      = user_info;
	this->count          = count;
}
145
// Default-initialize a criterion: not ready, linked to nothing.
void ?{}(__condition_criterion_t * this ) {
	this->next   = NULL;
	this->owner  = NULL;
	this->target = NULL;
	this->ready  = false;
}
152
// Initialize a criterion bound to one monitor (target) on behalf of the
// waiter node (owner) that aggregates it.
void ?{}(__condition_criterion_t * this, monitor_desc * target, __condition_node_t * owner ) {
	this->next   = NULL;
	this->owner  = owner;
	this->target = target;
	this->ready  = false;
}
159
160//-----------------------------------------------------------------------------
161// Internal scheduling
// Block the current thread on the condition, atomically releasing every
// monitor the condition is branded with.  The thread resumes only after a
// signaller has satisfied all of its criteria and ownership of the
// monitors has been passed back.  user_info is stored on the waiter node
// and can be read by other threads through front().
void wait( condition * this, uintptr_t user_info = 0 ) {
	LIB_DEBUG_PRINT_SAFE("Waiting\n");

	// Bind the condition to the thread's current monitor set on first use.
	brand_condition( this );

	//Check that everything is as expected
	assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
	assertf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
	assertf( this->monitor_count < 32u, "Excessive monitor count (%i)", this->monitor_count );

	unsigned short count = this->monitor_count;
	unsigned int recursions[ count ];		//Save the current recursion levels to restore them later
	spinlock *   locks     [ count ];		//We need to pass-in an array of locks to ScheduleInternal

	LIB_DEBUG_PRINT_SAFE("count %i\n", count);

	// The waiter node and its criteria live on this thread's stack; that is
	// safe because the thread stays blocked until they are consumed.
	__condition_node_t waiter = { this_thread(), count, user_info };

	__condition_criterion_t criteria[count];
	for(int i = 0; i < count; i++) {
		(&criteria[i]){ this->monitors[i], &waiter };
		LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
	}

	waiter.criteria = criteria;
	append( &this->blocked, &waiter );

	lock_all( this->monitors, locks, count );
	save_recursion( this->monitors, recursions, count );
	//DON'T unlock, ask the kernel to do it

	//Find the next thread(s) to run
	unsigned short thread_count = 0;
	thread_desc * threads[ count ];
	for(int i = 0; i < count; i++) {
		threads[i] = 0;
	}

	// Each released monitor may have a distinct next owner; collect them
	// without duplicates so no thread is scheduled twice.
	for( int i = 0; i < count; i++) {
		thread_desc * new_owner = next_thread( this->monitors[i] );
		thread_count = insert_unique( threads, thread_count, new_owner );
	}

	LIB_DEBUG_PRINT_SAFE("Will unblock: ");
	for(int i = 0; i < thread_count; i++) {
		LIB_DEBUG_PRINT_SAFE("%p ", threads[i]);
	}
	LIB_DEBUG_PRINT_SAFE("\n");

	// Everything is ready to go to sleep: the kernel releases the locks and
	// wakes the collected threads atomically with blocking this one.
	ScheduleInternal( locks, count, threads, thread_count );


	//WE WOKE UP


	//We are back, restore the owners and recursions
	lock_all( locks, count );
	restore_recursion( this->monitors, recursions, count );
	unlock_all( locks, count );
}
223
// Signal the condition: pop the head waiter and push each of its criteria
// onto the corresponding monitor's signal (urgent) stack, so ownership is
// baton-passed to the waiter when the signaller leaves the monitors.
// Returns false when no thread is waiting on the condition.
bool signal( condition * this ) {
	if( is_empty( this ) ) {
		LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
		return false;
	}

	//Check that everything is as expected
	assert( this->monitors );
	assert( this->monitor_count != 0 );

	unsigned short count = this->monitor_count;

	//Some more checking in debug: the signaller must hold exactly the
	//monitor set the condition was branded with.
	LIB_DEBUG_DO(
		thread_desc * this_thrd = this_thread();
		if ( this->monitor_count != this_thrd->current_monitor_count ) {
			abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count );
		} // if

		for(int i = 0; i < this->monitor_count; i++) {
			// BUGFIX: the second value is a pointer; it was printed with %i,
			// which mismatches the variadic argument (undefined behavior).
			if ( this->monitors[i] != this_thrd->current_monitors[i] ) {
				abortf( "Signal on condition %p made with different monitor, expected %p got %p", this, this->monitors[i], this_thrd->current_monitors[i] );
			} // if
		}
	);

	//Lock all the monitors
	lock_all( this->monitors, NULL, count );
	LIB_DEBUG_PRINT_SAFE("Signalling");

	//Pop the head of the waiting queue
	__condition_node_t * node = pop_head( &this->blocked );

	//Add the thread to the proper AS (urgent) stack of each monitor
	for(int i = 0; i < count; i++) {
		__condition_criterion_t * crit = &node->criteria[i];
		LIB_DEBUG_PRINT_SAFE(" %p", crit->target);
		assert( !crit->ready );
		push( &crit->target->signal_stack, crit );
	}

	LIB_DEBUG_PRINT_SAFE("\n");

	//Release
	unlock_all( this->monitors, count );

	return true;
}
272
// Signal-and-block: immediately hand the monitors to the head waiter
// (the signallee) and block the signaller in its place; the signaller's
// own criteria are pushed on the urgent stacks so it resumes once the
// signallee is done.  Returns false when no thread is waiting.
bool signal_block( condition * this ) {
	if( !this->blocked.head ) {
		LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
		return false;
	}

	//Check that everything is as expected
	assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
	assertf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );

	unsigned short count = this->monitor_count;
	unsigned int recursions[ count ];		//Save the current recursion levels to restore them later
	spinlock *   locks     [ count ];		//We need to pass-in an array of locks to ScheduleInternal

	lock_all( this->monitors, locks, count );

	//create criteria for the signaller itself: it becomes a waiter on the
	//urgent stack of every monitor it hands over
	__condition_node_t waiter = { this_thread(), count, 0 };

	__condition_criterion_t criteria[count];
	for(int i = 0; i < count; i++) {
		(&criteria[i]){ this->monitors[i], &waiter };
		LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
		push( &criteria[i].target->signal_stack, &criteria[i] );
	}

	waiter.criteria = criteria;

	//save contexts
	save_recursion( this->monitors, recursions, count );

	//Find the thread to run
	// Non-empty head was checked on entry, so pop_head() cannot return NULL
	// here (monitor mutual exclusion prevents concurrent removal).
	thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
	for(int i = 0; i < count; i++) {
		set_owner( this->monitors[i], signallee );
	}

	LIB_DEBUG_PRINT_SAFE( "Waiting on signal block\n" );

	//Everything is ready to go to sleep: the kernel releases the locks and
	//wakes the signallee atomically with blocking this thread
	ScheduleInternal( locks, count, &signallee, 1 );




	LIB_DEBUG_PRINT_SAFE( "Back from signal block\n" );

	//We are back, restore the owners and recursions
	lock_all( locks, count );
	restore_recursion( this->monitors, recursions, count );
	unlock_all( locks, count );

	return true;
}
327
// Return the user_info stored by the thread at the head of the condition's
// blocked queue (the value it passed to wait()).
// Precondition: the condition must not be empty — checked in debug builds
// only; in release builds an empty condition dereferences a NULL head.
uintptr_t front( condition * this ) {
	LIB_DEBUG_DO(
		if( is_empty(this) ) {
			abortf( "Attempt to access user data on an empty condition.\n"
		    "Possible cause is not checking if the condition is empty before reading stored data." );
		}
	);
	return this->blocked.head->user_info;
}
337
338//-----------------------------------------------------------------------------
339// Internal scheduling
// External scheduling (waitfor/accept) entry point — NOT YET IMPLEMENTED.
// The parameters are currently unused; the commented-out code below
// sketches the intended lock/sleep/restore sequence, mirroring wait().
void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) ) {
	// thread_desc * this = this_thread();

	// unsigned short count = this->current_monitor_count;
	// unsigned int recursions[ count ];		//Save the current recursion levels to restore them later
	// spinlock *   locks     [ count ];		//We need to pass-in an array of locks to ScheduleInternal

	// lock_all( this->current_monitors, locks, count );





	// // // Everything is ready to go to sleep
	// // ScheduleInternal( locks, count, threads, thread_count );


	// //WE WOKE UP


	// //We are back, restore the owners and recursions
	// lock_all( locks, count );
	// restore_recursion( this->monitors, recursions, count );
	// unlock_all( locks, count );
}
365
366//-----------------------------------------------------------------------------
367// Utilities
368
// Hand the monitor to 'owner'.  A real owner always starts at recursion
// level 1; clearing the owner (NULL) resets the recursion level to 0.
static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
	this->owner = owner;
	if( owner ) {
		this->recursion = 1;
	}
	else {
		this->recursion = 0;
	}
}
376
// Choose the next owner of the monitor.  The signal (urgent) stack has
// priority over the entry queue: signalled waiters resume before new
// entrants.  May return NULL when the signalled waiter still has
// unsatisfied criteria on other monitors, or when both queues are empty.
static inline thread_desc * next_thread( monitor_desc * this ) {
	//Check the signaller stack
	__condition_criterion_t * urgent = pop( &this->signal_stack );
	if( urgent ) {
		//The signaller stack is not empty,
		//regardless of if we are ready to baton pass,
		//we need to set the monitor as in use
		set_owner( this,  urgent->owner->waiting_thread );

		// check_condition() marks this criterion ready and returns the
		// waiting thread only once ALL of its criteria are ready.
		return check_condition( urgent );
	}

	// No signaller thread
	// Get the next thread in the entry_queue
	thread_desc * new_owner = pop_head( &this->entry_queue );
	set_owner( this, new_owner );

	return new_owner;
}
396
// Acquire every spinlock in the array, in array order.
static inline void lock_all( spinlock ** locks, unsigned short count ) {
	for( unsigned short idx = 0; idx < count; idx += 1 ) {
		lock( locks[idx] );
	}
}
402
// Acquire each monitor's internal spinlock, in array order, optionally
// recording the acquired locks into the caller-provided 'locks' array
// (pass NULL to skip recording).
static inline void lock_all( monitor_desc ** source, spinlock ** /*out*/ locks, unsigned short count ) {
	for( unsigned short idx = 0; idx < count; idx += 1 ) {
		spinlock * current = &source[idx]->lock;
		lock( current );
		if( locks ) {
			locks[idx] = current;
		}
	}
}
410
// Release every spinlock in the array, in array order.
static inline void unlock_all( spinlock ** locks, unsigned short count ) {
	for( unsigned short idx = 0; idx < count; idx += 1 ) {
		unlock( locks[idx] );
	}
}
416
// Release each monitor's internal spinlock, in array order.
static inline void unlock_all( monitor_desc ** locks, unsigned short count ) {
	for( unsigned short idx = 0; idx < count; idx += 1 ) {
		spinlock * current = &locks[idx]->lock;
		unlock( current );
	}
}
422
423
// Snapshot the recursion level of each monitor into 'recursions', so it
// can be reinstated by restore_recursion() after blocking.
static inline void save_recursion   ( monitor_desc ** ctx, unsigned int * /*out*/ recursions, unsigned short count ) {
	for( unsigned short idx = 0; idx < count; idx += 1 ) {
		recursions[idx] = ctx[idx]->recursion;
	}
}
429
// Reinstate the recursion levels previously captured by save_recursion().
static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count ) {
	for( unsigned short idx = 0; idx < count; idx += 1 ) {
		ctx[idx]->recursion = recursions[idx];
	}
}
435
// Dual-purpose helper:
// 1 - marks the given criterion as ready (satisfied on its monitor)
// 2 - checks whether every criterion of the owning waiter is now ready;
//     if so, returns the blocked thread so it can be rescheduled,
//     otherwise returns NULL.
static inline thread_desc * check_condition( __condition_criterion_t * target ) {
	__condition_node_t * node = target->owner;
	unsigned short count = node->count;
	__condition_criterion_t * criteria = node->criteria;

	bool all_ready = true;

	// Scan the full criteria set: flip 'target' to ready when found, and
	// accumulate readiness across all entries.
	for( unsigned short idx = 0; idx < count; idx += 1 ) {
		LIB_DEBUG_PRINT_SAFE( "Checking %p for %p\n", &criteria[idx], target );
		if( target == &criteria[idx] ) {
			criteria[idx].ready = true;
			LIB_DEBUG_PRINT_SAFE( "True\n" );
		}

		if( !criteria[idx].ready ) {
			all_ready = false;
		}
	}

	LIB_DEBUG_PRINT_SAFE( "Runing %i\n", all_ready );
	if( all_ready ) {
		return node->waiting_thread;
	}
	return NULL;
}
461
// Lazily bind ("brand") the condition to the monitor set currently held
// by the calling thread.  Done on first wait(); subsequent calls are
// no-ops, so a condition is permanently tied to its first monitor set.
// NOTE(review): the monitors array is malloc'd and never freed here —
// presumably freed by the condition's destructor elsewhere; confirm.
static inline void brand_condition( condition * this ) {
	thread_desc * thrd = this_thread();
	if( !this->monitors ) {
		LIB_DEBUG_PRINT_SAFE("Branding\n");
		// BUGFIX: the format string had no conversion specifier for the
		// trailing pointer argument (printf-family UB); added "(%p)".
		assertf( thrd->current_monitors != NULL, "No current monitor to brand condition (%p)", thrd->current_monitors );
		this->monitor_count = thrd->current_monitor_count;

		this->monitors = malloc( this->monitor_count * sizeof( *this->monitors ) );
		for( int i = 0; i < this->monitor_count; i++ ) {
			this->monitors[i] = thrd->current_monitors[i];
		}
	}
}
475
// Append 'val' to 'thrds' unless it is NULL or already present among the
// first 'end' entries; returns the new number of valid entries.
static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val ) {
	if( !val ) return end;

	// BUGFIX: the scan used 'i <= end', reading thrds[end] — one past the
	// last inserted element — which is not a valid entry.  Only indices
	// [0, end) hold inserted values.
	for(int i = 0; i < end; i++) {
		if( thrds[i] == val ) return end;
	}

	thrds[end] = val;
	return end + 1;
}
486
// Initialize an empty intrusive queue: with no nodes, the tail pointer
// refers to the head pointer itself.
void ?{}( __condition_blocked_queue_t * this ) {
	this->tail = &this->head;
	this->head = NULL;
}
491
// Append node 'c' at the tail of the blocked queue in O(1) by writing
// through the tail pointer (which always addresses the last 'next' slot).
// Precondition: c->next is NULL (node not linked elsewhere).
void append( __condition_blocked_queue_t * this, __condition_node_t * c ) {
	assert(this->tail != NULL);
	*this->tail = c;
	this->tail = &c->next;
}
497
// Detach and return the first node of the blocked queue, or NULL when the
// queue is empty.  When removing the last node, the tail pointer is reset
// to refer to the head pointer again.
__condition_node_t * pop_head( __condition_blocked_queue_t * this ) {
	__condition_node_t * node = this->head;
	if( !node ) {
		return NULL;
	}

	this->head = node->next;
	if( node->next == NULL ) {
		// Queue is now empty: restore the empty-queue invariant.
		this->tail = &this->head;
	}
	node->next = NULL;
	return node;
}
Note: See TracBrowser for help on using the repository browser.