source: src/libcfa/concurrency/preemption.c @ 6b8643d

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// preemption.c --
//
// Author           : Thierry Delisle
// Created On       : Mon Jun 5 14:20:42 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Mon Apr  9 13:52:39 2018
// Update Count     : 36
//

#include "preemption.h"
#include <assert.h>

extern "C" {
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
}

#include "bits/signal.h"

#if !defined(__CFA_DEFAULT_PREEMPTION__)
#define __CFA_DEFAULT_PREEMPTION__ 10`ms
#endif

Duration default_preemption() __attribute__((weak)) {
    return __CFA_DEFAULT_PREEMPTION__;
}
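
// NOTE: default_preemption is declared weak, so an application can replace the
// default 10ms time-slice by providing its own strong definition. A minimal
// sketch of such an override (hypothetical user code, not part of this file):
//
//     Duration default_preemption() {
//         return 1`ms;    // preempt every millisecond instead of the default
//     }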

// FwdDeclarations : timeout handlers
static void preempt( processor   * this );
static void timeout( thread_desc * this );

// FwdDeclarations : Signal handlers
void sigHandler_ctxSwitch( __CFA_SIGPARMS__ );
void sigHandler_segv     ( __CFA_SIGPARMS__ );
void sigHandler_ill      ( __CFA_SIGPARMS__ );
void sigHandler_fpe      ( __CFA_SIGPARMS__ );
void sigHandler_abort    ( __CFA_SIGPARMS__ );

// FwdDeclarations : alarm thread main
void * alarm_loop( __attribute__((unused)) void * args );

// Machine specific register name
#if   defined( __i386 )
#define CFA_REG_IP gregs[REG_EIP]
#elif defined( __x86_64 )
#define CFA_REG_IP gregs[REG_RIP]
#elif defined( __ARM_ARCH )
#define CFA_REG_IP arm_pc
#else
#error unknown hardware architecture
#endif

KERNEL_STORAGE(event_kernel_t, event_kernel);         // private storage for event kernel
event_kernel_t * event_kernel;                        // kernel public handle to event kernel
static pthread_t alarm_thread;                        // pthread handle to alarm thread

void ?{}(event_kernel_t & this) with( this ) {
    alarms{};
    lock{};
}

enum {
    PREEMPT_NORMAL    = 0,
    PREEMPT_TERMINATE = 1,
};

//=============================================================================================
// Kernel Preemption logic
//=============================================================================================

// Get next expired node
static inline alarm_node_t * get_expired( alarm_list_t * alarms, Time currtime ) {
    if( !alarms->head ) return NULL;                          // If no alarms return null
    if( alarms->head->alarm >= currtime ) return NULL;        // If alarms head not expired return null
    return pop(alarms);                                       // Otherwise just pop head
}

// Tick one frame of the Discrete Event Simulation for alarms
void tick_preemption() {
    alarm_node_t * node = NULL;                     // Used in the while loop but cannot be declared in the while condition
    alarm_list_t * alarms = &event_kernel->alarms;  // Local copy for ease of reading
    Time currtime = __kernel_get_time();            // Check current time once so everything "happens at once"

    // Loop through everything that has expired
    while( node = get_expired( alarms, currtime ) ) {
        // __cfaabi_dbg_print_buffer_decl( " KERNEL: preemption tick.\n" );

        // Check if this is a kernel alarm
        if( node->kernel_alarm ) {
            preempt( node->proc );
        }
        else {
            timeout( node->thrd );
        }

        // Check if this is a periodic alarm
        Duration period = node->period;
        if( period > 0 ) {
            // __cfaabi_dbg_print_buffer_local( " KERNEL: alarm period is %lu.\n", period.tv );
            node->alarm = currtime + period;    // Alarm is periodic, add the period to the cached current time
            insert( alarms, node );             // Reinsert the node for the next time it triggers
        }
        else {
            node->set = false;                  // Node is one-shot, just mark it as not pending
        }
    }

    // If there are still alarms pending, reset the timer
    if( alarms->head ) {
        __cfaabi_dbg_print_buffer_decl( " KERNEL: @%lu(%lu) resetting alarm to %lu.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
        Duration delta = alarms->head->alarm - currtime;
        Duration capped = max(delta, 50`us);
        // itimerval tim  = { capped };
        // __cfaabi_dbg_print_buffer_local( "    Values are %lu, %lu, %lu %lu.\n", delta.tv, capped.tv, tim.it_value.tv_sec, tim.it_value.tv_usec);

        __kernel_set_timer( capped );
    }
}
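
// Worked example of the cap above: if the next pending alarm is only 10us away,
// delta = 10`us and capped = max(10`us, 50`us) = 50`us, so the kernel timer is
// never armed for less than 50us, presumably to avoid arming it for intervals
// too short to be serviced when alarms cluster closely together.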

// Update the preemption of a processor and notify interested parties
void update_preemption( processor * this, Duration duration ) {
    alarm_node_t * alarm = this->preemption_alarm;

    // Alarms need to be enabled
    if ( duration > 0 && ! alarm->set ) {
        alarm->alarm = __kernel_get_time() + duration;
        alarm->period = duration;
        register_self( alarm );
    }
    // Zero duration but alarm is set
    else if ( duration == 0 && alarm->set ) {
        unregister_self( alarm );
        alarm->alarm = 0;
        alarm->period = 0;
    }
    // If alarm is different from previous, change it
    else if ( duration > 0 && alarm->period != duration ) {
        unregister_self( alarm );
        alarm->alarm = __kernel_get_time() + duration;
        alarm->period = duration;
        register_self( alarm );
    }
}
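
// Usage sketch (hypothetical call sites; the real ones are the preemption_scope
// ctor/dtor below):
//
//     update_preemption( proc, 10`ms );   // arm or refresh a 10ms periodic preemption alarm
//     update_preemption( proc, 0`s );     // cancel the alarm (zero duration)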

//=============================================================================================
// Kernel Signal Tools
//=============================================================================================

__cfaabi_dbg_debug_do( static thread_local void * last_interrupt = 0; )

extern "C" {
    // Disable interrupts by incrementing the counter
    void disable_interrupts() {
        with( kernelTLS.preemption_state ) {
            static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");

            // Set enabled flag to false
            // should be atomic to avoid preemption in the middle of the operation.
            // use memory order RELAXED since there are no inter-thread ordering requirements on this variable
            __atomic_store_n(&enabled, false, __ATOMIC_RELAXED);

            // Signal the compiler that a fence is needed but only for signal handlers
            __atomic_signal_fence(__ATOMIC_ACQUIRE);

            __attribute__((unused)) unsigned short new_val = disable_count + 1;
            disable_count = new_val;
            verify( new_val < 65_000u );              // If this triggers someone is disabling interrupts without enabling them
        }
    }

    // Enable interrupts by decrementing the counter
    // If counter reaches 0, execute any pending CtxSwitch
    void enable_interrupts( __cfaabi_dbg_ctx_param ) {
        processor   * proc = kernelTLS.this_processor;    // Cache the processor now since interrupts can start happening after the atomic store
        thread_desc * thrd = kernelTLS.this_thread;       // Cache the thread now since interrupts can start happening after the atomic store

        with( kernelTLS.preemption_state ){
            unsigned short prev = disable_count;
            disable_count -= 1;
            verify( prev != 0u );                     // If this triggers someone is enabling interrupts that are already enabled

            // Check if we need to preempt the thread because an interrupt was missed
            if( prev == 1 ) {
                static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");

                // Set enabled flag to true
                // should be atomic to avoid preemption in the middle of the operation.
                // use memory order RELAXED since there are no inter-thread ordering requirements on this variable
                __atomic_store_n(&enabled, true, __ATOMIC_RELAXED);

                // Signal the compiler that a fence is needed but only for signal handlers
                __atomic_signal_fence(__ATOMIC_RELEASE);
                if( proc->pending_preemption ) {
                    proc->pending_preemption = false;
                    BlockInternal( thrd );
                }
            }
        }

        // For debugging purposes : keep track of the last person to enable the interrupts
        __cfaabi_dbg_debug_do( proc->last_enable = caller; )
    }

    // Enable interrupts by decrementing the counter
    // Don't execute any pending CtxSwitch even if counter reaches 0
    void enable_interrupts_noPoll() {
        unsigned short prev = kernelTLS.preemption_state.disable_count;
        kernelTLS.preemption_state.disable_count -= 1;
        verifyf( prev != 0u, "Incremented from %u\n", prev );                     // If this triggers someone is enabling interrupts that are already enabled
        if( prev == 1 ) {
            static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free");
            // Set enabled flag to true
            // should be atomic to avoid preemption in the middle of the operation.
            // use memory order RELAXED since there are no inter-thread ordering requirements on this variable
            __atomic_store_n(&kernelTLS.preemption_state.enabled, true, __ATOMIC_RELAXED);

            // Signal the compiler that a fence is needed but only for signal handlers
            __atomic_signal_fence(__ATOMIC_RELEASE);
        }
    }
}
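
// Intended pairing of the two calls above (a minimal sketch of a hypothetical
// call site; real call sites live in the kernel proper). __cfaabi_dbg_ctx is
// assumed here to be the debug macro matching __cfaabi_dbg_ctx_param, supplying
// the caller in debug builds and nothing otherwise:
//
//     disable_interrupts();
//     // ... touch kernel data structures without risk of being preempted ...
//     enable_interrupts( __cfaabi_dbg_ctx );   // may run a deferred CtxSwitch here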

// sigprocmask wrapper : unblock a single signal
static inline void signal_unblock( int sig ) {
    sigset_t mask;
    sigemptyset( &mask );
    sigaddset( &mask, sig );

    if ( pthread_sigmask( SIG_UNBLOCK, &mask, NULL ) == -1 ) {
        abort( "internal error, pthread_sigmask" );
    }
}

// sigprocmask wrapper : block a single signal
static inline void signal_block( int sig ) {
    sigset_t mask;
    sigemptyset( &mask );
    sigaddset( &mask, sig );

    if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
        abort( "internal error, pthread_sigmask" );
    }
}

// kill wrapper : signal a processor
static void preempt( processor * this ) {
    sigval_t value = { PREEMPT_NORMAL };
    pthread_sigqueue( this->kernel_thread, SIGUSR1, value );
}

// kill wrapper : signal a processor
void terminate(processor * this) {
    this->do_terminate = true;
    sigval_t value = { PREEMPT_TERMINATE };
    pthread_sigqueue( this->kernel_thread, SIGUSR1, value );
}

// reserved for future use
static void timeout( thread_desc * this ) {
    //TODO : implement waking threads
}

// KERNEL ONLY
// Check if a CtxSwitch signal handler should defer
// If true  : preemption is safe
// If false : preemption is unsafe and marked as pending
static inline bool preemption_ready() {
    // Check if preemption is safe
    bool ready = kernelTLS.preemption_state.enabled && ! kernelTLS.preemption_state.in_progress;

    // Adjust the pending flag accordingly
    kernelTLS.this_processor->pending_preemption = !ready;
    return ready;
}
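
// If preemption is not ready, the signal handler returns without switching; the
// pending_preemption flag set here is what later causes enable_interrupts() to
// perform the deferred BlockInternal once the disable count reaches zero.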

//=============================================================================================
// Kernel Signal Startup/Shutdown logic
//=============================================================================================

// Startup routine to activate preemption
// Called from kernel_startup
void kernel_start_preemption() {
    __cfaabi_dbg_print_safe( "Kernel : Starting preemption\n" );

    // Start with preemption disabled until ready
    kernelTLS.preemption_state.enabled = false;
    kernelTLS.preemption_state.disable_count = 1;

    // Initialize the event kernel
    event_kernel = (event_kernel_t *)&storage_event_kernel;
    (*event_kernel){};

    // Setup proper signal handlers
    __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART );         // CtxSwitch handler

    signal_block( SIGALRM );

    pthread_create( &alarm_thread, NULL, alarm_loop, NULL );
}

// Shutdown routine to deactivate preemption
// Called from kernel_shutdown
void kernel_stop_preemption() {
    __cfaabi_dbg_print_safe( "Kernel : Preemption stopping\n" );

    // Block all signals since we are already shutting down
    sigset_t mask;
    sigfillset( &mask );
    sigprocmask( SIG_BLOCK, &mask, NULL );

    // Notify the alarm thread of the shutdown
    sigval val = { 1 };
    pthread_sigqueue( alarm_thread, SIGALRM, val );

    // Wait for the preemption thread to finish
    pthread_join( alarm_thread, NULL );

    // Preemption is now fully stopped

    __cfaabi_dbg_print_safe( "Kernel : Preemption stopped\n" );
}

// RAII ctor/dtor for the preemption_scope
// Used by threads to control when they want to receive preemption signals
void ?{}( preemption_scope & this, processor * proc ) {
    (this.alarm){ proc, (Time){ 0 }, 0`s };
    this.proc = proc;
    this.proc->preemption_alarm = &this.alarm;

    update_preemption( this.proc, this.proc->cltr->preemption_rate );
}

void ^?{}( preemption_scope & this ) {
    disable_interrupts();

    update_preemption( this.proc, 0`s );
}

//=============================================================================================
// Kernel Signal Handlers
//=============================================================================================

// Context switch signal handler
// Receives SIGUSR1 signal and causes the current thread to yield
void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) {
    __cfaabi_dbg_debug_do( last_interrupt = (void *)(cxt->uc_mcontext.CFA_REG_IP); )

    // SKULLDUGGERY: if a thread creates a processor and then immediately deletes it,
    // the interrupt that is supposed to force the kernel thread to preempt might arrive
    // before the kernel thread has even started running. When that happens, an interrupt
    // with a null 'this_processor' will be caught; just ignore it.
    if(! kernelTLS.this_processor ) return;

    choose(sfp->si_value.sival_int) {
        case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
        case PREEMPT_TERMINATE: verify( kernelTLS.this_processor->do_terminate);
        default:
            abort( "internal error, signal value is %d", sfp->si_value.sival_int );
    }

    // Check if it is safe to preempt here
    if( !preemption_ready() ) { return; }

    __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", kernelTLS.this_processor, kernelTLS.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) );

    // Sync flag : prevent recursive calls to the signal handler
    kernelTLS.preemption_state.in_progress = true;

    // Clear sighandler mask before context switching.
    static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" );
    if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), NULL ) == -1 ) {
        abort( "internal error, pthread_sigmask" );
    }

    // TODO: this should go in finish action
    // Clear the in progress flag
    kernelTLS.preemption_state.in_progress = false;

    // Preemption can occur here

    BlockInternal( kernelTLS.this_thread ); // Do the actual CtxSwitch
}
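
// Note on the mask restore above: while the handler runs, SIGUSR1 is blocked by
// the normal signal-delivery semantics. Since BlockInternal switches to another
// thread instead of returning through the usual signal path, the handler first
// restores the pre-signal mask (cxt->uc_sigmask) so that whatever runs next on
// this kernel thread can itself be preempted.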

// Main of the alarm thread
// Waits on SIGALRM and sends SIGUSR1 to whomever needs it
void * alarm_loop( __attribute__((unused)) void * args ) {
    // Block all signals so SIGALRM is only received synchronously via sigwaitinfo below
    sigset_t mask;
    sigfillset(&mask);
    if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
        abort( "internal error, pthread_sigmask" );
    }

    sigemptyset( &mask );
    sigaddset( &mask, SIGALRM );

    // Main loop
    while( true ) {
        // Wait for a SIGALRM
        siginfo_t info;
        int sig = sigwaitinfo( &mask, &info );

        if( sig < 0 ) {
            // Error!
            int err = errno;
            switch( err ) {
                case EAGAIN :
                case EINTR :
                    {__cfaabi_dbg_print_buffer_decl( " KERNEL: Spurious wakeup %d.\n", err );}
                    continue;
                case EINVAL :
                    abort( "Timeout was invalid." );
                default:
                    abort( "Unhandled error %d", err);
            }
        }

        // If another signal arrived something went wrong
        assertf(sig == SIGALRM, "Kernel Internal Error, sigwait: Unexpected signal %d (%d : %d)\n", sig, info.si_code, info.si_value.sival_int);

        // __cfaabi_dbg_print_safe( "Kernel : Caught alarm from %d with %d\n", info.si_code, info.si_value.sival_int );
        // Switch on the code (a.k.a. the sender) to handle the signal appropriately
        switch( info.si_code )
        {
        // Timer expirations can apparently be marked as sent by the kernel
        // In either case, tick preemption
        case SI_TIMER:
        case SI_KERNEL:
            // __cfaabi_dbg_print_safe( "Kernel : Preemption thread tick\n" );
            lock( event_kernel->lock __cfaabi_dbg_ctx2 );
            tick_preemption();
            unlock( event_kernel->lock );
            break;
        // Signal was not sent by the kernel but by another thread
        case SI_QUEUE:
            // For now, other threads only signal the alarm thread to shut it down
            // If this needs to change use info.si_value and handle the case here
            goto EXIT;
        }
    }

EXIT:
    __cfaabi_dbg_print_safe( "Kernel : Preemption thread stopping\n" );
    return NULL;
}
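
// Shutdown path: kernel_stop_preemption() (above) uses pthread_sigqueue to send
// SIGALRM with code SI_QUEUE, which is what drives the loop to the EXIT label;
// regular timer expirations arrive with SI_TIMER/SI_KERNEL instead.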

//=============================================================================================
// Kernel Signal Debug
//=============================================================================================

void __cfaabi_check_preemption() {
    bool ready = kernelTLS.preemption_state.enabled;
    if(!ready) { abort("Preemption should be ready"); }

    sigset_t oldset;
    int ret;
    ret = pthread_sigmask(0, NULL, &oldset);
    if(ret != 0) { abort("ERROR pthread_sigmask returned %d", ret); }

    ret = sigismember(&oldset, SIGUSR1);
    if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
    if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }

    ret = sigismember(&oldset, SIGALRM);
    if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
    if(ret == 0) { abort("ERROR SIGALRM is enabled"); }

    ret = sigismember(&oldset, SIGTERM);
    if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
    if(ret == 1) { abort("ERROR SIGTERM is disabled"); }
}
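
// In other words, on a processor kernel thread the expected invariant while
// preemption is enabled is: SIGUSR1 unblocked (so CtxSwitch signals are
// delivered), SIGALRM blocked (only the alarm thread waits on it), and SIGTERM
// unblocked.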

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //