source: libcfa/src/concurrency/preemption.cfa@ 50b8885

ADT arm-eh ast-experimental enum forall-pointer-decay jacob/cs343-translation jenkins-sandbox new-ast new-ast-unique-expr pthread-emulation qualifiedEnum
Last change on this file since 50b8885 was 3381ed7, checked in by Thierry Delisle <tdelisle@…>, 6 years ago

Added park/unpark thread primitives and removed BlockInternal.
Converted monitors to use park unpark.
Intrusive Queue now mark next field when thread is inside queue.
Added several asserts to kernel and monitor.
Added a few tests for park and unpark.

  • Property mode set to 100644
File size: 16.6 KB
Line 
1//
2// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// signal.c --
8//
9// Author : Thierry Delisle
10// Created On : Mon Jun 5 14:20:42 2017
11// Last Modified By : Peter A. Buhr
12// Last Modified On : Thu Dec 5 16:34:05 2019
13// Update Count : 43
14//
15
16#define __cforall_thread__
17
18#include "preemption.hfa"
19#include <assert.h>
20
21extern "C" {
22#include <errno.h>
23#include <stdio.h>
24#include <string.h>
25#include <unistd.h>
26#include <limits.h> // PTHREAD_STACK_MIN
27}
28
29#include "bits/signal.hfa"
30
31#if !defined(__CFA_DEFAULT_PREEMPTION__)
32#define __CFA_DEFAULT_PREEMPTION__ 10`ms
33#endif
34
// Default time between preemption ticks.
// Declared weak so a program (or test) can override it with a strong definition.
Duration default_preemption() __attribute__((weak)) {
	return __CFA_DEFAULT_PREEMPTION__;
}
38
39// FwdDeclarations : timeout handlers
40static void preempt( processor * this );
41static void timeout( thread_desc * this );
42
43// FwdDeclarations : Signal handlers
44static void sigHandler_ctxSwitch( __CFA_SIGPARMS__ );
45static void sigHandler_segv ( __CFA_SIGPARMS__ );
46static void sigHandler_ill ( __CFA_SIGPARMS__ );
47static void sigHandler_fpe ( __CFA_SIGPARMS__ );
48static void sigHandler_abort ( __CFA_SIGPARMS__ );
49
50// FwdDeclarations : alarm thread main
51static void * alarm_loop( __attribute__((unused)) void * args );
52
53// Machine specific register name
54#if defined( __i386 )
55#define CFA_REG_IP gregs[REG_EIP]
56#elif defined( __x86_64 )
57#define CFA_REG_IP gregs[REG_RIP]
58#elif defined( __ARM_ARCH )
59#define CFA_REG_IP arm_pc
60#else
61#error unknown hardware architecture
62#endif
63
64KERNEL_STORAGE(event_kernel_t, event_kernel); // private storage for event kernel
65event_kernel_t * event_kernel; // kernel public handle to even kernel
66static pthread_t alarm_thread; // pthread handle to alarm thread
67static void * alarm_stack; // pthread stack for alarm thread
68
// Constructor for the event kernel : default-construct the alarm list and its lock
static void ?{}(event_kernel_t & this) with( this ) {
	alarms{};
	lock{};
}
73
// Payload values carried by the SIGUSR1 sigval sent to a processor's kernel thread
enum {
	PREEMPT_NORMAL = 0,		// regular preemption tick : force a context switch
	PREEMPT_TERMINATE = 1,		// processor is being terminated
};
78
79//=============================================================================================
80// Kernel Preemption logic
81//=============================================================================================
82
// Get next expired node
// Returns the head of the alarm list if it has expired relative to currtime, 0p otherwise
static inline alarm_node_t * get_expired( alarm_list_t * alarms, Time currtime ) {
	alarm_node_t * head = alarms->head;
	// Nothing to pop when the list is empty or the earliest alarm is still in the future
	if( !head || head->alarm >= currtime ) return 0p;
	return pop(alarms);
}
89
// Tick one frame of the Discrete Event Simulation for alarms :
// fire every expired alarm, re-arm periodic ones, and reset the hardware timer.
// Caller must hold event_kernel->lock.
static void tick_preemption() {
	alarm_node_t * node = 0p;				// Used in the while loop but cannot be declared in the while condition
	alarm_list_t * alarms = &event_kernel->alarms;		// Local copy for ease of reading
	Time currtime = __kernel_get_time();			// Check current time once so everything "happens at once"

	// Loop through everything that has expired
	while( node = get_expired( alarms, currtime ) ) {
		// __cfaabi_dbg_print_buffer_decl( " KERNEL: preemption tick.\n" );

		// Check if this is a kernel alarm (preempt a processor) or a user timeout (wake a thread)
		if( node->kernel_alarm ) {
			preempt( node->proc );
		}
		else {
			timeout( node->thrd );
		}

		// Check if this is a periodic alarm
		Duration period = node->period;
		if( period > 0 ) {
			// __cfaabi_dbg_print_buffer_local( " KERNEL: alarm period is %lu.\n", period.tv );
			node->alarm = currtime + period;	// Alarm is periodic : next trigger is period past the cached current time
			insert( alarms, node );			// Reinsert the node for the next time it triggers
		}
		else {
			node->set = false;			// Node is one-shot, just mark it as not pending
		}
	}

	// If there are still alarms pending, reset the timer
	if( alarms->head ) {
		__cfaabi_dbg_print_buffer_decl( " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
		Duration delta = alarms->head->alarm - currtime;
		// Cap the minimum delay so an alarm expiring "right now" does not produce a zero/negative timeout
		Duration caped = max(delta, 50`us);
		// itimerval tim = { caped };
		// __cfaabi_dbg_print_buffer_local( " Values are %lu, %lu, %lu %lu.\n", delta.tv, caped.tv, tim.it_value.tv_sec, tim.it_value.tv_usec);

		__kernel_set_timer( caped );
	}
}
131
// Update the preemption rate of a processor and notify interested parties.
// duration > 0 : (re)arm a periodic preemption alarm at that rate
// duration == 0 : cancel the alarm
void update_preemption( processor * this, Duration duration ) {
	alarm_node_t * alarm = this->preemption_alarm;

	// Alarm needs to be enabled
	if ( duration > 0 && ! alarm->set ) {
		alarm->alarm = __kernel_get_time() + duration;
		alarm->period = duration;
		register_self( alarm );
	}
	// Zero duration but alarm is set : disable it
	else if ( duration == 0 && alarm->set ) {
		unregister_self( alarm );
		alarm->alarm = 0;
		alarm->period = 0;
	}
	// If the rate is different from the previous period, re-register with the new rate
	else if ( duration > 0 && alarm->period != duration ) {
		unregister_self( alarm );
		alarm->alarm = __kernel_get_time() + duration;
		alarm->period = duration;
		register_self( alarm );
	}
}
156
157//=============================================================================================
158// Kernel Signal Tools
159//=============================================================================================
160
161__cfaabi_dbg_debug_do( static thread_local void * last_interrupt = 0; )
162
extern "C" {
	// Disable interrupts by incrementing the counter
	void disable_interrupts() {
		with( kernelTLS.preemption_state ) {
			#if GCC_VERSION > 50000
			static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
			#endif

			// Set enabled flag to false
			// should be atomic to avoid preemption in the middle of the operation.
			// use memory order RELAXED since there are no inter-thread requirements on this variable
			__atomic_store_n(&enabled, false, __ATOMIC_RELAXED);

			// Signal the compiler that a fence is needed but only for signal handlers
			__atomic_signal_fence(__ATOMIC_ACQUIRE);

			__attribute__((unused)) unsigned short new_val = disable_count + 1;
			disable_count = new_val;
			verify( new_val < 65_000u );	// If this triggers someone is disabling interrupts without enabling them
		}
	}

	// Enable interrupts by decrementing the counter
	// If counter reaches 0, execute any pending CtxSwitch
	void enable_interrupts( __cfaabi_dbg_ctx_param ) {
		processor * proc = kernelTLS.this_processor;	// Cache the processor now since interrupts can start happening after the atomic store

		with( kernelTLS.preemption_state ){
			unsigned short prev = disable_count;
			disable_count -= 1;
			verify( prev != 0u );			// If this triggers someone is enabling interrupts that were already enabled

			// Check if we need to preempt the thread because an interrupt was missed
			if( prev == 1 ) {
				#if GCC_VERSION > 50000
				static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
				#endif

				// Set enabled flag to true
				// should be atomic to avoid preemption in the middle of the operation.
				// use memory order RELAXED since there are no inter-thread requirements on this variable
				__atomic_store_n(&enabled, true, __ATOMIC_RELAXED);

				// Signal the compiler that a fence is needed but only for signal handlers
				__atomic_signal_fence(__ATOMIC_RELEASE);
				if( proc->pending_preemption ) {
					proc->pending_preemption = false;
					force_yield( __POLL_PREEMPTION );
				}
			}
		}

		// For debugging purposes : keep track of the last person to enable the interrupts
		__cfaabi_dbg_debug_do( proc->last_enable = caller; )
	}

	// Enable interrupts by decrementing the counter
	// Unlike enable_interrupts, do NOT execute any pending CtxSwitch even if the counter reaches 0
	void enable_interrupts_noPoll() {
		unsigned short prev = kernelTLS.preemption_state.disable_count;
		kernelTLS.preemption_state.disable_count -= 1;
		verifyf( prev != 0u, "Incremented from %u\n", prev );		// If this triggers someone is enabling interrupts that were already enabled
		if( prev == 1 ) {
			#if GCC_VERSION > 50000
			static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free");
			#endif
			// Set enabled flag to true
			// should be atomic to avoid preemption in the middle of the operation.
			// use memory order RELAXED since there are no inter-thread requirements on this variable
			__atomic_store_n(&kernelTLS.preemption_state.enabled, true, __ATOMIC_RELAXED);

			// Signal the compiler that a fence is needed but only for signal handlers
			__atomic_signal_fence(__ATOMIC_RELEASE);
		}
	}
}
239
// pthread_sigmask wrapper : unblock a single signal for the calling thread
static inline void signal_unblock( int sig ) {
	sigset_t mask;
	sigemptyset( &mask );
	sigaddset( &mask, sig );

	// pthread_sigmask returns 0 on success or a positive error number;
	// it never returns -1, so the check must be against != 0.
	if ( pthread_sigmask( SIG_UNBLOCK, &mask, 0p ) != 0 ) {
		abort( "internal error, pthread_sigmask" );
	}
}
250
// pthread_sigmask wrapper : block a single signal for the calling thread
static inline void signal_block( int sig ) {
	sigset_t mask;
	sigemptyset( &mask );
	sigaddset( &mask, sig );

	// pthread_sigmask returns 0 on success or a positive error number;
	// it never returns -1, so the check must be against != 0.
	if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) != 0 ) {
		abort( "internal error, pthread_sigmask" );
	}
}
261
// kill wrapper : signal a processor
// Queues SIGUSR1 carrying PREEMPT_NORMAL at the processor's kernel thread
static void preempt( processor * this ) {
	sigval_t value = { PREEMPT_NORMAL };
	// NOTE(review): pthread_sigqueue's return value is ignored -- confirm failure here is benign
	pthread_sigqueue( this->kernel_thread, SIGUSR1, value );
}
267
// reserved for future use
// Stub called by tick_preemption for non-kernel (thread) alarms; currently does nothing
static void timeout( thread_desc * this ) {
	//TODO : implement waking threads blocked on a timeout
}
272
// KERNEL ONLY
// Check if a CtxSwitch signal handler should defer
// If true : preemption is safe
// If false : preemption is unsafe and marked as pending
static inline bool preemption_ready() {
	// Check if preemption is safe : interrupts enabled and no handler already in progress
	bool ready = kernelTLS.preemption_state.enabled && ! kernelTLS.preemption_state.in_progress;

	// Adjust the pending flag accordingly, so a deferred preemption is polled later
	kernelTLS.this_processor->pending_preemption = !ready;
	return ready;
}
285
286//=============================================================================================
287// Kernel Signal Startup/Shutdown logic
288//=============================================================================================
289
// Startup routine to activate preemption
// Called from kernel_startup
void kernel_start_preemption() {
	__cfaabi_dbg_print_safe( "Kernel : Starting preemption\n" );

	// Start with preemption disabled until ready
	kernelTLS.preemption_state.enabled = false;
	kernelTLS.preemption_state.disable_count = 1;

	// Initialize the event kernel in its reserved static storage
	event_kernel = (event_kernel_t *)&storage_event_kernel;
	(*event_kernel){};

	// Setup proper signal handlers
	__cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // CtxSwitch handler

	// SIGALRM stays blocked in regular threads : only the alarm thread waits for it
	signal_block( SIGALRM );

	// Spawn the dedicated alarm thread; its stack is kept so it can be freed at shutdown
	alarm_stack = create_pthread( &alarm_thread, alarm_loop, 0p );
}
310
// Shutdown routine to deactivate preemption
// Called from kernel_shutdown
void kernel_stop_preemption() {
	__cfaabi_dbg_print_safe( "Kernel : Preemption stopping\n" );

	// Block all signals since we are already shutting down.
	// Use pthread_sigmask (not sigprocmask, whose behaviour is unspecified in
	// multi-threaded programs) for correctness and consistency with the rest of this file.
	sigset_t mask;
	sigfillset( &mask );
	if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) != 0 ) {
		abort( "internal error, pthread_sigmask" );
	}

	// Notify the alarm thread of the shutdown (SI_QUEUE makes alarm_loop exit)
	sigval val = { 1 };
	pthread_sigqueue( alarm_thread, SIGALRM, val );

	// Wait for the alarm thread to finish, then release its stack
	pthread_join( alarm_thread, 0p );
	free( alarm_stack );

	// Preemption is now fully stopped

	__cfaabi_dbg_print_safe( "Kernel : Preemption stopped\n" );
}
334
// Raii ctor/dtor for the preemption_scope
// Used by threads to control when they want to receive preemption signals
void ?{}( preemption_scope & this, processor * proc ) {
	// Construct the alarm node with zero time / zero period; update_preemption sets the real rate
	(this.alarm){ proc, (Time){ 0 }, 0`s };
	this.proc = proc;
	this.proc->preemption_alarm = &this.alarm;

	// Start preempting at the cluster's configured rate
	update_preemption( this.proc, this.proc->cltr->preemption_rate );
}
344
// Destructor : disable interrupts and cancel this processor's preemption alarm
void ^?{}( preemption_scope & this ) {
	disable_interrupts();

	// A duration of 0 unregisters the alarm (see update_preemption)
	update_preemption( this.proc, 0`s );
}
350
351//=============================================================================================
352// Kernel Signal Handlers
353//=============================================================================================
354
// Context switch signal handler
// Receives SIGUSR1 signal and causes the current thread to yield
static void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) {
	__cfaabi_dbg_debug_do( last_interrupt = (void *)(cxt->uc_mcontext.CFA_REG_IP); )

	// SKULLDUGGERY: if a thread creates a processor and then immediately deletes it,
	// the interrupt that is supposed to force the kernel thread to preempt might arrive
	// before the kernel thread has even started running. When that happens, an interrupt
	// with a null 'this_processor' will be caught : just ignore it.
	if(! kernelTLS.this_processor ) return;

	// Sanity check the signal payload : only PREEMPT_NORMAL / PREEMPT_TERMINATE are expected
	choose(sfp->si_value.sival_int) {
		case PREEMPT_NORMAL : ;// Normal case, nothing to do here
		case PREEMPT_TERMINATE: verify( __atomic_load_n( &kernelTLS.this_processor->do_terminate, __ATOMIC_SEQ_CST ) );
		default:
			abort( "internal error, signal value is %d", sfp->si_value.sival_int );
	}

	// Check if it is safe to preempt here; if not, preemption_ready marked it as pending
	if( !preemption_ready() ) { return; }

	__cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", kernelTLS.this_processor, kernelTLS.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) );

	// Sync flag : prevent recursive calls to the signal handler
	kernelTLS.preemption_state.in_progress = true;

	// Clear sighandler mask before context switching.
	#if GCC_VERSION > 50000
	static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" );
	#endif
	// NOTE(review): pthread_sigmask returns an error number, not -1/errno, so this
	// check can never fire -- confirm and compare against != 0 in a separate fix.
	if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), 0p ) == -1 ) {
		abort( "internal error, sigprocmask" );
	}

	// TODO: this should go in finish action
	// Clear the in progress flag
	kernelTLS.preemption_state.in_progress = false;

	// Preemption can occur here

	force_yield( __ALARM_PREEMPTION ); // Do the actual CtxSwitch
}
397
// Main of the alarm thread
// Waits on SIGALRM and sends SIGUSR1 to whichever processor needs it
static void * alarm_loop( __attribute__((unused)) void * args ) {
	// Block all signals so delivery is controlled via sigwaitinfo below
	sigset_t mask;
	sigfillset(&mask);
	// NOTE(review): pthread_sigmask returns an error number, not -1 -- this
	// check can never fire; confirm and compare against != 0 in a separate fix.
	if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
		abort( "internal error, pthread_sigmask" );
	}

	// Reuse the mask to wait only for SIGALRM
	sigemptyset( &mask );
	sigaddset( &mask, SIGALRM );

	// Main loop
	while( true ) {
		// Wait for a sigalrm
		siginfo_t info;
		int sig = sigwaitinfo( &mask, &info );

		if( sig < 0 ) {
			// Error!
			int err = errno;
			switch( err ) {
				case EAGAIN :
				case EINTR :
					{__cfaabi_dbg_print_buffer_decl( " KERNEL: Spurious wakeup %d.\n", err );}
					continue;	// spurious wakeup : wait again
				case EINVAL :
					abort( "Timeout was invalid." );
				default:
					abort( "Unhandled error %d", err);
			}
		}

		// If another signal arrived something went wrong
		assertf(sig == SIGALRM, "Kernel Internal Error, sigwait: Unexpected signal %d (%d : %d)\n", sig, info.si_code, info.si_value.sival_int);

		// __cfaabi_dbg_print_safe( "Kernel : Caught alarm from %d with %d\n", info.si_code, info.si_value.sival_int );
		// Switch on the code (a.k.a. the sender) to decide what to do
		switch( info.si_code )
		{
		// Timers can apparently be marked as sent from the kernel
		// In either case, tick preemption
		case SI_TIMER:
		case SI_KERNEL:
			// __cfaabi_dbg_print_safe( "Kernel : Preemption thread tick\n" );
			lock( event_kernel->lock __cfaabi_dbg_ctx2 );
			tick_preemption();
			unlock( event_kernel->lock );
			break;
		// Signal was not sent by the kernel but by another thread
		case SI_QUEUE:
			// For now, other threads only signal the alarm thread to shut it down
			// If this needs to change use info.si_value and handle the case here
			goto EXIT;
		// Any other si_code is silently ignored and the loop simply waits again
		}
	}

EXIT:
	__cfaabi_dbg_print_safe( "Kernel : Preemption thread stopping\n" );
	return 0p;
}
460
461//=============================================================================================
462// Kernel Signal Debug
463//=============================================================================================
464
// Debug sanity check : verify the calling context is actually preemptable,
// i.e. preemption is enabled and the signal mask matches expectations.
void __cfaabi_check_preemption() {
	if( !kernelTLS.preemption_state.enabled ) { abort("Preemption should be ready"); }

	// Snapshot the current signal mask of this thread
	sigset_t oldset;
	int ret = pthread_sigmask(0, 0p, &oldset);
	if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }

	// SIGUSR1 (preemption) must be deliverable
	ret = sigismember(&oldset, SIGUSR1);
	if(ret < 0) { abort("ERROR sigismember returned %d", ret); }
	if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }

	// SIGALRM must stay blocked : only the alarm thread handles it
	ret = sigismember(&oldset, SIGALRM);
	if(ret < 0) { abort("ERROR sigismember returned %d", ret); }
	if(ret == 0) { abort("ERROR SIGALRM is enabled"); }

	// SIGTERM must be deliverable
	ret = sigismember(&oldset, SIGTERM);
	if(ret < 0) { abort("ERROR sigismember returned %d", ret); }
	if(ret == 1) { abort("ERROR SIGTERM is disabled"); }
}
486
#ifdef __CFA_WITH_VERIFY__
// Debug helper : the runtime kernel executes with interrupts disabled,
// so "in kernel" is equivalent to preemption being disabled
bool __cfaabi_dbg_in_kernel() {
	return !kernelTLS.preemption_state.enabled;
}
#endif
492
493// Local Variables: //
494// mode: c //
495// tab-width: 4 //
496// End: //
Note: See TracBrowser for help on using the repository browser.