source: libcfa/src/concurrency/kernel.cfa@ 5fe7322

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.cfa --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Fri Aug 14 15:23:00 2020
// Update Count     : 69
//

#define __cforall_thread__
// #define __CFA_DEBUG_PRINT_RUNTIME_CORE__

// C Includes
#include <errno.h>
#include <stdio.h>
#include <signal.h>
#include <unistd.h>

// CFA Includes
#include "kernel_private.hfa"
#include "preemption.hfa"

// Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"


//-----------------------------------------------------------------------------
// Some assembly required
#if defined( __i386 )
	// mxcr : SSE Status and Control bits (control bits are preserved across function calls)
	// fcw  : X87 FPU control word (preserved across function calls)
	#define __x87_store \
		uint32_t __mxcr; \
		uint16_t __fcw; \
		__asm__ volatile ( \
			"stmxcsr %0\n" \
			"fnstcw %1\n" \
			: "=m" (__mxcr),\
			  "=m" (__fcw) \
		)

	#define __x87_load \
		__asm__ volatile ( \
			"fldcw %1\n" \
			"ldmxcsr %0\n" \
			:: "m" (__mxcr),\
			   "m" (__fcw) \
		)

#elif defined( __x86_64 )
	#define __x87_store \
		uint32_t __mxcr; \
		uint16_t __fcw; \
		__asm__ volatile ( \
			"stmxcsr %0\n" \
			"fnstcw %1\n" \
			: "=m" (__mxcr),\
			  "=m" (__fcw) \
		)

	#define __x87_load \
		__asm__ volatile ( \
			"fldcw %1\n" \
			"ldmxcsr %0\n" \
			:: "m" (__mxcr),\
			   "m" (__fcw) \
		)

#elif defined( __ARM_ARCH )
	// FPCR/FPSR : AArch64 floating-point control and status registers; the macro names are
	// kept for uniformity with the x86 variants.  stp/ldp move two 64-bit registers (16 bytes),
	// so the buffer must be two uint64_t, and the load takes the buffer as an input operand.
	#define __x87_store \
		uint64_t __fpcntl[2]; \
		__asm__ volatile ( \
			"mrs x9, FPCR\n" \
			"mrs x10, FPSR\n" \
			"stp x9, x10, %0\n" \
			: "=m" (__fpcntl) : : "x9", "x10" \
		)

	#define __x87_load \
		__asm__ volatile ( \
			"ldp x9, x10, %0\n" \
			"msr FPSR, x10\n" \
			"msr FPCR, x9\n" \
			: : "m" (__fpcntl) : "x9", "x10" \
		)

#else
	#error unknown hardware architecture
#endif
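
// A minimal pairing sketch (illustrative only; it mirrors the use in returnToKernel below):
// __x87_store must run before switching away and __x87_load after switching back, in the same
// scope, because the store macro declares the locals that the load macro reads.
//
// {
// 	__x87_store;                                      // save FP control/status state
// 	__cfactx_switch( &from->context, &to->context );  // switch to another context
// 	__x87_load;                                       // restore FP control/status state
// }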

extern $thread * mainThread;
extern processor * mainProcessor;

//-----------------------------------------------------------------------------
// Kernel Scheduling logic
static $thread * __next_thread(cluster * this);
static $thread * __next_thread_slow(cluster * this);
static void __run_thread(processor * this, $thread * dst);
static void __wake_one(struct __processor_id_t * id, cluster * cltr);

static void push  (__cluster_idles & idles, processor & proc);
static void remove(__cluster_idles & idles, processor & proc);
static [unsigned idle, unsigned total, * processor] query( & __cluster_idles idles );


//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
// Main of the processor contexts
void main(processorCtx_t & runner) {
	// Because of a bug, we couldn't initialize the seed on construction
	// Do it here
	kernelTLS.rand_seed ^= rdtscl();
	kernelTLS.ready_rng.fwd_seed = 25214903917_l64u * (rdtscl() ^ (uintptr_t)&runner);
	__tls_rand_advance_bck();

	processor * this = runner.proc;
	verify(this);

	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
	#if !defined(__CFA_NO_STATISTICS__)
		if( this->print_halts ) {
			__cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->id, this->name, (void*)this);
		}
	#endif

	{
		// Setup preemption data
		preemption_scope scope = { this };

		__cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);

		$thread * readyThread = 0p;
		MAIN_LOOP:
		for() {
			// Try to get the next thread
			readyThread = __next_thread( this->cltr );

			if( !readyThread ) {
				readyThread = __next_thread_slow( this->cltr );
			}

			HALT:
			if( !readyThread ) {
				// Don't block if we are done
				if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;

				#if !defined(__CFA_NO_STATISTICS__)
					__tls_stats()->ready.sleep.halts++;
				#endif

				// Push self to idle stack
				push(this->cltr->idles, * this);

				// Confirm the ready-queue is empty
				readyThread = __next_thread_slow( this->cltr );
				if( readyThread ) {
					// A thread was found, cancel the halt
					remove(this->cltr->idles, * this);

					#if !defined(__CFA_NO_STATISTICS__)
						__tls_stats()->ready.sleep.cancels++;
					#endif

					// continue the main loop
					break HALT;
				}

				#if !defined(__CFA_NO_STATISTICS__)
					if(this->print_halts) {
						__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->id, rdtscl());
					}
				#endif

				wait( this->idle );

				#if !defined(__CFA_NO_STATISTICS__)
					if(this->print_halts) {
						__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->id, rdtscl());
					}
				#endif

				// We were woken up, remove self from idle
				remove(this->cltr->idles, * this);

				// DON'T just proceed, start looking again
				continue MAIN_LOOP;
			}

			/* paranoid */ verify( readyThread );

			// We found a thread, run it
			__run_thread(this, readyThread);

			// Are we done?
			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
		}

		__cfadbg_print_safe(runtime_core, "Kernel : core %p stopping\n", this);
	}

	V( this->terminated );

	if(this == mainProcessor) {
		// HACK : the coroutine context switch expects this_thread to be set,
		// and it makes sense for it to be set in all other cases except here;
		// fake it
		kernelTLS.this_thread = mainThread;
	}

	__cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
}
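
// Illustrative sketch of the halt protocol in MAIN_LOOP above (hypothetical helper, not called
// anywhere): the processor must publish itself on the idle list *before* re-checking the ready
// queue; a thread scheduled between the check and wait() would otherwise never trigger a wake-up.
static inline $thread * __halt_example( processor * this ) {
	push( this->cltr->idles, *this );                  // 1. publish: now visible to __wake_one
	$thread * thrd = __next_thread_slow( this->cltr );
	if( thrd ) {                                       // 2. re-check after publishing
		remove( this->cltr->idles, *this );            //    work appeared: cancel the halt
		return thrd;
	}
	wait( this->idle );                                // 3. safe to sleep: later schedules see us
	remove( this->cltr->idles, *this );
	return 0p;                                         // woken: the caller searches again
}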

static int * __volatile_errno() __attribute__((noinline));
static int * __volatile_errno() { asm(""); return &errno; }

// KERNEL ONLY
// __run_thread runs a thread by context switching
// from the processor coroutine to the target thread
static void __run_thread(processor * this, $thread * thrd_dst) {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	/* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
	/* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next );
	__builtin_prefetch( thrd_dst->context.SP );

	$coroutine * proc_cor = get_coroutine(this->runner);

	// Update global state
	kernelTLS.this_thread = thrd_dst;

	// set state of processor coroutine to inactive
	verify(proc_cor->state == Active);
	proc_cor->state = Blocked;

	// Actually run the thread
	RUNNING: while(true) {
		thrd_dst->preempted = __NO_PREEMPTION;
		thrd_dst->state = Active;

		__cfaabi_dbg_debug_do(
			thrd_dst->park_stale   = true;
			thrd_dst->unpark_stale = true;
		)

		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
		/* paranoid */ verify( kernelTLS.this_thread == thrd_dst );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor

		// set context switch to the thread that the processor is executing
		verify( thrd_dst->context.SP );
		__cfactx_switch( &proc_cor->context, &thrd_dst->context );
		// when __cfactx_switch returns we are back in the processor coroutine

		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
		/* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
		/* paranoid */ verify( kernelTLS.this_thread == thrd_dst );
		/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );


		// We just finished running a thread, there are a few things that could have happened.
		// 1 - Regular case : the thread has blocked and no one has scheduled it yet.
		// 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
		// 3 - Preempted
		// In case 1, we may have won a race so we can't write to the state again.
		// In case 2, we lost the race so we now own the thread.

		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
			// The thread was preempted, reschedule it and reset the flag
			__schedule_thread( (__processor_id_t*)this, thrd_dst );
			break RUNNING;
		}

		if(unlikely(thrd_dst->state == Halted)) {
			// The thread has halted, it should never be scheduled/run again
			// We may need to wake someone up here since the destroyer may be parked waiting for it
			unpark( this->destroyer __cfaabi_dbg_ctx2 );
			this->destroyer = 0p;
			break RUNNING;
		}

		/* paranoid */ verify( thrd_dst->state == Active );
		thrd_dst->state = Blocked;

		// set state of processor coroutine to active and the thread to inactive
		int old_ticket = __atomic_fetch_sub(&thrd_dst->ticket, 1, __ATOMIC_SEQ_CST);
		__cfaabi_dbg_debug_do( thrd_dst->park_result = old_ticket; )
		switch(old_ticket) {
			case 1:
				// This is case 1, the regular case, nothing more is needed
				break RUNNING;
			case 2:
				// This is case 2, the racy case, someone tried to run this thread before it finished blocking
				// In this case, just run it again.
				continue RUNNING;
			default:
				// This makes no sense, something is wrong, abort
				abort();
		}
	}

	// Just before returning to the processor, set the processor coroutine to active
	proc_cor->state = Active;
	kernelTLS.this_thread = 0p;

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
}
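
// Illustrative walk-through of the ticket protocol above (commentary; the mechanism is the one
// already implemented in __run_thread and __unpark): a running thread holds ticket == 1, the
// parking side does __atomic_fetch_sub(&ticket, 1, ...) and the unparking side does
// __atomic_fetch_add(&ticket, 1, ...).  The two possible interleavings:
//
//   park first  : sub sees 1 -> ticket 0, the thread stays blocked;
//                 add sees 0 -> ticket 1, the unparker schedules the thread (case 0 in __unpark).
//   unpark first: add sees 1 -> ticket 2, the unparker does nothing (case 1 in __unpark);
//                 sub sees 2 -> ticket 1, the thread never blocks and reruns (case 2 above).
//
// Either way the ticket returns to 1 and exactly one party is responsible for the rerun.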

// KERNEL_ONLY
void returnToKernel() {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	$coroutine * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
	$thread * thrd_src = kernelTLS.this_thread;

	#if !defined(__CFA_NO_STATISTICS__)
		struct processor * last_proc = kernelTLS.this_processor;
	#endif

	// Run the thread on this processor
	{
		int local_errno = *__volatile_errno();
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_store;
		#endif
		verify( proc_cor->context.SP );
		__cfactx_switch( &thrd_src->context, &proc_cor->context );
		#if defined( __i386 ) || defined( __x86_64 )
			__x87_load;
		#endif
		*__volatile_errno() = local_errno;
	}

	#if !defined(__CFA_NO_STATISTICS__)
		if(last_proc != kernelTLS.this_processor) {
			__tls_stats()->ready.threads.migration++;
		}
	#endif

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );
	/* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit), "ERROR : Returning $thread %p has been corrupted.\n StackPointer too large.\n", thrd_src );
}
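
// Commentary sketch: errno is saved and restored around the context switch above because a
// parked thread may resume on a different underlying kernel thread, which has its own errno;
// the same reasoning applies to the FP control/status words saved by __x87_store/__x87_load.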

//-----------------------------------------------------------------------------
// Scheduler routines
// KERNEL ONLY
void __schedule_thread( struct __processor_id_t * id, $thread * thrd ) {
	/* paranoid */ verify( thrd );
	/* paranoid */ verify( thrd->state != Halted );
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
	/* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
	                  "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
	                  "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
	/* paranoid */ #endif
	/* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );

	if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

	ready_schedule_lock  ( id );
	push( thrd->curr_cluster, thrd );
	__wake_one(id, thrd->curr_cluster);
	ready_schedule_unlock( id );

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
}

// KERNEL ONLY
static inline $thread * __next_thread(cluster * this) with( *this ) {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

	ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
	$thread * thrd = pop( this );
	ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	return thrd;
}

// KERNEL ONLY
static inline $thread * __next_thread_slow(cluster * this) with( *this ) {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

	ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
	$thread * thrd = pop_slow( this );
	ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	return thrd;
}

// KERNEL ONLY unpark without disabling interrupts
void __unpark( struct __processor_id_t * id, $thread * thrd __cfaabi_dbg_ctx_param2 ) {
	// record activity
	__cfaabi_dbg_record_thrd( *thrd, false, caller );

	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
	__cfaabi_dbg_debug_do( thrd->unpark_result = old_ticket; thrd->unpark_state = thrd->state; )
	switch(old_ticket) {
		case 1:
			// Wake won the race, the thread will reschedule/rerun itself
			break;
		case 0:
			/* paranoid */ verify( thrd->preempted == __NO_PREEMPTION );
			/* paranoid */ verify( thrd->state == Blocked );

			// Wake lost the race, the thread is fully blocked, schedule it
			__schedule_thread( id, thrd );
			break;
		default:
			// This makes no sense, something is wrong, abort
			abort();
	}
}

void unpark( $thread * thrd __cfaabi_dbg_ctx_param2 ) {
	if( !thrd ) return;

	disable_interrupts();
	__unpark( (__processor_id_t*)kernelTLS.this_processor, thrd __cfaabi_dbg_ctx_fwd2 );
	enable_interrupts( __cfaabi_dbg_ctx );
}

void park( __cfaabi_dbg_ctx_param ) {
	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
	disable_interrupts();
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	/* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION );

	// record activity
	__cfaabi_dbg_record_thrd( *kernelTLS.this_thread, true, caller );

	returnToKernel();

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	enable_interrupts( __cfaabi_dbg_ctx );
	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
}
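
// Illustrative usage sketch of the park/unpark pair (hypothetical one-shot event, not part of
// this file; it assumes the waiter registers itself before the signaller runs).  The ticket
// protocol makes the pairing safe even when signal() runs before wait() has finished parking.
//
// struct oneshot { $thread * volatile waiter; };
// void wait( oneshot & this ) {
// 	__atomic_store_n( &this.waiter, kernelTLS.this_thread, __ATOMIC_SEQ_CST );
// 	park( __cfaabi_dbg_ctx );                              // blocks until signal() unparks us
// }
// void signal( oneshot & this ) {
// 	$thread * t = __atomic_exchange_n( &this.waiter, 0p, __ATOMIC_SEQ_CST );
// 	unpark( t __cfaabi_dbg_ctx2 );                         // unpark(0p) is a safe no-op
// }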

// KERNEL ONLY
void __leave_thread() {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	returnToKernel();
	abort();
}

// KERNEL ONLY
bool force_yield( __Preemption_Reason reason ) {
	/* paranoid */ verify( kernelTLS.preemption_state.enabled );
	disable_interrupts();
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

	$thread * thrd = kernelTLS.this_thread;
	/* paranoid */ verify(thrd->state == Active);

	// SKULLDUGGERY: It is possible that we are preempting this thread just before it was going
	// to park itself.  If that is the case and it is already using the intrusive fields, then
	// we can't use them to preempt the thread; abandon the preemption instead.
	bool preempted = false;
	if(thrd->link.next == 0p) {
		preempted = true;
		thrd->preempted = reason;
		returnToKernel();
	}

	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	enable_interrupts_noPoll();
	/* paranoid */ verify( kernelTLS.preemption_state.enabled );

	return preempted;
}

//=============================================================================================
// Kernel Idle Sleep
//=============================================================================================
// Wake a sleeping processor from the front of the idle list, if there are any
static void __wake_one(struct __processor_id_t * id, cluster * this) {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	/* paranoid */ verify( ready_schedule_islocked( id ) );

	// Check if there is a sleeping processor
	processor * p;
	unsigned idle;
	unsigned total;
	[idle, total, p] = query(this->idles);

	// If no one is sleeping, we are done
	if( idle == 0 ) return;

	// We found a processor, wake it up
	post( p->idle );

	#if !defined(__CFA_NO_STATISTICS__)
		__tls_stats()->ready.sleep.wakes++;
	#endif

	/* paranoid */ verify( ready_schedule_islocked( id ) );
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

	return;
}
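
// Commentary sketch: query() is read-only and racy by design; a missed wake-up here is benign
// because a sleeping processor re-checks the ready queue after pushing itself onto the idle
// list (see MAIN_LOOP), so at worst a processor sleeps one wait/post cycle longer than needed.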

// Unconditionally wake a processor
void __wake_proc(processor * this) {
	__cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);

	disable_interrupts();
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	post( this->idle );
	enable_interrupts( __cfaabi_dbg_ctx );
}

static void push  (__cluster_idles & this, processor & proc) {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	lock( this );
	this.idle++;
	/* paranoid */ verify( this.idle <= this.total );

	insert_first(this.list, proc);
	unlock( this );
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
}

static void remove(__cluster_idles & this, processor & proc) {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	lock( this );
	this.idle--;
	/* paranoid */ verify( this.idle >= 0 );

	remove(proc);
	unlock( this );
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
}

static [unsigned idle, unsigned total, * processor] query( & __cluster_idles this ) {
	for() {
		uint64_t l = __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST);
		if( 1 == (l % 2) ) { Pause(); continue; }
		unsigned idle    = this.idle;
		unsigned total   = this.total;
		processor * proc = &this.list`first;
		// This fence should be unnecessary, but gcc-8 and older incorrectly reorder the reads without it
		asm volatile("": : :"memory");
		if(l != __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST)) { Pause(); continue; }
		return [idle, total, proc];
	}
}
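
// Illustrative sketch of the writer side of the seqlock-style protocol that query() reads
// (hypothetical helpers; the actual lock/unlock for __cluster_idles live elsewhere in the
// runtime): writers make the counter odd while mutating, so readers retry when they observe
// an odd value or see the counter change across their reads.
//
// static void write_begin( __cluster_idles & this ) {
// 	__atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );  // odd: mutation in progress
// }
// static void write_end( __cluster_idles & this ) {
// 	__atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );  // even again: new version published
// }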

//=============================================================================================
// Unexpected Terminating logic
//=============================================================================================
static __spinlock_t kernel_abort_lock;
static bool kernel_abort_called = false;

void * kernel_abort(void) __attribute__ ((__nothrow__)) {
	// abort cannot be recursively entered by the same or different processors because all signal handlers return when
	// the globalAbort flag is true.
	lock( kernel_abort_lock __cfaabi_dbg_ctx2 );

	// first task to abort ?
	if ( kernel_abort_called ) {			// not first task to abort ?
		unlock( kernel_abort_lock );

		sigset_t mask;
		sigemptyset( &mask );
		sigaddset( &mask, SIGALRM );		// block SIGALRM signals
		sigaddset( &mask, SIGUSR1 );		// block SIGUSR1 signals
		sigsuspend( &mask );				// block the processor to prevent further damage during abort
		_exit( EXIT_FAILURE );				// if processor unblocks before it is killed, terminate it
	}
	else {
		kernel_abort_called = true;
		unlock( kernel_abort_lock );
	}

	return kernelTLS.this_thread;
}

void kernel_abort_msg( void * kernel_data, char * abort_text, int abort_text_size ) {
	$thread * thrd = ( $thread * ) kernel_data;

	if(thrd) {
		int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );

		if ( &thrd->self_cor != thrd->curr_cor ) {
			len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
			__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
		}
		else {
			__cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
		}
	}
	else {
		int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
	}
}

int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
	return get_coroutine(kernelTLS.this_thread) == get_coroutine(mainThread) ? 4 : 2;
}

static __spinlock_t kernel_debug_lock;

extern "C" {
	void __cfaabi_bits_acquire() {
		lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
	}

	void __cfaabi_bits_release() {
		unlock( kernel_debug_lock );
	}
}

//=============================================================================================
// Kernel Utilities
//=============================================================================================
//-----------------------------------------------------------------------------
// Locks
void  ?{}( semaphore & this, int count = 1 ) {
	(this.lock){};
	this.count = count;
	(this.waiting){};
}
void ^?{}(semaphore & this) {}

bool P(semaphore & this) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );
	count -= 1;
	if ( count < 0 ) {
		// queue current task
		append( waiting, kernelTLS.this_thread );

		// atomically release spin lock and block
		unlock( lock );
		park( __cfaabi_dbg_ctx );
		return true;
	}
	else {
		unlock( lock );
		return false;
	}
}

bool V(semaphore & this) with( this ) {
	$thread * thrd = 0p;
	lock( lock __cfaabi_dbg_ctx2 );
	count += 1;
	if ( count <= 0 ) {
		// remove task at head of waiting list
		thrd = pop_head( waiting );
	}

	unlock( lock );

	// make new owner
	unpark( thrd __cfaabi_dbg_ctx2 );

	return thrd != 0p;
}

bool V(semaphore & this, unsigned diff) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );
	// release at most the number of blocked threads, and at most diff of them
	int release = min(-count, (int)diff);
	count += diff;
	for(release) {
		unpark( pop_head( waiting ) __cfaabi_dbg_ctx2 );
	}

	unlock( lock );

	return release > 0;
}
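
// Illustrative usage sketch (hypothetical names): the semaphore doubles as a mutual-exclusion
// lock when constructed with count 1, or as a signalling primitive when constructed with 0.
//
// semaphore mtx   = { 1 };     // binary semaphore: P = lock, V = unlock
// semaphore ready = { 0 };     // signalling: consumer blocks until producer posts
//
// void consumer() { P( ready );  /* data is available here */ }
// void producer() { /* publish data */  V( ready ); }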

//-----------------------------------------------------------------------------
// Debug
__cfaabi_dbg_debug_do(
	extern "C" {
		void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]) {
			this.prev_name = prev_name;
			this.prev_thrd = kernelTLS.this_thread;
		}

		void __cfaabi_dbg_record_thrd($thread & this, bool park, const char prev_name[]) {
			if(park) {
				this.park_caller = prev_name;
				this.park_stale  = false;
			}
			else {
				this.unpark_caller = prev_name;
				this.unpark_stale  = false;
			}
		}
	}
)

//-----------------------------------------------------------------------------
// Debug
bool threading_enabled(void) __attribute__((const)) {
	return true;
}

//-----------------------------------------------------------------------------
// Statistics
#if !defined(__CFA_NO_STATISTICS__)
	void print_halts( processor & this ) {
		this.print_halts = true;
	}
#endif
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //