//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// signal.c --
//
// Author           : Thierry Delisle
// Created On       : Mon Jun 5 14:20:42 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Jun  5 17:35:49 2018
// Update Count     : 37
//

#include "preemption.h"
#include <assert.h>

extern "C" {
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
}

#include "bits/signal.h"

#if !defined(__CFA_DEFAULT_PREEMPTION__)
#define __CFA_DEFAULT_PREEMPTION__ 10`ms
#endif

// Default time-slice between preemption events.
// Declared weak so an application can override it with its own definition.
Duration default_preemption() __attribute__((weak)) {
	return __CFA_DEFAULT_PREEMPTION__;
}

// FwdDeclarations : timeout handlers
static void preempt( processor   * this );
static void timeout( thread_desc * this );

// FwdDeclarations : Signal handlers
void sigHandler_ctxSwitch( __CFA_SIGPARMS__ );
void sigHandler_segv     ( __CFA_SIGPARMS__ );
void sigHandler_ill      ( __CFA_SIGPARMS__ );
void sigHandler_fpe      ( __CFA_SIGPARMS__ );
void sigHandler_abort    ( __CFA_SIGPARMS__ );

// FwdDeclarations : alarm thread main
void * alarm_loop( __attribute__((unused)) void * args );

// Machine specific register name
#if   defined( __i386 )
#define CFA_REG_IP gregs[REG_EIP]
#elif defined( __x86_64 )
#define CFA_REG_IP gregs[REG_RIP]
#elif defined( __ARM_ARCH )
#define CFA_REG_IP arm_pc
#else
#error unknown hardware architecture
#endif

KERNEL_STORAGE(event_kernel_t, event_kernel);         // private storage for event kernel
event_kernel_t * event_kernel;                        // kernel public handle to event kernel
static pthread_t alarm_thread;                        // pthread handle to alarm thread

// Constructor : initialize the pending-alarm list and the lock protecting it
void ?{}(event_kernel_t & this) with( this ) {
	alarms{};
	lock{};
}

enum {
	PREEMPT_NORMAL    = 0,
	PREEMPT_TERMINATE = 1,
};

//=============================================================================================
// Kernel Preemption logic
//=============================================================================================

// Remove and return the earliest expired alarm, or NULL if none has expired.
// Alarms are kept sorted, so only the head needs to be examined.
static inline alarm_node_t * get_expired( alarm_list_t * alarms, Time currtime ) {
	alarm_node_t * head = alarms->head;         // Peek at the earliest pending alarm
	if( !head ) return NULL;                    // Nothing pending
	if( head->alarm >= currtime ) return NULL;  // Earliest alarm has not expired yet
	return pop(alarms);                         // Otherwise detach and return the head
}

// Tick one frame of the Discrete Event Simulation for alarms:
// fire every expired alarm, re-arm periodic ones, then reset the hardware timer
// for the next earliest alarm still pending.
void tick_preemption() {
	alarm_node_t * node = NULL;                     // Used in the while loop but cannot be declared in the while condition
	alarm_list_t * alarms = &event_kernel->alarms;  // Local copy for ease of reading
	Time currtime = __kernel_get_time();			// Check current time once so everything "happens at once"

	// Loop through everything that has expired
	while( node = get_expired( alarms, currtime ) ) {
		// __cfaabi_dbg_print_buffer_decl( " KERNEL: preemption tick.\n" );

		// Kernel alarms preempt a processor; otherwise it is a thread timeout
		if( node->kernel_alarm ) {
			preempt( node->proc );
		}
		else {
			timeout( node->thrd );
		}

		// Check if this is a periodic alarm
		Duration period = node->period;
		if( period > 0 ) {
			// __cfaabi_dbg_print_buffer_local( " KERNEL: alarm period is %lu.\n", period.tv );
			node->alarm = currtime + period;    // Alarm is periodic, re-arm relative to the cached current time
			insert( alarms, node );             // Reinsert the node for the next time it triggers
		}
		else {
			node->set = false;                  // Node is one-shot, just mark it as not pending
		}
	}

	// If there are still alarms pending, reset the timer
	if( alarms->head ) {
		__cfaabi_dbg_print_buffer_decl( " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
		Duration delta = alarms->head->alarm - currtime;
		Duration caped = max(delta, 50`us);   // Clamp from below so the timer is never armed with a zero/negative delay
		// itimerval tim  = { caped };
		// __cfaabi_dbg_print_buffer_local( "    Values are %lu, %lu, %lu %lu.\n", delta.tv, caped.tv, tim.it_value.tv_sec, tim.it_value.tv_usec);

		__kernel_set_timer( caped );
	}
}

// Update the preemption alarm of a processor and notify interested parties.
// duration > 0 arms (or re-arms) periodic preemption; duration == 0 disables it.
void update_preemption( processor * this, Duration duration ) {
	alarm_node_t * alarm = this->preemption_alarm;

	// Alarm not yet registered : arm it with the requested period
	if ( duration > 0 && ! alarm->set ) {
		alarm->alarm = __kernel_get_time() + duration;
		alarm->period = duration;
		register_self( alarm );
	}
	// Zero duration but alarm is set : disable preemption for this processor
	else if ( duration == 0 && alarm->set ) {
		unregister_self( alarm );
		alarm->alarm = 0;
		alarm->period = 0;
	}
	// Period is different from the previous one : re-register with the new period
	else if ( duration > 0 && alarm->period != duration ) {
		unregister_self( alarm );
		alarm->alarm = __kernel_get_time() + duration;
		alarm->period = duration;
		register_self( alarm );
	}
}

//=============================================================================================
// Kernel Signal Tools
//=============================================================================================

__cfaabi_dbg_debug_do( static thread_local void * last_interrupt = 0; )

extern "C" {
	// Disable interrupts by incrementing the per-thread disable counter.
	// Safe to call recursively; interrupts are re-enabled only when the
	// matching number of enable_interrupts calls have been made.
	void disable_interrupts() {
		with( kernelTLS.preemption_state ) {
			#if GCC_VERSION > 50000
			static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
			#endif

			// Set enabled flag to false
			// should be atomic to avoid preemption in the middle of the operation.
			// use memory order RELAXED since there are no inter-thread ordering requirements on this variable
			__atomic_store_n(&enabled, false, __ATOMIC_RELAXED);

			// Signal the compiler that a fence is needed but only for signal handlers
			__atomic_signal_fence(__ATOMIC_ACQUIRE);

			__attribute__((unused)) unsigned short new_val = disable_count + 1;
			disable_count = new_val;
			verify( new_val < 65_000u );              // If this triggers someone is disabling interrupts without enabling them
		}
	}

	// Enable interrupts by decrementing the counter
	// If counter reaches 0, execute any pending CtxSwitch
	void enable_interrupts( __cfaabi_dbg_ctx_param ) {
		processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
		thread_desc * thrd = kernelTLS.this_thread;	  // Cache the thread now since interrupts can start happening after the atomic store

		with( kernelTLS.preemption_state ){
			unsigned short prev = disable_count;
			disable_count -= 1;
			verify( prev != 0u );                     // If this triggers someone is enabling interrupts that were never disabled

			// Check if we need to preempt the thread because an interrupt was missed
			if( prev == 1 ) {
				#if GCC_VERSION > 50000
				static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
				#endif

				// Set enabled flag to true
				// should be atomic to avoid preemption in the middle of the operation.
				// use memory order RELAXED since there are no inter-thread ordering requirements on this variable
				__atomic_store_n(&enabled, true, __ATOMIC_RELAXED);

				// Signal the compiler that a fence is needed but only for signal handlers
				__atomic_signal_fence(__ATOMIC_RELEASE);

				// A preemption arrived while interrupts were disabled; perform the deferred context switch now
				if( proc->pending_preemption ) {
					proc->pending_preemption = false;
					BlockInternal( thrd );
				}
			}
		}

		// For debugging purposes : keep track of the last person to enable the interrupts
		__cfaabi_dbg_debug_do( proc->last_enable = caller; )
	}

	// Enable interrupts by decrementing the counter
	// Unlike enable_interrupts, do NOT execute any pending CtxSwitch even if the counter reaches 0
	void enable_interrupts_noPoll() {
		unsigned short prev = kernelTLS.preemption_state.disable_count;
		kernelTLS.preemption_state.disable_count -= 1;
		verifyf( prev != 0u, "Incremented from %u\n", prev );                     // If this triggers someone is enabling interrupts that were never disabled
		if( prev == 1 ) {
			#if GCC_VERSION > 50000
			static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free");
			#endif
			// Set enabled flag to true
			// should be atomic to avoid preemption in the middle of the operation.
			// use memory order RELAXED since there are no inter-thread ordering requirements on this variable
			__atomic_store_n(&kernelTLS.preemption_state.enabled, true, __ATOMIC_RELAXED);

			// Signal the compiler that a fence is needed but only for signal handlers
			__atomic_signal_fence(__ATOMIC_RELEASE);
		}
	}
}

// pthread_sigmask wrapper : unblock a single signal for the calling thread
static inline void signal_unblock( int sig ) {
	sigset_t mask;
	sigemptyset( &mask );
	sigaddset( &mask, sig );

	// pthread_sigmask returns 0 on success or an error number on failure;
	// it never returns -1 (POSIX), so the old "== -1" check could never fire.
	if ( pthread_sigmask( SIG_UNBLOCK, &mask, NULL ) != 0 ) {
	    abort( "internal error, pthread_sigmask" );
	}
}

// pthread_sigmask wrapper : block a single signal for the calling thread
static inline void signal_block( int sig ) {
	sigset_t mask;
	sigemptyset( &mask );
	sigaddset( &mask, sig );

	// pthread_sigmask returns 0 on success or an error number on failure;
	// it never returns -1 (POSIX), so the old "== -1" check could never fire.
	if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) != 0 ) {
	    abort( "internal error, pthread_sigmask" );
	}
}

// kill wrapper : queue a preemption signal to a processor's kernel thread
static void preempt( processor * this ) {
	sigval_t reason = { PREEMPT_NORMAL };                       // Payload tells the handler why it was signalled
	pthread_sigqueue( this->kernel_thread, SIGUSR1, reason );
}

// reserved for future use
// Handler for expired thread-timeout alarms; waking the thread is not implemented yet
static void timeout( thread_desc * this ) {
	//TODO : implement waking threads
}

// KERNEL ONLY
// Check if a CtxSwitch signal handler should defer
// If true  : preemption is safe
// If false : preemption is unsafe and marked as pending
static inline bool preemption_ready() {
	// Preemption is safe only when enabled and no handler is already in progress
	bool safe = kernelTLS.preemption_state.enabled && ! kernelTLS.preemption_state.in_progress;

	// Record a missed preemption so it can be performed when interrupts are re-enabled
	kernelTLS.this_processor->pending_preemption = ! safe;
	return safe;
}

//=============================================================================================
// Kernel Signal Startup/Shutdown logic
//=============================================================================================

// Startup routine to activate preemption
// Called from kernel_startup
void kernel_start_preemption() {
	__cfaabi_dbg_print_safe( "Kernel : Starting preemption\n" );

	// Start with preemption disabled until ready
	kernelTLS.preemption_state.enabled = false;
	kernelTLS.preemption_state.disable_count = 1;

	// Initialize the event kernel in its reserved static storage
	event_kernel = (event_kernel_t *)&storage_event_kernel;
	(*event_kernel){};

	// Setup proper signal handlers
	__cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART );         // CtxSwitch handler

	// Block SIGALRM in this thread so only the dedicated alarm thread consumes it via sigwaitinfo
	signal_block( SIGALRM );

	// Spawn the alarm thread
	// NOTE(review): pthread_create's return value is ignored — confirm failure is acceptable here
	pthread_create( &alarm_thread, NULL, alarm_loop, NULL );
}

// Shutdown routine to deactivate preemption
// Called from kernel_shutdown
void kernel_stop_preemption() {
	__cfaabi_dbg_print_safe( "Kernel : Preemption stopping\n" );

	// Block all signals since we are already shutting down
	sigset_t mask;
	sigfillset( &mask );
	sigprocmask( SIG_BLOCK, &mask, NULL );

	// Notify the alarm thread of the shutdown
	// (alarm_loop exits on any SI_QUEUE-sent SIGALRM; the queued value is currently unused)
	sigval val = { 1 };
	pthread_sigqueue( alarm_thread, SIGALRM, val );

	// Wait for the preemption thread to finish
	pthread_join( alarm_thread, NULL );

	// Preemption is now fully stopped

	__cfaabi_dbg_print_safe( "Kernel : Preemption stopped\n" );
}

// Raii ctor/dtor for the preemption_scope
// Used by thread to control when they want to receive preemption signals
void ?{}( preemption_scope & this, processor * proc ) {
	// Build an initially-inert alarm for this processor (time 0, period 0)
	(this.alarm){ proc, (Time){ 0 }, 0`s };
	this.proc = proc;
	this.proc->preemption_alarm = &this.alarm;

	// Arm periodic preemption at the cluster's configured rate
	update_preemption( this.proc, this.proc->cltr->preemption_rate );
}

void ^?{}( preemption_scope & this ) {
	// Interrupts are deliberately left disabled for the remainder of the teardown
	// (no matching enable_interrupts here) — presumably the processor is shutting down
	disable_interrupts();

	// A zero duration unregisters the alarm (see update_preemption)
	update_preemption( this.proc, 0`s );
}

//=============================================================================================
// Kernel Signal Handlers
//=============================================================================================

// Context switch signal handler
// Receives SIGUSR1 signal and causes the current thread to yield
void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) {
	__cfaabi_dbg_debug_do( last_interrupt = (void *)(cxt->uc_mcontext.CFA_REG_IP); )

	// SKULLDUGGERY: if a thread creates a processor and then immediately deletes it,
	// the interrupt that is supposed to force the kernel thread to preempt might arrive
	// before the kernel thread has even started running. When that happens an interrupt
	// with a null 'this_processor' will be caught, just ignore it.
	if(! kernelTLS.this_processor ) return;

	// Sanity check the payload queued with the signal
	choose(sfp->si_value.sival_int) {
		case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
		case PREEMPT_TERMINATE: verify( __atomic_load_n( &kernelTLS.this_processor->do_terminate, __ATOMIC_SEQ_CST ) );
		default:
			abort( "internal error, signal value is %d", sfp->si_value.sival_int );
	}

	// Check if it is safe to preempt here
	if( !preemption_ready() ) { return; }

	__cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", kernelTLS.this_processor, kernelTLS.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) );

	// Sync flag : prevent recursive calls to the signal handler
	kernelTLS.preemption_state.in_progress = true;

	// Clear sighandler mask before context switching.
	#if GCC_VERSION > 50000
	static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" );
	#endif
	// NOTE(review): pthread_sigmask returns an error number, never -1, so this check can never
	// trigger — confirm and change the comparison to "!= 0"
	if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), NULL ) == -1 ) {
		abort( "internal error, sigprocmask" );
	}

	// TODO: this should go in finish action
	// Clear the in progress flag
	kernelTLS.preemption_state.in_progress = false;

	// Preemption can occur here

	BlockInternal( kernelTLS.this_thread ); // Do the actual CtxSwitch
}

// Main of the alarm thread
// Waits on SIGALRM and send SIGUSR1 to whom ever needs it
void * alarm_loop( __attribute__((unused)) void * args ) {
	// Block all signals so they are only received synchronously via sigwaitinfo below
	sigset_t mask;
	sigfillset(&mask);
	// NOTE(review): pthread_sigmask returns an error number, never -1, so this check can never
	// trigger — confirm and change the comparison to "!= 0"
	if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
	    abort( "internal error, pthread_sigmask" );
	}

	// Reuse the mask to wait on SIGALRM only
	sigemptyset( &mask );
	sigaddset( &mask, SIGALRM );

	// Main loop
	while( true ) {
		// Wait for a sigalrm
		siginfo_t info;
		int sig = sigwaitinfo( &mask, &info );

		if( sig < 0 ) {
			// Error! sigwaitinfo returns -1 and sets errno
			int err = errno;
			switch( err ) {
				case EAGAIN :
				case EINTR :
					{__cfaabi_dbg_print_buffer_decl( " KERNEL: Spurious wakeup %d.\n", err );}
					continue;
       			case EINVAL :
				 	abort( "Timeout was invalid." );
				default:
				 	abort( "Unhandled error %d", err);
			}
		}

		// If another signal arrived something went wrong
		assertf(sig == SIGALRM, "Kernel Internal Error, sigwait: Unexpected signal %d (%d : %d)\n", sig, info.si_code, info.si_value.sival_int);

		// __cfaabi_dbg_print_safe( "Kernel : Caught alarm from %d with %d\n", info.si_code, info.si_value.sival_int );
		// Switch on the code (a.k.a. the sender) to decide what to do
		switch( info.si_code )
		{
		// Timers can apparently be marked as sent for the kernel
		// In either case, tick preemption
		case SI_TIMER:
		case SI_KERNEL:
			// __cfaabi_dbg_print_safe( "Kernel : Preemption thread tick\n" );
			lock( event_kernel->lock __cfaabi_dbg_ctx2 );
			tick_preemption();
			unlock( event_kernel->lock );
			break;
		// Signal was not sent by the kernel but by an other thread
		case SI_QUEUE:
			// For now, other threads only signal the alarm thread to shut it down
			// If this needs to change use info.si_value and handle the case here
			goto EXIT;
		}
	}

EXIT:
	__cfaabi_dbg_print_safe( "Kernel : Preemption thread stopping\n" );
	return NULL;
}

//=============================================================================================
// Kernel Signal Debug
//=============================================================================================

// Debug check : verify that preemption is enabled and that the signal mask of the
// calling thread is consistent with that state (SIGUSR1/SIGTERM deliverable, SIGALRM blocked).
void __cfaabi_check_preemption() {
	if( ! kernelTLS.preemption_state.enabled ) { abort("Preemption should be ready"); }

	// Query the current signal mask without changing it (set == NULL, so 'how' is ignored)
	sigset_t mask;
	int rc = pthread_sigmask(0, NULL, &mask);
	if(rc != 0) { abort("ERROR sigprocmask returned %d", rc); }

	// SIGUSR1 (CtxSwitch) must be deliverable to this thread
	rc = sigismember(&mask, SIGUSR1);
	if(rc <  0) { abort("ERROR sigismember returned %d", rc); }
	if(rc == 1) { abort("ERROR SIGUSR1 is disabled"); }

	// SIGALRM must stay blocked; only the alarm thread consumes it
	rc = sigismember(&mask, SIGALRM);
	if(rc <  0) { abort("ERROR sigismember returned %d", rc); }
	if(rc == 0) { abort("ERROR SIGALRM is enabled"); }

	// SIGTERM must remain deliverable
	rc = sigismember(&mask, SIGTERM);
	if(rc <  0) { abort("ERROR sigismember returned %d", rc); }
	if(rc == 1) { abort("ERROR SIGTERM is disabled"); }
}

#ifdef __CFA_WITH_VERIFY__
// Debug predicate : treated as "in the kernel" whenever preemption is disabled
bool __cfaabi_dbg_in_kernel() {
	return !kernelTLS.preemption_state.enabled;
}
#endif

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //
