//                              -*- Mode: CFA -*-
//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel.c --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Thierry Delisle
// Last Modified On : --
// Update Count     : 0
//

#include "startup.h"

//Start and stop routines for the kernel; declared first to make sure they run first
void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));

//Header
#include "kernel_private.h"

//C Includes
#include <stddef.h>
extern "C" {
#include <stdio.h>
#include <fenv.h>
#include <sys/resource.h>
#include <signal.h>
#include <unistd.h>
}

//CFA Includes
#include "libhdr.h"

//Private includes
#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

//-----------------------------------------------------------------------------
// Kernel storage
// Declares raw static storage for a kernel object without running its constructor;
// the object is constructed in place later, during kernel_startup.
// The storage must carry the alignment of T: it is cast to a T * and used as a
// live object (a plain char array only guarantees alignment 1).
#define KERNEL_STORAGE(T,X) static char X##_storage[sizeof(T)] __attribute__(( aligned( __alignof__(T) ) ))

// Backing storage for the bootstrap objects: the system processor and its
// coroutine context, the system cluster, and the main thread (which steals
// the program's original UNIX stack).
KERNEL_STORAGE(processorCtx_t, systemProcessorCtx);
KERNEL_STORAGE(cluster, systemCluster);
KERNEL_STORAGE(processor, systemProcessor);
KERNEL_STORAGE(thread_desc, mainThread);
KERNEL_STORAGE(machine_context_t, mainThread_context);

// Pointers into the storage above, set during kernel_startup
cluster * systemCluster;
processor * systemProcessor;
thread_desc * mainThread;

//-----------------------------------------------------------------------------
// Global state

// Processor (kernel thread) currently executing this code; set in
// CtxInvokeProcessor for pthread-backed processors and in kernel_startup
// for the bootstrap system processor.
thread_local processor * this_processor;

// Returns the coroutine currently running on this processor.
coroutine_desc * this_coroutine(void) {
	return this_processor->current_coroutine;
}

// Returns the thread currently running on this processor.
thread_desc * this_thread(void) {
	return this_processor->current_thread;
}

//-----------------------------------------------------------------------------
// Main thread construction
// Snapshot of the currently executing UNIX stack, used to wrap a coroutine
// stack descriptor around a stack the kernel did not allocate itself.
struct current_stack_info_t {
	machine_context_t ctx;	// machine context captured at construction
	unsigned int size;		// size of stack
	void *base;				// base of stack
	void *storage;			// pointer to stack
	void *limit;			// stack grows towards stack limit
	void *context;			// address of cfa_context_t
	void *top;				// address of top of storage
};

// Constructs a snapshot of the executing context: captures the machine
// registers, then derives the stack bounds from the frame pointer and the
// process stack limit.
void ?{}( current_stack_info_t * this ) {
	CtxGet( &this->ctx );
	this->base = this->ctx.FP;
	this->storage = this->ctx.SP;

	// Ask the OS for the maximum stack size to compute the growth limit
	rlimit r;
	getrlimit( RLIMIT_STACK, &r);
	this->size = r.rlim_cur;

	// Stacks grow down: the limit sits 'size' bytes below the base
	this->limit = (void *)(((intptr_t)this->base) - this->size);
	// NOTE(review): defaults to the mainThread context storage even though
	// this constructor is also used for processor contexts; in that case the
	// caller overwrites 'context' before use (see CtxInvokeProcessor).
	this->context = &mainThread_context_storage;
	this->top = this->base;
}

// Constructs a coroutine stack descriptor that mirrors a captured stack
// snapshot, instead of allocating fresh storage.
void ?{}( coStack_t * this, current_stack_info_t * info) {
	// Copy the geometry of the existing stack verbatim
	this->storage = info->storage;
	this->size    = info->size;
	this->base    = info->base;
	this->limit   = info->limit;
	this->top     = info->top;
	this->context = info->context;
	// The stack is borrowed from the surrounding context, not owned here
	this->userStack = true;
}

// Constructs the main thread's coroutine around the borrowed UNIX stack.
void ?{}( coroutine_desc * this, current_stack_info_t * info) {
	(&this->stack){ info };	
	this->name = "Main Thread";
	this->errno_ = 0;
	this->state = Start;
}

// Constructs the main thread descriptor; only its coroutine needs set-up
// since the thread borrows the program's existing stack.
void ?{}( thread_desc * this, current_stack_info_t * info) {
	(&this->cor){ info };
}

//-----------------------------------------------------------------------------
// Processor coroutine
// Constructs a processor context coroutine with a default stack and links
// it bidirectionally with its owning processor.
void ?{}(processorCtx_t * this, processor * proc) {
	(&this->__cor){};
	this->proc = proc;
	proc->runner = this;
}

// Constructs a processor context coroutine on a pre-existing stack
// (used when stealing the pthread stack in CtxInvokeProcessor).
void ?{}(processorCtx_t * this, processor * proc, current_stack_info_t * info) {
	(&this->__cor){ info };
	this->proc = proc;
	proc->runner = this;
}

// Default constructor: processors belong to the system cluster unless
// specified otherwise.
void ?{}(processor * this) {
	this{ systemCluster };
}

// Constructs a processor on the given cluster and immediately spawns its
// backing kernel thread (see start()).
void ?{}(processor * this, cluster * cltr) {
	this->cltr = cltr;
	this->current_coroutine = NULL;
	this->current_thread = NULL;
	(&this->terminated){};
	this->is_terminated = false;

	// State must be fully initialized before the kernel thread can observe it
	start( this );
}

// Constructs a processor with a caller-supplied context coroutine and does
// NOT spawn a kernel thread; used for the bootstrap systemProcessor, which
// runs on the program's original thread.
void ?{}(processor * this, cluster * cltr, processorCtx_t * runner) {
	this->cltr = cltr;
	this->current_coroutine = NULL;
	this->current_thread = NULL;
	(&this->terminated){};
	this->is_terminated = false;

	this->runner = runner;
	LIB_DEBUG_PRINT_SAFE("Kernel : constructing processor context %p\n", runner);
	runner{ this };
}

// Destructor: requests that the processor's scheduling loop stop, then
// blocks until the loop signals full termination (see main()).
// NOTE(review): is_terminated is read concurrently by the scheduling loop
// without synchronization -- presumably tolerated here, but verify.
void ^?{}(processor * this) {
	if( ! this->is_terminated ) {
		LIB_DEBUG_PRINT_SAFE("Kernel : core %p signaling termination\n", this);
		this->is_terminated = true;
		wait( &this->terminated );
	}
}

// Constructs a cluster: an empty ready queue protected by a spinlock.
void ?{}(cluster * this) {
	( &this->ready_queue ){};
	( &this->lock ){};
}

// Destructor: nothing to release.
void ^?{}(cluster * this) {
	
}

//=============================================================================================
// Kernel Scheduling logic
//=============================================================================================
//Main of the processor contexts
// Scheduling loop of a processor: repeatedly pulls ready threads off the
// cluster's ready queue and runs them until termination is requested.
void main(processorCtx_t * runner) {
	processor * this = runner->proc;
	LIB_DEBUG_PRINT_SAFE("Kernel : core %p starting\n", this);

	thread_desc * readyThread = NULL;
	for( unsigned int spin_count = 0; ! this->is_terminated; spin_count++ ) 
	{
		readyThread = nextThread( this->cltr );

		if(readyThread) 
		{
			runThread(this, readyThread);

			//Some actions need to be taken from the kernel
			finishRunning(this);

			// Useful work was done: reset the idle counter
			spin_count = 0;
		} 
		else 
		{
			// Nothing ready: spin (placeholder for a real sleep strategy)
			spin(this, &spin_count);
		}		
	}

	// Termination requested: wake whoever blocked in wait(&this->terminated)
	LIB_DEBUG_PRINT_SAFE("Kernel : core %p unlocking thread\n", this);
	signal( &this->terminated );
	LIB_DEBUG_PRINT_SAFE("Kernel : core %p terminated\n", this);
}

// runThread runs a thread by context switching
// from the processor coroutine to the target thread.
// Returns when the thread switches back to the processor
// (yield, block, or termination).
void runThread(processor * this, thread_desc * dst) {
	coroutine_desc * proc_cor = get_coroutine(this->runner);
	coroutine_desc * thrd_cor = get_coroutine(dst);
	
	//Reset the terminating actions here; the thread sets a new one before
	//suspending if it needs the kernel to act on its behalf (see finishRunning)
	this->finish.action_code = No_Action;

	//Update global state
	this->current_thread = dst;

	// Context Switch to the thread
	ThreadCtxSwitch(proc_cor, thrd_cor);
	// when ThreadCtxSwitch returns we are back in the processor coroutine
}

// After a thread stops running, perform the unblock/unlock action it
// requested (via the ScheduleInternal variants) from the kernel's stack,
// where the thread is guaranteed to no longer be executing.
void finishRunning(processor * this) {
	switch( this->finish.action_code ) {
	  case Release:
		// Release a single lock on the thread's behalf
		unlock( this->finish.lock );
		break;
	  case Schedule:
		// Make another thread ready to run
		ScheduleThread( this->finish.thrd );
		break;
	  case Release_Schedule:
		// Release a lock, then ready a thread
		unlock( this->finish.lock );
		ScheduleThread( this->finish.thrd );
		break;
	  case Release_Multi:
		// Release a batch of locks
		for(int i = 0; i < this->finish.lock_count; i++) {
			unlock( this->finish.locks[i] );
		}
		break;
	  case Release_Multi_Schedule:
		// Release a batch of locks, then ready a batch of threads
		for(int i = 0; i < this->finish.lock_count; i++) {
			unlock( this->finish.locks[i] );
		}
		for(int i = 0; i < this->finish.thrd_count; i++) {
			ScheduleThread( this->finish.thrds[i] );
		}
		break;
	  default:
		// Nothing was requested
		assert(this->finish.action_code == No_Action);
	}
}

// Idle strategy for a processor with no ready work: just count the spin.
// TODO : find some strategy to put cores to sleep after some time
void spin(processor * this, unsigned int * spin_count) {
	*spin_count += 1;
}

// Context invoker for processors.
// This is the pthread entry point for processors (kernel threads).
// It effectively constructs a coroutine by stealing the pthread stack.
void * CtxInvokeProcessor(void * arg) {
	processor * proc = (processor *) arg;
	this_processor = proc;
	// SKULLDUGGERY: We want to create a context for the processor coroutine
	// which is needed for the 2-step context switch. However, there is no reason
	// to waste the perfectly valid stack created by pthread.
	current_stack_info_t info;
	machine_context_t ctx;
	info.context = &ctx;	// override the snapshot's default context with a local one
	processorCtx_t proc_cor_storage = { proc, &info };

	LIB_DEBUG_PRINT_SAFE("Coroutine : created stack %p\n", proc_cor_storage.__cor.stack.base);

	//Set global state
	proc->current_coroutine = &proc->runner->__cor;
	proc->current_thread = NULL;

	//We now have a proper context from which to schedule threads
	LIB_DEBUG_PRINT_SAFE("Kernel : core %p created (%p, %p)\n", proc, proc->runner, &ctx);

	// SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't 
	// resume it to start it like it normally would, it will just context switch 
	// back to here. Instead directly call the main since we already are on the 
	// appropriate stack.
	proc_cor_storage.__cor.state = Active;
      main( &proc_cor_storage );
      proc_cor_storage.__cor.state = Halted;

	// Main routine of the core returned, the core is now fully terminated
	LIB_DEBUG_PRINT_SAFE("Kernel : core %p main ended (%p)\n", proc, proc->runner);	

	return NULL;
}

// Spawns the kernel thread (pthread) backing this processor; the new thread
// enters through CtxInvokeProcessor.
void start(processor * this) {
	LIB_DEBUG_PRINT_SAFE("Kernel : Starting core %p\n", this);
	
	// pthread_attr_t attributes;
	// pthread_attr_init( &attributes );

	// NOTE(review): the return value of pthread_create is not checked
	pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this );

	// pthread_attr_destroy( &attributes );

	LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this);	
}

//-----------------------------------------------------------------------------
// Scheduler routines
// Makes a thread ready to run by appending it to the ready queue.
// A NULL thread is ignored; the thread must not already be queued.
void ScheduleThread( thread_desc * thrd ) {
	if( !thrd ) return;

	assertf( thrd->next == NULL, "Expected null got %p", thrd->next );
	
	// NOTE(review): always enqueues on the system cluster's ready queue,
	// ignoring the cluster the thread belongs to -- confirm this is intended
	// while only a single cluster exists.
	lock( &systemProcessor->cltr->lock );
	append( &systemProcessor->cltr->ready_queue, thrd );
	unlock( &systemProcessor->cltr->lock );
}

// Removes and returns the next ready thread of the cluster,
// or NULL when the ready queue is empty.
thread_desc * nextThread(cluster * this) {
	thread_desc * thrd;
	lock( &this->lock );
	thrd = pop_head( &this->ready_queue );
	unlock( &this->lock );
	return thrd;
}

// ScheduleInternal overloads: yield the current thread back to the kernel,
// optionally recording a completion action that the kernel performs on the
// thread's behalf AFTER it has stopped running (see finishRunning). Doing the
// unlock/schedule from the kernel stack closes the window where the thread
// could be resumed while still executing.

// Plain yield: no completion action.
void ScheduleInternal() {
	suspend();
}

// Yield and have the kernel release 'lock' once this thread is off-stack.
void ScheduleInternal( spinlock * lock ) {
	this_processor->finish.action_code = Release;
	this_processor->finish.lock = lock;
	suspend();
}

// Yield and have the kernel make 'thrd' ready.
void ScheduleInternal( thread_desc * thrd ) {
	this_processor->finish.action_code = Schedule;
	this_processor->finish.thrd = thrd;
	suspend();
}

// Yield; the kernel releases 'lock' then readies 'thrd'.
void ScheduleInternal( spinlock * lock, thread_desc * thrd ) {
	this_processor->finish.action_code = Release_Schedule;
	this_processor->finish.lock = lock;
	this_processor->finish.thrd = thrd;
	suspend();
}

// Yield; the kernel releases all 'count' locks.
void ScheduleInternal(spinlock ** locks, unsigned short count) {
	this_processor->finish.action_code = Release_Multi;
	this_processor->finish.locks = locks;
	this_processor->finish.lock_count = count;
	suspend();
}

// Yield; the kernel releases all locks then readies all threads.
void ScheduleInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) {
	this_processor->finish.action_code = Release_Multi_Schedule;
	this_processor->finish.locks = locks;
	this_processor->finish.lock_count = lock_count;
	this_processor->finish.thrds = thrds;
	this_processor->finish.thrd_count = thrd_count;
	suspend();
}

//-----------------------------------------------------------------------------
// Kernel boot procedures
// Boots the runtime before user main() runs (constructor priority
// STARTUP_PRIORITY_KERNEL). Construction order matters: main thread first,
// then the system cluster, then the system processor and its context.
void kernel_startup(void) {
	LIB_DEBUG_PRINT_SAFE("Kernel : Starting\n");	

	// Start by initializing the main thread
	// SKULLDUGGERY: the mainThread steals the process main thread 
	// which will then be scheduled by the systemProcessor normally
	mainThread = (thread_desc *)&mainThread_storage;
	current_stack_info_t info;
	mainThread{ &info };

	// Initialize the system cluster
	systemCluster = (cluster *)&systemCluster_storage;
	systemCluster{};

	// Initialize the system processor and the system processor ctx
	// (the coroutine that contains the processing control flow)
	systemProcessor = (processor *)&systemProcessor_storage;
	systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtx_storage };

	// Add the main thread to the ready queue 
	// once resume is called on systemProcessor->ctx the mainThread needs to be scheduled like any normal thread
	ScheduleThread(mainThread);

	//initialize the global state variables
	this_processor = systemProcessor;
	this_processor->current_thread = mainThread;
	this_processor->current_coroutine = &mainThread->cor;

	// SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX
	// context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
	// mainThread is on the ready queue when this call is made. 
	resume(systemProcessor->runner);



	// THE SYSTEM IS NOW COMPLETELY RUNNING
	LIB_DEBUG_PRINT_SAFE("Kernel : Started\n--------------------------------------------------\n\n");
}

// Tears the runtime down after user main() returns (destructor priority
// STARTUP_PRIORITY_KERNEL); destroys the bootstrap objects in reverse order
// of construction.
void kernel_shutdown(void) {
	LIB_DEBUG_PRINT_SAFE("\n--------------------------------------------------\nKernel : Shutting down\n");

	// SKULLDUGGERY: Notify the systemProcessor it needs to terminate.
	// When its coroutine terminates, it returns control to the mainThread
	// which is currently here
	systemProcessor->is_terminated = true;
	suspend();

	// THE SYSTEM IS NOW COMPLETELY STOPPED

	// Destroy the system processor and its context in reverse order of construction
	// These were manually constructed so we need to manually destroy them
	// (is_terminated is already true, so ^?{}(processor *) will not block)
	^(systemProcessor->runner){};
	^(systemProcessor){};

	// Final step, destroy the main thread since it is no longer needed
	// Since we provided a stack to this task it will not destroy anything
	^(mainThread){};

	LIB_DEBUG_PRINT_SAFE("Kernel : Shutdown complete\n");	
}

// Serializes entry into kernel_abort across processors
static spinlock kernel_abort_lock;
// Serializes debug output (see __lib_debug_acquire/__lib_debug_release)
static spinlock kernel_debug_lock;
// Latched true by the first task that enters kernel_abort
static bool kernel_abort_called = false;

// Abort entry point: the first task to abort proceeds and returns its
// descriptor (used to build the abort message); any later aborting task is
// parked out of the way, or exits if it unblocks.
void * kernel_abort    (void) __attribute__ ((__nothrow__)) {
	// abort cannot be recursively entered by the same or different processors because all signal handlers return when
	// the globalAbort flag is true.
	lock( &kernel_abort_lock );

	// first task to abort ?
	if ( !kernel_abort_called ) {			// yes: record it and carry on with the abort
		kernel_abort_called = true;
		unlock( &kernel_abort_lock );
	} 
	else {
		// no: another task is already aborting; keep this processor quiet
		unlock( &kernel_abort_lock );
		
		sigset_t mask;
		sigemptyset( &mask );
		sigaddset( &mask, SIGALRM );			// block SIGALRM signals
		sigaddset( &mask, SIGUSR1 );			// block SIGUSR1 signals
		sigsuspend( &mask );				// block the processor to prevent further damage during abort
		_exit( EXIT_FAILURE );				// if processor unblocks before it is killed, terminate it		
	}

	return this_thread();
}

// Formats the abort message naming the faulting task (and, if different, the
// coroutine it was executing) into abort_text and writes it to stderr.
void kernel_abort_msg( void * kernel_data, char * abort_text, int abort_text_size ) {
	thread_desc * thrd = kernel_data;

	int len = snprintf( abort_text, abort_text_size, "Error occurred while executing task %.256s (%p)", thrd->cor.name, thrd );
	__lib_debug_write( STDERR_FILENO, abort_text, len );

	// NOTE(review): compares a thread_desc* against a coroutine_desc*;
	// presumably relies on 'cor' being at offset 0 of thread_desc --
	// confirm, or compare &thrd->cor instead.
	if ( thrd != this_coroutine() ) {
		len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine()->name, this_coroutine() );
		__lib_debug_write( STDERR_FILENO, abort_text, len );
	} 
	else {
		__lib_debug_write( STDERR_FILENO, ".\n", 2 );
	}
}

extern "C" {
	// Acquired around debug prints so output from different processors
	// does not interleave.
	void __lib_debug_acquire() {
		lock(&kernel_debug_lock);
	}

	void __lib_debug_release() {
		unlock(&kernel_debug_lock);
	}
}

//-----------------------------------------------------------------------------
// Locks
// A spinlock is free when its word is 0 and held when it is 1.
void ?{}( spinlock * this ) {
	this->lock = 0;
}
// Destructor: nothing to release.
void ^?{}( spinlock * this ) {

}

// Acquires the spinlock, busy-waiting until it is free.
// The word is tested before the atomic swap is attempted, so the hot spin
// reads the cache line instead of hammering it with atomic writes.
void lock( spinlock * this ) {
	while( this->lock != 0 || __sync_lock_test_and_set_4( &this->lock, 1 ) != 0 ) {
		// spin until the lock is observed free AND the swap wins it
	}
}

// Releases the spinlock (atomic store of 0 with release semantics).
void unlock( spinlock * this ) {
	__sync_lock_release_4( &this->lock );
}

// A signal_once starts un-signalled; 'cond' latches true on signal().
void ?{}( signal_once * this ) {
	this->cond = false;
}
// Destructor: nothing to release.
void ^?{}( signal_once * this ) {

}

// Blocks the calling thread until signal() has been called on 'this';
// returns immediately if the signal already happened.
void wait( signal_once * this ) {
	lock( &this->lock );
	if( !this->cond ) {
		// Park on the blocked list; the kernel releases this->lock on our
		// behalf only after this thread is off-stack (Release action),
		// closing the window where a signal could be missed.
		append( &this->blocked, this_thread() );
		ScheduleInternal( &this->lock );
		// Re-acquire so the unlock below is balanced on both paths
		lock( &this->lock );
	}
	unlock( &this->lock );
}

// Latches the condition and readies every thread blocked in wait().
void signal( signal_once * this ) {
	lock( &this->lock );
	{
		this->cond = true;

		thread_desc * it;
		while( it = pop_head( &this->blocked) ) {
			ScheduleThread( it );
		}
	}
	unlock( &this->lock );
}

//-----------------------------------------------------------------------------
// Queues
// Constructs an empty intrusive FIFO; 'tail' always points at the link
// field to write next (the head pointer itself when the queue is empty).
void ?{}( __thread_queue_t * this ) {
	this->head = NULL;
	this->tail = &this->head;
}

// Appends 't' at the tail of the queue in O(1).
void append( __thread_queue_t * this, thread_desc * t ) {
	assert(this->tail != NULL);
	*this->tail = t;
	this->tail = &t->next;
}

// Removes and returns the thread at the head of the queue,
// or NULL when the queue is empty.
thread_desc * pop_head( __thread_queue_t * this ) {
	thread_desc * node = this->head;
	if( !node ) return NULL;

	this->head = node->next;
	if( node->next == NULL ) {
		// Queue is now empty: point the tail back at the head pointer
		this->tail = &this->head;
	}
	node->next = NULL;	// detach the node from the queue
	return node;
}

// Constructs an empty intrusive criterion stack.
void ?{}( __condition_stack_t * this ) {
	this->top = NULL;
}

// Pushes 't' on the stack; 't' must not already be linked anywhere.
void push( __condition_stack_t * this, __condition_criterion_t * t ) {
	assert( !t->next );
	t->next = this->top;
	this->top = t;
}

// Removes and returns the top criterion, or NULL when the stack is empty.
__condition_criterion_t * pop( __condition_stack_t * this ) {
	__condition_criterion_t * node = this->top;
	if( node != NULL ) {
		this->top = node->next;
		node->next = NULL;	// detach from the stack
	}
	return node;
}
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //
