//                              -*- Mode: CFA -*-
//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel_private.h --
//
// Author           : Thierry Delisle
// Created On       : Mon Feb 13 12:27:26 2017
// Last Modified By : Thierry Delisle
// Last Modified On : --
// Update Count     : 0
//

#ifndef KERNEL_PRIVATE_H
#define KERNEL_PRIVATE_H

#include "kernel"
#include "thread"

#include "alarm.h"

#include "libhdr.h"

//-----------------------------------------------------------------------------
// Scheduler
void ScheduleThread( thread_desc * );	// make the given thread ready to run
thread_desc * nextThread(cluster * this);	// take the next ready thread from this cluster's queue

// ScheduleInternal: hand the current processor back to the scheduler.
// The overloads additionally release lock(s) and/or (re)schedule thread(s)
// as part of the same scheduling operation:
void ScheduleInternal(void);
void ScheduleInternal(spinlock * lock);	// release 'lock' after blocking
void ScheduleInternal(thread_desc * thrd);	// schedule 'thrd' after blocking
void ScheduleInternal(spinlock * lock, thread_desc * thrd);
void ScheduleInternal(spinlock ** locks, unsigned short count);	// release 'count' locks
void ScheduleInternal(spinlock ** locks, unsigned short count, thread_desc ** thrds, unsigned short thrd_count);

//-----------------------------------------------------------------------------
// Processor
// Coroutine forming the execution context of a kernel processor.
coroutine processorCtx_t {
	processor * proc;	// back-pointer to the processor this context runs on
};

void main(processorCtx_t *);	// coroutine main routine for a processor context
void start(processor * this);	// start the processor running
void runThread(processor * this, thread_desc * dst);	// run 'dst' on this processor
void finishRunning(processor * this);	// post-run bookkeeping after a thread yields back
void spin(processor * this, unsigned int * spin_count);	// idle spin; 'spin_count' tracks backoff state

// The system processor: a regular processor plus the alarm state it services.
struct system_proc_t {
	processor proc;	// underlying processor

	alarm_list_t alarms;	// list of pending alarms
	spinlock alarm_lock;	// protects 'alarms'

	bool pending_alarm;	// NOTE(review): presumably set when an alarm fires while
						// 'alarm_lock' is held — confirm against kernel.c/preemption.c
};

extern cluster * systemCluster;	// cluster the system processor belongs to
extern system_proc_t * systemProcessor;	// the unique system processor
extern thread_local processor * this_processor;	// processor the current kernel thread runs on

// Enter a preemption-disabled section on the current processor; calls nest and
// must each be balanced by enable_interrupts() or enable_interrupts_noRF().
static inline void disable_interrupts() {
	// Atomic increment: the count is also touched from the preemption signal path.
	__attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, 1, __ATOMIC_SEQ_CST );
	// Use verify (debug-only check) like the enable_* counterparts below,
	// instead of an unconditional assert; -1 before the add means the
	// unsigned nesting count overflowed.
	verify( prev != (unsigned short) -1 );
}

// Leave one level of preemption-disabled nesting WITHOUT acting on any
// pending preemption ("noRF" = no resched/force check).
static inline void enable_interrupts_noRF() {
	__attribute__((unused)) unsigned short old_count =
		__atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST );
	// A count of 0 before the decrement means an enable without a matching disable.
	verify( old_count != (unsigned short) 0 );
}

// Leave one level of preemption-disabled nesting; if this call fully re-enables
// preemption (count goes 1 -> 0) and a preemption arrived while disabled,
// yield the current thread through the scheduler now.
static inline void enable_interrupts() {
	__attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST );
	verify( prev != (unsigned short) 0 );	// 0 before the decrement => unbalanced enable
	if( prev == 1 && this_processor->pending_preemption ) {
		// NOTE(review): pending_preemption is cleared only AFTER ScheduleInternal
		// returns (i.e. after this thread has been rescheduled and resumed) —
		// confirm this ordering is intended and cannot drop or repeat a preemption.
		ScheduleInternal( this_processor->current_thread );
		this_processor->pending_preemption = false;
	}
}

//-----------------------------------------------------------------------------
// Threads
extern "C" {
      // First-invocation entry point for a new thread's context
      // (C linkage so the assembly context-switch/startup code can reference it).
      forall(dtype T | is_thread(T))
      void CtxInvokeThread(T * this);
}

// Switch execution from coroutine context 'src' to 'dst' on behalf of a thread.
extern void ThreadCtxSwitch(coroutine_desc * src, coroutine_desc * dst);

#endif //KERNEL_PRIVATE_H

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //
