//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// invoke.h --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2016
// Last Modified By : Peter A. Buhr
// Last Modified On : Sat Jun 22 18:19:13 2019
// Update Count     : 40
//

#include "bits/containers.hfa"
#include "bits/defs.hfa"
#include "bits/locks.hfa"

#ifdef __cforall
extern "C" {
#endif

#if ! defined(__CFA_INVOKE_PRIVATE__)
#ifndef _INVOKE_H_
#define _INVOKE_H_

#ifdef __ARM_ARCH
	// these function prototypes are only used by the TL_GET/TL_SET macros on ARM
	void disable_global_interrupts();
	void enable_global_interrupts();

	#define TL_GET( member ) ( { __typeof__( kernelTLS.member ) target; \
                disable_global_interrupts(); \
                target = kernelTLS.member; \
                enable_global_interrupts(); \
                target; } )
	#define TL_SET( member, value ) do { \
		disable_global_interrupts(); \
		kernelTLS.member = value; \
		enable_global_interrupts(); \
	} while(0);
#else
	#define TL_GET( member ) kernelTLS.member
	#define TL_SET( member, value ) kernelTLS.member = value;
#endif
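
	// Illustrative usage sketch (comment only, not part of this header): reads and
	// writes of kernelTLS members should go through these macros so the ARM path can
	// disable interrupts around the access, e.g.
	//     struct thread_desc * td = TL_GET( this_thread );
	//     TL_SET( preemption_state.in_progress, true );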

	#ifdef __cforall
	extern "Cforall" {
		extern thread_local struct KernelThreadData {
			struct thread_desc    * volatile this_thread;
			struct processor      * volatile this_processor;

			struct {
				volatile unsigned short disable_count;
				volatile bool enabled;
				volatile bool in_progress;
			} preemption_state;
		} kernelTLS __attribute__ ((tls_model ( "initial-exec" )));
	}
	#endif

	struct __stack_context_t {
		void * SP;
		void * FP;
	};

	// low addresses :           +----------------------+ <- start of allocation
	//                           |  optional guard page |
	//                           +----------------------+ <- __stack_t.limit
	//                           |                      |
	//                           |       /\ /\ /\       |
	//                           |       || || ||       |
	//                           |                      |
	//                           |    program  stack    |
	//                           |                      |
	// __stack_info_t.storage -> +----------------------+ <- __stack_t.base
	//                           |      __stack_t       |
	// high addresses:           +----------------------+ <- end of allocation
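	//
	// A sketch of the arithmetic implied by this layout (assuming no extra padding):
	// the __stack_t record sits at the high end of the allocation, at the address held
	// in __stack_t.base, and the usable stack size is
	//     (char *)base - (char *)limit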

	struct __stack_t {
		// stack grows towards stack limit
		void * limit;

		// base of stack
		void * base;
	};

	struct __stack_info_t {
		// pointer to stack
		struct __stack_t * storage;
	};

	enum coroutine_state { Halted, Start, Inactive, Active, Primed };

	struct coroutine_desc {
		// context that is switched during a CtxSwitch
		struct __stack_context_t context;

		// stack information of the coroutine
		struct __stack_info_t stack;

		// textual name for coroutine/task
		const char * name;

		// current execution status for coroutine
		enum coroutine_state state;

		// first coroutine to resume this one
		struct coroutine_desc * starter;

		// last coroutine to resume this one
		struct coroutine_desc * last;

		// If non-null, the stack must be unwound with this exception
		struct _Unwind_Exception * cancellation;

	};
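
	// Illustrative note on the two resumer pointers above (intended semantics, not
	// enforced by this header): suspending a coroutine normally returns control to
	// `last`, its most recent resumer, while a coroutine that halts returns control
	// to `starter`, the coroutine that first resumed it.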

	// struct describing which calls the monitor is currently accepting
	struct __waitfor_mask_t {
		// the index of the accepted function, -1 if none
		short * accepted;

		// list of acceptable functions, null if any function is acceptable
		__cfa_anonymous_object( __small_array_t(struct __acceptable_t) );
	};

	struct monitor_desc {
		// spinlock to protect internal data
		struct __spinlock_t lock;

		// current owner of the monitor
		struct thread_desc * owner;

		// queue of threads that are blocked waiting for the monitor
		__queue_t(struct thread_desc) entry_queue;

		// stack of conditions to run next once we exit the monitor
		__stack_t(struct __condition_criterion_t) signal_stack;

		// monitor routines can be called recursively, so the recursion depth must be tracked
		unsigned int recursion;

		// mask used to determine whether a thread is waiting for something while holding the monitor
		struct __waitfor_mask_t mask;

		// node used to signal the dtor in a waitfor dtor
		struct __condition_node_t * dtor_node;
	};

	struct __monitor_group_t {
		// currently held monitors
		__cfa_anonymous_object( __small_array_t(monitor_desc*) );

		// last function that acquired monitors
		fptr_t func;
	};

	struct thread_desc {
		// Core threading fields
		// context that is switched during a CtxSwitch
		struct __stack_context_t context;

		// current execution status for coroutine
		enum coroutine_state state;

		// SKULLDUGGERY: errno is not saved in the thread data structure because returnToKernel appears to be the only function that requires saving and restoring it

		// coroutine body used to store context
		struct coroutine_desc  self_cor;

		// current active context
		struct coroutine_desc * curr_cor;

		// monitor body used for mutual exclusion
		struct monitor_desc    self_mon;

		// pointer to monitor with sufficient lifetime for current monitors
		struct monitor_desc *  self_mon_p;

		// pointer to the cluster on which the thread is running
		struct cluster * curr_cluster;

		// monitors currently held by this thread
		struct __monitor_group_t monitors;

		// Linked-list fields
		// intrusive link field for threads
		struct thread_desc * next;

		struct {
			struct thread_desc * next;
			struct thread_desc * prev;
		} node;
	};

	#ifdef __cforall
	extern "Cforall" {
		static inline thread_desc *& get_next( thread_desc & this ) {
			return this.next;
		}

		static inline [thread_desc *&, thread_desc *& ] __get( thread_desc & this ) {
			return this.node.[next, prev];
		}

		static inline void ?{}(__monitor_group_t & this) {
			(this.data){NULL};
			(this.size){0};
			(this.func){NULL};
		}

		static inline void ?{}(__monitor_group_t & this, struct monitor_desc ** data, __lock_size_t size, fptr_t func) {
			(this.data){data};
			(this.size){size};
			(this.func){func};
		}

		static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {
			if( (lhs.data != 0) != (rhs.data != 0) ) return false;
			if( lhs.size != rhs.size ) return false;
			if( lhs.func != rhs.func ) return false;

			// Check that all the monitors match
			for( int i = 0; i < lhs.size; i++ ) {
				// if any monitor differs, the groups do not match
				if( lhs[i] != rhs[i] ) return false;
			}

			return true;
		}

		static inline void ?=?(__monitor_group_t & lhs, const __monitor_group_t & rhs) {
			lhs.data = rhs.data;
			lhs.size = rhs.size;
			lhs.func = rhs.func;
		}
	}
	#endif

#endif //_INVOKE_H_
#else //! defined(__CFA_INVOKE_PRIVATE__)
#ifndef _INVOKE_PRIVATE_H_
#define _INVOKE_PRIVATE_H_

	struct machine_context_t {
		void *SP;
		void *FP;
		void *PC;
	};

	// assembly routines that perform the context switch
	extern void CtxInvokeStub( void );
	extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");
	// void CtxStore ( void * this ) asm ("CtxStore");
	// void CtxRet   ( void * dst  ) asm ("CtxRet");
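
	// Illustrative call sketch (comment only): switching from coroutine `src` to `dst`
	// saves the current SP/FP into src->context and restores dst->context, e.g.
	//     CtxSwitch( &src->context, &dst->context );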

#endif //_INVOKE_PRIVATE_H_
#endif //! defined(__CFA_INVOKE_PRIVATE__)
#ifdef __cforall
}
#endif

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //
