//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Feb  4 12:29:26 2020
// Update Count     : 22
//

#pragma once

// NOTE(review): the system-header targets below were stripped in a bad paste
// ("#include" with no operand, twice here and twice inside extern "C").
// Restored from usage in this file: `bool` fields need <stdbool.h>; fixed-width
// integer support is conventionally <stdint.h> -- confirm against repo history.
#include <stdbool.h>
#include <stdint.h>

#include "invoke.h"
#include "time_t.hfa"
#include "coroutine.hfa"
#include "containers/stackLockFree.hfa"

extern "C" {
	// Restored targets: `pthread_t kernel_thread` below requires <pthread.h>;
	// the kernel-level semaphore (__bin_sem_t idle) implies <semaphore.h>.
	#include <pthread.h>
	#include <semaphore.h>
}

//-----------------------------------------------------------------------------
// Locks

// Counting semaphore that blocks user-level ($thread) threads.
struct semaphore {
	__spinlock_t lock;            // protects count and the waiting queue
	int count;                    // semaphore value
	__queue_t($thread) waiting;   // threads blocked in P
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);


//-----------------------------------------------------------------------------
// Processor
extern struct cluster * mainCluster;

// Processor id, required for scheduling threads
struct __processor_id_t {
	unsigned id;

	#if !defined(__CFA_NO_STATISTICS__)
		struct __stats_t * stats;
	#endif
};

// Coroutine whose execution context belongs to a processor (kernel thread).
coroutine processorCtx_t {
	struct processor * proc;
};

// Wrapper around kernel threads
struct __attribute__((aligned(128))) processor {
	// Main state
	inline __processor_id_t;

	// Cluster from which to get threads
	struct cluster * cltr;

	// Set to true to notify the processor should terminate
	volatile bool do_terminate;

	// Coroutine ctx which keeps the state of the processor
	struct processorCtx_t runner;

	// Name of the processor
	const char * name;

	// Handle to pthreads
	pthread_t kernel_thread;

	// RunThread data
	// Action to do after a thread is ran
	$thread * destroyer;

	// Preemption data
	// Node which is added in the discrete event simulation
	struct alarm_node_t * preemption_alarm;

	// If true, a preemption was triggered in an unsafe region, the processor
	// must preempt as soon as possible
	bool pending_preemption;

	// Idle lock (kernel semaphore)
	__bin_sem_t idle;

	// Termination synchronisation (user semaphore)
	semaphore terminated;

	// pthread Stack
	void * stack;

	// Link lists fields
	Link(processor) link;

	#if !defined(__CFA_NO_STATISTICS__)
		int print_stats;
		bool print_halts;
	#endif

	#ifdef __CFA_DEBUG__
		// Last function to enable preemption on this processor
		const char * last_enable;
	#endif
};

void  ?{}(processor & this, const char name[], struct cluster & cltr);
void ^?{}(processor & this);

// Convenience constructors delegating to the main constructor above.
static inline void ?{}(processor & this)                        { this{ "Anonymous Processor", *mainCluster}; }
static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr}; }
static inline void ?{}(processor & this, const char name[])     { this{name, *mainCluster }; }

// Intrusive-list accessor used by Link(processor) containers.
static inline Link(processor) * ?`next( processor * this ) { return &this->link; }


//-----------------------------------------------------------------------------
// I/O
struct __io_data;

// Flag bits for the cluster-creation `flags` parameter.
#define CFA_CLUSTER_IO_POLLER_USER_THREAD    (1 << 0) // 0x01
#define CFA_CLUSTER_IO_POLLER_THREAD_SUBMITS (1 << 1) // 0x02
#define CFA_CLUSTER_IO_EAGER_SUBMITS         (1 << 2) // 0x04
#define CFA_CLUSTER_IO_KERNEL_POLL_SUBMITS   (1 << 3) // 0x08
#define CFA_CLUSTER_IO_KERNEL_POLL_COMPLETES (1 << 4) // 0x10
#define CFA_CLUSTER_IO_BUFFLEN_OFFSET        16


//-----------------------------------------------------------------------------
// Cluster Tools

// Intrusive lanes which are used by the relaxed ready queue
struct __attribute__((aligned(128))) __intrusive_lane_t;
void  ?{}(__intrusive_lane_t & this);
void ^?{}(__intrusive_lane_t & this);

// Counter used for whether or not the lanes are all empty
struct __attribute__((aligned(128))) __snzi_node_t;
struct __snzi_t {
	unsigned mask;
	int root;
	__snzi_node_t * nodes;
};

void  ?{}( __snzi_t & this, unsigned depth );
void ^?{}( __snzi_t & this );

//TODO adjust cache size to ARCHITECTURE
// Structure holding the relaxed ready queue
struct __ready_queue_t {
	// Data tracking how many/which lanes are used
	// Aligned to 128 for cache locality
	__snzi_t snzi;

	// Data tracking the actual lanes
	// On a separate cacheline from the used struct since
	// used can change on each push/pop but this data
	// only changes on shrink/grow
	struct {
		// Array of lanes
		__intrusive_lane_t * volatile data;

		// Number of lanes (empty or not)
		volatile size_t count;
	} lanes;
};

void  ?{}(__ready_queue_t & this);
void ^?{}(__ready_queue_t & this);


//-----------------------------------------------------------------------------
// Cluster
struct __attribute__((aligned(128))) cluster {
	// Ready queue for threads
	__ready_queue_t ready_queue;

	// Name of the cluster
	const char * name;

	// Preemption rate on this cluster
	Duration preemption_rate;

	// List of idle processors
	StackLF(processor) idles;
	volatile unsigned int nprocessors;

	// List of threads
	__spinlock_t thread_list_lock;
	__dllist_t(struct $thread) threads;
	unsigned int nthreads;

	// Link lists fields
	struct __dbg_node_cltr {
		cluster * next;
		cluster * prev;
	} node;

	struct __io_data * io;

	#if !defined(__CFA_NO_STATISTICS__)
		struct __stats_t * stats;
		int print_stats;
	#endif
};
extern Duration default_preemption();

void ?{} (cluster & this, const char name[], Duration preemption_rate, unsigned flags);
void ^?{}(cluster & this);

// Convenience constructors delegating to the main constructor above.
static inline void ?{} (cluster & this)                                           { this{"Anonymous Cluster", default_preemption(), 0}; }
static inline void ?{} (cluster & this, Duration preemption_rate)                 { this{"Anonymous Cluster", preemption_rate, 0}; }
static inline void ?{} (cluster & this, const char name[])                        { this{name, default_preemption(), 0}; }
static inline void ?{} (cluster & this, unsigned flags)                           { this{"Anonymous Cluster", default_preemption(), flags}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned flags) { this{"Anonymous Cluster", preemption_rate, flags}; }
static inline void ?{} (cluster & this, const char name[], unsigned flags)        { this{name, default_preemption(), flags}; }

// Intrusive doubly-linked-list accessor for the cluster debug list.
static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }

static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE
static inline struct cluster   * active_cluster  () { return TL_GET( this_processor )->cltr; }

#if !defined(__CFA_NO_STATISTICS__)
	static inline void print_stats_at_exit( cluster   & this, int flags ) { this.print_stats |= flags; }
	static inline void print_stats_at_exit( processor & this, int flags ) { this.print_stats |= flags; }

	void print_halts( processor & this );
#endif

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //