//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Feb  4 12:29:26 2020
// Update Count     : 22
//

#pragma once

#include <stdbool.h>
#include <stdint.h>

#include "invoke.h"
#include "time_t.hfa"
#include "coroutine.hfa"

extern "C" {
#include <pthread.h>
#include <semaphore.h>
}

//-----------------------------------------------------------------------------
// Locks
struct semaphore {
	__spinlock_t lock;
	int count;
	__queue_t($thread) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
void  P (semaphore & this);
bool  V (semaphore & this);
bool  V (semaphore & this, unsigned count);
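
// Illustrative usage (a sketch, not part of this interface): with the
// default count of 1, a semaphore serves as a mutual-exclusion lock.
// P blocks the caller until the count is positive; V wakes one waiting
// thread, if any (the bool result presumably reports whether a waiter
// was unblocked).
//
//   semaphore lock;                          // count defaults to 1
//   void do_work() {                         // hypothetical caller
//       P( lock );                           // acquire: decrement count or block
//       /* ... critical section ... */
//       V( lock );                           // release: wake one waiter
//   }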
//-----------------------------------------------------------------------------
// Processor
extern struct cluster * mainCluster;

// Processor
coroutine processorCtx_t {
	struct processor * proc;
};

// Wrapper around kernel threads
struct processor {
	// Main state
	// Coroutine ctx that keeps the state of the processor
	struct processorCtx_t runner;

	// Cluster from which to get threads
	struct cluster * cltr;

	// Name of the processor
	const char * name;

	// Handle to pthreads
	pthread_t kernel_thread;

	// RunThread data
	// Action to do after a thread is run
	$thread * destroyer;

	// Preemption data
	// Node which is added in the discrete-event simulation
	struct alarm_node_t * preemption_alarm;

	// If true, a preemption was triggered in an unsafe region; the processor must preempt as soon as possible
	bool pending_preemption;

	// Idle lock (kernel semaphore)
	__bin_sem_t idle;

	// Termination
	// Set to true to notify the processor it should terminate
	volatile bool do_terminate;

	// Termination synchronisation (user semaphore)
	semaphore terminated;

	// pthread stack
	void * stack;

	// Linked-list fields
	struct __dbg_node_proc {
		struct processor * next;
		struct processor * prev;
	} node;

	#ifdef __CFA_DEBUG__
	// Last function to enable preemption on this processor
	const char * last_enable;
	#endif
};

void  ?{}(processor & this, const char name[], struct cluster & cltr);
void ^?{}(processor & this);

static inline void ?{}(processor & this)                        { this{ "Anonymous Processor", *mainCluster }; }
static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr }; }
static inline void ?{}(processor & this, const char name[])     { this{ name, *mainCluster }; }

static inline [processor *&, processor *& ] __get( processor & this ) __attribute__((const)) { return this.node.[next, prev]; }

//-----------------------------------------------------------------------------
// I/O
#if defined(HAVE_LINUX_IO_URING_H)
struct io_uring_sq {
	// Head and tail of the ring (associated with array)
	volatile uint32_t * head;
	volatile uint32_t * tail;

	// The actual kernel ring which uses head/tail
	// indexes into the sqes arrays
	uint32_t * array;

	// number of entries and mask to go with it
	const uint32_t * num;
	const uint32_t * mask;

	// Ring flags written by the kernel, e.g., IORING_SQ_NEED_WAKEUP
	uint32_t * flags;

	// Number of invalid sqes the kernel dropped
	uint32_t * dropped;

	// Like head/tail but not seen by the kernel
	volatile uint32_t alloc;
	__spinlock_t lock;

	// A buffer of sqes (not the actual ring)
	struct io_uring_sqe * sqes;

	// The location and size of the mmaped area
	void * ring_ptr;
	size_t ring_sz;

	// Statistics
	#if !defined(__CFA_NO_STATISTICS__)
	struct {
		struct {
			unsigned long long int val;
			unsigned long long int cnt;
		} submit_avg;
	} stats;
	#endif
};

struct io_uring_cq {
	// Head and tail of the ring
	volatile uint32_t * head;
	volatile uint32_t * tail;

	// number of entries and mask to go with it
	const uint32_t * mask;
	const uint32_t * num;

	// Number of cqes the kernel dropped because the completion ring was full
	uint32_t * overflow;

	// the kernel ring
	struct io_uring_cqe * cqes;

	// The location and size of the mmaped area
	void * ring_ptr;
	size_t ring_sz;

	// Statistics
	#if !defined(__CFA_NO_STATISTICS__)
	struct {
		struct {
			unsigned long long int val;
			unsigned long long int cnt;
		} completed_avg;
	} stats;
	#endif
};

struct io_ring {
	struct io_uring_sq submit_q;
	struct io_uring_cq completion_q;
	uint32_t flags;
	int fd;
	pthread_t poller;
	void * stack;
	volatile bool done;
	semaphore submit;
};
#endif

//-----------------------------------------------------------------------------
// Cluster
struct cluster {
	// Ready queue lock
	__spinlock_t ready_queue_lock;

	// Ready queue for threads
	__queue_t($thread) ready_queue;

	// Name of the cluster
	const char * name;

	// Preemption rate on this cluster
	Duration preemption_rate;

	// List of processors
	__spinlock_t idle_lock;
	__dllist_t(struct processor) procs;
	__dllist_t(struct processor) idles;
	unsigned int nprocessors;

	// List of threads
	__spinlock_t thread_list_lock;
	__dllist_t(struct $thread) threads;
	unsigned int nthreads;

	// Linked-list fields
	struct __dbg_node_cltr {
		cluster * next;
		cluster * prev;
	} node;

	#if defined(HAVE_LINUX_IO_URING_H)
	struct io_ring io;
	#endif

	#if !defined(__CFA_NO_STATISTICS__)
	bool print_stats;
	#endif
};

extern Duration default_preemption();

void ?{} (cluster & this, const char name[], Duration preemption_rate);
void ^?{}(cluster & this);

static inline void ?{} (cluster & this)                            { this{ "Anonymous Cluster", default_preemption() }; }
static inline void ?{} (cluster & this, Duration preemption_rate)  { this{ "Anonymous Cluster", preemption_rate }; }
static inline void ?{} (cluster & this, const char name[])         { this{ name, default_preemption() }; }

static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }

static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE
static inline struct cluster   * active_cluster  () { return TL_GET( this_processor )->cltr; }

#if !defined(__CFA_NO_STATISTICS__)
static inline void print_stats_at_exit( cluster & this ) {
	this.print_stats = true;
}
#endif

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //
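
// Illustrative usage (a sketch with hypothetical names; assumes the
// Duration literal syntax from time.hfa): a cluster groups ready threads
// with the processors (kernel threads) that execute them. Using the
// constructors declared above, a program can create a dedicated cluster
// and attach processors to it:
//
//   cluster clus = { "worker-cluster", 10`ms };  // name, preemption rate
//   processor p0 = { "worker-0", clus };         // kernel thread on clus
//   processor p1 = { "worker-1", clus };
//
// When p0 and p1 go out of scope, their destructors terminate the
// underlying kernel threads before the cluster itself is destroyed.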