source: libcfa/src/concurrency/kernel_private.hfa @ 6a490b2

Last change: 6a490b2, checked in by Thierry Delisle <tdelisle@…>, 4 years ago
Merge branch 'master' into relaxed_ready

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel_private.hfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Feb 13 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Sat Nov 30 19:25:02 2019
// Update Count     : 8
//

#pragma once

#include "kernel.hfa"
#include "thread.hfa"

#include "alarm.hfa"


//-----------------------------------------------------------------------------
// Scheduler

extern "C" {
	void disable_interrupts() OPTIONAL_THREAD;
	void enable_interrupts_noPoll();
	void enable_interrupts( __cfaabi_dbg_ctx_param );
}
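
// Example (sketch, not part of the declarations above): interrupt disabling
// must be paired at the call site; `__cfaabi_dbg_ctx` is assumed to be the
// debug-context argument matching __cfaabi_dbg_ctx_param.
//   disable_interrupts();
//   // ... touch kernel data structures without being preempted ...
//   enable_interrupts( __cfaabi_dbg_ctx );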

void __schedule_thread( $thread * ) __attribute__((nonnull (1)));

// Block the current thread and release/wake up the following resources
void __leave_thread() __attribute__((noreturn));

//-----------------------------------------------------------------------------
// Processor
void main(processorCtx_t *);

void * __create_pthread( pthread_t *, void * (*)(void *), void * );

struct event_kernel_t {
	alarm_list_t alarms;
	__spinlock_t lock;
};

extern event_kernel_t * event_kernel;

struct __cfa_kernel_preemption_state_t {
	bool enabled;
	bool in_progress;
	unsigned short disable_count;
};

extern volatile thread_local __cfa_kernel_preemption_state_t preemption_state __attribute__ ((tls_model ( "initial-exec" )));
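
// Example (sketch of the intended semantics, as read from the declarations
// above; not code from this file): disable/enable calls nest via
// disable_count, and 'enabled' only becomes true once the count reaches zero.
//   disable_interrupts();                  // disable_count 0 -> 1, enabled = false
//   disable_interrupts();                  // disable_count 1 -> 2
//   enable_interrupts_noPoll();            // disable_count 2 -> 1, still disabled
//   enable_interrupts( __cfaabi_dbg_ctx ); // disable_count 1 -> 0, enabled = true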

extern cluster * mainCluster;

//-----------------------------------------------------------------------------
// Threads
extern "C" {
	void __cfactx_invoke_thread(void (*main)(void *), void * this);
}

__cfaabi_dbg_debug_do(
	extern void __cfaabi_dbg_thread_register  ( $thread * thrd );
	extern void __cfaabi_dbg_thread_unregister( $thread * thrd );
)

// KERNEL ONLY: unpark without disabling interrupts
void __unpark( $thread * thrd __cfaabi_dbg_ctx_param2 );

//-----------------------------------------------------------------------------
// I/O
void __kernel_io_startup     ( cluster &, int, bool );
void __kernel_io_finish_start( cluster & );
void __kernel_io_prepare_stop( cluster & );
void __kernel_io_shutdown    ( cluster &, bool );

//-----------------------------------------------------------------------------
// Utils
#define KERNEL_STORAGE(T,X) __attribute((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]
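
// Example (sketch of the intended pattern; the constructor argument is
// illustrative): reserve aligned static storage for a kernel object, then
// construct it in place during startup.
//   KERNEL_STORAGE(cluster, mainCluster);          // declares storage_mainCluster
//   mainCluster = (cluster *)&storage_mainCluster;
//   (*mainCluster){ "Main Cluster" };              // in-place construction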

// Marsaglia-style xorshift generator over the thread-local seed; no locking
// is needed because the state is per-thread
static inline uint32_t __tls_rand() {
	kernelTLS.rand_seed ^= kernelTLS.rand_seed << 6;
	kernelTLS.rand_seed ^= kernelTLS.rand_seed >> 21;
	kernelTLS.rand_seed ^= kernelTLS.rand_seed << 7;
	return kernelTLS.rand_seed;
}
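
// Example (sketch; 'lanes' is an illustrative field name, not from this
// file): cheap random selection, e.g. picking a ready-queue lane.
//   unsigned i = __tls_rand() % lanes.count;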

void doregister( struct cluster & cltr );
void unregister( struct cluster & cltr );

void doregister( struct cluster * cltr, struct $thread & thrd );
void unregister( struct cluster * cltr, struct $thread & thrd );

//=======================================================================
// Cluster lock API
//=======================================================================
struct __attribute__((aligned(64))) __processor_id {
	processor * volatile handle;
	volatile bool lock;
};

// Lock-free registering/unregistering of processors
// Register a processor to a given cluster and get its unique id in return
unsigned doregister( struct cluster * cltr, struct processor * proc );

// Unregister a processor from a given cluster using its unique id
void     unregister( struct cluster * cltr, struct processor * proc );
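
// Example (sketch; field names illustrative): a processor registers itself
// on startup, keeps the returned index for later lock operations, and
// unregisters on shutdown.
//   this.id = doregister( this.cltr, &this );     // index into the lock's data array
//   ...
//   unregister( this.cltr, &this );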

//=======================================================================
// Reader-writer lock implementation
// Concurrent with doregister/unregister,
//    i.e., threads can be added at any point during or between the entry/exit

//-----------------------------------------------------------------------
// Simple spinlock underlying the RWLock
// Blocking acquire: test-and-test-and-set, spinning on a relaxed load to
// avoid cache-line ping-pong while the lock is held
static inline void __atomic_acquire(volatile bool * ll) {
	while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
		while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
			asm volatile("pause");
	}
	/* paranoid */ verify(*ll);
}

// Non-blocking acquire
static inline bool __atomic_try_acquire(volatile bool * ll) {
	return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
}

// Release
static inline void __atomic_unlock(volatile bool * ll) {
	/* paranoid */ verify(*ll);
	__atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
}
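
// Example (sketch): the lock is a bare boolean, so usage is simply
//   static volatile bool l = false;
//   __atomic_acquire( &l );                 // blocking
//   /* critical section */
//   __atomic_unlock ( &l );
// or, non-blocking:
//   if( __atomic_try_acquire( &l ) ) { /* critical section */ __atomic_unlock( &l ); }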

//-----------------------------------------------------------------------
// Reader side : acquire when using the ready queue to schedule but not
//  creating/destroying queues
static inline void ready_schedule_lock( struct cluster * cltr, struct processor * proc) with(cltr->ready_lock) {
	unsigned iproc = proc->id;
	/*paranoid*/ verify(data[iproc].handle == proc);
	/*paranoid*/ verify(iproc < ready);

	// Step 1 : make sure no writers are in the middle of the critical section
	while(__atomic_load_n(&lock, (int)__ATOMIC_RELAXED))
		asm volatile("pause");

	// Fence needed because we don't want to start trying to acquire the lock
	// before we read a false.
	// Not needed on x86
	// std::atomic_thread_fence(std::memory_order_seq_cst);

	// Step 2 : acquire our local lock
	__atomic_acquire( &data[iproc].lock );
	/*paranoid*/ verify(data[iproc].lock);
}

static inline void ready_schedule_unlock( struct cluster * cltr, struct processor * proc) with(cltr->ready_lock) {
	unsigned iproc = proc->id;
	/*paranoid*/ verify(data[iproc].handle == proc);
	/*paranoid*/ verify(iproc < ready);
	/*paranoid*/ verify(data[iproc].lock);
	__atomic_unlock(&data[iproc].lock);
}
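
// Example (sketch of how scheduling code is expected to use the reader
// side): bracket ready-queue operations so they cannot race with a writer
// resizing the queues.
//   ready_schedule_lock  ( cltr, proc );
//   push( cltr, thrd );                      // or pop( cltr )
//   ready_schedule_unlock( cltr, proc );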

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
//  queues or removing them.
uint_fast32_t ready_mutate_lock( struct cluster & cltr );

void ready_mutate_unlock( struct cluster & cltr, uint_fast32_t /* value returned by lock */ );
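
// Example (sketch): a writer takes the total-ordering lock around a
// structural change and hands the value returned by the lock back on unlock.
//   uint_fast32_t last_size = ready_mutate_lock( *cltr );
//   ready_queue_grow( cltr );                // e.g., a processor was added
//   ready_mutate_unlock( *cltr, last_size );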

//=======================================================================
// Ready-Queue API
//-----------------------------------------------------------------------
// Push a thread onto the ready queue of a cluster;
// returns true if the list was previously empty, false otherwise
__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd);

//-----------------------------------------------------------------------
// Pop a thread from the ready queue of a cluster;
// returns 0p if empty
__attribute__((hot)) $thread * pop(struct cluster * cltr);
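
// Example (sketch; runThread is a hypothetical helper, not declared here):
// a processor's main loop draws work with pop and handles the empty case.
//   $thread * thrd = pop( cltr );            // 0p when no ready work
//   if( thrd ) runThread( proc, thrd );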

//-----------------------------------------------------------------------
// Increase the width of the ready queue (number of lanes) by 4
void ready_queue_grow  (struct cluster * cltr);

//-----------------------------------------------------------------------
// Decrease the width of the ready queue (number of lanes) by 4
void ready_queue_shrink(struct cluster * cltr);

//-----------------------------------------------------------------------
// Statistics: called at the end of each thread to tally its statistics
#if !defined(__CFA_NO_STATISTICS__)
void stats_tls_tally(struct cluster * cltr);
#else
static inline void stats_tls_tally(struct cluster * cltr) {}
#endif

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //