source: libcfa/src/concurrency/kernel_private.hfa @ 0da5cd5

Last change on this file since 0da5cd5 was 2f1cb37, checked in by Thierry Delisle <tdelisle@…>: Merge branch 'master' into relaxed_ready

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel_private.hfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Feb 13 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Sat Nov 30 19:25:02 2019
// Update Count     : 8
//

#pragma once

#include "kernel.hfa"
#include "thread.hfa"

#include "alarm.hfa"

//-----------------------------------------------------------------------------
// Scheduler

extern "C" {
	void disable_interrupts() OPTIONAL_THREAD;
	void enable_interrupts_noPoll();
	void enable_interrupts( __cfaabi_dbg_ctx_param );
}

void __schedule_thread( $thread * ) __attribute__((nonnull (1)));

// Block current thread and release/wake-up the following resources
void __leave_thread() __attribute__((noreturn));

//-----------------------------------------------------------------------------
// Processor
void main(processorCtx_t *);

void * __create_pthread( pthread_t *, void * (*)(void *), void * );


struct event_kernel_t {
	alarm_list_t alarms;
	__spinlock_t lock;
};

extern event_kernel_t * event_kernel;

struct __cfa_kernel_preemption_state_t {
	bool enabled;
	bool in_progress;
	unsigned short disable_count;
};

extern volatile thread_local __cfa_kernel_preemption_state_t preemption_state __attribute__ ((tls_model ( "initial-exec" )));

extern cluster * mainCluster;

//-----------------------------------------------------------------------------
// Threads
extern "C" {
	void __cfactx_invoke_thread(void (*main)(void *), void * this);
}

__cfaabi_dbg_debug_do(
	extern void __cfaabi_dbg_thread_register  ( $thread * thrd );
	extern void __cfaabi_dbg_thread_unregister( $thread * thrd );
)

// KERNEL ONLY: unpark without disabling interrupts
void __unpark( $thread * thrd __cfaabi_dbg_ctx_param2 );

//-----------------------------------------------------------------------------
// I/O
void __kernel_io_startup     ( cluster &, unsigned, bool );
void __kernel_io_finish_start( cluster & );
void __kernel_io_prepare_stop( cluster & );
void __kernel_io_shutdown    ( cluster &, bool );

//-----------------------------------------------------------------------------
// Utils
#define KERNEL_STORAGE(T,X) __attribute((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]
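// Usage sketch (illustrative only; the object and cast below are hypothetical,
// showing how the reserved storage is meant to be consumed):
//   KERNEL_STORAGE(cluster, mainCluster);                  // declares: static char storage_mainCluster[sizeof(cluster)]
//   cluster * c = (cluster *)&storage_mainCluster;         // constructed in place during kernel startup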

// Per-thread pseudo-random number generator: an xorshift-style generator
// advancing the thread-local seed stored in kernelTLS (no locking required).
static inline uint32_t __tls_rand() {
	kernelTLS.rand_seed ^= kernelTLS.rand_seed << 6;
	kernelTLS.rand_seed ^= kernelTLS.rand_seed >> 21;
	kernelTLS.rand_seed ^= kernelTLS.rand_seed << 7;
	return kernelTLS.rand_seed;
}
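
// A minimal usage sketch (hypothetical helper, not part of the original header):
// drawing a bounded random index, e.g. to pick a ready-queue lane. The slight
// modulo bias is irrelevant for scheduling decisions.
static inline unsigned __tls_rand_bound( unsigned count ) {
	return __tls_rand() % count;
}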


void doregister( struct cluster & cltr );
void unregister( struct cluster & cltr );

void doregister( struct cluster * cltr, struct $thread & thrd );
void unregister( struct cluster * cltr, struct $thread & thrd );

void doregister( struct cluster * cltr, struct processor * proc );
void unregister( struct cluster * cltr, struct processor * proc );

//=======================================================================
// Cluster lock API
//=======================================================================
struct __attribute__((aligned(64))) __processor_id {
	processor * volatile handle;
	volatile bool lock;
};

// Lock-free registering/unregistering of processors
// Register a processor to a given cluster and get its unique id in return
unsigned doregister2( struct cluster * cltr, struct processor * proc );

// Unregister a processor from a given cluster, releasing the id obtained from doregister2
void     unregister2( struct cluster * cltr, struct processor * proc );

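// A minimal sketch of the lock-free registration idea (an assumption about the
// implementation, which lives in the ready-queue code, not in this header):
// scan the cluster's __processor_id array for a free slot and claim the handle
// with a single compare-and-swap, so registration never blocks the readers.
//
//   for ( unsigned i = 0; i < count; i += 1 ) {                 // `count` is hypothetical
//       processor * expected = 0p;
//       if ( __atomic_compare_exchange_n( &data[i].handle, &expected, proc, false,
//                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
//           return i;                                           // slot index becomes the unique id
//       }
//   }
//   // no free slot: grow the array while holding the writer lock (ready_mutate_lock)
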
//=======================================================================
// Reader-writer lock implementation
// Concurrent with doregister/unregister,
//    i.e., threads can be added at any point during or between the entry/exit

//-----------------------------------------------------------------------
// Simple spinlock underlying the RWLock
// Blocking acquire
static inline void __atomic_acquire(volatile bool * ll) {
	// test-and-test-and-set: exchange to try to take the lock, then spin on
	// relaxed loads (with pause) until it looks free before trying again
	while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
		while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
			asm volatile("pause");
	}
	/* paranoid */ verify(*ll);
}

// Non-blocking acquire
static inline bool __atomic_try_acquire(volatile bool * ll) {
	return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
}

// Release
static inline void __atomic_unlock(volatile bool * ll) {
	/* paranoid */ verify(*ll);
	__atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
}
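
// Usage sketch (hypothetical helper, not part of the original header): the three
// primitives above compose into a plain spinlock over any `volatile bool` flag.
static inline void __example_locked( volatile bool * flag, void (*work)(void) ) {
	if( !__atomic_try_acquire( flag ) ) {   // fast path failed,
		__atomic_acquire( flag );           // fall back to the blocking spin
	}
	work();                                 // critical section
	__atomic_unlock( flag );                // store-release hands the flag back
}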

//-----------------------------------------------------------------------
// Reader side : acquire when using the ready queue to schedule but not
//  creating/destroying queues
static inline void ready_schedule_lock( struct cluster * cltr, struct processor * proc) with(cltr->ready_lock) {
	unsigned iproc = proc->id;
	/*paranoid*/ verify(data[iproc].handle == proc);
	/*paranoid*/ verify(iproc < ready);

	// Step 1 : make sure no writer is in the middle of the critical section
	while(__atomic_load_n(&lock, (int)__ATOMIC_RELAXED))
		asm volatile("pause");

	// Fence needed because we don't want to start trying to acquire the lock
	// before we read a false.
	// Not needed on x86
	// std::atomic_thread_fence(std::memory_order_seq_cst);

	// Step 2 : acquire our local lock
	__atomic_acquire( &data[iproc].lock );
	/*paranoid*/ verify(data[iproc].lock);
}

static inline void ready_schedule_unlock( struct cluster * cltr, struct processor * proc) with(cltr->ready_lock) {
	unsigned iproc = proc->id;
	/*paranoid*/ verify(data[iproc].handle == proc);
	/*paranoid*/ verify(iproc < ready);
	/*paranoid*/ verify(data[iproc].lock);
	__atomic_unlock(&data[iproc].lock);
}

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
//  queues or removing them.
uint_fast32_t ready_mutate_lock( struct cluster & cltr );

void ready_mutate_unlock( struct cluster & cltr, uint_fast32_t /* value returned by lock */ );

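// A conceptual sketch of the writer side (an assumption based on the reader
// protocol above; the real implementation is in the ready-queue code, and the
// value returned to the caller may differ): the writer raises the global flag
// so new readers stall in Step 1, then acquires every per-processor lock so
// that in-flight readers have drained.
//
//   // inside ready_mutate_lock, with(cltr.ready_lock):
//   __atomic_acquire( &lock );                    // block out new readers
//   for ( unsigned i = 0; i < ready; i += 1 ) {   // wait for current readers
//       __atomic_acquire( &data[i].lock );
//   }
//   return ready;                                 // unlock releases this many locks
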
//=======================================================================
// Ready-Queue API
//-----------------------------------------------------------------------
// push thread onto a ready queue for a cluster
// returns true if the list was previously empty, false otherwise
__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd);

//-----------------------------------------------------------------------
// pop thread from the ready queue of a cluster
// returns 0p if empty
__attribute__((hot)) struct $thread * pop(struct cluster * cltr);

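// Usage sketch (hypothetical helper, not part of the original header): a
// processor pulling its next thread, bracketed by the reader-side lock so a
// concurrent grow/shrink of the lanes cannot run in the middle.
static inline struct $thread * __example_next_thread( struct cluster * cltr, struct processor * proc ) {
	ready_schedule_lock  ( cltr, proc );
	struct $thread * thrd = pop( cltr );   // 0p if the ready queue is empty
	ready_schedule_unlock( cltr, proc );
	return thrd;
}
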
//-----------------------------------------------------------------------
// Increase the width of the ready queue (number of lanes) by 4
void ready_queue_grow  (struct cluster * cltr);

//-----------------------------------------------------------------------
// Decrease the width of the ready queue (number of lanes) by 4
void ready_queue_shrink(struct cluster * cltr);

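// Usage sketch (hypothetical helper; that grow/shrink runs under the writer
// lock follows from the comment on ready_mutate_lock above):
static inline void __example_add_lanes( struct cluster * cltr ) {
	uint_fast32_t last = ready_mutate_lock( *cltr );   // exclude every reader
	ready_queue_grow( cltr );                          // safe to restructure the lanes
	ready_mutate_unlock( *cltr, last );                // readers may resume
}
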
//-----------------------------------------------------------------------
// Statistics: called at the end of each thread to register its statistics
#if !defined(__CFA_NO_STATISTICS__)
void stats_tls_tally(struct cluster * cltr);
#else
static inline void stats_tls_tally(struct cluster * cltr) {}
#endif

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //