source: libcfa/src/concurrency/kernel_private.hfa @ 504a7dc

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel_private.hfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Feb 13 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Sat Nov 30 19:25:02 2019
// Update Count     : 8
//

#pragma once

#include "kernel.hfa"
#include "thread.hfa"

#include "alarm.hfa"

//-----------------------------------------------------------------------------
// Scheduler

extern "C" {
	void disable_interrupts() OPTIONAL_THREAD;
	void enable_interrupts_noPoll();
	void enable_interrupts( __cfaabi_dbg_ctx_param );
}

void __schedule_thread( $thread * ) __attribute__((nonnull (1)));

// Block the current thread and release/wake-up the corresponding resources
void __leave_thread() __attribute__((noreturn));

//-----------------------------------------------------------------------------
// Processor
void main(processorCtx_t *);

void * __create_pthread( pthread_t *, void * (*)(void *), void * );

struct event_kernel_t {
	alarm_list_t alarms;
	__spinlock_t lock;
};

extern event_kernel_t * event_kernel;

struct __cfa_kernel_preemption_state_t {
	bool enabled;
	bool in_progress;
	unsigned short disable_count;
};

extern volatile thread_local __cfa_kernel_preemption_state_t preemption_state __attribute__ ((tls_model ( "initial-exec" )));

extern cluster * mainCluster;

//-----------------------------------------------------------------------------
// Threads
extern "C" {
	void __cfactx_invoke_thread(void (*main)(void *), void * this);
}

__cfaabi_dbg_debug_do(
	extern void __cfaabi_dbg_thread_register  ( $thread * thrd );
	extern void __cfaabi_dbg_thread_unregister( $thread * thrd );
)

// KERNEL ONLY: unpark without disabling interrupts
void __unpark( $thread * thrd __cfaabi_dbg_ctx_param2 );

//-----------------------------------------------------------------------------
// I/O
void __kernel_io_startup     ( cluster &, int, bool );
void __kernel_io_finish_start( cluster & );
void __kernel_io_prepare_stop( cluster & );
void __kernel_io_shutdown    ( cluster &, bool );

//-----------------------------------------------------------------------------
// Utils
#define KERNEL_STORAGE(T,X) __attribute((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]

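// Illustrative sketch (assumed usage, not part of this header): KERNEL_STORAGE
// reserves raw, suitably-aligned static storage for an object constructed later,
// e.g. during kernel startup; the name "mainProcessor" here is hypothetical:
//
//     KERNEL_STORAGE(processor, mainProcessor);
//     ...
//     processor * proc = (processor *)&storage_mainProcessor;
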
// Thread-local xorshift-style pseudo-random number generator;
// it mutates the kernel TLS seed, so no synchronization is required.
static inline uint32_t __tls_rand() {
	kernelTLS.rand_seed ^= kernelTLS.rand_seed << 6;
	kernelTLS.rand_seed ^= kernelTLS.rand_seed >> 21;
	kernelTLS.rand_seed ^= kernelTLS.rand_seed << 7;
	return kernelTLS.rand_seed;
}

void doregister( struct cluster & cltr );
void unregister( struct cluster & cltr );

void doregister( struct cluster * cltr, struct $thread & thrd );
void unregister( struct cluster * cltr, struct $thread & thrd );

void doregister( struct cluster * cltr, struct processor * proc );
void unregister( struct cluster * cltr, struct processor * proc );

//=======================================================================
// Cluster lock API
//=======================================================================
struct __attribute__((aligned(64))) __processor_id {
	processor * volatile handle;
	volatile bool lock;
};

// Lock-free registering/unregistering of processors
// Register a processor with a given cluster and get its unique slot id in return
unsigned doregister2( struct cluster * cltr, struct processor * proc );

// Unregister a processor from a given cluster, releasing its slot
void     unregister2( struct cluster * cltr, struct processor * proc );

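// Illustrative sketch (assumed usage, not in this header): a processor claims a
// slot on startup and releases it on shutdown; "this" names a processor object:
//
//     this.id = doregister2( this.cltr, &this );   // claim a slot and its id
//     ...
//     unregister2( this.cltr, &this );             // release the slot
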
//=======================================================================
// Reader-writer lock implementation
// Concurrent with doregister/unregister,
//    i.e., threads can be added at any point during or between the entry/exit

//-----------------------------------------------------------------------
// Simple spinlock underlying the RWLock
// Blocking acquire: test-and-test-and-set, i.e., spin on a relaxed load
// until the lock looks free, then retry the exchange
static inline void __atomic_acquire(volatile bool * ll) {
	while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
		while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
			asm volatile("pause");
	}
	/* paranoid */ verify(*ll);
}

// Non-blocking acquire
static inline bool __atomic_try_acquire(volatile bool * ll) {
	return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
}

// Release
static inline void __atomic_unlock(volatile bool * ll) {
	/* paranoid */ verify(*ll);
	__atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
}

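// Illustrative sketch (assumed usage): acquire/unlock bracket a short critical
// section; the non-blocking variant returns false instead of spinning:
//
//     static volatile bool example_lock = false;    // hypothetical lock
//     __atomic_acquire( &example_lock );
//     ... critical section ...
//     __atomic_unlock( &example_lock );
//
//     if( __atomic_try_acquire( &example_lock ) ) {
//         ... critical section ...
//         __atomic_unlock( &example_lock );
//     }
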
//-----------------------------------------------------------------------
// Reader side : acquire when using the ready queue to schedule but not
//  creating/destroying queues
static inline void ready_schedule_lock( struct cluster * cltr, struct processor * proc) with(cltr->ready_lock) {
	unsigned iproc = proc->id;
	/*paranoid*/ verify(data[iproc].handle == proc);
	/*paranoid*/ verify(iproc < ready);

	// Step 1 : make sure no writer is in the middle of the critical section
	while(__atomic_load_n(&lock, (int)__ATOMIC_RELAXED))
		asm volatile("pause");

	// Fence needed because we don't want to start trying to acquire the lock
	// before we read a false.
	// Not needed on x86
	// std::atomic_thread_fence(std::memory_order_seq_cst);

	// Step 2 : acquire our local lock
	__atomic_acquire( &data[iproc].lock );
	/*paranoid*/ verify(data[iproc].lock);
}

static inline void ready_schedule_unlock( struct cluster * cltr, struct processor * proc) with(cltr->ready_lock) {
	unsigned iproc = proc->id;
	/*paranoid*/ verify(data[iproc].handle == proc);
	/*paranoid*/ verify(iproc < ready);
	/*paranoid*/ verify(data[iproc].lock);
	__atomic_unlock(&data[iproc].lock);
}

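// Illustrative sketch (assumed usage): readers bracket every use of the ready
// queue with the schedule lock, which only touches this processor's own slot:
//
//     ready_schedule_lock  ( cltr, proc );
//     ... push to / pop from the ready queue ...
//     ready_schedule_unlock( cltr, proc );
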
//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
//  queues or removing them.
uint_fast32_t ready_mutate_lock( struct cluster & cltr );

void ready_mutate_unlock( struct cluster & cltr, uint_fast32_t /* value returned by lock */ );

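// Illustrative sketch (assumed usage): a writer excludes all readers and must
// thread the token returned by the lock through to the matching unlock:
//
//     uint_fast32_t last = ready_mutate_lock( *cltr );
//     ... grow/shrink the ready queue ...
//     ready_mutate_unlock( *cltr, last );
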
//=======================================================================
// Ready-Queue API
//-----------------------------------------------------------------------
// Push a thread onto the ready queue of a cluster;
// returns true if the queue was previously empty, false otherwise
__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd);

//-----------------------------------------------------------------------
// Pop a thread from the ready queue of a cluster;
// returns 0p if the queue is empty
__attribute__((hot)) struct $thread * pop(struct cluster * cltr);

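// Illustrative sketch (assumed usage): scheduling combines the reader-side
// lock with push/pop; 0p is the Cforall null pointer:
//
//     ready_schedule_lock( cltr, proc );
//     $thread * thrd = pop( cltr );
//     ready_schedule_unlock( cltr, proc );
//     if( thrd != 0p ) { ... run thrd ... }
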
//-----------------------------------------------------------------------
// Increase the width of the ready queue (number of lanes) by 4
void ready_queue_grow  (struct cluster * cltr);

//-----------------------------------------------------------------------
// Decrease the width of the ready queue (number of lanes) by 4
void ready_queue_shrink(struct cluster * cltr);

//-----------------------------------------------------------------------
// Statistics: called at the end of each thread to tally its statistics
#if !defined(__CFA_NO_STATISTICS__)
void stats_tls_tally(struct cluster * cltr);
#else
static inline void stats_tls_tally(struct cluster * cltr) {}
#endif

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //