source: libcfa/src/concurrency/thread.cfa @ 1553a55

ADTast-experimental
Last change on this file since 1553a55 was 1553a55, checked in by Thierry Delisle <tdelisle@…>, 2 years ago

Explicitly zero initialize the intrusive link fields

  • Property mode set to 100644
File size: 7.2 KB
Line 
1//
2// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// thread.c --
8//
9// Author           : Thierry Delisle
10// Created On       : Tue Jan 17 12:27:26 2017
11// Last Modified By : Peter A. Buhr
12// Last Modified On : Sat Feb 12 15:24:18 2022
13// Update Count     : 66
14//
15
16#define __cforall_thread__
17#define _GNU_SOURCE
18
19#include "thread.hfa"
20
21#include "exception.hfa"
22#include "kernel/private.hfa"
23#include "limits.hfa"
24
25#define __CFA_INVOKE_PRIVATE__
26#include "invoke.h"
27
28extern uint32_t __global_random_seed, __global_random_prime, __global_random_mask;
29
30#pragma GCC visibility push(default)
31
32//-----------------------------------------------------------------------------
33// Thread ctors and dtors
void ?{}( thread$ & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
	// Construct the base thread descriptor: machine context, underlying
	// coroutine, self-monitor, and scheduler bookkeeping.  'storage' and
	// 'storageSize' optionally supply a caller-provided stack for the
	// coroutine (passed through to the coroutine constructor).
	context{ 0p, 0p };	// no saved SP/FP yet; filled in by __thrd_start
	self_cor{ name, storage, storageSize };
	ticket = TICKET_RUNNING;
	state = Start;
	preempted = __NO_PREEMPTION;
	corctx_flag = false;
	curr_cor = &self_cor;
	self_mon.owner = &this;	// a thread starts out owning its own monitor
	self_mon.recursion = 1;
	self_mon_p = &self_mon;
	curr_cluster = &cl;
	// Explicitly zero the intrusive link fields so the thread is visibly
	// not yet on any ready/user/cluster list.
	rdy_link.next = 0p;
	rdy_link.ts   = MAX;
	user_link.next = 0p;
	user_link.prev = 0p;
	cltr_link.next = 0p;
	cltr_link.prev = 0p;
	preferred = ready_queue_new_preferred();
	last_proc = 0p;
	// Seed the per-thread PRNG: once set_seed() has run (mask set), use the
	// deterministic global prime; otherwise mix in the timestamp counter.
	random_state = __global_random_mask ? __global_random_prime : __global_random_prime ^ rdtscl();
	#if defined( __CFA_WITH_VERIFY__ )
		executing = 0p;
		canary = 0x0D15EA5E0D15EA5Ep;	// overwrite-detection pattern, checked by verification code
	#endif

	clh_node = malloc( );	// CLH lock node, freed in ^?{}
	*clh_node = false;

	doregister(curr_cluster, this);
	monitors{ &self_mon_p, 1, (fptr_t)0 };
}
66
void ^?{}(thread$& this) with( this ) {
	// Tear down a thread descriptor: poison the canary (debug builds only),
	// release the CLH lock node, remove the thread from its cluster, and
	// destroy the underlying coroutine.
	#if defined( __CFA_WITH_VERIFY__ )
		canary = 0xDEADDEADDEADDEADp;	// poison so later use-after-free trips verification
	#endif
	free(clh_node);
	unregister(curr_cluster, this);
	^self_cor{};
}
75
forall(T &)
void copy(ThreadCancelled(T) * dst, ThreadCancelled(T) * src) {
	// Shallow, field-wise copy of a ThreadCancelled exception.  The three
	// members are independent, so the order of assignment is irrelevant.
	dst->the_exception = src->the_exception;
	dst->the_thread    = src->the_thread;
	dst->virtual_table = src->virtual_table;
}
82
forall(T &)
const char * msg(ThreadCancelled(T) *) {
	// Human-readable exception name used when the exception is reported.
	return "ThreadCancelled(...)";
}
87
forall(T &)
static void default_thread_cancel_handler(ThreadCancelled(T) & ) {
	// Last-resort resumption handler installed when the dtor guard is taken
	// without an explicit handler (i.e. plain destruction, not join()).
	// Improve this error message, can I do formatting?
	abort( "Unhandled thread cancellation.\n" );
}
93
forall(T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled(T))
    | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
void ?{}( thread_dtor_guard_t & this,
		T & thrd, void(*cancelHandler)(ThreadCancelled(T) &)) {
	// Guard taken while destroying (or joining) a thread: acquires the
	// thread's monitor — blocking until the thread has halted — and, if a
	// cancellation is pending, delivers a ThreadCancelled resumption
	// exception to the caller.  'cancelHandler' is non-null when called
	// from join(); null means plain destruction, which falls back to the
	// aborting default handler.
	monitor$ * m = get_monitor(thrd);
	thread$ * desc = get_thread(thrd);

	// Setup the monitor guard
	void (*dtor)(T& mutex this) = ^?{};
	bool join = cancelHandler != (void(*)(ThreadCancelled(T)&))0;
	(this.mg){&m, (void(*)())dtor, join};


	/* paranoid */ verifyf( Halted == desc->state || Cancelled == desc->state, "Expected thread to be Halted or Cancelled, was %d\n", (int)desc->state );

	// After the guard set-up and any wait, check for cancellation.
	struct _Unwind_Exception * cancellation = desc->self_cor.cancellation;
	if ( likely( 0p == cancellation ) ) {
		return;	// no cancellation pending: nothing more to do
	} else if ( Cancelled == desc->state ) {
		return;	// cancellation already delivered by an earlier guard
	}
	desc->state = Cancelled;
	// join() supplies its own handler; plain destruction aborts via the default.
	void(*defaultResumptionHandler)(ThreadCancelled(T) &) =
		join ? cancelHandler : default_thread_cancel_handler;

	// TODO: Remove explicit vtable set once trac#186 is fixed.
	ThreadCancelled(T) except;
	except.virtual_table = &_default_vtable;
	except.the_thread = &thrd;
	except.the_exception = __cfaehm_cancellation_exception( cancellation );
	// Why is this cast required?
	throwResume (ThreadCancelled(T) &)except;

	// Handler returned: free the wrapped exception and clear the pending
	// cancellation so it is not delivered twice.
	except.the_exception->virtual_table->free( except.the_exception );
	free( cancellation );
	desc->self_cor.cancellation = 0p;
}
132
void ^?{}( thread_dtor_guard_t & this ) {
	// Release the monitor guard acquired by the constructor.
	^(this.mg){};
}
136
137//-----------------------------------------------------------------------------
138// Starting and stopping threads
forall( T & | is_thread(T) )
void __thrd_start( T & this, void (*main_p)(T &) ) {
	// Launch a newly-constructed thread: prime its coroutine context to
	// enter 'main_p' via the thread-invoke trampoline, then hand the thread
	// to the scheduler.
	thread$ * this_thrd = get_thread(this);

	disable_interrupts();
	__cfactx_start(main_p, get_coroutine(this), this, __cfactx_invoke_thread);

	// The thread context starts where the coroutine context was initialized.
	this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
	/* paranoid */ verify( this_thrd->context.SP );

	schedule_thread$( this_thrd, UNPARK_LOCAL );
	enable_interrupts();
}
152
153//-----------------------------------------------------------------------------
// Support for threads that don't use the thread keyword
forall( T & | sized(T) | is_thread(T) | { void ?{}(T&); } )
void ?{}( scoped(T)& this ) with( this ) {
	// Default-construct the wrapped thread and start it immediately.
	handle{};
	__thrd_start(handle, main);
}
160
forall( T &, P... | sized(T) | is_thread(T) | { void ?{}(T&, P); } )
void ?{}( scoped(T)& this, P params ) with( this ) {
	// Construct the wrapped thread, forwarding 'params', and start it.
	handle{ params };
	__thrd_start(handle, main);
}
166
forall( T & | sized(T) | is_thread(T) )
void ^?{}( scoped(T)& this ) with( this ) {
	// Destroy the wrapped thread; the thread destructor joins with it.
	^handle{};
}
171
172//-----------------------------------------------------------------------------
forall(T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled(T))
	| { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
T & join( T & this ) {
	// Block until 'this' halts.  If it was cancelled, the guard resumes the
	// polymorphically-provided defaultResumptionHandler on the joiner.
	thread_dtor_guard_t guard = { this, defaultResumptionHandler };
	return this;
}
179
180//-----------------------------------------------------------------------------
// Move 'thrd' to cluster 'cl'.  Returns false when the thread is already on
// that cluster, true after a successful migration.  When migrating the
// current thread, a yield() re-schedules it onto the new cluster.
bool migrate( thread$ * thrd, struct cluster & cl ) {

	monitor$ * tmon = get_monitor(thrd);
	monitor$ * __monitors[] = { tmon };
	monitor_guard_t __guard = { __monitors, 1 };


	{
		// if nothing needs to be done, return false
		if( thrd->curr_cluster == &cl ) return false;

		// are we migrating ourself?
		const bool local = thrd == active_thread();

		/* paranoid */ verify( !local || &cl != active_cluster() );
		/* paranoid */ verify( !local || thrd->curr_cluster == active_cluster() );
		/* paranoid */ verify( !local || thrd->curr_cluster == active_processor()->cltr );
		// FIX: check signal_stack.top for null BEFORE dereferencing it
		// (previously these two verifies were in the opposite order; the
		// post-migration checks below already used the correct order)
		/* paranoid */ verify( local || tmon->signal_stack.top );
		/* paranoid */ verify( local || tmon->signal_stack.top->owner->waiting_thread == thrd );

		// make sure we aren't interrupted while doing this
		// not as important if we aren't local
		disable_interrupts();

		// actually move the thread
		unregister( thrd->curr_cluster, *thrd );
		thrd->curr_cluster = &cl;
		doregister( thrd->curr_cluster, *thrd );

		// restore interrupts
		enable_interrupts();

		// if this is the local thread, we are still running on the old cluster
		if(local) yield();

		/* paranoid */ verify( !local || &cl == active_cluster() );
		/* paranoid */ verify( !local || thrd->curr_cluster == active_cluster() );
		/* paranoid */ verify( !local || thrd->curr_cluster == active_processor()->cltr );
		/* paranoid */ verify(  local || tmon->signal_stack.top );
		/* paranoid */ verify(  local || tmon->signal_stack.top->owner->waiting_thread == thrd );

		return true;
	}
}
225
226//-----------------------------------------------------------------------------
227#define GENERATOR LCG
228
void set_seed( uint32_t seed ) {
	// Seed the calling thread's PRNG and the globals used to seed threads
	// created afterwards: the global "prime" is the state advanced one
	// generator step past the seed, and the mask records that an explicit
	// seed has been set (consulted by the thread constructor).
	uint32_t & state = active_thread()->random_state;
	state = __global_random_seed = seed;
	GENERATOR( state );	// advance one step before publishing as the prime
	__global_random_prime = state;
	__global_random_mask = true;
} // set_seed
236
uint32_t prng( void ) {									// [0,UINT_MAX]
	// Advance and return the calling thread's private PRNG state.
	uint32_t & state = active_thread()->random_state;
	return GENERATOR( state );
} // prng
241
242// Local Variables: //
243// mode: c //
244// tab-width: 4 //
245// End: //
Note: See TracBrowser for help on using the repository browser.