source: libcfa/src/concurrency/thread.cfa @ 15c93d8

Last change on this file since 15c93d8 was 15c93d8, checked in by Thierry Delisle <tdelisle@…>, 18 months ago

Renamed ready-queue link fields to rdy_link

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// thread.cfa --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Sat Feb 12 15:24:18 2022
// Update Count     : 66
//

#define __cforall_thread__
#define _GNU_SOURCE

#include "thread.hfa"

#include "exception.hfa"
#include "kernel/private.hfa"
#include "limits.hfa"

#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

extern uint32_t __global_random_seed, __global_random_prime, __global_random_mask;

#pragma GCC visibility push(default)

//-----------------------------------------------------------------------------
// Thread ctors and dtors
void ?{}( thread$ & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
    context{ 0p, 0p };
    self_cor{ name, storage, storageSize };
    ticket = TICKET_RUNNING;
    state = Start;
    preempted = __NO_PREEMPTION;
    corctx_flag = false;
    curr_cor = &self_cor;
    self_mon.owner = &this;
    self_mon.recursion = 1;
    self_mon_p = &self_mon;
    curr_cluster = &cl;
    rdy_link.next = 0p;
    rdy_link.ts   = MAX;
    preferred = ready_queue_new_preferred();
    last_proc = 0p;
    random_state = __global_random_mask ? __global_random_prime : __global_random_prime ^ rdtscl();
    #if defined( __CFA_WITH_VERIFY__ )
        executing = 0p;
        canary = 0x0D15EA5E0D15EA5Ep;
    #endif

    node.next = 0p;
    node.prev = 0p;

    clh_node = malloc( );               // Cforall typed malloc: no size argument, sized from the pointed-to type
    *clh_node = false;

    doregister(curr_cluster, this);
    monitors{ &self_mon_p, 1, (fptr_t)0 };
}
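
// Hedged usage sketch, not compiled here: a user-level declaration such as
//     thread Worker {};                 // 'Worker' is a hypothetical type name
//     void main( Worker & ) { /* ... */ }
//     Worker w;                         // starts the thread
// reaches the thread$ ctor above through wrapper ctors (assumed to live in
// thread.hfa) that supply the name, cluster, storage, and storageSize arguments.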

void ^?{}(thread$& this) with( this ) {
    #if defined( __CFA_WITH_VERIFY__ )
        canary = 0xDEADDEADDEADDEADp;
    #endif
    free(clh_node);
    unregister(curr_cluster, this);
    ^self_cor{};
}

forall(T &)
void copy(ThreadCancelled(T) * dst, ThreadCancelled(T) * src) {
    dst->virtual_table = src->virtual_table;
    dst->the_thread = src->the_thread;
    dst->the_exception = src->the_exception;
}

forall(T &)
const char * msg(ThreadCancelled(T) *) {
    return "ThreadCancelled(...)";
}

forall(T &)
static void default_thread_cancel_handler(ThreadCancelled(T) & ) {
    // Improve this error message, can I do formatting?
    abort( "Unhandled thread cancellation.\n" );
}

forall(T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled(T))
    | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
void ?{}( thread_dtor_guard_t & this,
        T & thrd, void(*cancelHandler)(ThreadCancelled(T) &)) {
    monitor$ * m = get_monitor(thrd);
    thread$ * desc = get_thread(thrd);

    // Setup the monitor guard
    void (*dtor)(T& mutex this) = ^?{};
    bool join = cancelHandler != (void(*)(ThreadCancelled(T)&))0;
    (this.mg){&m, (void(*)())dtor, join};

    /* paranoid */ verifyf( Halted == desc->state || Cancelled == desc->state, "Expected thread to be Halted or Cancelled, was %d\n", (int)desc->state );

    // After the guard set-up and any wait, check for cancellation.
    struct _Unwind_Exception * cancellation = desc->self_cor.cancellation;
    if ( likely( 0p == cancellation ) ) {
        return;
    } else if ( Cancelled == desc->state ) {
        return;
    }
    desc->state = Cancelled;
    void(*defaultResumptionHandler)(ThreadCancelled(T) &) =
        join ? cancelHandler : default_thread_cancel_handler;

    // TODO: Remove explicit vtable set once trac#186 is fixed.
    ThreadCancelled(T) except;
    except.virtual_table = &_default_vtable;
    except.the_thread = &thrd;
    except.the_exception = __cfaehm_cancellation_exception( cancellation );
    // Why is this cast required?
    throwResume (ThreadCancelled(T) &)except;

    except.the_exception->virtual_table->free( except.the_exception );
    free( cancellation );
    desc->self_cor.cancellation = 0p;
}

void ^?{}( thread_dtor_guard_t & this ) {
    ^(this.mg){};
}
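
// Interpretation of the guard above (not normative): the ctor blocks the joiner
// or destructor on the thread's monitor until the thread is Halted or Cancelled,
// then re-raises any pending cancellation as a ThreadCancelled resumption,
// using the supplied cancelHandler for a join and the aborting default otherwise.
// Hedged sketch of passing a handler explicitly ('Worker', 'w', and 'on_cancel'
// are hypothetical names):
//     void on_cancel( ThreadCancelled(Worker) & e ) { /* recover */ }
//     {
//         thread_dtor_guard_t guard = { w, on_cancel };
//     } // guard dtor releases the thread's monitor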

//-----------------------------------------------------------------------------
// Starting and stopping threads
forall( T & | is_thread(T) )
void __thrd_start( T & this, void (*main_p)(T &) ) {
    thread$ * this_thrd = get_thread(this);

    disable_interrupts();
    // set up the new stack; its first run enters __cfactx_invoke_thread, which calls main_p
    __cfactx_start(main_p, get_coroutine(this), this, __cfactx_invoke_thread);

    // adopt the freshly initialized coroutine context as the thread's context
    this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
    /* paranoid */ verify( this_thrd->context.SP );

    // make the new thread eligible to run
    schedule_thread$( this_thrd, UNPARK_LOCAL );
    enable_interrupts();
}
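
// Note (interpretation): __thrd_start is not intended to be called directly by
// user code; the code generated for the 'thread' keyword and the scoped(T)
// wrappers below invoke it once the thread object is fully constructed.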

//-----------------------------------------------------------------------------
// Support for threads that don't use the thread keyword
forall( T & | sized(T) | is_thread(T) | { void ?{}(T&); } )
void ?{}( scoped(T)& this ) with( this ) {
    handle{};
    __thrd_start(handle, main);
}

forall( T &, P... | sized(T) | is_thread(T) | { void ?{}(T&, P); } )
void ?{}( scoped(T)& this, P params ) with( this ) {
    handle{ params };
    __thrd_start(handle, main);
}

forall( T & | sized(T) | is_thread(T) )
void ^?{}( scoped(T)& this ) with( this ) {
    ^handle{};
}
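
// Hedged usage sketch ('Worker' is a hypothetical thread type): scoped starts
// the thread at construction and stops it at the end of the enclosing scope.
//     {
//         scoped(Worker) w;      // Worker's main starts running here
//         /* ... */
//     }                          // ^?{} above destroys the handle, waiting for the thread to halt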

//-----------------------------------------------------------------------------
forall(T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled(T))
    | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
T & join( T & this ) {
    thread_dtor_guard_t guard = { this, defaultResumptionHandler };
    return this;
}
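
// Hedged sketch of intercepting cancellation at a join site ('Worker' and 'w'
// are hypothetical; the catchResume clause follows the usual CFA EHM form, as
// best understood here):
//     try {
//         join( w );
//     } catchResume( ThreadCancelled(Worker) * e ) {
//         // inspect e->the_exception, then let execution resume after join
//     }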

//-----------------------------------------------------------------------------
bool migrate( thread$ * thrd, struct cluster & cl ) {

    monitor$ * tmon = get_monitor(thrd);
    monitor$ * __monitors[] = { tmon };
    monitor_guard_t __guard = { __monitors, 1 };

    {
        // if nothing needs to be done, return false
        if( thrd->curr_cluster == &cl ) return false;

        // are we migrating ourself?
        const bool local = thrd == active_thread();

        /* paranoid */ verify( !local || &cl != active_cluster() );
        /* paranoid */ verify( !local || thrd->curr_cluster == active_cluster() );
        /* paranoid */ verify( !local || thrd->curr_cluster == active_processor()->cltr );
        /* paranoid */ verify( local || tmon->signal_stack.top->owner->waiting_thread == thrd );
        /* paranoid */ verify( local || tmon->signal_stack.top );

        // make sure we aren't interrupted while doing this
        // not as important if we aren't local
        disable_interrupts();

        // actually move the thread
        unregister( thrd->curr_cluster, *thrd );
        thrd->curr_cluster = &cl;
        doregister( thrd->curr_cluster, *thrd );

        // restore interrupts
        enable_interrupts();

        // if this is the local thread, we are still running on the old cluster
        if(local) yield();

        /* paranoid */ verify( !local || &cl == active_cluster() );
        /* paranoid */ verify( !local || thrd->curr_cluster == active_cluster() );
        /* paranoid */ verify( !local || thrd->curr_cluster == active_processor()->cltr );
        /* paranoid */ verify(  local || tmon->signal_stack.top );
        /* paranoid */ verify(  local || tmon->signal_stack.top->owner->waiting_thread == thrd );

        return true;
    }
}
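
// Hedged usage sketch (the cluster and its name are illustrative): migrating
// the calling thread to another cluster; when a thread migrates itself,
// migrate() yields so the move takes effect before returning true.
//     cluster io_cluster = { "io" };
//     migrate( active_thread(), io_cluster );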

//-----------------------------------------------------------------------------
#define GENERATOR LCG

void set_seed( uint32_t seed ) {
    uint32_t & state = active_thread()->random_state;
    state = __global_random_seed = seed;
    GENERATOR( state );
    __global_random_prime = state;
    __global_random_mask = true;
} // set_seed

uint32_t prng( void ) {                                 // [0,UINT_MAX]
    uint32_t & state = active_thread()->random_state;
    return GENERATOR( state );
} // prng
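
// Hedged usage sketch: seeding is optional (random_state is initialized in the
// thread$ ctor above); the values are illustrative only.
//     set_seed( 1009 );                // reseed this thread and the global generator
//     uint32_t r = prng();             // uniform over [0, UINT_MAX]
//     uint32_t roll = prng() % 6 + 1;  // 1..6 via modulo (slightly biased, fine for a sketch)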

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //