source: libcfa/src/concurrency/thread.cfa @ e1d66c84

Last change on this file since e1d66c84 was dd46fd3, checked in by Peter A. Buhr <pabuhr@…>, 2 years ago

generalization of PRNG

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// thread.cfa --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Wed Nov 30 18:14:07 2022
// Update Count     : 95
//

#define __cforall_thread__
#define _GNU_SOURCE

#include "thread.hfa"

#include "exception.hfa"
#include "kernel/private.hfa"
#include "limits.hfa"

#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

extern size_t __global_random_seed;
extern size_t __global_random_prime;
extern bool __global_random_mask;

#pragma GCC visibility push(default)

//-----------------------------------------------------------------------------
// Thread ctors and dtors
void ?{}( thread$ & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
	context{ 0p, 0p };
	self_cor{ name, storage, storageSize };
	ticket = TICKET_RUNNING;
	state = Start;
	preempted = __NO_PREEMPTION;
	corctx_flag = false;
	curr_cor = &self_cor;
	self_mon.owner = &this;
	self_mon.recursion = 1;
	self_mon_p = &self_mon;
	curr_cluster = &cl;
	rdy_link.next = 0p;
	rdy_link.ts   = MAX;
	user_link.next = 0p;
	user_link.prev = 0p;
	cltr_link.next = 0p;
	cltr_link.prev = 0p;
	preferred = ready_queue_new_preferred();
	last_proc = 0p;
	PRNG_SET_SEED( random_state, __global_random_mask ? __global_random_prime : __global_random_prime ^ rdtscl() );
	#if defined( __CFA_WITH_VERIFY__ )
		executing = 0p;
		canary = 0x0D15EA5E0D15EA5Ep;
	#endif

	clh_node = malloc( );								// Cforall typed malloc: size/type inferred from clh_node
	*clh_node = false;

	doregister(curr_cluster, this);
	monitors{ &self_mon_p, 1, (fptr_t)0 };
}

void ^?{}(thread$& this) with( this ) {
	#if defined( __CFA_WITH_VERIFY__ )
		canary = 0xDEADDEADDEADDEADp;
	#endif
	free(clh_node);
	unregister(curr_cluster, this);
	^self_cor{};
}

forall(T &)
void copy(ThreadCancelled(T) * dst, ThreadCancelled(T) * src) {
	dst->virtual_table = src->virtual_table;
	dst->the_thread = src->the_thread;
	dst->the_exception = src->the_exception;
}

forall(T &)
const char * msg(ThreadCancelled(T) *) {
	return "ThreadCancelled(...)";
}

forall(T &)
static void default_thread_cancel_handler(ThreadCancelled(T) & ) {
	// Improve this error message, can I do formatting?
	abort( "Unhandled thread cancellation.\n" );
}

forall(T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled(T))
    | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
void ?{}( thread_dtor_guard_t & this,
		T & thrd, void(*cancelHandler)(ThreadCancelled(T) &)) {
	monitor$ * m = get_monitor(thrd);
	thread$ * desc = get_thread(thrd);

	// Setup the monitor guard
	void (*dtor)(T& mutex this) = ^?{};
	bool join = cancelHandler != (void(*)(ThreadCancelled(T)&))0;
	(this.mg){&m, (void(*)())dtor, join};


	/* paranoid */ verifyf( Halted == desc->state || Cancelled == desc->state, "Expected thread to be Halted or Cancelled, was %d\n", (int)desc->state );

	// After the guard set-up and any wait, check for cancellation.
	struct _Unwind_Exception * cancellation = desc->self_cor.cancellation;
	if ( likely( 0p == cancellation ) ) {
		return;
	} else if ( Cancelled == desc->state ) {
		return;
	}
	desc->state = Cancelled;
	void(*defaultResumptionHandler)(ThreadCancelled(T) &) =
		join ? cancelHandler : default_thread_cancel_handler;

	// TODO: Remove explicit vtable set once trac#186 is fixed.
	ThreadCancelled(T) except;
	except.virtual_table = &_default_vtable;
	except.the_thread = &thrd;
	except.the_exception = __cfaehm_cancellation_exception( cancellation );
	// Why is this cast required?
	throwResume (ThreadCancelled(T) &)except;

	except.the_exception->virtual_table->free( except.the_exception );
	free( cancellation );
	desc->self_cor.cancellation = 0p;
}

void ^?{}( thread_dtor_guard_t & this ) {
	^(this.mg){};
}

//-----------------------------------------------------------------------------
// Starting and stopping threads
forall( T & | is_thread(T) )
void __thrd_start( T & this, void (*main_p)(T &) ) {
	thread$ * this_thrd = get_thread(this);

	disable_interrupts();
	__cfactx_start(main_p, get_coroutine(this), this, __cfactx_invoke_thread);

	this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
	/* paranoid */ verify( this_thrd->context.SP );

	schedule_thread$( this_thrd, UNPARK_LOCAL );
	enable_interrupts();
}
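
// Illustrative sketch (not part of this file): how __thrd_start is reached from
// user code.  `Worker` is a hypothetical thread type; conceptually, constructing
// an object declared with the `thread` keyword starts it via __thrd_start, and
// destroying it joins with it.
//
//   thread Worker {};                    // hypothetical user thread type
//   void main( Worker & ) { /* thread body */ }
//   {
//       Worker w;                        // construction starts w via __thrd_start( w, main )
//       ...                              // w runs concurrently with its creator
//   }                                    // destruction joins with w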

//-----------------------------------------------------------------------------
// Support for threads that don't use the thread keyword
forall( T & | sized(T) | is_thread(T) | { void ?{}(T&); } )
void ?{}( scoped(T)& this ) with( this ) {
	handle{};
	__thrd_start(handle, main);
}

forall( T &, P... | sized(T) | is_thread(T) | { void ?{}(T&, P); } )
void ?{}( scoped(T)& this, P params ) with( this ) {
	handle{ params };
	__thrd_start(handle, main);
}

forall( T & | sized(T) | is_thread(T) )
void ^?{}( scoped(T)& this ) with( this ) {
	^handle{};
}
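
// Illustrative sketch: using the scoped(T) wrapper above for a type that satisfies
// is_thread without being declared with the `thread` keyword.  `MyTask` is a
// hypothetical type assumed to satisfy sized(MyTask) and is_thread(MyTask) and to
// have a default constructor.
//
//   {
//       scoped(MyTask) s;                // ?{} above constructs s.handle, then starts it
//       ...                              // the task runs while s is in scope
//   }                                    // ^?{} above destroys s.handle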

//-----------------------------------------------------------------------------
forall(T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled(T))
	| { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
T & join( T & this ) {
	thread_dtor_guard_t guard = { this, defaultResumptionHandler };
	return this;
}
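
// Illustrative sketch: explicit join, e.g. with the hypothetical Worker type from
// the earlier sketch.  join blocks until the thread has halted; if it was
// cancelled, ThreadCancelled(Worker) is resumed at the joiner.
//
//   Worker w;
//   ...
//   Worker & r = join( w );              // returns w once it has terminated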

//-----------------------------------------------------------------------------
bool migrate( thread$ * thrd, struct cluster & cl ) {

	monitor$ * tmon = get_monitor(thrd);
	monitor$ * __monitors[] = { tmon };
	monitor_guard_t __guard = { __monitors, 1 };


	{
		// if nothing needs to be done, return false
		if( thrd->curr_cluster == &cl ) return false;

		// are we migrating ourself?
		const bool local = thrd == active_thread();

		/* paranoid */ verify( !local || &cl != active_cluster() );
		/* paranoid */ verify( !local || thrd->curr_cluster == active_cluster() );
		/* paranoid */ verify( !local || thrd->curr_cluster == active_processor()->cltr );
		/* paranoid */ verify( local || tmon->signal_stack.top );
		/* paranoid */ verify( local || tmon->signal_stack.top->owner->waiting_thread == thrd );

		// make sure we aren't interrupted while doing this
		// not as important if we aren't local
		disable_interrupts();

		// actually move the thread
		unregister( thrd->curr_cluster, *thrd );
		thrd->curr_cluster = &cl;
		doregister( thrd->curr_cluster, *thrd );

		// restore interrupts
		enable_interrupts();

		// if this is the local thread, we are still running on the old cluster
		if(local) yield();

		/* paranoid */ verify( !local || &cl == active_cluster() );
		/* paranoid */ verify( !local || thrd->curr_cluster == active_cluster() );
		/* paranoid */ verify( !local || thrd->curr_cluster == active_processor()->cltr );
		/* paranoid */ verify(  local || tmon->signal_stack.top );
		/* paranoid */ verify(  local || tmon->signal_stack.top->owner->waiting_thread == thrd );

		return true;
	}
}
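
// Illustrative sketch: a thread moving itself to another cluster.  `ioCluster`
// is a hypothetical cluster assumed to be created elsewhere with at least one
// processor.
//
//   extern cluster ioCluster;
//   migrate( active_thread(), ioCluster );   // re-register on ioCluster, then yield onto it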

//-----------------------------------------------------------------------------

void set_seed( size_t seed ) {
	PRNG_STATE_T & state = active_thread()->random_state;
	__global_random_seed = seed;
	PRNG_SET_SEED( state, seed );
	(void)PRNG_NAME( state );							// prime PRNG
	__global_random_prime = seed;
	__global_random_mask = true;
} // set_seed

size_t prng( void ) {									// [0,UINT_MAX]
	PRNG_STATE_T( & state ) = active_thread()->random_state;
	return PRNG_NAME( state );
} // prng
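
// Illustrative sketch: reseeding for reproducible runs.  set_seed reseeds the
// calling thread and records the seed so threads created afterwards are seeded
// deterministically (see the thread$ constructor above); prng then draws from
// the calling thread's per-thread state.
//
//   set_seed( 1009 );                    // hypothetical fixed seed
//   size_t r1 = prng();                  // value in [0, UINT_MAX]
//   size_t r2 = prng();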

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //