//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// thread.c --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Wed Nov 30 18:14:07 2022
// Update Count     : 95
//

#define __cforall_thread__
#define _GNU_SOURCE

#include "thread.hfa"

#include "exception.hfa"
#include "kernel/private.hfa"
#include "limits.hfa"

#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

extern size_t __global_random_seed;
extern size_t __global_random_prime;
extern bool __global_random_mask;

#pragma GCC visibility push(default)

//-----------------------------------------------------------------------------
// Thread ctors and dtors
void ?{}( thread$ & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
	context{ 0p, 0p };
	self_cor{ name, storage, storageSize };
	ticket = TICKET_RUNNING;
	state = Start;
	preempted = __NO_PREEMPTION;
	corctx_flag = false;
	curr_cor = &self_cor;
	self_mon.owner = &this;
	self_mon.recursion = 1;
	self_mon_p = &self_mon;
	curr_cluster = &cl;
	rdy_link.next = 0p;
	rdy_link.ts = MAX;
	user_link.next = 0p;
	user_link.prev = 0p;
	cltr_link.next = 0p;
	cltr_link.prev = 0p;
	preferred = ready_queue_new_preferred();
	last_proc = 0p;
	PRNG_SET_SEED( random_state, __global_random_mask ? __global_random_prime : __global_random_prime ^ rdtscl() );
	#if defined( __CFA_WITH_VERIFY__ )
		executing = 0p;
		canary = 0x0D15EA5E0D15EA5Ep;
	#endif

	clh_node = malloc( );
	*clh_node = false;

	doregister(curr_cluster, this);
	monitors{ &self_mon_p, 1, (fptr_t)0 };
}

void ^?{}(thread$& this) with( this ) {
	#if defined( __CFA_WITH_VERIFY__ )
		canary = 0xDEADDEADDEADDEADp;
	#endif
	free(clh_node);
	unregister(curr_cluster, this);
	^self_cor{};
}

forall(T &)
void copy(ThreadCancelled(T) * dst, ThreadCancelled(T) * src) {
	dst->virtual_table = src->virtual_table;
	dst->the_thread = src->the_thread;
	dst->the_exception = src->the_exception;
}

forall(T &)
const char * msg(ThreadCancelled(T) *) {
	return "ThreadCancelled(...)";
}

forall(T &)
static void default_thread_cancel_handler(ThreadCancelled(T) & ) {
	// Improve this error message, can I do formatting?
	abort( "Unhandled thread cancellation.\n" );
}

forall(T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled(T))
	| { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
void ?{}( thread_dtor_guard_t & this,
		T & thrd, void(*cancelHandler)(ThreadCancelled(T) &)) {
	monitor$ * m = get_monitor(thrd);
	thread$ * desc = get_thread(thrd);

	// Setup the monitor guard
	void (*dtor)(T& mutex this) = ^?{};
	bool join = cancelHandler != (void(*)(ThreadCancelled(T)&))0;
	(this.mg){&m, (void(*)())dtor, join};

	/* paranoid */ verifyf( Halted == desc->state || Cancelled == desc->state, "Expected thread to be Halted or Cancelled, was %d\n", (int)desc->state );

	// After the guard set-up and any wait, check for cancellation.
	struct _Unwind_Exception * cancellation = desc->self_cor.cancellation;
	if ( likely( 0p == cancellation ) ) {
		return;
	} else if ( Cancelled == desc->state ) {
		return;
	}
	desc->state = Cancelled;
	void(*defaultResumptionHandler)(ThreadCancelled(T) &) =
		join ? cancelHandler : default_thread_cancel_handler;

	// TODO: Remove explicit vtable set once trac#186 is fixed.
	ThreadCancelled(T) except;
	except.virtual_table = &_default_vtable;
	except.the_thread = &thrd;
	except.the_exception = __cfaehm_cancellation_exception( cancellation );
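
	// Resume the cancellation on this (joining) stack; if no enclosing handler
	// catches it, the defaultResumptionHandler selected above runs, i.e. the
	// join's cancelHandler when one was given, otherwise the aborting default.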
	// Why is this cast required?
	throwResume (ThreadCancelled(T) &)except;

	except.the_exception->virtual_table->free( except.the_exception );
	free( cancellation );
	desc->self_cor.cancellation = 0p;
}

void ^?{}( thread_dtor_guard_t & this ) {
	^(this.mg){};
}

//-----------------------------------------------------------------------------
// Starting and stopping threads
forall( T & | is_thread(T) )
void __thrd_start( T & this, void (*main_p)(T &) ) {
	thread$ * this_thrd = get_thread(this);

	disable_interrupts();
	__cfactx_start(main_p, get_coroutine(this), this, __cfactx_invoke_thread);

	this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
	/* paranoid */ verify( this_thrd->context.SP );

	schedule_thread$( this_thrd, UNPARK_LOCAL );
	enable_interrupts();
}

//-----------------------------------------------------------------------------
// Support for threads that don't use the thread keyword
forall( T & | sized(T) | is_thread(T) | { void ?{}(T&); } )
void ?{}( scoped(T)& this ) with( this ) {
	handle{};
	__thrd_start(handle, main);
}

forall( T &, P... | sized(T) | is_thread(T) | { void ?{}(T&, P); } )
void ?{}( scoped(T)& this, P params ) with( this ) {
	handle{ params };
	__thrd_start(handle, main);
}

forall( T & | sized(T) | is_thread(T) )
void ^?{}( scoped(T)& this ) with( this ) {
	^handle{};
}

//-----------------------------------------------------------------------------
forall(T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled(T))
	| { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
T & join( T & this ) {
	thread_dtor_guard_t guard = { this, defaultResumptionHandler };
	return this;
}

//-----------------------------------------------------------------------------
bool migrate( thread$ * thrd, struct cluster & cl ) {

	monitor$ * tmon = get_monitor(thrd);
	monitor$ * __monitors[] = { tmon };
	monitor_guard_t __guard = { __monitors, 1 };
	{
		// if nothing needs to be done, return false
		if( thrd->curr_cluster == &cl ) return false;

		// are we migrating ourself?
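		// (the local case must yield after the move, see below)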
		const bool local = thrd == active_thread();

		/* paranoid */ verify( !local || &cl != active_cluster() );
		/* paranoid */ verify( !local || thrd->curr_cluster == active_cluster() );
		/* paranoid */ verify( !local || thrd->curr_cluster == active_processor()->cltr );
		/* paranoid */ verify( local || tmon->signal_stack.top->owner->waiting_thread == thrd );
		/* paranoid */ verify( local || tmon->signal_stack.top );

		// make sure we aren't interrupted while doing this
		// not as important if we aren't local
		disable_interrupts();

		// actually move the thread
		unregister( thrd->curr_cluster, *thrd );
		thrd->curr_cluster = &cl;
		doregister( thrd->curr_cluster, *thrd );

		// restore interrupts
		enable_interrupts();

		// if this is the local thread, we are still running on the old cluster
		if(local) yield();

		/* paranoid */ verify( !local || &cl == active_cluster() );
		/* paranoid */ verify( !local || thrd->curr_cluster == active_cluster() );
		/* paranoid */ verify( !local || thrd->curr_cluster == active_processor()->cltr );
		/* paranoid */ verify( local || tmon->signal_stack.top );
		/* paranoid */ verify( local || tmon->signal_stack.top->owner->waiting_thread == thrd );

		return true;
	}
}

//-----------------------------------------------------------------------------
void set_seed( size_t seed ) {
	PRNG_STATE_T & state = active_thread()->random_state;
	__global_random_seed = seed;
	PRNG_SET_SEED( state, seed );
	(void)PRNG_NAME( state );							// prime PRNG
	__global_random_prime = seed;
	__global_random_mask = true;
} // set_seed

size_t prng( void ) {									// [0,UINT_MAX]
	PRNG_STATE_T( & state ) = active_thread()->random_state;
	return PRNG_NAME( state );
} // prng

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //