source: libcfa/src/concurrency/kernel/startup.cfa @ 4c925cd

Last change on this file since 4c925cd was 62502cc4, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Fixed a deadlock where threads could acquire the central scheduler lock for writing while preemption was enabled, causing any attempt to run any thread to deadlock.
Also added runtime checks to catch new code that forgets to disable interrupts.

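In essence, the fix enforces an ordering invariant: the central scheduler lock may only be acquired for writing once preemption is disabled, otherwise a preemption signal arriving while the write lock is held can try to schedule a thread, which needs the lock, and deadlock. Below is a minimal standalone sketch of such a runtime check, in plain C with illustrative names (scheduler_write_lock and the standalone preemption_state are stand-ins, not the runtime's actual API; the file below expresses the same check as verify( ! kernelTLS.preemption_state.enabled )):

	#include <assert.h>
	#include <stdbool.h>

	// Per-kernel-thread preemption state, loosely modeled on the runtime's TLS data.
	struct preemption_state_t { bool enabled; int disable_count; };
	static _Thread_local struct preemption_state_t preemption_state = { false, 1 };

	static void scheduler_write_lock( void ) {
		// The added runtime check amounts to this assertion: interrupts must
		// already be off before the write lock is taken.
		assert( ! preemption_state.enabled );
		/* ... acquire the scheduler rwlock for writing ... */
	}
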
//
// Cforall Version 1.0.0 Copyright (C) 2020 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel/startup.cfa --
//
// Author           : Thierry Delisle
// Created On       : Thu Jul 30 15:12:54 2020
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#define __cforall_thread__

// C Includes
#include <errno.h>              // errno
#include <string.h>             // strerror
#include <unistd.h>             // sysconf
extern "C" {
	#include <limits.h>       // PTHREAD_STACK_MIN
	#include <sys/mman.h>     // mprotect
	#include <sys/resource.h> // getrlimit
}

// CFA Includes
#include "kernel_private.hfa"
#include "startup.hfa"          // STARTUP_PRIORITY_XXX

//-----------------------------------------------------------------------------
// Some assembly required
#if defined( __i386 )
	#define CtxGet( ctx )        \
		__asm__ volatile (     \
			"movl %%esp,%0\n"\
			"movl %%ebp,%1\n"\
			: "=rm" (ctx.SP),\
			  "=rm" (ctx.FP) \
		)
#elif defined( __x86_64 )
	#define CtxGet( ctx )        \
		__asm__ volatile (     \
			"movq %%rsp,%0\n"\
			"movq %%rbp,%1\n"\
			: "=rm" (ctx.SP),\
			  "=rm" (ctx.FP) \
		)
#elif defined( __ARM_ARCH )
	#define CtxGet( ctx ) __asm__ ( \
		"mov %0,%%sp\n"   \
		"mov %1,%%r11\n"  \
	: "=rm" (ctx.SP), "=rm" (ctx.FP) )
#else
	#error unknown hardware architecture
#endif
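
// CtxGet( ctx ) captures the executing thread's stack pointer and frame pointer
// into ctx.SP and ctx.FP; see the current_stack_info_t constructor below.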

//-----------------------------------------------------------------------------
// Start and stop routines for the kernel, declared first to make sure they run first
static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));

//-----------------------------------------------------------------------------
// Static Forward Declarations
struct current_stack_info_t;

static void * __invoke_processor(void * arg);
static void __kernel_first_resume( processor * this );
static void __kernel_last_resume ( processor * this );
static void init(processor & this, const char name[], cluster & _cltr);
static void deinit(processor & this);
static void doregister( struct cluster & cltr );
static void unregister( struct cluster & cltr );
static void ?{}( $coroutine & this, current_stack_info_t * info);
static void ?{}( $thread & this, current_stack_info_t * info);
static void ?{}(processorCtx_t & this) {}
static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info);

//-----------------------------------------------------------------------------
// Forward Declarations for other modules
extern void __kernel_alarm_startup(void);
extern void __kernel_alarm_shutdown(void);
extern void __kernel_io_startup (void);
extern void __kernel_io_shutdown(void);

//-----------------------------------------------------------------------------
// Other Forward Declarations
extern void __wake_proc(processor *);

//-----------------------------------------------------------------------------
// Kernel storage
KERNEL_STORAGE(cluster,              mainCluster);
KERNEL_STORAGE(processor,            mainProcessor);
KERNEL_STORAGE($thread,              mainThread);
KERNEL_STORAGE(__stack_t,            mainThreadCtx);
KERNEL_STORAGE(io_context,           mainPollerThread);
KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
#if !defined(__CFA_NO_STATISTICS__)
	KERNEL_STORAGE(__stats_t, mainProcStats);
#endif

cluster              * mainCluster;
processor            * mainProcessor;
$thread              * mainThread;
__scheduler_RWLock_t * __scheduler_lock;

extern "C" {
	struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
}

size_t __page_size = 0;

//-----------------------------------------------------------------------------
// Global state
thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) @= {
	NULL,                 // cannot use 0p
	NULL,
	NULL,
	{ 1, false, false },
};

//-----------------------------------------------------------------------------
// Struct to steal stack
struct current_stack_info_t {
	__stack_t * storage;  // pointer to stack object
	void * base;          // base of stack
	void * limit;         // stack grows towards stack limit
	void * context;       // address of cfa_context_t
};

void ?{}( current_stack_info_t & this ) {
	__stack_context_t ctx;
	CtxGet( ctx );
	this.base = ctx.FP;

	rlimit r;
	getrlimit( RLIMIT_STACK, &r);
	size_t size = r.rlim_cur;

	this.limit = (void *)(((intptr_t)this.base) - size);
	this.context = &storage_mainThreadCtx;
}

//=============================================================================================
// Kernel Setup logic
//=============================================================================================
//-----------------------------------------------------------------------------
// Kernel boot procedures
static void __kernel_startup(void) {
	verify( ! kernelTLS.preemption_state.enabled );
	__cfadbg_print_safe(runtime_core, "Kernel : Starting\n");

	__page_size = sysconf( _SC_PAGESIZE );

	__cfa_dbg_global_clusters.list{ __get };
	__cfa_dbg_global_clusters.lock{};

	// Initialize the global scheduler lock
	__scheduler_lock = (__scheduler_RWLock_t*)&storage___scheduler_lock;
	(*__scheduler_lock){};

	// Initialize the main cluster
	mainCluster = (cluster *)&storage_mainCluster;
	(*mainCluster){"Main Cluster", 0};

	__cfadbg_print_safe(runtime_core, "Kernel : Main cluster ready\n");

	// Start by initializing the main thread
	// SKULLDUGGERY: the mainThread steals the process main thread
	// which will then be scheduled by the mainProcessor normally
	mainThread = ($thread *)&storage_mainThread;
	current_stack_info_t info;
	info.storage = (__stack_t*)&storage_mainThreadCtx;
	(*mainThread){ &info };

	__cfadbg_print_safe(runtime_core, "Kernel : Main thread ready\n");

	// Construct the processor context of the main processor
	void ?{}(processorCtx_t & this, processor * proc) {
		(this.__cor){ "Processor" };
		this.__cor.starter = 0p;
		this.proc = proc;
	}

	void ?{}(processor & this) with( this ) {
		( this.idle ){};
		( this.terminated ){ 0 };
		( this.runner ){};
		init( this, "Main Processor", *mainCluster );
		kernel_thread = pthread_self();

		runner{ &this };
		__cfadbg_print_safe(runtime_core, "Kernel : constructed main processor context %p\n", &runner);
	}

	// Initialize the main processor and the main processor ctx
	// (the coroutine that contains the processing control flow)
	mainProcessor = (processor *)&storage_mainProcessor;
	(*mainProcessor){};

	// Initialize the global state variables
	kernelTLS.this_processor = mainProcessor;
	kernelTLS.this_thread    = mainThread;

	#if !defined( __CFA_NO_STATISTICS__ )
		kernelTLS.this_stats = (__stats_t *)& storage_mainProcStats;
		__init_stats( kernelTLS.this_stats );
	#endif

	// Enable preemption
	__kernel_alarm_startup();

	// Start IO
	__kernel_io_startup();

	// Add the main thread to the ready queue
	// once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
	__schedule_thread((__processor_id_t *)mainProcessor, mainThread);

	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context
	// to the current UNIX context. Hence, the main thread does not begin through
	// __cfactx_invoke_thread, like all other threads. The trick here is that mainThread is on the
	// ready queue when this call is made.
	__kernel_first_resume( kernelTLS.this_processor );

	// THE SYSTEM IS NOW COMPLETELY RUNNING

	// SKULLDUGGERY: The constructor for the mainCluster calls alloc with a dimension of 0;
	// malloc *can* return a non-null value in that case, so free it if it did.
	free( mainCluster->io.ctxs );

	// Now that the system is up, finish creating systems that need threading
	mainCluster->io.ctxs = (io_context *)&storage_mainPollerThread;
	mainCluster->io.cnt  = 1;
	(*mainCluster->io.ctxs){ *mainCluster };

	__cfadbg_print_safe(runtime_core, "Kernel : Started\n--------------------------------------------------\n\n");

	verify( ! kernelTLS.preemption_state.enabled );
	enable_interrupts( __cfaabi_dbg_ctx );
	verify( TL_GET( preemption_state.enabled ) );
}

static void __kernel_shutdown(void) {
	// Before we start shutting things down, wait for systems that need threading to shut down
	^(*mainCluster->io.ctxs){};
	mainCluster->io.cnt  = 0;
	mainCluster->io.ctxs = 0p;

	/* paranoid */ verify( TL_GET( preemption_state.enabled ) );
	disable_interrupts();
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

	__cfadbg_print_safe(runtime_core, "\n--------------------------------------------------\nKernel : Shutting down\n");

	// SKULLDUGGERY: Notify the mainProcessor it needs to terminate.
	// When its coroutine terminates, it returns control to the mainThread,
	// which is currently here
	__atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
	__kernel_last_resume( kernelTLS.this_processor );
	mainThread->self_cor.state = Halted;

	// THE SYSTEM IS NOW COMPLETELY STOPPED

	// Disable preemption
	__kernel_alarm_shutdown();

	// Stop IO
	__kernel_io_shutdown();

	// Destroy the main processor and its context in reverse order of construction
	// These were manually constructed so we need to manually destroy them
	void ^?{}(processor & this) with( this ){
		deinit( this );

		/* paranoid */ verify( this.do_terminate == true );
		__cfaabi_dbg_print_safe("Kernel : destroyed main processor context %p\n", &runner);
	}

	^(*mainProcessor){};

	// Final step, destroy the main thread since it is no longer needed
	// Since we provided a stack to this task, it will not destroy anything
	/* paranoid */ verify(mainThread->self_cor.stack.storage == (__stack_t*)(((uintptr_t)&storage_mainThreadCtx)| 0x1));
	^(*mainThread){};

	^(*mainCluster){};

	^(*__scheduler_lock){};

	^(__cfa_dbg_global_clusters.list){};
	^(__cfa_dbg_global_clusters.lock){};

	__cfadbg_print_safe(runtime_core, "Kernel : Shutdown complete\n");
}

//=============================================================================================
// Kernel Initial Scheduling logic
//=============================================================================================

// Context invoker for processors
// This is the entry point for processors (kernel threads) *except* for the main processor
// It effectively constructs a coroutine by stealing the pthread stack
static void * __invoke_processor(void * arg) {
	#if !defined( __CFA_NO_STATISTICS__ )
		__stats_t local_stats;
		__init_stats( &local_stats );
		kernelTLS.this_stats = &local_stats;
	#endif

	processor * proc = (processor *) arg;
	kernelTLS.this_processor = proc;
	kernelTLS.this_thread    = 0p;
	kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
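	// note: new kernel threads start with preemption disabled (disable_count == 1)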
	// SKULLDUGGERY: We want to create a context for the processor coroutine
	// which is needed for the 2-step context switch. However, there is no reason
	// to waste the perfectly valid stack created by pthread.
	current_stack_info_t info;
	__stack_t ctx;
	info.storage = &ctx;
	(proc->runner){ proc, &info };

	__cfaabi_dbg_print_safe("Coroutine : created stack %p\n", get_coroutine(proc->runner)->stack.storage);

	// Set global state
	kernelTLS.this_thread = 0p;

	// We now have a proper context from which to schedule threads
	__cfadbg_print_safe(runtime_core, "Kernel : core %p created (%p, %p)\n", proc, &proc->runner, &ctx);

	// SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
	// resume it to start it like we normally would; it would just context switch
	// back to here. Instead, call main directly since we are already on the
	// appropriate stack.
	get_coroutine(proc->runner)->state = Active;
	main( proc->runner );
	get_coroutine(proc->runner)->state = Halted;

	// Main routine of the core returned, the core is now fully terminated
	__cfadbg_print_safe(runtime_core, "Kernel : core %p main ended (%p)\n", proc, &proc->runner);

	#if !defined(__CFA_NO_STATISTICS__)
		__tally_stats(proc->cltr->stats, &local_stats);
		if( 0 != proc->print_stats ) {
			__print_stats( &local_stats, proc->print_stats, true, proc->name, (void*)proc );
		}
	#endif

	return 0p;
}

static void __kernel_first_resume( processor * this ) {
	$thread * src = mainThread;
	$coroutine * dst = get_coroutine(this->runner);

	verify( ! kernelTLS.preemption_state.enabled );

	kernelTLS.this_thread->curr_cor = dst;
	__stack_prepare( &dst->stack, 65000 );
	__cfactx_start(main, dst, this->runner, __cfactx_invoke_coroutine);

	verify( ! kernelTLS.preemption_state.enabled );

	dst->last = &src->self_cor;
	dst->starter = dst->starter ? dst->starter : &src->self_cor;

	// make sure the current state is still correct
	/* paranoid */ verify(src->state == Ready);

	// context switch to specified coroutine
	verify( dst->context.SP );
	__cfactx_switch( &src->context, &dst->context );
	// when __cfactx_switch returns we are back in the src coroutine

	mainThread->curr_cor = &mainThread->self_cor;

	// make sure the current state has been updated
	/* paranoid */ verify(src->state == Active);

	verify( ! kernelTLS.preemption_state.enabled );
}

// KERNEL_ONLY
static void __kernel_last_resume( processor * this ) {
	$coroutine * src = &mainThread->self_cor;
	$coroutine * dst = get_coroutine(this->runner);

	verify( ! kernelTLS.preemption_state.enabled );
	verify( dst->starter == src );
	verify( dst->context.SP );

	// SKULLDUGGERY: in debug mode, processors check that the stack is still
	// within its limits after running a thread. That check does not make sense
	// if we context switch to the processor using coroutine semantics. Since
	// this is a special case, use the current context info to populate these
	// fields.
	__cfaabi_dbg_debug_do(
		__stack_context_t ctx;
		CtxGet( ctx );
		mainThread->context.SP = ctx.SP;
		mainThread->context.FP = ctx.FP;
	)

	// context switch to the processor
	__cfactx_switch( &src->context, &dst->context );
}

//=============================================================================================
// Kernel Object Constructors logic
//=============================================================================================
//-----------------------------------------------------------------------------
// Main thread construction
static void ?{}( $coroutine & this, current_stack_info_t * info) with( this ) {
	stack.storage = info->storage;
	with(*stack.storage) {
		limit     = info->limit;
		base      = info->base;
	}
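	// Tag the low bit of the storage pointer to mark the stack as user-supplied,
	// so the runtime will not attempt to destroy it (the matching check appears
	// in __kernel_shutdown)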
	__attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage;
	*istorage |= 0x1;
	name = "Main Thread";
	state = Start;
	starter = 0p;
	last = 0p;
	cancellation = 0p;
}

static void ?{}( $thread & this, current_stack_info_t * info) with( this ) {
	ticket = 1;
	state = Start;
	self_cor{ info };
	curr_cor = &self_cor;
	curr_cluster = mainCluster;
	self_mon.owner = &this;
	self_mon.recursion = 1;
	self_mon_p = &self_mon;
	link.next = 0p;
	link.prev = 0p;

	node.next = 0p;
	node.prev = 0p;
	doregister(curr_cluster, this);

	monitors{ &self_mon_p, 1, (fptr_t)0 };
}

//-----------------------------------------------------------------------------
// Processor
// Construct the processor context of non-main processors
static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
	(this.__cor){ info };
	this.proc = proc;
}

static void init(processor & this, const char name[], cluster & _cltr) with( this ) {
	this.name = name;
	this.cltr = &_cltr;
	id = -1u;
	destroyer = 0p;
	do_terminate = false;
	preemption_alarm = 0p;
	pending_preemption = false;

	#if !defined(__CFA_NO_STATISTICS__)
		print_stats = 0;
		print_halts = false;
	#endif

	lock( this.cltr->idles );
		int target = this.cltr->idles.total += 1u;
	unlock( this.cltr->idles );

	id = doregister((__processor_id_t*)&this);

	// Lock the RWlock so no-one pushes/pops while we are changing the queue
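	// note: preemption is disabled at this point; acquiring this lock for
	// writing with preemption enabled can deadlock (see commit 62502cc4 above)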
	uint_fast32_t last_size = ready_mutate_lock();

		// Adjust the ready queue size
		ready_queue_grow( cltr, target );

	// Unlock the RWlock
	ready_mutate_unlock( last_size );

	__cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this);
}

// Not a dtor: it just preps the destruction but should not destroy members
static void deinit(processor & this) {
	lock( this.cltr->idles );
		int target = this.cltr->idles.total -= 1u;
	unlock( this.cltr->idles );

	// Lock the RWlock so no-one pushes/pops while we are changing the queue
	uint_fast32_t last_size = ready_mutate_lock();

		// Adjust the ready queue size
		ready_queue_shrink( this.cltr, target );

	// Unlock the RWlock
	ready_mutate_unlock( last_size );

	// Finally we don't need the read_lock any more
	unregister((__processor_id_t*)&this);
}

void ?{}(processor & this, const char name[], cluster & _cltr) {
	( this.idle ){};
	( this.terminated ){ 0 };
	( this.runner ){};

	disable_interrupts();
		init( this, name, _cltr );
	enable_interrupts( __cfaabi_dbg_ctx );

	__cfadbg_print_safe(runtime_core, "Kernel : Starting core %p\n", &this);

	this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );
}

void ^?{}(processor & this) with( this ){
	if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
		__cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this);

		__atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
		__wake_proc( &this );

		P( terminated );
		verify( kernelTLS.this_processor != &this);
	}

	int err = pthread_join( kernel_thread, 0p );
	if( err != 0 ) abort("KERNEL ERROR: joining processor %p caused error %s\n", &this, strerror(err));

	free( this.stack );

	disable_interrupts();
		deinit( this );
	enable_interrupts( __cfaabi_dbg_ctx );
}

//-----------------------------------------------------------------------------
// Cluster
static void ?{}(__cluster_idles & this) {
	this.lock  = 0;
	this.idle  = 0;
	this.total = 0;
	(this.list){};
}

void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) with( this ) {
	this.name = name;
	this.preemption_rate = preemption_rate;
	ready_queue{};

	#if !defined(__CFA_NO_STATISTICS__)
		print_stats = 0;
		stats = alloc();
		__init_stats( stats );
	#endif

	threads{ __get };

	doregister(this);

	// Lock the RWlock so no-one pushes/pops while we are changing the queue
	uint_fast32_t last_size = ready_mutate_lock();

		// Adjust the ready queue size
		ready_queue_grow( &this, 0 );

	// Unlock the RWlock
	ready_mutate_unlock( last_size );

	this.io.cnt  = num_io;
	this.io.ctxs = aalloc(num_io);
	for(i; this.io.cnt) {
		(this.io.ctxs[i]){ this, io_params };
	}
}

void ^?{}(cluster & this) {
	for(i; this.io.cnt) {
		^(this.io.ctxs[i]){ true };
	}
	free(this.io.ctxs);

	// Lock the RWlock so no-one pushes/pops while we are changing the queue
	uint_fast32_t last_size = ready_mutate_lock();

		// Adjust the ready queue size
		ready_queue_shrink( &this, 0 );

	// Unlock the RWlock
	ready_mutate_unlock( last_size );

	#if !defined(__CFA_NO_STATISTICS__)
		if( 0 != this.print_stats ) {
			__print_stats( this.stats, this.print_stats, true, this.name, (void*)&this );
		}
		free( this.stats );
	#endif

	unregister(this);
}

//=============================================================================================
// Miscellaneous Initialization
//=============================================================================================
//-----------------------------------------------------------------------------
// Global Queues
static void doregister( cluster     & cltr ) {
	lock      ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
	push_front( __cfa_dbg_global_clusters.list, cltr );
	unlock    ( __cfa_dbg_global_clusters.lock );
}

static void unregister( cluster     & cltr ) {
	lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
	remove( __cfa_dbg_global_clusters.list, cltr );
	unlock( __cfa_dbg_global_clusters.lock );
}

void doregister( cluster * cltr, $thread & thrd ) {
	lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
	cltr->nthreads += 1;
	push_front(cltr->threads, thrd);
	unlock    (cltr->thread_list_lock);
}

void unregister( cluster * cltr, $thread & thrd ) {
	lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
	remove(cltr->threads, thrd );
	cltr->nthreads -= 1;
	unlock(cltr->thread_list_lock);
}

static void check( int ret, const char func[] ) {
	if ( ret ) {                                                                            // pthread routines return errno values
		abort( "%s : internal error, error(%d) %s.", func, ret, strerror( ret ) );
	} // if
} // check

void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
	pthread_attr_t attr;

	check( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute

	size_t stacksize;
	// default stack size, normally defined by shell limit
	check( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" );
	assert( stacksize >= PTHREAD_STACK_MIN );

	void * stack;
	__cfaabi_dbg_debug_do(
		stack = memalign( __page_size, stacksize + __page_size );
		// pthread has no mechanism to create the guard page in user supplied stack.
		if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
			abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
		} // if
	);
	__cfaabi_dbg_no_debug_do(
		stack = malloc( stacksize );
	);

	check( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" );

	check( pthread_create( pthread, &attr, start, arg ), "pthread_create" );
	return stack;
}