Changeset 9b1dcc2 for libcfa/src/concurrency/kernel_private.hfa
- Timestamp:
- Jun 12, 2020, 1:49:17 PM (4 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- cb196f2
- Parents:
- b388ee81
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
libcfa/src/concurrency/kernel_private.hfa
rb388ee81 r9b1dcc2 25 25 // Scheduler 26 26 27 struct __attribute__((aligned(64))) __scheduler_lock_id_t; 28 27 29 extern "C" { 28 30 void disable_interrupts() OPTIONAL_THREAD; … … 31 33 } 32 34 33 void __schedule_thread( $thread * ) __attribute__((nonnull (1)));35 void __schedule_thread( struct __processor_id_t *, $thread * ) __attribute__((nonnull (2))); 34 36 35 37 //Block current thread and release/wake-up the following resources … … 73 75 74 76 // KERNEL ONLY unpark with out disabling interrupts 75 void __unpark( $thread * thrd __cfaabi_dbg_ctx_param2 );77 void __unpark( struct __processor_id_t *, $thread * thrd __cfaabi_dbg_ctx_param2 ); 76 78 77 79 //----------------------------------------------------------------------------- … … 108 110 // Cells use by the reader writer lock 109 111 // while not generic it only relies on a opaque pointer 110 struct __attribute__((aligned(64))) __ processor_id{111 processor* volatile handle;112 struct __attribute__((aligned(64))) __scheduler_lock_id_t { 113 __processor_id_t * volatile handle; 112 114 volatile bool lock; 113 115 }; … … 115 117 // Lock-Free registering/unregistering of threads 116 118 // Register a processor to a given cluster and get its unique id in return 117 unsigned doregister( struct processor* proc );119 unsigned doregister( struct __processor_id_t * proc ); 118 120 119 121 // Unregister a processor from a given cluster using its id, getting back the original pointer 120 void unregister( struct processor* proc );122 void unregister( struct __processor_id_t * proc ); 121 123 122 124 //======================================================================= … … 167 169 168 170 // data pointer 169 __ processor_id* data;171 __scheduler_lock_id_t * data; 170 172 }; 171 173 … … 178 180 // Reader side : acquire when using the ready queue to schedule but not 179 181 // creating/destroying queues 180 static inline void ready_schedule_lock( struct processor* proc) with(*__scheduler_lock) {182 static inline void 
ready_schedule_lock( struct __processor_id_t * proc) with(*__scheduler_lock) { 181 183 unsigned iproc = proc->id; 182 184 /*paranoid*/ verify(data[iproc].handle == proc); … … 197 199 } 198 200 199 static inline void ready_schedule_unlock( struct processor* proc) with(*__scheduler_lock) {201 static inline void ready_schedule_unlock( struct __processor_id_t * proc) with(*__scheduler_lock) { 200 202 unsigned iproc = proc->id; 201 203 /*paranoid*/ verify(data[iproc].handle == proc);
Note: See TracChangeset for help on using the changeset viewer.