Changes in / [66b8773:3eab0ef6]

- Location: src
- Files: 1 added, 9 edited

Legend (the diffs below use unified notation):
- ' '  Unmodified
- '+'  Added
- '-'  Removed
src/Concurrency/Keywords.cc
r66b8773 → r3eab0ef6

@@ -200 +200 @@
 	std::list<DeclarationWithType*> findMutexArgs( FunctionDecl* );
 	void validate( DeclarationWithType * );
-	void addStatments( CompoundStmt *, const std::list<DeclarationWithType * > &);
+	void addStatments( FunctionDecl* func, CompoundStmt *, const std::list<DeclarationWithType * > &);

 	static void implement( std::list< Declaration * > & translationUnit ) {
@@ -210 +210 @@
 	StructDecl* monitor_decl = nullptr;
 	StructDecl* guard_decl = nullptr;
+
+	static std::unique_ptr< Type > generic_func;
 };
+
+std::unique_ptr< Type > MutexKeyword::generic_func = std::unique_ptr< Type >(
+	new FunctionType(
+		noQualifiers,
+		true
+	)
+);

 //-----------------------------------------------------------------------------
@@ -394 +403 @@
 // Mutex keyword implementation
 //=============================================================================================
+
 void MutexKeyword::visit(FunctionDecl* decl) {
 	Visitor::visit(decl);
@@ -410 +420 @@
 	if( !guard_decl ) throw SemanticError( "mutex keyword requires monitors to be in scope, add #include <monitor>", decl );

-	addStatments( body, mutexArgs );
+	addStatments( decl, body, mutexArgs );
 }
@@ -456 +466 @@
 }

-void MutexKeyword::addStatments( CompoundStmt * body, const std::list<DeclarationWithType * > & args ) {
+void MutexKeyword::addStatments( FunctionDecl* func, CompoundStmt * body, const std::list<DeclarationWithType * > & args ) {
 	ObjectDecl * monitors = new ObjectDecl(
 		"__monitors",
@@ -487 +497 @@
 	);

+	assert(generic_func);
+
 	//in reverse order :
-	// monitor_guard_t __guard = { __monitors, # };
+	// monitor_guard_t __guard = { __monitors, #, func };
 	body->push_front(
 		new DeclStmt( noLabels, new ObjectDecl(
@@ -502 +514 @@
 		{
 			new SingleInit( new VariableExpr( monitors ) ),
-			new SingleInit( new ConstantExpr( Constant::from_ulong( args.size() ) ) )
+			new SingleInit( new ConstantExpr( Constant::from_ulong( args.size() ) ) ),
+			new SingleInit( new CastExpr( new VariableExpr( func ), generic_func->clone() ) )
 		},
 		noDesignators,
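Reviewer note: the net effect of this pass change is that the code generated for a mutex routine now passes the routine itself into the guard, so the runtime can later match external-scheduling requests by function pointer. A conceptual sketch of the rewritten body, not the literal AST output; get_monitor stands in for the member access the pass actually emits, and the cast corresponds to the generic_func FunctionType above:

    // User code:
    void f( M * mutex m1, M * mutex m2 ) { /* body */ }

    // What addStatments conceptually produces (sketch):
    void f( M * m1, M * m2 ) {
        monitor_desc * __monitors[] = { get_monitor( m1 ), get_monitor( m2 ) };
        monitor_guard_t __guard = { __monitors, 2, (fptr_t)f };  // new third field: the routine itself
        /* body */
    }   // ^?{}( &__guard ) releases the monitors when the body exits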
src/libcfa/concurrency/invoke.h
r66b8773 → r3eab0ef6

@@ -28 +28 @@
 #define thread_local _Thread_local

+typedef void (*fptr_t)();
+
 struct spinlock {
 	volatile int lock;
@@ -50 +52 @@
 void append( struct __thread_queue_t *, struct thread_desc * );
 struct thread_desc * pop_head( struct __thread_queue_t * );
+struct thread_desc * remove( struct __thread_queue_t *, struct thread_desc ** );

 void ?{}( struct __condition_stack_t * );
@@ -87 +90 @@
 	struct __condition_stack_t signal_stack;  // stack of conditions to run next once we exit the monitor
 	unsigned int recursion;                   // monitor routines can be called recursively, we need to keep track of that
-};
+
+	struct __acceptable_t * acceptables;      // list of acceptable functions, null if any
+	unsigned short acceptable_count;          // number of acceptable functions
+	short accepted_index;                     // the index of the accepted function, -1 if none
+};

 struct thread_desc {
+	// Core threading fields
 	struct coroutine_desc cor;                // coroutine body used to store context
 	struct monitor_desc mon;                  // monitor body used for mutual exclusion
+
+	// Link lists fields
 	struct thread_desc * next;                // instrusive link field for threads
+
+	// Current status related to monitors
 	struct monitor_desc ** current_monitors;  // currently held monitors
 	unsigned short current_monitor_count;     // number of currently held monitors
-};
+	fptr_t current_monitor_func;              // last function that acquired monitors
+};

 #endif //_INVOKE_H_
src/libcfa/concurrency/kernel.c
r66b8773 → r3eab0ef6

@@ -366 +366 @@

 void BlockInternal( thread_desc * thrd ) {
+	assert(thrd);
 	disable_interrupts();
 	assert( thrd->cor.state != Halted );
@@ -379 +380 @@

 void BlockInternal( spinlock * lock, thread_desc * thrd ) {
+	assert(thrd);
 	disable_interrupts();
 	this_processor->finish.action_code = Release_Schedule;
@@ -666 +668 @@
 }

+thread_desc * remove( __thread_queue_t * this, thread_desc ** it ) {
+	thread_desc * thrd = *it;
+	verify( thrd );
+
+	(*it) = thrd->next;
+
+	if( this->tail == &thrd->next ) {
+		this->tail = it;
+	}
+
+	thrd->next = NULL;
+
+	verify( (this->head == NULL) == (&this->head == this->tail) );
+	verify( *this->tail == NULL );
+	return thrd;
+}
+
+
+
 void ?{}( __condition_stack_t * this ) {
 	this->top = NULL;
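Reviewer note: remove() uses the pointer-to-pointer iteration idiom, which lets a singly linked intrusive queue unlink a node without tracking a separate "previous" pointer; fixing up tail is the only extra case. A self-contained plain-C sketch of the same idiom, with hypothetical node and queue types (only the pattern matches the runtime code):

    #include <assert.h>
    #include <stddef.h>

    struct node  { struct node * next; int key; };
    struct queue { struct node * head; struct node ** tail; };  // tail points at the last next field

    // Remove the first node matching key; mirrors remove( __thread_queue_t *, thread_desc ** ).
    static struct node * remove_key( struct queue * q, int key ) {
        for( struct node ** it = &q->head; *it; it = &(*it)->next ) {
            if( (*it)->key == key ) {
                struct node * n = *it;
                *it = n->next;                            // unlink without a prev pointer
                if( q->tail == &n->next ) q->tail = it;   // removed the last node: retarget tail
                n->next = NULL;
                return n;
            }
        }
        return NULL;                                      // no match
    }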
src/libcfa/concurrency/monitor
r66b8773 → r3eab0ef6

@@ -23 +23 @@

 static inline void ?{}(monitor_desc * this) {
+	(&this->lock){};
 	this->owner = NULL;
+	(&this->entry_queue){};
+	(&this->signal_stack){};
 	this->recursion = 0;
+	this->acceptables = NULL;
+	this->acceptable_count = 0;
+	this->accepted_index = -1;
 }

@@ -32 +38 @@
 	monitor_desc ** prev_mntrs;
 	unsigned short prev_count;
+	fptr_t prev_func;
 };

@@ -38 +45 @@
 }

-void ?{}( monitor_guard_t * this, monitor_desc ** m, int count );
+void ?{}( monitor_guard_t * this, monitor_desc ** m, int count, void (*func)() );
 void ^?{}( monitor_guard_t * this );

@@ -89 +96 @@
 uintptr_t front( condition * this );

+//-----------------------------------------------------------------------------
+// External scheduling
+
 struct __acceptable_t {
-	void (*func)(void);
+	fptr_t func;
 	unsigned short count;
-	monitor_desc * monitors[1];
+	monitor_desc ** monitors;
 };

-void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void));
+int __accept_internal( unsigned short count, __acceptable_t * acceptables );

 // Local Variables: //
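Reviewer note: __accept_internal now returns the index of the accepted __acceptable_t rather than receiving a single function, which suggests call sites will dispatch on the result. A hypothetical call-site sketch; the accept statement that would generate this is not part of this changeset, and f, g, get_monitor and the dispatch shape are illustrative only:

    monitor_desc * mons[] = { get_monitor( &m ) };      // hypothetical helper
    __acceptable_t accs[] = {
        { (fptr_t)f, 1, mons },                         // accept a caller of f( M * mutex )
        { (fptr_t)g, 1, mons }                          // accept a caller of g( M * mutex )
    };
    int idx = __accept_internal( 2, accs );
    switch( idx ) {
        case  0: /* a caller of f was accepted */ break;
        case  1: /* a caller of g was accepted */ break;
        case -1: /* woke without an accepted index */ break;
    }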
src/libcfa/concurrency/monitor.c
r66b8773 → r3eab0ef6

@@ -25 +25 @@
 static inline void set_owner( monitor_desc * this, thread_desc * owner );
 static inline thread_desc * next_thread( monitor_desc * this );
+static inline int is_accepted( thread_desc * owner, monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() );

 static inline void lock_all( spinlock ** locks, unsigned short count );
@@ -34 +35 @@
 static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count );

+static inline void init     ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
+static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
+
 static inline thread_desc * check_condition( __condition_criterion_t * );
 static inline void brand_condition( condition * );
 static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val );

+static inline thread_desc * search_entry_queue( __acceptable_t * acceptables, int acc_count, monitor_desc ** monitors, int count );
+
+//-----------------------------------------------------------------------------
+// Useful defines
+#define wait_ctx(thrd, user_info)                          /* Create the necessary information to use the signaller stack        */ \
+	__condition_node_t waiter = { thrd, count, user_info }; /* Create the node specific to this wait operation                    */ \
+	__condition_criterion_t criteria[count];                /* Create the creteria this wait operation needs to wake up           */ \
+	init( count, monitors, &waiter, criteria );             /* Link everything together                                            */ \
+
+#define wait_ctx_primed(thrd, user_info)                   /* Create the necessary information to use the signaller stack        */ \
+	__condition_node_t waiter = { thrd, count, user_info }; /* Create the node specific to this wait operation                    */ \
+	__condition_criterion_t criteria[count];                /* Create the creteria this wait operation needs to wake up           */ \
+	init_push( count, monitors, &waiter, criteria );        /* Link everything together and push it to the AS-Stack               */ \
+
+#define monitor_ctx( mons, cnt )                           /* Define that create the necessary struct for internal/external scheduling operations */ \
+	monitor_desc ** monitors = mons;                        /* Save the targeted monitors                                          */ \
+	unsigned short count = cnt;                             /* Save the count to a local variable                                  */ \
+	unsigned int recursions[ count ];                       /* Save the current recursion levels to restore them later            */ \
+	spinlock * locks[ count ];                              /* We need to pass-in an array of locks to BlockInternal               */ \
+
 //-----------------------------------------------------------------------------
 // Enter/Leave routines
@@ -43 +67 @@

 extern "C" {
-	void __enter_monitor_desc( monitor_desc * this ) {
+	// Enter single monitor
+	static void __enter_monitor_desc( monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() ) {
+		// Lock the monitor spinlock, lock_yield to reduce contention
 		lock_yield( &this->lock DEBUG_CTX2 );
 		thread_desc * thrd = this_thread;

-		// LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
-
+		LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
+
+		this->accepted_index = -1;
 		if( !this->owner ) {
-			//No one has the monitor, just take it
+			// No one has the monitor, just take it
 			set_owner( this, thrd );
+
+			LIB_DEBUG_PRINT_SAFE("Kernel : mon is free \n");
 		}
 		else if( this->owner == thrd) {
-			//We already have the monitor, just not how many times we took it
+			// We already have the monitor, just not how many times we took it
 			verify( this->recursion > 0 );
 			this->recursion += 1;
+
+			LIB_DEBUG_PRINT_SAFE("Kernel : mon already owned \n");
+		}
+		else if( (this->accepted_index = is_accepted( thrd, this, group, group_cnt, func)) >= 0 ) {
+			// Some one was waiting for us, enter
+			set_owner( this, thrd );
+
+			LIB_DEBUG_PRINT_SAFE("Kernel : mon accepts \n");
 		}
 		else {
-			//Some one else has the monitor, wait in line for it
+			LIB_DEBUG_PRINT_SAFE("Kernel : blocking \n");
+
+			// Some one else has the monitor, wait in line for it
 			append( &this->entry_queue, thrd );
-			// LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
 			BlockInternal( &this->lock );

-			//BlockInternal will unlock spinlock, no need to unlock ourselves
+			LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entered mon %p\n", thrd, this);
+
+			// BlockInternal will unlock spinlock, no need to unlock ourselves
 			return;
 		}

+		LIB_DEBUG_PRINT_SAFE("Kernel : %10p Entered mon %p\n", thrd, this);
+
+		// Release the lock and leave
 		unlock( &this->lock );
 		return;
 	}

-	// leave pseudo code :
-	// TODO
+	// Leave single monitor
 	void __leave_monitor_desc( monitor_desc * this ) {
+		// Lock the monitor spinlock, lock_yield to reduce contention
 		lock_yield( &this->lock DEBUG_CTX2 );

-		// LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i). ", this_thread, this, this->owner, this->recursion);
 		verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion );

-		//Leaving a recursion level, decrement the counter
+		// Leaving a recursion level, decrement the counter
 		this->recursion -= 1;

-		//If we haven't left the last level of recursion
-		//it means we don't need to do anything
+		// If we haven't left the last level of recursion
+		// it means we don't need to do anything
 		if( this->recursion != 0) {
 			unlock( &this->lock );
@@ -90 +132 @@
 		}

+		// Get the next thread, will be null on low contention monitor
 		thread_desc * new_owner = next_thread( this );

-		//We can now let other threads in safely
+		// We can now let other threads in safely
 		unlock( &this->lock );
-
-		// LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);

 		//We need to wake-up the thread
@@ -101 +142 @@
 	}

+	// Leave the thread monitor
+	// last routine called by a thread.
+	// Should never return
 	void __leave_thread_monitor( thread_desc * thrd ) {
 		monitor_desc * this = &thrd->mon;
+
+		// Lock the monitor now
 		lock_yield( &this->lock DEBUG_CTX2 );
@@ -111 +157 @@
 		verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );

-		//Leaving a recursion level, decrement the counter
+		// Leaving a recursion level, decrement the counter
 		this->recursion -= 1;

-		//If we haven't left the last level of recursion
-		//it means we don't need to do anything
-		if( this->recursion != 0) {
-			unlock( &this->lock );
-			return;
-		}
-
+		// If we haven't left the last level of recursion
+		// it must mean there is an error
+		if( this->recursion != 0) { abortf("Thread internal monitor has unbalanced recursion"); }
+
+		// Fetch the next thread, can be null
 		thread_desc * new_owner = next_thread( this );

+		// Leave the thread, this will unlock the spinlock
+		// Use leave thread instead of BlockInternal which is
+		// specialized for this case and supports null new_owner
 		LeaveThread( &this->lock, new_owner );
-	}
-}
-
-static inline void enter(monitor_desc ** monitors, int count) {
+
+		// Control flow should never reach here!
+	}
+}
+
+// Enter multiple monitor
+// relies on the monitor array being sorted
+static inline void enter(monitor_desc ** monitors, int count, void (*func)() ) {
 	for(int i = 0; i < count; i++) {
-		__enter_monitor_desc( monitors[i] );
+		__enter_monitor_desc( monitors[i], monitors, count, func );
 	}
 }

+// Leave multiple monitor
+// relies on the monitor array being sorted
 static inline void leave(monitor_desc ** monitors, int count) {
 	for(int i = count - 1; i >= 0; i--) {
@@ -139 +192 @@
 	}
 }

-void ?{}( monitor_guard_t * this, monitor_desc ** m, int count ) {
+// Ctor for monitor guard
+// Sorts monitors before entering
+void ?{}( monitor_guard_t * this, monitor_desc ** m, int count, void (*func)() ) {
+	// Store current array
 	this->m = m;
 	this->count = count;
+
+	// Sort monitors based on address -> TODO use a sort specialized for small numbers
 	qsort(this->m, count);
-	enter( this->m, this->count );
-
+
+	// Save previous thread context
 	this->prev_mntrs = this_thread->current_monitors;
 	this->prev_count = this_thread->current_monitor_count;
+	this->prev_func  = this_thread->current_monitor_func;
+
+	// Update thread context (needed for conditions)
 	this_thread->current_monitors      = m;
 	this_thread->current_monitor_count = count;
-}
-
+	this_thread->current_monitor_func  = func;
+
+	// Enter the monitors in order
+	enter( this->m, this->count, func );
+}
+
+// Dtor for monitor guard
 void ^?{}( monitor_guard_t * this ) {
+	// Leave the monitors in order
 	leave( this->m, this->count );

+	// Restore thread context
 	this_thread->current_monitors      = this->prev_mntrs;
 	this_thread->current_monitor_count = this->prev_count;
-}
+	this_thread->current_monitor_func  = this->prev_func;
+}
+
+//-----------------------------------------------------------------------------
+// Internal scheduling types

 void ?{}(__condition_node_t * this, thread_desc * waiting_thread, unsigned short count, uintptr_t user_info ) {
@@ -183 +254 @@
 // Internal scheduling
 void wait( condition * this, uintptr_t user_info = 0 ) {
-	// LIB_DEBUG_PRINT_SAFE("Waiting\n");
-
 	brand_condition( this );

-	//Check that everything is as expected
+	// Check that everything is as expected
 	assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
 	verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
 	verifyf( this->monitor_count < 32u, "Excessive monitor count (%i)", this->monitor_count );

-	unsigned short count = this->monitor_count;
-	unsigned int recursions[ count ];	//Save the current recursion levels to restore them later
-	spinlock *   locks     [ count ];	//We need to pass-in an array of locks to BlockInternal
-
-	// LIB_DEBUG_PRINT_SAFE("count %i\n", count);
-
-	__condition_node_t waiter = { (thread_desc*)this_thread, count, user_info };
-
-	__condition_criterion_t criteria[count];
-	for(int i = 0; i < count; i++) {
-		(&criteria[i]){ this->monitors[i], &waiter };
-		// LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
-	}
-
-	waiter.criteria = criteria;
+	// Create storage for monitor context
+	monitor_ctx( this->monitors, this->monitor_count );
+
+	// Create the node specific to this wait operation
+	wait_ctx( this_thread, user_info );
+
+	// Append the current wait operation to the ones already queued on the condition
+	// We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
 	append( &this->blocked, &waiter );

-	lock_all( this->monitors, locks, count );
-	save_recursion( this->monitors, recursions, count );
-	//DON'T unlock, ask the kernel to do it
-
-	//Find the next thread(s) to run
+	// Lock all monitors (aggregates the lock them as well)
+	lock_all( monitors, locks, count );
+
+	// DON'T unlock, ask the kernel to do it
+
+	// Save monitor state
+	save_recursion( monitors, recursions, count );
+
+	// Find the next thread(s) to run
 	unsigned short thread_count = 0;
 	thread_desc * threads[ count ];
@@ -220 +286 @@
 	}

+	// Remove any duplicate threads
 	for( int i = 0; i < count; i++) {
-		thread_desc * new_owner = next_thread( this->monitors[i] );
+		thread_desc * new_owner = next_thread( monitors[i] );
 		thread_count = insert_unique( threads, thread_count, new_owner );
 	}
-
-	// LIB_DEBUG_PRINT_SAFE("Will unblock: ");
-	for(int i = 0; i < thread_count; i++) {
-		// LIB_DEBUG_PRINT_SAFE("%p ", threads[i]);
-	}
-	// LIB_DEBUG_PRINT_SAFE("\n");

 	// Everything is ready to go to sleep
@@ -235 +296 @@


-	//WE WOKE UP
-
-
-	//We are back, restore the owners and recursions
+	// WE WOKE UP
+
+
+	// We are back, restore the owners and recursions
 	lock_all( locks, count );
-	restore_recursion( this->monitors, recursions, count );
+	restore_recursion( monitors, recursions, count );
 	unlock_all( locks, count );
 }

 bool signal( condition * this ) {
-	if( is_empty( this ) ) {
-		// LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
-		return false;
-	}
+	if( is_empty( this ) ) { return false; }

 	//Check that everything is as expected
 	verify( this->monitors );
 	verify( this->monitor_count != 0 );
-
-	unsigned short count = this->monitor_count;

 	//Some more checking in debug
@@ -261 +317 @@
 		if ( this->monitor_count != this_thrd->current_monitor_count ) {
 			abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count );
-		} // if
+		}

 		for(int i = 0; i < this->monitor_count; i++) {
 			if ( this->monitors[i] != this_thrd->current_monitors[i] ) {
 				abortf( "Signal on condition %p made with different monitor, expected %p got %i", this, this->monitors[i], this_thrd->current_monitors[i] );
-			} // if
+			}
 		}
 	);

-	//Lock all the monitors
+	unsigned short count = this->monitor_count;
+
+	// Lock all monitors
 	lock_all( this->monitors, NULL, count );
-	// LIB_DEBUG_PRINT_SAFE("Signalling");

 	//Pop the head of the waiting queue
@@ -280 +337 @@
 	for(int i = 0; i < count; i++) {
 		__condition_criterion_t * crit = &node->criteria[i];
-		// LIB_DEBUG_PRINT_SAFE(" %p", crit->target);
 		assert( !crit->ready );
 		push( &crit->target->signal_stack, crit );
 	}

-	// LIB_DEBUG_PRINT_SAFE("\n");
-
 	//Release
 	unlock_all( this->monitors, count );
@@ -294 +348 @@

 bool signal_block( condition * this ) {
-	if( !this->blocked.head ) {
-		LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
-		return false;
-	}
+	if( !this->blocked.head ) { return false; }

 	//Check that everything is as expected
@@ -303 +354 @@
 	verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );

-	unsigned short count = this->monitor_count;
-	unsigned int recursions[ count ];	//Save the current recursion levels to restore them later
-	spinlock *   locks     [ count ];	//We need to pass-in an array of locks to BlockInternal
-
-	lock_all( this->monitors, locks, count );
-
-	//create creteria
-	__condition_node_t waiter = { (thread_desc*)this_thread, count, 0 };
-
-	__condition_criterion_t criteria[count];
-	for(int i = 0; i < count; i++) {
-		(&criteria[i]){ this->monitors[i], &waiter };
-		// LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
-		push( &criteria[i].target->signal_stack, &criteria[i] );
-	}
-
-	waiter.criteria = criteria;
+	// Create storage for monitor context
+	monitor_ctx( this->monitors, this->monitor_count );
+
+	// Lock all monitors (aggregates the locks them as well)
+	lock_all( monitors, locks, count );
+
+	// Create the node specific to this wait operation
+	wait_ctx_primed( this_thread, 0 )

 	//save contexts
-	save_recursion( this->monitors, recursions, count );
+	save_recursion( monitors, recursions, count );

 	//Find the thread to run
 	thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
 	for(int i = 0; i < count; i++) {
-		set_owner( this->monitors[i], signallee );
-	}
-
-	LIB_DEBUG_PRINT_SAFE( "Waiting on signal block\n" );
+		set_owner( monitors[i], signallee );
+	}

 	//Everything is ready to go to sleep
@@ -336 +376 @@

-
-
-	LIB_DEBUG_PRINT_SAFE( "Back from signal block\n" );
+	// WE WOKE UP
+

 	//We are back, restore the owners and recursions
 	lock_all( locks, count );
-	restore_recursion( this->monitors, recursions, count );
+	restore_recursion( monitors, recursions, count );
 	unlock_all( locks, count );
@@ -348 +387 @@

+// Access the user_info of the thread waiting at the front of the queue
 uintptr_t front( condition * this ) {
 	verifyf( !is_empty(this),
@@ -358 +398 @@
 //-----------------------------------------------------------------------------
 // Internal scheduling
-void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) ) {
-	// thread_desc * this = this_thread;
-
-	// unsigned short count = this->current_monitor_count;
-	// unsigned int recursions[ count ];	//Save the current recursion levels to restore them later
-	// spinlock *   locks     [ count ];	//We need to pass-in an array of locks to BlockInternal
-
-	// lock_all( this->current_monitors, locks, count );
-
-
-	// // // Everything is ready to go to sleep
-	// // BlockInternal( locks, count, threads, thread_count );
-
-
-	// //WE WOKE UP
-
-
-	// //We are back, restore the owners and recursions
-	// lock_all( locks, count );
-	// restore_recursion( this->monitors, recursions, count );
-	// unlock_all( locks, count );
+int __accept_internal( unsigned short acc_count, __acceptable_t * acceptables ) {
+	thread_desc * thrd = this_thread;
+
+	// Create storage for monitor context
+	monitor_ctx( acceptables->monitors, acceptables->count );
+
+	// Lock all monitors (aggregates the lock them as well)
+	lock_all( monitors, locks, count );
+
+	// Create the node specific to this wait operation
+	wait_ctx_primed( thrd, 0 );
+
+	// Check if the entry queue
+	thread_desc * next = search_entry_queue( acceptables, acc_count, monitors, count );
+
+	LIB_DEBUG_PRINT_SAFE("Owner(s) :");
+	for(int i = 0; i < count; i++) {
+		LIB_DEBUG_PRINT_SAFE(" %p", monitors[i]->owner );
+	}
+	LIB_DEBUG_PRINT_SAFE("\n");
+
+	LIB_DEBUG_PRINT_SAFE("Passing mon to %p\n", next);
+
+	if( !next ) {
+		// Update acceptables on the current monitors
+		for(int i = 0; i < count; i++) {
+			monitors[i]->acceptables = acceptables;
+			monitors[i]->acceptable_count = acc_count;
+		}
+	}
+	else {
+		for(int i = 0; i < count; i++) {
+			set_owner( monitors[i], next );
+		}
+	}
+
+
+	save_recursion( monitors, recursions, count );
+
+
+	// Everything is ready to go to sleep
+	BlockInternal( locks, count, &next, next ? 1 : 0 );
+
+
+	//WE WOKE UP
+
+
+	//We are back, restore the owners and recursions
+	lock_all( locks, count );
+	restore_recursion( monitors, recursions, count );
+	int acc_idx = monitors[0]->accepted_index;
+	unlock_all( locks, count );
+
+	return acc_idx;
 }
@@ -415 +485 @@
 }

+static inline int is_accepted( thread_desc * owner, monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() ) {
+	__acceptable_t * accs = this->acceptables; // Optim
+	int acc_cnt = this->acceptable_count;
+
+	// Check if there are any acceptable functions
+	if( !accs ) return -1;
+
+	// If this isn't the first monitor to test this, there is no reason to repeat the test.
+	if( this != group[0] ) return group[0]->accepted_index;
+
+	// For all acceptable functions check if this is the current function.
+	OUT_LOOP:
+	for( int i = 0; i < acc_cnt; i++ ) {
+		__acceptable_t * acc = &accs[i];
+
+		// if function matches, check the monitors
+		if( acc->func == func ) {
+
+			// If the group count is different then it can't be a match
+			if( acc->count != group_cnt ) return -1;
+
+			// Check that all the monitors match
+			for( int j = 0; j < group_cnt; j++ ) {
+				// If not a match, check next function
+				if( acc->monitors[j] != group[j] ) continue OUT_LOOP;
+			}
+
+			// It's a complete match, accept the call
+			return i;
+		}
+	}
+
+	// No function matched
+	return -1;
+}
+
+static inline void init( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
+	for(int i = 0; i < count; i++) {
+		(&criteria[i]){ monitors[i], waiter };
+	}
+
+	waiter->criteria = criteria;
+}
+
+static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
+	for(int i = 0; i < count; i++) {
+		(&criteria[i]){ monitors[i], waiter };
+		push( &criteria[i].target->signal_stack, &criteria[i] );
+	}
+
+	waiter->criteria = criteria;
+}
+
 static inline void lock_all( spinlock ** locks, unsigned short count ) {
 	for( int i = 0; i < count; i++ ) {
@@ -505 +628 @@
 }

+static inline bool match( __acceptable_t * acc, thread_desc * thrd ) {
+	verify( thrd );
+	verify( acc );
+	if( acc->func != thrd->current_monitor_func ) return false;
+
+	return true;
+}
+
+static inline thread_desc * search_entry_queue( __acceptable_t * acceptables, int acc_count, monitor_desc ** monitors, int count ) {
+
+	__thread_queue_t * entry_queue = &monitors[0]->entry_queue;
+
+	// For each thread in the entry-queue
+	for( thread_desc ** thrd_it = &entry_queue->head;
+	     *thrd_it;
+	     thrd_it = &(*thrd_it)->next )
+	{
+		// For each acceptable check if it matches
+		__acceptable_t * acc_end = acceptables + acc_count;
+		for( __acceptable_t * acc_it = acceptables; acc_it != acc_end; acc_it++ ) {
+			// Check if we have a match
+			if( match( acc_it, *thrd_it ) ) {
+
+				// If we have a match return it
+				// after removeing it from the entry queue
+				return remove( entry_queue, thrd_it );
+			}
+		}
+	}
+
+	return NULL;
+}
+
 void ?{}( __condition_blocked_queue_t * this ) {
 	this->head = NULL;
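Reviewer note: taken together, is_accepted and search_entry_queue define the matching rule for external scheduling: a blocked caller is accepted only if its function pointer equals an acceptable's func and, on the is_accepted path, its sorted monitor group is identical element for element. Note the asymmetry: match() inside search_entry_queue currently compares only current_monitor_func and not the monitor group, so it is slightly more permissive than is_accepted. A small worked example of the intended rule in plain C, with types reduced to what the comparison needs (hypothetical, for illustration only):

    #include <stdbool.h>
    #include <stdio.h>

    typedef void (*fptr_t)(void);

    // Reduced shape of __acceptable_t (illustrative).
    struct acceptable { fptr_t func; unsigned short count; void ** monitors; };

    // The rule is_accepted implements: same function, same count,
    // same address-sorted monitor group.
    static bool accepts( struct acceptable * acc, fptr_t func, void ** group, unsigned short cnt ) {
        if( acc->func != func || acc->count != cnt ) return false;
        for( unsigned short i = 0; i < cnt; i++ )
            if( acc->monitors[i] != group[i] ) return false;
        return true;
    }

    static void f(void) {}

    int main(void) {
        int m1, m2;                                    // stand-ins for two monitors
        void * g12[] = { &m1, &m2 };
        void * g1 [] = { &m1 };
        struct acceptable acc = { f, 2, g12 };
        printf( "%d %d\n", accepts( &acc, f, g12, 2 ),   // 1: same func, same group
                           accepts( &acc, f, g1,  1 ) ); // 0: different group
        return 0;
    }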
src/libcfa/concurrency/preemption.c
r66b8773 → r3eab0ef6

@@ -332 +332 @@
 	assertf(sig == SIGALRM, "Kernel Internal Error, sigwait: Unexpected signal %d (%d : %d)\n", sig, info.si_code, info.si_value.sival_int);

-	LIB_DEBUG_PRINT_SAFE("Kernel : Caught alarm from %d with %d\n", info.si_code, info.si_value.sival_int );
+	// LIB_DEBUG_PRINT_SAFE("Kernel : Caught alarm from %d with %d\n", info.si_code, info.si_value.sival_int );
 	// Switch on the code (a.k.a. the sender) to
 	switch( info.si_code )
@@ -340 +340 @@
 	case SI_TIMER:
 	case SI_KERNEL:
-		LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread tick\n");
+		// LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread tick\n");
 		lock( &event_kernel->lock DEBUG_CTX2 );
 		tick_preemption();
src/tests/preempt_longrun/stack.c
r66b8773 → r3eab0ef6

@@ -15 +15 @@

 void main(worker_t * this) {
-	volatile long p = 5_021_609ul;
-	volatile long a = 326_417ul;
-	volatile long n = 1l;
-	for (volatile long i = 0; i < p; i++) {
+	volatile long long p = 5_021_609ul;
+	volatile long long a = 326_417ul;
+	volatile long long n = 1l;
+	for (volatile long long i = 0; i < p; i++) {
 		n *= a;
 		n %= p;
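Reviewer note: the widening from long to long long is what keeps this modular-multiplication loop correct on ILP32 targets, where long is 32 bits. Since n stays below p, the intermediate product n * a can reach (p - 1) * a = 5_021_608 * 326_417 ≈ 1.64e12, far above the 32-bit maximum of about 2.15e9, while long long is guaranteed at least 64 bits. A quick check in C:

    #include <stdio.h>

    int main(void) {
        const long long p = 5021609LL, a = 326417LL;
        const long long max_prod = (p - 1) * a;   // largest value n *= a can reach
        printf( "max product = %lld (%s 32 bits)\n", max_prod,
                max_prod > 2147483647LL ? "overflows" : "fits in" );
        return 0;
    }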
src/tests/sched-int-disjoint.c
r66b8773 → r3eab0ef6

@@ -3 +3 @@
 #include <monitor>
 #include <thread>
+
+#include <time.h>

 static const unsigned long N = 10_000ul;
@@ -107 +109 @@
 // Main loop
 int main(int argc, char* argv[]) {
+	rand48seed( time( NULL ) );
 	all_done = false;
 	processor p;
src/tests/sched-int-wait.c
r66b8773 → r3eab0ef6

@@ -5 +5 @@
 #include <thread>

-static const unsigned long N = 10_000ul;
+#include <time.h>
+
+static const unsigned long N = 2_500ul;

 #ifndef PREEMPTION_RATE
@@ -119 +121 @@
 // Main
 int main(int argc, char* argv[]) {
+	rand48seed( time( NULL ) );
 	waiter_left = 4;
 	processor p[2];
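Reviewer note: both scheduler tests now seed the runtime's random-number generator from the wall clock, so each run exercises a different signal/wait interleaving rather than a fixed one (at the cost of reproducibility; logging the seed on failure would be the usual mitigation). The plain-C analogue of the idiom, using the POSIX rand48 family:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main(void) {
        srand48( time( NULL ) );                         // analogous to rand48seed( time( NULL ) )
        printf( "picked waiter %ld\n", lrand48() % 4 );  // e.g. choose which of 4 waiters to signal
        return 0;
    }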