Changeset 97e3296
Timestamp: Aug 17, 2017, 3:42:16 PM
Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
Children: 6ac5223
Parents: f710aca
Location: src
Files: 1 added, 5 edited
src/Concurrency/Keywords.cc
 	std::list<DeclarationWithType*> findMutexArgs( FunctionDecl* );
 	void validate( DeclarationWithType * );
-	void addStatments( CompoundStmt *, const std::list<DeclarationWithType * > &);
+	void addStatments( FunctionDecl* func, CompoundStmt *, const std::list<DeclarationWithType * > &);
 
 	static void implement( std::list< Declaration * > & translationUnit ) {
…
 	StructDecl* monitor_decl = nullptr;
 	StructDecl* guard_decl = nullptr;
+
+	static std::unique_ptr< Type > generic_func;
 };
+
+std::unique_ptr< Type > MutexKeyword::generic_func = std::unique_ptr< Type >(
+	new FunctionType(
+		noQualifiers,
+		true
+	)
+);
 
 //-----------------------------------------------------------------------------
…
 // Mutex keyword implementation
 //=============================================================================================
+
 void MutexKeyword::visit(FunctionDecl* decl) {
 	Visitor::visit(decl);
…
 	if( !guard_decl ) throw SemanticError( "mutex keyword requires monitors to be in scope, add #include <monitor>", decl );
 
-	addStatments( body, mutexArgs );
+	addStatments( decl, body, mutexArgs );
 }
 
…
 }
 
-void MutexKeyword::addStatments( CompoundStmt * body, const std::list<DeclarationWithType * > & args ) {
+void MutexKeyword::addStatments( FunctionDecl* func, CompoundStmt * body, const std::list<DeclarationWithType * > & args ) {
 	ObjectDecl * monitors = new ObjectDecl(
 		"__monitors",
…
 	);
 
+	assert(generic_func);
+
 	//in reverse order :
-	// monitor_guard_t __guard = { __monitors, # };
+	// monitor_guard_t __guard = { __monitors, #, func };
 	body->push_front(
 		new DeclStmt( noLabels, new ObjectDecl(
…
 			{
 				new SingleInit( new VariableExpr( monitors ) ),
-				new SingleInit( new ConstantExpr( Constant::from_ulong( args.size() ) ) )
+				new SingleInit( new ConstantExpr( Constant::from_ulong( args.size() ) ) ),
+				new SingleInit( new CastExpr( new VariableExpr( func ), generic_func->clone() ) )
 			},
 			noDesignators,
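For orientation, a hedged sketch (not part of the changeset) of the code the pass now injects at the top of a mutex routine: the guard is initialized with the monitor array, its size, and, new in this commit, the routine itself cast through the static generic_func type. The routine f and its parameters are hypothetical.

    // Hypothetical mutex routine: void f( mutex monitor_desc * a, mutex monitor_desc * b )
    void f( monitor_desc * a, monitor_desc * b ) {
    	monitor_desc * __monitors[] = { a, b };                      // gather the mutex arguments
    	monitor_guard_t __guard = { __monitors, 2, (void (*)())f };  // i.e. { __monitors, #, func }
    	// ... original function body, monitors held for the guard's lifetime ...
    }                                                                // ^?{}(&__guard) releases the monitors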
src/libcfa/concurrency/invoke.h
 	struct __condition_stack_t signal_stack; // stack of conditions to run next once we exit the monitor
 	unsigned int recursion;                  // monitor routines can be called recursively, we need to keep track of that
-};
+
+	struct __acceptable_t * acceptables;     // list of acceptable functions, null if any
+	unsigned short acceptable_count;         // number of acceptable functions
+	short accepted_index;                    // the index of the accepted function, -1 if none
+	void (*pre_accept)(void);                // function to run before an accept
+};
 
 struct thread_desc {
…
 	struct monitor_desc ** current_monitors; // currently held monitors
 	unsigned short current_monitor_count;    // number of currently held monitors
 };
 
 #endif //_INVOKE_H_
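The four new monitor_desc fields form the handshake for external scheduling: an accepting thread publishes the calls it is willing to accept, and a caller entering the monitor records which entry it matched. A hedged sketch of the protocol, pieced together from the monitor.c changes below rather than quoted from them (accs and n are stand-in names):

    // Acceptor side (see __accept_internal below): publish the accept set, then block.
    this->acceptables      = accs;      // accs: the __acceptable_t array being waited on
    this->acceptable_count = n;

    // Caller side (see __enter_monitor_desc below): consult the published set.
    this->accepted_index = is_accepted( thrd, this, group, group_cnt, func );
    if( this->accepted_index >= 0 ) {
    	set_owner( this, thrd );        // the call was accepted, enter immediately
    }
    // After entering all monitors, the caller runs this->pre_accept() when the
    // matched __acceptable_t entry has run_preaccept set (see enter() below).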
src/libcfa/concurrency/kernel.c
 
 void BlockInternal( thread_desc * thrd ) {
+	assert(thrd);
 	disable_interrupts();
 	assert( thrd->cor.state != Halted );
…
 
 void BlockInternal( spinlock * lock, thread_desc * thrd ) {
+	assert(thrd);
 	disable_interrupts();
 	this_processor->finish.action_code = Release_Schedule;
src/libcfa/concurrency/monitor
 
 static inline void ?{}(monitor_desc * this) {
+	(&this->lock){};
 	this->owner = NULL;
+	(&this->entry_queue){};
+	(&this->signal_stack){};
 	this->recursion = 0;
+	this->acceptables = NULL;
+	this->acceptable_count = 0;
+	this->accepted_index = -1;
+	this->pre_accept = 0;
 }
 
…
 }
 
-void ?{}( monitor_guard_t * this, monitor_desc ** m, int count );
+void ?{}( monitor_guard_t * this, monitor_desc ** m, int count, void (*func)() );
 void ^?{}( monitor_guard_t * this );
…
 uintptr_t front( condition * this );
 
+//-----------------------------------------------------------------------------
+// External scheduling
+
+typedef void (*void_fptr_t)(void);
+
 struct __acceptable_t {
-	void (*func)(void);
+	void_fptr_t func;
 	unsigned short count;
-	monitor_desc * monitors[1];
+	monitor_desc ** monitors;
+	bool run_preaccept;
 };
 
-void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void));
+int __accept_internal( unsigned short count, __acceptable_t * acceptables );
 
 // Local Variables: //
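A hedged usage sketch of the revised interface (hypothetical monitors m1/m2 and routines f/g, not taken from the changeset): an accept over two alternatives builds an __acceptable_t array and blocks in __accept_internal, which now returns the index of the accepted alternative instead of void.

    monitor_desc * g1[1] = { m1 };                  // f is acceptable on m1 alone
    monitor_desc * g2[2] = { m1, m2 };              // g is acceptable on the pair (m1, m2)
    __acceptable_t acceptables[2] = {
    	{ (void_fptr_t)f, 1, g1, false },
    	{ (void_fptr_t)g, 2, g2, true }             // run the pre_accept hook before g
    };
    int idx = __accept_internal( 2, acceptables );  // blocks; returns -1 if nothing accepted
    if( idx == 0 ) { /* f was accepted */ }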
src/libcfa/concurrency/monitor.c
 static inline void set_owner( monitor_desc * this, thread_desc * owner );
 static inline thread_desc * next_thread( monitor_desc * this );
+static inline int is_accepted( thread_desc * owner, monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() );
 
 static inline void lock_all( spinlock ** locks, unsigned short count );
…
 static inline void restore_recursion( monitor_desc ** ctx, unsigned int * /*in */ recursions, unsigned short count );
 
+static inline void init     ( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
+static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria );
+
 static inline thread_desc * check_condition( __condition_criterion_t * );
 static inline void brand_condition( condition * );
 static inline unsigned short insert_unique( thread_desc ** thrds, unsigned short end, thread_desc * val );
 
+static inline thread_desc * search_entry_queue( __acceptable_t * acceptables, int acc_count, monitor_desc ** monitors, int count );
+
+//-----------------------------------------------------------------------------
+// Useful defines
+#define wait_ctx(thrd, user_info)                        /* Create the necessary information to use the signaller stack   */ \
+	__condition_node_t waiter = { thrd, count, user_info };  /* Create the node specific to this wait operation            */ \
+	__condition_criterion_t criteria[count];                 /* Create the creteria this wait operation needs to wake up   */ \
+	init( count, monitors, &waiter, criteria );              /* Link everything together                                   */ \
+
+#define wait_ctx_primed(thrd, user_info)                 /* Create the necessary information to use the signaller stack   */ \
+	__condition_node_t waiter = { thrd, count, user_info };  /* Create the node specific to this wait operation            */ \
+	__condition_criterion_t criteria[count];                 /* Create the creteria this wait operation needs to wake up   */ \
+	init_push( count, monitors, &waiter, criteria );         /* Link everything together and push it to the AS-Stack       */ \
+
+#define monitor_ctx( mons, cnt )                         /* Define that create the necessary struct for internal/external scheduling operations */ \
+	monitor_desc ** monitors = mons;                         /* Save the targeted monitors                                 */ \
+	unsigned short count = cnt;                              /* Save the count to a local variable                         */ \
+	unsigned int recursions[ count ];                        /* Save the current recursion levels to restore them later    */ \
+	spinlock *   locks     [ count ];                        /* We need to pass-in an array of locks to BlockInternal      */ \
+
 //-----------------------------------------------------------------------------
 // Enter/Leave routines
…
 
 extern "C" {
-	void __enter_monitor_desc( monitor_desc * this ) {
+	// Enter single monitor
+	static void __enter_monitor_desc( monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() ) {
+		// Lock the monitor spinlock, lock_yield to reduce contention
 		lock_yield( &this->lock DEBUG_CTX2 );
 		thread_desc * thrd = this_thread;
 
-		// LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
-
+		this->accepted_index = -1;
 		if( !this->owner ) {
-			//No one has the monitor, just take it
+			// No one has the monitor, just take it
 			set_owner( this, thrd );
 		}
 		else if( this->owner == thrd) {
-			//We already have the monitor, just not how many times we took it
+			// We already have the monitor, just not how many times we took it
 			verify( this->recursion > 0 );
 			this->recursion += 1;
 		}
+		else if( (this->accepted_index = is_accepted( thrd, this, group, group_cnt, func)) >= 0 ) {
+			// Some one was waiting for us, enter
+			set_owner( this, thrd );
+		}
 		else {
-			//Some one else has the monitor, wait in line for it
+			// Some one else has the monitor, wait in line for it
 			append( &this->entry_queue, thrd );
-			// LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
 			BlockInternal( &this->lock );
 
-			//BlockInternal will unlock spinlock, no need to unlock ourselves
+			// BlockInternal will unlock spinlock, no need to unlock ourselves
 			return;
 		}
 
+		// Release the lock and leave
 		unlock( &this->lock );
 		return;
 	}
 
-	// leave pseudo code :
-	// TODO
+	// Leave single monitor
 	void __leave_monitor_desc( monitor_desc * this ) {
+		// Lock the monitor spinlock, lock_yield to reduce contention
 		lock_yield( &this->lock DEBUG_CTX2 );
 
-		// LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i). ", this_thread, this, this->owner, this->recursion);
 		verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion );
 
-		//Leaving a recursion level, decrement the counter
+		// Leaving a recursion level, decrement the counter
 		this->recursion -= 1;
 
-		//If we haven't left the last level of recursion
-		//it means we don't need to do anything
+		// If we haven't left the last level of recursion
+		// it means we don't need to do anything
 		if( this->recursion != 0) {
 			unlock( &this->lock );
…
 		}
 
+		// Get the next thread, will be null on low contention monitor
 		thread_desc * new_owner = next_thread( this );
 
-		//We can now let other threads in safely
+		// We can now let other threads in safely
 		unlock( &this->lock );
-
-		// LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);
 
 		//We need to wake-up the thread
…
 	}
 
+	// Leave the thread monitor
+	// last routine called by a thread.
+	// Should never return
 	void __leave_thread_monitor( thread_desc * thrd ) {
 		monitor_desc * this = &thrd->mon;
+
+		// Lock the monitor now
 		lock_yield( &this->lock DEBUG_CTX2 );
…
 		verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );
 
-		//Leaving a recursion level, decrement the counter
+		// Leaving a recursion level, decrement the counter
 		this->recursion -= 1;
 
-		//If we haven't left the last level of recursion
-		//it means we don't need to do anything
-		if( this->recursion != 0) {
-			unlock( &this->lock );
-			return;
-		}
-
+		// If we haven't left the last level of recursion
+		// it must mean there is an error
+		if( this->recursion != 0) { abortf("Thread internal monitor has unbalanced recursion"); }
+
+		// Fetch the next thread, can be null
 		thread_desc * new_owner = next_thread( this );
 
+		// Leave the thread, this will unlock the spinlock
+		// Use leave thread instead of BlockInternal which is
+		// specialized for this case and supports null new_owner
 		LeaveThread( &this->lock, new_owner );
+
+		// Control flow should never reach here!
 	}
 }
 
-static inline void enter(monitor_desc ** monitors, int count) {
+// Enter multiple monitor
+// relies on the monitor array being sorted
+static inline void enter(monitor_desc ** monitors, int count, void (*func)() ) {
 	for(int i = 0; i < count; i++) {
-		__enter_monitor_desc( monitors[i] );
-	}
-}
-
+		__enter_monitor_desc( monitors[i], monitors, count, func );
+	}
+
+	int acc_idx = monitors[0]->accepted_index;
+	if( acc_idx >= 0 && monitors[0]->acceptables[ acc_idx ].run_preaccept ) {
+		assert( monitors[0]->pre_accept );
+		monitors[0]->pre_accept();
+	}
+}
+
+// Leave multiple monitor
+// relies on the monitor array being sorted
 static inline void leave(monitor_desc ** monitors, int count) {
 	for(int i = count - 1; i >= 0; i--) {
…
 }
 
-void ?{}( monitor_guard_t * this, monitor_desc ** m, int count ) {
+// Ctor for monitor guard
+// Sorts monitors before entering
+void ?{}( monitor_guard_t * this, monitor_desc ** m, int count, void (*func)() ) {
+	// Store current array
 	this->m = m;
 	this->count = count;
+
+	// Sort monitors based on address -> TODO use a sort specialized for small numbers
 	qsort(this->m, count);
-	enter( this->m, this->count );
-
+
+	// Enter the monitors in order
+	enter( this->m, this->count, func );
+
+	// Save previous thread context
 	this->prev_mntrs = this_thread->current_monitors;
 	this->prev_count = this_thread->current_monitor_count;
 
+	// Update thread context (needed for conditions)
 	this_thread->current_monitors      = m;
 	this_thread->current_monitor_count = count;
 }
 
+// Dtor for monitor guard
 void ^?{}( monitor_guard_t * this ) {
+	// Leave the monitors in order
 	leave( this->m, this->count );
 
+	// Restore thread context
 	this_thread->current_monitors      = this->prev_mntrs;
 	this_thread->current_monitor_count = this->prev_count;
 }
+
+//-----------------------------------------------------------------------------
+// Internal scheduling types
 
 void ?{}(__condition_node_t * this, thread_desc * waiting_thread, unsigned short count, uintptr_t user_info ) {
…
 // Internal scheduling
 void wait( condition * this, uintptr_t user_info = 0 ) {
-	// LIB_DEBUG_PRINT_SAFE("Waiting\n");
-
 	brand_condition( this );
 
-	//Check that everything is as expected
+	// Check that everything is as expected
 	assertf( this->monitors != NULL, "Waiting with no monitors (%p)", this->monitors );
 	verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
 	verifyf( this->monitor_count < 32u, "Excessive monitor count (%i)", this->monitor_count );
 
-	unsigned short count = this->monitor_count;
-	unsigned int recursions[ count ];   //Save the current recursion levels to restore them later
-	spinlock *   locks     [ count ];   //We need to pass-in an array of locks to BlockInternal
-
-	// LIB_DEBUG_PRINT_SAFE("count %i\n", count);
-
-	__condition_node_t waiter = { (thread_desc*)this_thread, count, user_info };
-
-	__condition_criterion_t criteria[count];
-	for(int i = 0; i < count; i++) {
-		(&criteria[i]){ this->monitors[i], &waiter };
-		// LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
-	}
-
-	waiter.criteria = criteria;
+	// Create storage for monitor context
+	monitor_ctx( this->monitors, this->monitor_count );
+
+	// Create the node specific to this wait operation
+	wait_ctx( this_thread, user_info );
+
+	// Append the current wait operation to the ones already queued on the condition
+	// We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
 	append( &this->blocked, &waiter );
 
-	lock_all( this->monitors, locks, count );
-	save_recursion( this->monitors, recursions, count );
-	//DON'T unlock, ask the kernel to do it
-
-	//Find the next thread(s) to run
+	// Lock all monitors (aggregates the lock them as well)
+	lock_all( monitors, locks, count );
+
+	// DON'T unlock, ask the kernel to do it
+
+	// Save monitor state
+	save_recursion( monitors, recursions, count );
+
+	// Find the next thread(s) to run
 	unsigned short thread_count = 0;
 	thread_desc * threads[ count ];
…
 	}
 
+	// Remove any duplicate threads
 	for( int i = 0; i < count; i++) {
-		thread_desc * new_owner = next_thread( this->monitors[i] );
+		thread_desc * new_owner = next_thread( monitors[i] );
 		thread_count = insert_unique( threads, thread_count, new_owner );
 	}
-
-	// LIB_DEBUG_PRINT_SAFE("Will unblock: ");
-	for(int i = 0; i < thread_count; i++) {
-		// LIB_DEBUG_PRINT_SAFE("%p ", threads[i]);
-	}
-	// LIB_DEBUG_PRINT_SAFE("\n");
 
 	// Everything is ready to go to sleep
…
 
 
-	//WE WOKE UP
-
-
-	//We are back, restore the owners and recursions
+	// WE WOKE UP
+
+
+	// We are back, restore the owners and recursions
 	lock_all( locks, count );
-	restore_recursion( this->monitors, recursions, count );
+	restore_recursion( monitors, recursions, count );
 	unlock_all( locks, count );
 }
 
 bool signal( condition * this ) {
-	if( is_empty( this ) ) {
-		// LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
-		return false;
-	}
+	if( is_empty( this ) ) { return false; }
 
 	//Check that everything is as expected
 	verify( this->monitors );
 	verify( this->monitor_count != 0 );
-
-	unsigned short count = this->monitor_count;
 
 	//Some more checking in debug
…
 	if ( this->monitor_count != this_thrd->current_monitor_count ) {
 		abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count );
-	} // if
+	}
 
 	for(int i = 0; i < this->monitor_count; i++) {
 		if ( this->monitors[i] != this_thrd->current_monitors[i] ) {
 			abortf( "Signal on condition %p made with different monitor, expected %p got %i", this, this->monitors[i], this_thrd->current_monitors[i] );
-		} // if
+		}
 	}
 	);
 
-	//Lock all the monitors
+	unsigned short count = this->monitor_count;
+
+	// Lock all monitors
 	lock_all( this->monitors, NULL, count );
-	// LIB_DEBUG_PRINT_SAFE("Signalling");
 
 	//Pop the head of the waiting queue
…
 	for(int i = 0; i < count; i++) {
 		__condition_criterion_t * crit = &node->criteria[i];
-		// LIB_DEBUG_PRINT_SAFE(" %p", crit->target);
 		assert( !crit->ready );
 		push( &crit->target->signal_stack, crit );
 	}
 
-	// LIB_DEBUG_PRINT_SAFE("\n");
-
 	//Release
 	unlock_all( this->monitors, count );
…
 
 bool signal_block( condition * this ) {
-	if( !this->blocked.head ) {
-		LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
-		return false;
-	}
+	if( !this->blocked.head ) { return false; }
 
 	//Check that everything is as expected
…
 	verifyf( this->monitor_count != 0, "Waiting with 0 monitors (%i)", this->monitor_count );
 
-	unsigned short count = this->monitor_count;
-	unsigned int recursions[ count ];   //Save the current recursion levels to restore them later
-	spinlock *   locks     [ count ];   //We need to pass-in an array of locks to BlockInternal
-
-	lock_all( this->monitors, locks, count );
-
-	//create creteria
-	__condition_node_t waiter = { (thread_desc*)this_thread, count, 0 };
-
-	__condition_criterion_t criteria[count];
-	for(int i = 0; i < count; i++) {
-		(&criteria[i]){ this->monitors[i], &waiter };
-		// LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
-		push( &criteria[i].target->signal_stack, &criteria[i] );
-	}
-
-	waiter.criteria = criteria;
+	// Create storage for monitor context
+	monitor_ctx( this->monitors, this->monitor_count );
+
+	// Lock all monitors (aggregates the locks them as well)
+	lock_all( monitors, locks, count );
+
+	// Create the node specific to this wait operation
+	wait_ctx_primed( this_thread, 0 )
 
 	//save contexts
-	save_recursion( this->monitors, recursions, count );
+	save_recursion( monitors, recursions, count );
 
 	//Find the thread to run
 	thread_desc * signallee = pop_head( &this->blocked )->waiting_thread;
 	for(int i = 0; i < count; i++) {
-		set_owner( this->monitors[i], signallee );
-	}
-
-	LIB_DEBUG_PRINT_SAFE( "Waiting on signal block\n" );
+		set_owner( monitors[i], signallee );
+	}
 
 	//Everything is ready to go to sleep
…
 
 
-
-
-	LIB_DEBUG_PRINT_SAFE( "Back from signal block\n" );
+	// WE WOKE UP
+
 
 	//We are back, restore the owners and recursions
 	lock_all( locks, count );
-	restore_recursion( this->monitors, recursions, count );
+	restore_recursion( monitors, recursions, count );
 	unlock_all( locks, count );
…
 }
 
+// Access the user_info of the thread waiting at the front of the queue
 uintptr_t front( condition * this ) {
 	verifyf( !is_empty(this),
…
 //-----------------------------------------------------------------------------
 // Internal scheduling
-void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) ) {
-	// thread_desc * this = this_thread;
-
-	// unsigned short count = this->current_monitor_count;
-	// unsigned int recursions[ count ];   //Save the current recursion levels to restore them later
-	// spinlock *   locks     [ count ];   //We need to pass-in an array of locks to BlockInternal
-
-	// lock_all( this->current_monitors, locks, count );
-
-
-
-
-
-	// // // Everything is ready to go to sleep
-	// // BlockInternal( locks, count, threads, thread_count );
-
-
-	// //WE WOKE UP
-
-
-	// //We are back, restore the owners and recursions
-	// lock_all( locks, count );
-	// restore_recursion( this->monitors, recursions, count );
-	// unlock_all( locks, count );
+int __accept_internal( unsigned short acc_count, __acceptable_t * acceptables ) {
+	thread_desc * thrd = this_thread;
+
+	// Create storage for monitor context
+	monitor_ctx( acceptables->monitors, acceptables->count );
+
+	// Lock all monitors (aggregates the lock them as well)
+	lock_all( monitors, locks, count );
+
+	// Create the node specific to this wait operation
+	wait_ctx_primed( thrd, 0 );
+
+	// Check if the entry queue
+	thread_desc * next = search_entry_queue( acceptables, acc_count, monitors, count );
+
+	if( !next ) {
+		// Update acceptables on the current monitors
+		for(int i = 0; i < count; i++) {
+			monitors[i]->acceptables = acceptables;
+			monitors[i]->acceptable_count = acc_count;
+		}
+	}
+
+	save_recursion( monitors, recursions, count );
+
+	// Everything is ready to go to sleep
+	BlockInternal( locks, count, &next, next ? 1 : 0 );
+
+
+	//WE WOKE UP
+
+
+	//We are back, restore the owners and recursions
+	lock_all( locks, count );
+	restore_recursion( monitors, recursions, count );
+	int acc_idx = monitors[0]->accepted_index;
+	unlock_all( locks, count );
+
+	return acc_idx;
 }
…
 }
 
+static inline int is_accepted( thread_desc * owner, monitor_desc * this, monitor_desc ** group, int group_cnt, void (*func)() ) {
+	__acceptable_t* accs = this->acceptables; // Optim
+	int acc_cnt = this->acceptable_count;
+
+	// Check if there are any acceptable functions
+	if( !accs ) return -1;
+
+	// If this isn't the first monitor to test this, there is no reason to repeat the test.
+	if( this != group[0] ) return group[0]->accepted_index;
+
+	// For all acceptable functions check if this is the current function.
+	OUT_LOOP:
+	for( int i = 0; i < acc_cnt; i++ ) {
+		__acceptable_t * acc = &accs[i];
+
+		// if function matches, check the monitors
+		if( acc->func == func ) {
+
+			// If the group count is different then it can't be a match
+			if( acc->count != group_cnt ) return -1;
+
+			// Check that all the monitors match
+			for( int j = 0; j < group_cnt; j++ ) {
+				// If not a match, check next function
+				if( acc->monitors[j] != group[j] ) continue OUT_LOOP;
+			}
+
+			// It's a complete match, accept the call
+			return i;
+		}
+	}
+
+	// No function matched
+	return -1;
+}
+
+static inline void init( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
+	for(int i = 0; i < count; i++) {
+		(&criteria[i]){ monitors[i], waiter };
+	}
+
+	waiter->criteria = criteria;
+}
+
+static inline void init_push( int count, monitor_desc ** monitors, __condition_node_t * waiter, __condition_criterion_t * criteria ) {
+	for(int i = 0; i < count; i++) {
+		(&criteria[i]){ monitors[i], waiter };
+		push( &criteria[i].target->signal_stack, &criteria[i] );
+	}
+
+	waiter->criteria = criteria;
+}
+
 static inline void lock_all( spinlock ** locks, unsigned short count ) {
 	for( int i = 0; i < count; i++ ) {
…
 }
 
+static inline thread_desc * search_entry_queue( __acceptable_t * acceptables, int acc_count, monitor_desc ** monitors, int count ) {
+	return NULL;
+}
+
 void ?{}( __condition_blocked_queue_t * this ) {
 	this->head = NULL;
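Because the three new macros declare variables directly into their call site, it may help to see them written out. A sketch of what wait() contains after hand-expanding monitor_ctx and wait_ctx, assuming the definitions above:

    // monitor_ctx( this->monitors, this->monitor_count ):
    monitor_desc ** monitors = this->monitors;      // Save the targeted monitors
    unsigned short count = this->monitor_count;     // Save the count to a local variable
    unsigned int recursions[ count ];               // Recursion levels, restored after waking
    spinlock * locks [ count ];                     // Lock array handed to BlockInternal

    // wait_ctx( this_thread, user_info ):
    __condition_node_t waiter = { this_thread, count, user_info };
    __condition_criterion_t criteria[ count ];      // one criterion per monitor
    init( count, monitors, &waiter, criteria );     // link waiter and criteria together

wait_ctx_primed differs only in calling init_push, which additionally pushes each criterion onto its monitor's signal stack.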