Changeset 28d73c1 for libcfa/src/concurrency/kernel.cfa
- Timestamp:
- Jul 20, 2020, 3:30:25 PM (4 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 124c1b7
- Parents:
- 3f1d9b5
- Files:
- 1 edited (libcfa/src/concurrency/kernel.cfa)
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/kernel.cfa
r3f1d9b5 r28d73c1 228 228 static void * __invoke_processor(void * arg); 229 229 230 void ?{}(processor & this, const char name[], cluster & _cltr) with( this ) {230 static init(processor & this, const char name[], cluster & _cltr) with( this ) { 231 231 this.name = name; 232 232 this.cltr = &_cltr; 233 233 id = -1u; 234 terminated{ 0 };235 234 destroyer = 0p; 236 235 do_terminate = false; 237 236 preemption_alarm = 0p; 238 237 pending_preemption = false; 239 runner.proc = &this;240 238 241 239 #if !defined(__CFA_NO_STATISTICS__) … … 244 242 #endif 245 243 246 idle{}; 244 __atomic_fetch_add( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST ); 245 246 id = doregister((__processor_id_t*)&this); 247 248 // Lock the RWlock so no-one pushes/pops while we are changing the queue 249 uint_fast32_t last_size = ready_mutate_lock(); 250 251 // Adjust the ready queue size 252 ready_queue_grow( cltr ); 253 254 // Unlock the RWlock 255 ready_mutate_unlock( last_size ); 256 257 __cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this); 258 } 259 260 // Not a ctor, it just preps the destruction but should not destroy members 261 void deinit(processor & this) { 262 // Lock the RWlock so no-one pushes/pops while we are changing the queue 263 uint_fast32_t last_size = ready_mutate_lock(); 264 265 // Adjust the ready queue size 266 ready_queue_shrink( this.cltr ); 267 268 // Make sure we aren't on the idle queue 269 unsafe_remove( this.cltr->idles, &this ); 270 271 // Unlock the RWlock 272 ready_mutate_unlock( last_size ); 273 274 // Finally we don't need the read_lock any more 275 unregister((__processor_id_t*)&this); 276 } 277 278 void ?{}(processor & this, const char name[], cluster & _cltr) { 279 ( this.idle ){}; 280 ( this.terminated ){ 0 }; 281 ( this.runner ){}; 282 init( this, name, _cltr ); 247 283 248 284 __cfadbg_print_safe(runtime_core, "Kernel : Starting core %p\n", &this); 249 285 250 286 this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void 
*)&this ); 251 __atomic_fetch_add( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST ); 252 253 __cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this); 287 254 288 } 255 289 … … 269 303 270 304 free( this.stack ); 305 306 deinit( this ); 271 307 272 308 __atomic_fetch_sub( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST ); … … 318 354 319 355 __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this); 320 321 // register the processor unless it's the main thread which is handled in the boot sequence 322 if(this != mainProcessor) { 323 this->id = doregister((__processor_id_t*)this); 324 #if !defined(__CFA_NO_STATISTICS__) 325 if( this->print_halts ) { 326 __cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->id, this->name, (void*)this); 327 } 328 #endif 329 330 // Lock the RWlock so no-one pushes/pops while we are changing the queue 331 uint_fast32_t last_size = ready_mutate_lock(); 332 333 // Adjust the ready queue size 334 ready_queue_grow( this->cltr ); 335 336 // Unlock the RWlock 337 ready_mutate_unlock( last_size ); 338 } 356 #if !defined(__CFA_NO_STATISTICS__) 357 if( this->print_halts ) { 358 __cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->id, this->name, (void*)this); 359 } 360 #endif 339 361 340 362 { … … 375 397 V( this->terminated ); 376 398 377 // unregister the processor unless it's the main thread which is handled in the boot sequence 378 if(this != mainProcessor) { 379 // Lock the RWlock so no-one pushes/pops while we are changing the queue 380 uint_fast32_t last_size = ready_mutate_lock(); 381 382 // Adjust the ready queue size 383 ready_queue_shrink( this->cltr ); 384 385 // Make sure we aren't on the idle queue 386 #if !defined(__CFA_NO_STATISTICS__) 387 bool removed = 388 #endif 389 unsafe_remove( this->cltr->idles, this ); 390 391 #if !defined(__CFA_NO_STATISTICS__) 392 if(removed) __tls_stats()->ready.sleep.exits++; 393 #endif 394 395 // Unlock the RWlock 396 ready_mutate_unlock( 
last_size ); 397 398 // Finally we don't need the read_lock any more 399 unregister((__processor_id_t*)this); 400 } 401 else { 399 if(this == mainProcessor) { 402 400 // HACK : the coroutine context switch expects this_thread to be set 403 401 // and it make sense for it to be set in all other cases except here … … 859 857 860 858 void ?{}(processor & this) with( this ) { 861 name = "Main Processor"; 862 cltr = mainCluster; 863 terminated{ 0 }; 864 do_terminate = false; 865 preemption_alarm = 0p; 866 pending_preemption = false; 859 ( this.idle ){}; 860 ( this.terminated ){ 0 }; 861 ( this.runner ){}; 862 init( this, "Main Processor", *mainCluster ); 867 863 kernel_thread = pthread_self(); 868 id = -1u;869 870 #if !defined(__CFA_NO_STATISTICS__)871 print_stats = false;872 print_halts = false;873 #endif874 864 875 865 runner{ &this }; 876 866 __cfadbg_print_safe(runtime_core, "Kernel : constructed main processor context %p\n", &runner); 877 878 __atomic_fetch_add( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );879 867 } 880 868 … … 883 871 mainProcessor = (processor *)&storage_mainProcessor; 884 872 (*mainProcessor){}; 885 886 mainProcessor->id = doregister( (__processor_id_t*)mainProcessor);887 873 888 874 //initialize the global state variables … … 944 930 kernel_stop_preemption(); 945 931 946 unregister((__processor_id_t*)mainProcessor);947 948 932 // Destroy the main processor and its context in reverse order of construction 949 933 // These were manually constructed so we need manually destroy them 950 934 void ^?{}(processor & this) with( this ){ 935 deinit( this ); 936 951 937 /* paranoid */ verify( this.do_terminate == true ); 952 938 __atomic_fetch_sub( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
Note: See TracChangeset
for help on using the changeset viewer.