Changeset 28d73c1 for libcfa/src
- Timestamp:
- Jul 20, 2020, 3:30:25 PM
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 124c1b7
- Parents:
- 3f1d9b5
- Location:
- libcfa/src/concurrency
- Files:
- 3 edited
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
- … marks collapsed, unchanged context
libcfa/src/concurrency/kernel.cfa
--- r3f1d9b5
+++ r28d73c1
@@ -228,28 +228,62 @@
 static void * __invoke_processor(void * arg);
 
-void ?{}(processor & this, const char name[], cluster & _cltr) with( this ) {
+static void init(processor & this, const char name[], cluster & _cltr) with( this ) {
 	this.name = name;
 	this.cltr = &_cltr;
 	id = -1u;
-	terminated{ 0 };
 	destroyer = 0p;
 	do_terminate = false;
 	preemption_alarm = 0p;
 	pending_preemption = false;
-	runner.proc = &this;
 
 	#if !defined(__CFA_NO_STATISTICS__)
…
 	#endif
 
-	idle{};
+	__atomic_fetch_add( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
+
+	id = doregister((__processor_id_t*)&this);
+
+	// Lock the RWlock so no-one pushes/pops while we are changing the queue
+	uint_fast32_t last_size = ready_mutate_lock();
+
+	// Adjust the ready queue size
+	ready_queue_grow( cltr );
+
+	// Unlock the RWlock
+	ready_mutate_unlock( last_size );
+
+	__cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this);
+}
+
+// Not a ctor, it just preps the destruction but should not destroy members
+void deinit(processor & this) {
+	// Lock the RWlock so no-one pushes/pops while we are changing the queue
+	uint_fast32_t last_size = ready_mutate_lock();
+
+	// Adjust the ready queue size
+	ready_queue_shrink( this.cltr );
+
+	// Make sure we aren't on the idle queue
+	unsafe_remove( this.cltr->idles, &this );
+
+	// Unlock the RWlock
+	ready_mutate_unlock( last_size );
+
+	// Finally we don't need the read_lock any more
+	unregister((__processor_id_t*)&this);
+}
+
+void ?{}(processor & this, const char name[], cluster & _cltr) {
+	( this.idle ){};
+	( this.terminated ){ 0 };
+	( this.runner ){};
+	init( this, name, _cltr );
 
 	__cfadbg_print_safe(runtime_core, "Kernel : Starting core %p\n", &this);
 
 	this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this );
-	__atomic_fetch_add( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
-
-	__cfadbg_print_safe(runtime_core, "Kernel : core %p created\n", &this);
+
 }
 
@@ -269,4 +303,6 @@
 
 	free( this.stack );
+
+	deinit( this );
 
 	__atomic_fetch_sub( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
@@ -318,23 +354,9 @@
 
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
-
-	// register the processor unless it's the main thread which is handled in the boot sequence
-	if(this != mainProcessor) {
-		this->id = doregister((__processor_id_t*)this);
-		#if !defined(__CFA_NO_STATISTICS__)
-			if( this->print_halts ) {
-				__cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->id, this->name, (void*)this);
-			}
-		#endif
-
-		// Lock the RWlock so no-one pushes/pops while we are changing the queue
-		uint_fast32_t last_size = ready_mutate_lock();
-
-		// Adjust the ready queue size
-		ready_queue_grow( this->cltr );
-
-		// Unlock the RWlock
-		ready_mutate_unlock( last_size );
-	}
+	#if !defined(__CFA_NO_STATISTICS__)
+		if( this->print_halts ) {
+			__cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->id, this->name, (void*)this);
+		}
+	#endif
 
 	{
@@ -375,29 +397,5 @@
 	V( this->terminated );
 
-	// unregister the processor unless it's the main thread which is handled in the boot sequence
-	if(this != mainProcessor) {
-		// Lock the RWlock so no-one pushes/pops while we are changing the queue
-		uint_fast32_t last_size = ready_mutate_lock();
-
-		// Adjust the ready queue size
-		ready_queue_shrink( this->cltr );
-
-		// Make sure we aren't on the idle queue
-		#if !defined(__CFA_NO_STATISTICS__)
-			bool removed =
-		#endif
-			unsafe_remove( this->cltr->idles, this );
-
-		#if !defined(__CFA_NO_STATISTICS__)
-			if(removed) __tls_stats()->ready.sleep.exits++;
-		#endif
-
-		// Unlock the RWlock
-		ready_mutate_unlock( last_size );
-
-		// Finally we don't need the read_lock any more
-		unregister((__processor_id_t*)this);
-	}
-	else {
+	if(this == mainProcessor) {
 		// HACK : the coroutine context switch expects this_thread to be set
 		// and it make sense for it to be set in all other cases except here
@@ -859,22 +857,12 @@
 
 void ?{}(processor & this) with( this ) {
-	name = "Main Processor";
-	cltr = mainCluster;
-	terminated{ 0 };
-	do_terminate = false;
-	preemption_alarm = 0p;
-	pending_preemption = false;
+	( this.idle ){};
+	( this.terminated ){ 0 };
+	( this.runner ){};
+	init( this, "Main Processor", *mainCluster );
 	kernel_thread = pthread_self();
-	id = -1u;
-
-	#if !defined(__CFA_NO_STATISTICS__)
-		print_stats = false;
-		print_halts = false;
-	#endif
 
 	runner{ &this };
 	__cfadbg_print_safe(runtime_core, "Kernel : constructed main processor context %p\n", &runner);
-
-	__atomic_fetch_add( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
 }
 
@@ -883,6 +871,4 @@
 	mainProcessor = (processor *)&storage_mainProcessor;
 	(*mainProcessor){};
-
-	mainProcessor->id = doregister( (__processor_id_t*)mainProcessor);
 
 	//initialize the global state variables
@@ -944,9 +930,9 @@
 	kernel_stop_preemption();
 
-	unregister((__processor_id_t*)mainProcessor);
-
 	// Destroy the main processor and its context in reverse order of construction
 	// These were manually constructed so we need manually destroy them
 	void ^?{}(processor & this) with( this ){
+		deinit( this );
+
 		/* paranoid */ verify( this.do_terminate == true );
 		__atomic_fetch_sub( &cltr->nprocessors, 1u, __ATOMIC_SEQ_CST );
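Taken together, the kernel.cfa changes split processor setup into two phases: the constructors first construct their members (idle, terminated, runner), then funnel through a shared init() that registers the processor and grows the ready queue under the ready-queue RW lock, with deinit() undoing that work before member destruction. That is what removes the old if(this != mainProcessor) special cases from the processor run loop. A minimal sketch of the pattern in plain C follows; proc_t, registry_lock, lane_count and next_id are hypothetical stand-ins, not the libcfa API:

#include <pthread.h>

typedef struct proc { int id; } proc_t;

static pthread_rwlock_t registry_lock = PTHREAD_RWLOCK_INITIALIZER;
static int lane_count = 0;   /* stand-in for the cluster's ready-queue lanes */
static int next_id    = 0;   /* stand-in for doregister(); single-threaded boot path assumed */

/* Shared by every constructor path, like init() in the diff above. */
static void proc_init(proc_t *this) {
	this->id = next_id++;                   /* register the processor        */
	pthread_rwlock_wrlock(&registry_lock);  /* no push/pop while resizing    */
	lane_count += 4;                        /* stands in for ready_queue_grow() */
	pthread_rwlock_unlock(&registry_lock);
}

/* Preps destruction without destroying members, like deinit(). */
static void proc_deinit(proc_t *this) {
	(void)this;
	pthread_rwlock_wrlock(&registry_lock);
	lane_count -= 4;                        /* stands in for ready_queue_shrink() */
	pthread_rwlock_unlock(&registry_lock);
}

Because both the regular constructor and the main-processor constructor call the same pair, registration and queue resizing happen once per processor, on the constructing thread, rather than inside the kernel thread's run loop.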
libcfa/src/concurrency/ready_queue.cfa
--- r3f1d9b5
+++ r28d73c1
@@ -186,22 +186,11 @@
 //=======================================================================
 void ?{}(__ready_queue_t & this) with (this) {
-
-	lanes.data = alloc(4);
-	for( i; 4 ) {
-		(lanes.data[i]){};
-	}
-	lanes.count = 4;
-	snzi{ log2( lanes.count / 8 ) };
+	lanes.data = 0p;
+	lanes.count = 0;
 }
 
 void ^?{}(__ready_queue_t & this) with (this) {
-	verify( 4 == lanes.count );
+	verify( 0 == lanes.count );
 	verify( !query( snzi ) );
-
-	^(snzi){};
-
-	for( i; 4 ) {
-		^(lanes.data[i]){};
-	}
 	free(lanes.data);
 }
@@ -495,4 +484,19 @@
 }
 
+#warning remove when alloc is fixed
+forall( dtype T | sized(T) )
+static inline T * correct_alloc( T ptr[], size_t dim ) {
+	if( dim == 0 ) {
+		free(ptr);
+		return 0p;
+	}
+	T * temp = alloc( dim );
+	if(ptr) {
+		memcpy( temp, ptr, dim * sizeof(T));
+		free(ptr);
+	}
+	return temp;
+}
+
 // Grow the ready queue
 void ready_queue_grow (struct cluster * cltr) {
@@ -513,5 +517,5 @@
 
 	// Allocate new array (uses realloc and memcpies the data)
-	lanes.data = alloc(lanes.data, ncount);
+	lanes.data = correct_alloc(lanes.data, ncount);
 
 	// Fix the moved data
@@ -558,5 +562,5 @@
 	size_t ocount = lanes.count;
 	// Check that we have some space left
-	if(ocount < 8) abort("Program attempted to destroy more Ready Queues than were created");
+	if(ocount < 4) abort("Program attempted to destroy more Ready Queues than were created");
 
 	// reduce the actual count so push doesn't use the old queues
@@ -600,5 +604,5 @@
 
 	// Allocate new array (uses realloc and memcpies the data)
-	lanes.data = alloc(lanes.data, lanes.count);
+	lanes.data = correct_alloc(lanes.data, lanes.count);
 
 	// Fix the moved data
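correct_alloc is a stop-gap (hence the #warning) for alloc's realloc-style overload when the requested dimension is zero, a case the now-empty ready queue can reach. Plain C has the same sharp edge: realloc(p, 0) is implementation-defined. Below is a sketch of a resize helper with the well-defined contract this code wants; xresize and its parameters are hypothetical, not part of libcfa:

#include <stdlib.h>
#include <string.h>

/* Resize ptr from old_count to new_count elements of the given size.
 * new_count == 0 frees the buffer and returns NULL; on allocation
 * failure the old buffer is left intact and NULL is returned. */
static void *xresize(void *ptr, size_t old_count, size_t new_count, size_t size) {
	if (new_count == 0) {
		free(ptr);
		return NULL;
	}
	void *temp = malloc(new_count * size);
	if (temp == NULL)
		return NULL;
	if (ptr) {
		size_t keep = old_count < new_count ? old_count : new_count;
		memcpy(temp, ptr, keep * size);  /* copy only the surviving elements */
		free(ptr);
	}
	return temp;
}

With the queue now starting empty, grow and shrink bracket the lanes' lifetime, e.g. lanes = xresize(lanes, 0, 4, sizeof *lanes) on the first grow and xresize(lanes, 4, 0, sizeof *lanes) back to the empty state the new constructor and destructor expect. The sketch copies only min(old, new) elements, which is why it takes the extra old_count parameter: copying the full new dimension from a smaller source buffer would read out of bounds.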
libcfa/src/concurrency/snzi.hfa
--- r3f1d9b5
+++ r28d73c1
@@ -120,4 +120,10 @@
 //--------------------------------------------------
 // SNZI object
+void ?{}( __snzi_t & this ) {
+	this.mask = 0;
+	this.root = 0;
+	this.nodes = 0p;
+}
+
 void ?{}( __snzi_t & this, unsigned depth ) with( this ) {
 	mask = (1 << depth) - 1;
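The new zero-argument constructor gives __snzi_t a well-defined empty state (mask 0, no nodes), matching a ready queue that now starts with zero lanes and only builds a real SNZI once ready_queue_grow runs. A rough C analogue of the empty-versus-sized states; snzi_t, snzi_init and snzi_query are illustrative stand-ins, and the node layout is a placeholder, not the real SNZI tree:

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

typedef struct { unsigned mask; unsigned root; int *nodes; } snzi_t;

/* Empty state, mirroring the new ?{}( __snzi_t & ): queries are
 * trivially false and there is nothing to tear down. */
static void snzi_init_empty(snzi_t *this) {
	this->mask  = 0;
	this->root  = 0;
	this->nodes = NULL;
}

/* Sized state, mirroring ?{}( __snzi_t &, unsigned depth ). */
static void snzi_init(snzi_t *this, unsigned depth) {
	this->mask  = (1u << depth) - 1;
	this->root  = this->mask;                          /* placeholder root index */
	this->nodes = calloc(this->mask + 1u, sizeof *this->nodes);
}

static bool snzi_query(const snzi_t *this) {
	return this->nodes != NULL && this->nodes[this->root] != 0;
}

With the empty state in place, the verify( !query( snzi ) ) check in the ready-queue destructor holds even for a cluster whose queue never grew.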