- Timestamp: Dec 16, 2020, 4:01:57 PM
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 8ba363e, c8025a21
- Parents: b3c8496 (diff), 3e5dd913 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Location: libcfa/src
- Files: 8 edited
libcfa/src/concurrency/coroutine.cfa (rb3c8496 → r53449a4)

```diff
 // Created On       : Mon Nov 28 12:27:26 2016
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Fri Oct 23 23:05:24 2020
-// Update Count     : 22
+// Last Modified On : Tue Dec 15 12:06:04 2020
+// Update Count     : 23
 //
@@
 #include "kernel_private.hfa"
 #include "exception.hfa"
+#include "math.hfa"
+
+#define CFA_COROUTINE_USE_MMAP 0
 
 #define __CFA_INVOKE_PRIVATE__
@@
 static const size_t MinStackSize = 1000;
 extern size_t __page_size;              // architecture pagesize HACK, should go in proper runtime singleton
+extern int __map_prot;
 
 void __stack_prepare( __stack_info_t * this, size_t create_size );
+void __stack_clean  ( __stack_info_t * this );
 
 //-----------------------------------------------------------------------------
@@
 	bool userStack = ((intptr_t)this.storage & 0x1) != 0;
 	if ( ! userStack && this.storage ) {
-		__attribute__((may_alias)) intptr_t * istorage = (intptr_t *)&this.storage;
-		*istorage &= (intptr_t)-1;
-
-		void * storage = this.storage->limit;
-		__cfaabi_dbg_debug_do(
-			storage = (char*)(storage) - __page_size;
-			if ( mprotect( storage, __page_size, PROT_READ | PROT_WRITE ) == -1 ) {
-				abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
-			}
-		);
-		__cfaabi_dbg_print_safe("Kernel : Deleting stack %p\n", storage);
-		free( storage );
+		__stack_clean( &this );
 	}
 }
@@
 	assert(__page_size != 0l);
 	size_t size = libCeiling( storageSize, 16 ) + stack_data_size;
+	size = ceiling(size, __page_size);
 
 	// If we are running debug, we also need to allocate a guardpage to catch stack overflows.
 	void * storage;
-	__cfaabi_dbg_debug_do(
-		storage = memalign( __page_size, size + __page_size );
-	);
-	__cfaabi_dbg_no_debug_do(
-		storage = (void*)malloc(size);
-	);
-
+	#if CFA_COROUTINE_USE_MMAP
+		storage = mmap(0p, size + __page_size, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+		if(storage == ((void*)-1)) {
+			abort( "coroutine stack creation : internal error, mmap failure, error(%d) %s.", errno, strerror( errno ) );
+		}
+		if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
+			abort( "coroutine stack creation : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
+		} // if
+		storage = (void *)(((intptr_t)storage) + __page_size);
+	#else
+		__cfaabi_dbg_debug_do(
+			storage = memalign( __page_size, size + __page_size );
+		);
+		__cfaabi_dbg_no_debug_do(
+			storage = (void*)malloc(size);
+		);
+
+		__cfaabi_dbg_debug_do(
+			if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
+				abort( "__stack_alloc : internal error, mprotect failure, error(%d) %s.", (int)errno, strerror( (int)errno ) );
+			}
+			storage = (void *)(((intptr_t)storage) + __page_size);
+		);
+	#endif
 	__cfaabi_dbg_print_safe("Kernel : Created stack %p of size %zu\n", storage, size);
-	__cfaabi_dbg_debug_do(
-		if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
-			abort( "__stack_alloc : internal error, mprotect failure, error(%d) %s.", (int)errno, strerror( (int)errno ) );
-		}
-		storage = (void *)(((intptr_t)storage) + __page_size);
-	);
 
 	verify( ((intptr_t)storage & (libAlign() - 1)) == 0ul );
 	return [storage, size];
+}
+
+void __stack_clean  ( __stack_info_t * this ) {
+	size_t size = ((intptr_t)this->storage->base) - ((intptr_t)this->storage->limit) + sizeof(__stack_t);
+	void * storage = this->storage->limit;
+
+	#if CFA_COROUTINE_USE_MMAP
+		storage = (void *)(((intptr_t)storage) - __page_size);
+		if(munmap(storage, size + __page_size) == -1) {
+			abort( "coroutine stack destruction : internal error, munmap failure, error(%d) %s.", errno, strerror( errno ) );
+		}
+	#else
+		__cfaabi_dbg_debug_do(
+			storage = (char*)(storage) - __page_size;
+			if ( mprotect( storage, __page_size, __map_prot ) == -1 ) {
+				abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
+			}
+		);
+
+		free( storage );
+	#endif
+	__cfaabi_dbg_print_safe("Kernel : Deleting stack %p\n", storage);
 }
@@
 	assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %zd bytes for a stack.", size, MinStackSize );
 
-	this->storage = (__stack_t *)((intptr_t)storage + size);
+	this->storage = (__stack_t *)((intptr_t)storage + size - sizeof(__stack_t));
 	this->storage->limit = storage;
-	this->storage->base  = (void*)((intptr_t)storage + size);
+	this->storage->base  = (void*)((intptr_t)storage + size - sizeof(__stack_t));
 	this->storage->exception_context.top_resume = 0p;
 	this->storage->exception_context.current_exception = 0p;
```
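The interesting change here is the optional `CFA_COROUTINE_USE_MMAP` path: coroutine stacks can be reserved with `mmap` and given a `PROT_NONE` guard page at the low end, so an overflow faults immediately instead of silently corrupting adjacent memory, and the new `__stack_clean` releases the whole region, guard page included. The sketch below is a hypothetical, standalone C rendering of that pattern, not the CFA runtime code; all names are illustrative.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

// Reserve a coroutine-style stack with one PROT_NONE guard page at the low
// end, so overflow faults immediately instead of corrupting nearby memory.
static void * stack_alloc(size_t size, size_t page) {
	void * storage = mmap(NULL, size + page, PROT_READ | PROT_WRITE,
	                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (storage == MAP_FAILED) { perror("mmap"); exit(EXIT_FAILURE); }
	if (mprotect(storage, page, PROT_NONE) == -1) { perror("mprotect"); exit(EXIT_FAILURE); }
	return (char *)storage + page;       // usable region starts above the guard
}

// Release the stack; munmap must step back over the guard page so the
// address and length match exactly what stack_alloc originally mapped.
static void stack_free(void * limit, size_t size, size_t page) {
	if (munmap((char *)limit - page, size + page) == -1) { perror("munmap"); exit(EXIT_FAILURE); }
}

int main(void) {
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t size = 16 * page;
	void * stack = stack_alloc(size, page);
	memset(stack, 0, size);              // the usable pages are writable
	stack_free(stack, size, page);
	return 0;
}
```

The same arithmetic explains the `__stack_prepare` change at the bottom of the diff: the `__stack_t` bookkeeping block now sits at `storage + size - sizeof(__stack_t)`, keeping it inside the allocated region, which matters once the bytes past the allocation may belong to an unmapped or protected page.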
libcfa/src/concurrency/coroutine.hfa (rb3c8496 → r53449a4)

```diff
 }
 
-extern void __stack_prepare ( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
+extern void __stack_prepare( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
+extern void __stack_clean  ( __stack_info_t * this );
+
 
 // Suspend implementation inlined for performance
@@
 
 	if( unlikely(dst->context.SP == 0p) ) {
-		active_thread()->curr_cor = dst;
 		__stack_prepare(&dst->stack, 65000);
 		__cfactx_start(main, dst, cor, __cfactx_invoke_coroutine);
-		active_thread()->curr_cor = src;
 	}
 
```
libcfa/src/concurrency/io/setup.cfa (rb3c8496 → r53449a4)

```diff
 #define _GNU_SOURCE         /* See feature_test_macros(7) */
 
+#if defined(__CFA_DEBUG__)
+	// #define __CFA_DEBUG_PRINT_IO__
+	// #define __CFA_DEBUG_PRINT_IO_CORE__
+#endif
+
 #include "io/types.hfa"
 #include "kernel.hfa"
@@
 
 void __kernel_io_startup(void) {
-	__cfaabi_dbg_print_safe("Kernel : Creating EPOLL instance\n" );
+	__cfadbg_print_safe(io_core, "Kernel : Creating EPOLL instance\n" );
 
 	iopoll.epollfd = epoll_create1(0);
@@
 	}
 
-	__cfaabi_dbg_print_safe("Kernel : Starting io poller thread\n" );
+	__cfadbg_print_safe(io_core, "Kernel : Starting io poller thread\n" );
 
 	iopoll.run = true;
@@
 	// Wait for the io poller thread to finish
 
-	pthread_join( iopoll.thrd, 0p );
-	free( iopoll.stack );
+	__destroy_pthread( iopoll.thrd, iopoll.stack, 0p );
 
 	int ret = close(iopoll.epollfd);
@@
 	// Io polling is now fully stopped
 
-	__cfaabi_dbg_print_safe("Kernel : IO poller stopped\n" );
+	__cfadbg_print_safe(io_core, "Kernel : IO poller stopped\n" );
 }
 
@@
 	id.id = doregister(&id);
 	__cfaabi_tls.this_proc_id = &id;
-	__cfaabi_dbg_print_safe("Kernel : IO poller thread starting\n" );
+	__cfadbg_print_safe(io_core, "Kernel : IO poller thread starting\n" );
 
 	// Block signals to control when they arrive
@@
 	}
 
-	__cfaabi_dbg_print_safe("Kernel : IO poller thread stopping\n" );
+	__cfadbg_print_safe(io_core, "Kernel : IO poller thread stopping\n" );
 	unregister(&id);
 	return 0p;
```
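This diff mostly swaps the generic `__cfaabi_dbg_print_safe` for the subsystem-tagged `__cfadbg_print_safe(io_core, ...)`, gated by the commented-out `__CFA_DEBUG_PRINT_IO_CORE__` switch added at the top, so io-poller tracing can be enabled without flooding the log with every other subsystem. A minimal sketch of how such a per-subsystem gate can work, using hypothetical macro names (the real CFA macros differ):

```c
#include <stdio.h>

// Assumed per-subsystem switches; flip one on to trace only that subsystem.
#define DEBUG_PRINT_IO_CORE 1
#define DEBUG_PRINT_RUNTIME_CORE 0

// Token-paste the subsystem name onto its switch; disabled calls reduce to
// `if (0)` and are compiled away.
#define dbg_print(subsystem, ...) \
	do { if (DEBUG_PRINT_##subsystem) fprintf(stderr, __VA_ARGS__); } while (0)

int main(void) {
	dbg_print(IO_CORE, "Kernel : Creating EPOLL instance\n");  // printed
	dbg_print(RUNTIME_CORE, "Kernel : Starting\n");            // compiled out
	return 0;
}
```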
libcfa/src/concurrency/kernel/startup.cfa (rb3c8496 → r53449a4)

```diff
 #include "kernel_private.hfa"
 #include "startup.hfa"                  // STARTUP_PRIORITY_XXX
+#include "math.hfa"
+
+#define CFA_PROCESSOR_USE_MMAP 0
 
 //-----------------------------------------------------------------------------
@@
 }
 
-size_t __page_size = 0;
+extern size_t __page_size;
 
 //-----------------------------------------------------------------------------
@@
 	/* paranoid */ verify( ! __preemption_enabled() );
 	__cfadbg_print_safe(runtime_core, "Kernel : Starting\n");
-
-	__page_size = sysconf( _SC_PAGESIZE );
 
 	__cfa_dbg_global_clusters.list{ __get };
@@
 }
 
+extern size_t __page_size;
 void ^?{}(processor & this) with( this ){
 	if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
@@
 	}
 
-	int err = pthread_join( kernel_thread, 0p );
-	if( err != 0 ) abort("KERNEL ERROR: joining processor %p caused error %s\n", &this, strerror(err));
-
-	free( this.stack );
+	__destroy_pthread( kernel_thread, this.stack, 0p );
 
 	disable_interrupts();
@@
 
 	void * stack;
-	__cfaabi_dbg_debug_do(
-		stack = memalign( __page_size, stacksize + __page_size );
-		// pthread has no mechanism to create the guard page in user supplied stack.
-		if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
-			abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
-		} // if
-	);
-	__cfaabi_dbg_no_debug_do(
-		stack = malloc( stacksize );
-	);
+	#if CFA_PROCESSOR_USE_MMAP
+		stacksize = ceiling( stacksize, __page_size ) + __page_size;
+		stack = mmap(0p, stacksize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+		if(stack == ((void*)-1)) {
+			abort( "pthread stack creation : internal error, mmap failure, error(%d) %s.", errno, strerror( errno ) );
+		}
+		if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
+			abort( "pthread stack creation : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
+		} // if
+	#else
+		__cfaabi_dbg_debug_do(
+			stack = memalign( __page_size, stacksize + __page_size );
+			// pthread has no mechanism to create the guard page in user supplied stack.
+			if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
+				abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
+			} // if
+		);
+		__cfaabi_dbg_no_debug_do(
+			stack = malloc( stacksize );
+		);
+	#endif
 
 	check( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" );
@@
 	return stack;
 }
+
+void __destroy_pthread( pthread_t pthread, void * stack, void ** retval ) {
+	int err = pthread_join( pthread, retval );
+	if( err != 0 ) abort("KERNEL ERROR: joining pthread %p caused error %s\n", (void*)pthread, strerror(err));
+
+	#if CFA_PROCESSOR_USE_MMAP
+		pthread_attr_t attr;
+
+		check( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute
+
+		size_t stacksize;
+		// default stack size, normally defined by shell limit
+		check( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" );
+		assert( stacksize >= PTHREAD_STACK_MIN );
+		stacksize += __page_size;
+
+		if(munmap(stack, stacksize) == -1) {
+			abort( "pthread stack destruction : internal error, munmap failure, error(%d) %s.", errno, strerror( errno ) );
+		}
+	#else
+		free( stack );
+	#endif
+}
+
 
 #if defined(__CFA_WITH_VERIFY__)
```
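`__create_pthread` and the new `__destroy_pthread` pair up the lifecycle of a kernel thread running on a caller-owned, guard-paged stack: whoever maps the stack is also responsible for joining the thread and unmapping it. The following is a self-contained C sketch of that lifecycle under assumed Linux/POSIX behaviour; the names are illustrative, not the CFA runtime's:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static void * worker(void * arg) {
	printf("worker running on a user-supplied stack\n");
	return arg;
}

int main(void) {
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t stacksize = 64 * page;        // usable stack size
	// Map the usable stack plus one guard page below it.
	char * region = mmap(NULL, stacksize + page, PROT_READ | PROT_WRITE,
	                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (region == MAP_FAILED) { perror("mmap"); return EXIT_FAILURE; }
	if (mprotect(region, page, PROT_NONE) == -1) { perror("mprotect"); return EXIT_FAILURE; }

	pthread_attr_t attr;
	pthread_attr_init(&attr);
	// Hand pthread the usable region only; overflow now faults on the guard.
	if (pthread_attr_setstack(&attr, region + page, stacksize) != 0) {
		fprintf(stderr, "pthread_attr_setstack failed\n");
		return EXIT_FAILURE;
	}

	pthread_t thrd;
	if (pthread_create(&thrd, &attr, worker, NULL) != 0) { perror("pthread_create"); return EXIT_FAILURE; }

	// Destroy side: join first, then release the stack, guard page included.
	pthread_join(thrd, NULL);
	if (munmap(region, stacksize + page) == -1) { perror("munmap"); return EXIT_FAILURE; }
	return 0;
}
```

One subtlety visible in the diff: with `CFA_PROCESSOR_USE_MMAP`, `__destroy_pthread` recomputes the stack size from a fresh default attribute rather than storing it alongside the stack pointer, which stays correct only because `__create_pthread` sizes every stack from that same default.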
libcfa/src/concurrency/kernel_private.hfa (rb3c8496 → r53449a4)

```diff
 
 void * __create_pthread( pthread_t *, void * (*)(void *), void * );
+void __destroy_pthread( pthread_t pthread, void * stack, void ** retval );
 
 
```
libcfa/src/concurrency/preemption.cfa (rb3c8496 → r53449a4)

```diff
 	// Wait for the preemption thread to finish
 
-	pthread_join( alarm_thread, 0p );
-	free( alarm_stack );
+	__destroy_pthread( alarm_thread, alarm_stack, 0p );
 
 	// Preemption is now fully stopped
```
libcfa/src/heap.cfa (rb3c8496 → r53449a4)

```diff
 // Created On       : Tue Dec 19 21:58:35 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Mon Sep 7 22:17:46 2020
-// Update Count     : 957
+// Last Modified On : Wed Dec 16 12:28:25 2020
+// Update Count     : 1023
 //
 
 #include <unistd.h>                     // sbrk, sysconf
+#include <stdlib.h>                     // EXIT_FAILURE
 #include <stdbool.h>                    // true, false
 #include <stdio.h>                      // snprintf, fileno
@@
 	// Define the default extension heap amount in units of bytes. When the uC++ supplied heap reaches the brk address,
 	// the brk address is extended by the extension amount.
-	__CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),
+	__CFA_DEFAULT_HEAP_EXPANSION__ = (10 * 1024 * 1024),
 
 	// Define the mmap crossover point during allocation. Allocations less than this amount are allocated from buckets;
@@
 
 // statically allocated variables => zero filled.
-static size_t pageSize;                 // architecture pagesize
+size_t __page_size;                     // architecture pagesize
+int __map_prot;                         // common mmap/mprotect protection
 static size_t heapExpand;               // sbrk advance
 static size_t mmapStart;                // cross over point for mmap
@@
 #endif // FASTLOOKUP
 
-static int mmapFd = -1;                 // fake or actual fd for anonymous file
+static const off_t mmapFd = -1;         // fake or actual fd for anonymous file
 #ifdef __CFA_DEBUG__
 static bool heapBoot = 0;               // detect recursion during boot
@@
 
 static inline bool setMmapStart( size_t value ) { // true => mmapped, false => sbrk
-	if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return false;
+	if ( value < __page_size || bucketSizes[NoBucketSizes - 1] < value ) return false;
 	mmapStart = value;                  // set global
 
@@
 	header = headerAddr( addr );
 
-	if ( unlikely( heapEnd < addr ) ) { // mmapped ?
+	if ( unlikely( addr < heapBegin || heapEnd < addr ) ) { // mmapped ?
 		fakeHeader( header, alignment );
 		size = header->kind.real.blockSize & -3; // mmap size
 
@@
 	#ifdef __CFA_DEBUG__
-	checkHeader( addr < heapBegin, name, addr ); // bad low address ?
+	checkHeader( header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ?
 	#endif // __CFA_DEBUG__
 
@@
 } // headers
 
+#ifdef __CFA_DEBUG__
+#if __SIZEOF_POINTER__ == 4
+#define MASK 0xdeadbeef
+#else
+#define MASK 0xdeadbeefdeadbeef
+#endif
+#define STRIDE size_t
+
+static void * Memset( void * addr, STRIDE size ) { // debug only
+	if ( size % sizeof(STRIDE) != 0 ) abort( "Memset() : internal error, size %zd not multiple of %zd.", size, sizeof(STRIDE) );
+	if ( (STRIDE)addr % sizeof(STRIDE) != 0 ) abort( "Memset() : internal error, addr %p not multiple of %zd.", addr, sizeof(STRIDE) );
+
+	STRIDE * end = (STRIDE *)addr + size / sizeof(STRIDE);
+	for ( STRIDE * p = (STRIDE *)addr; p < end; p += 1 ) *p = MASK;
+	return addr;
+} // Memset
+#endif // __CFA_DEBUG__
+
+
 #define NO_MEMORY_MSG "insufficient heap memory available for allocating %zd new bytes."
@@
 	// If the size requested is bigger than the current remaining storage, increase the size of the heap.
 
-	size_t increase = ceiling2( size > heapExpand ? size : heapExpand, libAlign() );
+	size_t increase = ceiling2( size > heapExpand ? size : heapExpand, __page_size );
+	// Do not call abort or strerror( errno ) as they may call malloc.
 	if ( sbrk( increase ) == (void *)-1 ) { // failed, no memory ?
 		unlock( extlock );
-		abort( NO_MEMORY_MSG, size );   // give up
+		__cfaabi_bits_print_nolock( STDERR_FILENO, NO_MEMORY_MSG, size );
+		_exit( EXIT_FAILURE );
+	} // if
+	if ( mprotect( (char *)heapEnd + heapRemaining, increase, __map_prot ) ) {
+		unlock( extlock );
+		__cfaabi_bits_print_nolock( STDERR_FILENO, "extend() : internal error, mprotect failure, heapEnd:%p size:%zd, errno:%d.\n", heapEnd, increase, errno );
+		_exit( EXIT_FAILURE );
 	} // if
 	#ifdef __STATISTICS__
@@
 	#ifdef __CFA_DEBUG__
 	// Set new memory to garbage so subsequent uninitialized usages might fail.
-	memset( (char *)heapEnd + heapRemaining, '\377', increase );
+	memset( (char *)heapEnd + heapRemaining, '\xde', increase );
+	//Memset( (char *)heapEnd + heapRemaining, increase );
 	#endif // __CFA_DEBUG__
 	rem = heapRemaining + increase - size;
@@
 		block->header.kind.real.home = freeElem; // pointer back to free list of apropriate size
 	} else {                            // large size => mmap
-		if ( unlikely( size > ULONG_MAX - pageSize ) ) return 0p;
-		tsize = ceiling2( tsize, pageSize ); // must be multiple of page size
+		if ( unlikely( size > ULONG_MAX - __page_size ) ) return 0p;
+		tsize = ceiling2( tsize, __page_size ); // must be multiple of page size
 		#ifdef __STATISTICS__
 		__atomic_add_fetch( &mmap_calls, 1, __ATOMIC_SEQ_CST );
@@
 		#endif // __STATISTICS__
 
-		block = (HeapManager.Storage *)mmap( 0, tsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
+		block = (HeapManager.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
 		if ( block == (HeapManager.Storage *)MAP_FAILED ) { // failed ?
 			if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); // no memory
 			// Do not call strerror( errno ) as it may call malloc.
-			abort( "(HeapManager &)0x%p.doMalloc() : internal error, mmap failure, size:%zu error:%d.", &heapManager, tsize, errno );
+			abort( "(HeapManager &)0x%p.doMalloc() : internal error, mmap failure, size:%zu errno:%d.", &heapManager, tsize, errno );
 		} //if
 		#ifdef __CFA_DEBUG__
 		// Set new memory to garbage so subsequent uninitialized usages might fail.
-		memset( block, '\377', tsize );
+		memset( block, '\xde', tsize );
+		//Memset( block, tsize );
 		#endif // __CFA_DEBUG__
 		block->header.kind.real.blockSize = tsize; // storage size for munmap
@@
 		#endif // __STATISTICS__
 		if ( munmap( header, size ) == -1 ) {
-			#ifdef __CFA_DEBUG__
 			abort( "Attempt to deallocate storage %p not allocated or with corrupt header.\n"
 				   "Possible cause is invalid pointer.",
 				   addr );
-			#endif // __CFA_DEBUG__
 		} // if
 	} else {
 		#ifdef __CFA_DEBUG__
 		// Set free memory to garbage so subsequent usages might fail.
-		memset( ((HeapManager.Storage *)header)->data, '\377', freeElem->blockSize - sizeof( HeapManager.Storage ) );
+		memset( ((HeapManager.Storage *)header)->data, '\xde', freeElem->blockSize - sizeof( HeapManager.Storage ) );
+		//Memset( ((HeapManager.Storage *)header)->data, freeElem->blockSize - sizeof( HeapManager.Storage ) );
 		#endif // __CFA_DEBUG__
 
@@
 			for ( HeapManager.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {
 #else
-			// for ( HeapManager.Storage * p = top( freeLists[i].freeList ); p != 0p; p = (p)`next->top ) {
+			for(;;) {
+			// for ( HeapManager.Storage * p = top( freeLists[i].freeList ); p != 0p; p = (p)`next->top ) {
 			// for ( HeapManager.Storage * p = top( freeLists[i].freeList ); p != 0p; /* p = getNext( p )->top */) {
-			for ( HeapManager.Storage * p ;; /* p = getNext( p )->top */) {
-				HeapManager.Storage * temp = p->header.kind.real.next.top; // FIX ME: direct assignent fails, initialization works`
+			// HeapManager.Storage * temp = p->header.kind.real.next.top; // FIX ME: direct assignent fails, initialization works`
 			// typeof(p) temp = (( p )`next)->top; // FIX ME: direct assignent fails, initialization works`
 			// p = temp;
@@
 
 static void ?{}( HeapManager & manager ) with( manager ) {
-	pageSize = sysconf( _SC_PAGESIZE );
+	__page_size = sysconf( _SC_PAGESIZE );
+	__map_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
 
 	for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists
@@
 
 	char * end = (char *)sbrk( 0 );
-	heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, libAlign()) - end ); // move start of heap to multiple of alignment
+	heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, __page_size ) - end ); // move start of heap to multiple of alignment
 } // HeapManager
@@
 	#ifdef __CFA_DEBUG__
 	if ( heapBoot ) {                   // check for recursion during system boot
-		// DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
 		abort( "boot() : internal error, recursively invoked during system boot." );
 	} // if
@@
 		header->kind.real.size = size;  // reset allocation size
 		if ( unlikely( ozfill ) && size > osize ) { // previous request zero fill and larger ?
-			memset( (char *)oaddr + osize, (int)'\0', size - osize ); // initialize added storage
+			memset( (char *)oaddr + osize, '\0', size - osize ); // initialize added storage
 		} // if
 		return oaddr;
@@
 		header->kind.real.blockSize |= 2; // mark new request as zero filled
 		if ( size > osize ) {           // previous request larger ?
-			memset( (char *)naddr + osize, (int)'\0', size - osize ); // initialize added storage
+			memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
 		} // if
 	} // if
@@
 	return cmemalignNoStats( alignment, dim, elemSize );
 } // cmemalign
+
 
 // Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple
@@
 } // posix_memalign
+
 
 // Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the
 // page size. It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
 void * valloc( size_t size ) {
-	return memalign( pageSize, size );
+	return memalign( __page_size, size );
 } // valloc
 
@@
 // Same as valloc but rounds size to multiple of page size.
 void * pvalloc( size_t size ) {
-	return memalign( pageSize, ceiling2( size, pageSize ) );
+	return memalign( __page_size, ceiling2( size, __page_size ) );
 } // pvalloc
 
@@
 	choose( option ) {
 	  case M_TOP_PAD:
-		heapExpand = ceiling2( value, pageSize ); return 1;
+		heapExpand = ceiling2( value, __page_size ); return 1;
 	  case M_MMAP_THRESHOLD:
 		if ( setMmapStart( value ) ) return 1;
@@
 		header->kind.real.blockSize |= 2; // mark new request as zero filled
 		if ( size > osize ) {           // previous request larger ?
-			memset( (char *)naddr + osize, (int)'\0', size - osize ); // initialize added storage
+			memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
 		} // if
 	} // if
```
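Two themes run through this diff: the page size and mmap protection become the runtime-wide globals `__page_size`/`__map_prot`, and the out-of-memory path in `extend()` now rounds growth to whole pages and avoids `abort()`/`strerror()` (both of which may themselves allocate) in favour of a lock-free print and `_exit()`. The following is a hypothetical C sketch of that sbrk-extension discipline, with illustrative names, not the CFA heap itself:

```c
#define _DEFAULT_SOURCE   // expose sbrk() on glibc
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

static size_t heap_remaining;  // unused bytes at the end of the managed region

// Round value up to a multiple of align (align must be a power of 2).
static size_t ceiling2(size_t value, size_t align) {
	return (value + align - 1) & ~(align - 1);
}

// Grow the sbrk region by at least `size` bytes, in whole pages, and by at
// least the configured expansion amount so growth is amortized.
static void * heap_extend(size_t size, size_t expand, size_t page) {
	size_t increase = ceiling2(size > expand ? size : expand, page);
	void * old_end = sbrk((intptr_t)increase);
	if (old_end == (void *)-1) {
		// Out of memory: no abort()/strerror() here, both may call malloc.
		static const char msg[] = "heap_extend: insufficient memory\n";
		write(STDERR_FILENO, msg, sizeof(msg) - 1);
		_exit(EXIT_FAILURE);
	}
	heap_remaining += increase;
	return old_end;  // start of the newly acquired storage
}

int main(void) {
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	char * p = heap_extend(1000, 10 * 1024 * 1024, page);  // 10 MiB default expansion
	p[0] = 1;  // the new storage is readable and writable
	return 0;
}
```

The debug-fill change from `'\377'` to `'\xde'` (and the optional word-wide `Memset` writing `0xdeadbeef...`) serves the same diagnostic goal: uninitialized or freed storage carries a pattern that is easy to recognize in a debugger when a stale pointer is dereferenced.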
libcfa/src/stdlib.hfa (rb3c8496 → r53449a4)

```diff
 // Created On       : Thu Jan 28 17:12:35 2016
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Tue Dec 8 18:27:22 2020
-// Update Count     : 524
+// Last Modified On : Sat Dec 12 13:52:34 2020
+// Update Count     : 536
 //
 
@@
 
 static inline forall( dtype T | sized(T) ) {
-	// Cforall safe equivalents, i.e., implicit size specification
+	// CFA safe equivalents, i.e., implicit size specification
 
 	T * malloc( void ) {
@@
 
 static inline forall( dtype T | sized(T) ) {
-	// Cforall safe initialization/copy, i.e., implicit size specification, non-array types
+	// CFA safe initialization/copy, i.e., implicit size specification, non-array types
 	T * memset( T * dest, char fill ) {
 		return (T *)memset( dest, fill, sizeof(T) );
@@
 	} // memcpy
 
-	// Cforall safe initialization/copy, i.e., implicit size specification, array types
+	// CFA safe initialization/copy, i.e., implicit size specification, array types
 	T * amemset( T dest[], char fill, size_t dim ) {
 		return (T *)(void *)memset( dest, fill, dim * sizeof(T) ); // C memset
@@
 } // distribution
 
-// Cforall deallocation for multiple objects
-static inline forall( dtype T, ttype TT | { void free( TT ); } )
-void free( T * addr, TT rest ) {
-	free( (void *)addr );               // use C free
+// CFA deallocation for multiple objects
+static inline forall( dtype T )         // FIX ME, problems with 0p in list
+void free( T * ptr ) {
+	free( (void *)ptr );                // C free
+} // free
+static inline forall( dtype T, ttype TT | { void free( TT ); } )
+void free( T * ptr, TT rest ) {
+	free( ptr );
 	free( rest );
 } // free
 
-// Cforall allocation/deallocation and constructor/destructor, non-array types
+// CFA allocation/deallocation and constructor/destructor, non-array types
 static inline forall( dtype T | sized(T), ttype TT | { void ?{}( T &, TT ); } )
 T * new( TT p ) {
@@
 		^(*ptr){};                      // run destructor
 	} // if
-	free( ptr );
+	free( ptr );                        // always call free
 } // delete
-
 static inline forall( dtype T, ttype TT | { void ^?{}( T & ); void delete( TT ); } )
 void delete( T * ptr, TT rest ) {
@@
 } // delete
 
-// Cforall allocation/deallocation and constructor/destructor, array types
+// CFA allocation/deallocation and constructor/destructor, array types
 forall( dtype T | sized(T), ttype TT | { void ?{}( T &, TT ); } ) T * anew( size_t dim, TT p );
 forall( dtype T | sized(T) | { void ^?{}( T & ); } ) void adelete( T arr[] );
```
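The new single-argument `free` overload gives the variadic `free( ptr, rest )` a typed base case to recurse into, so the head pointer no longer needs an explicit cast to `void *`; the comment on `delete` also makes explicit that `free` runs whether or not a destructor does. An illustrative Cforall usage snippet (not part of the changeset; the struct and variable names are hypothetical) showing how these overloads compose:

```cfa
#include <stdlib.hfa>

struct Pair { int x, y; };
void  ?{}( Pair & p, int x, int y ) { p.x = x; p.y = y; }  // constructor
void ^?{}( Pair & p ) {}                                   // destructor

int main() {
	int * a = malloc();                 // CFA type-safe malloc: size inferred from a's type
	double * b = malloc();
	free( a, b );                       // variadic free releases both in one call

	Pair * p = new( 3, 4 );             // allocate, then run the (int, int) constructor
	delete( p );                        // run destructor, then always free
}
```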