Changeset 69ec0fb
- Timestamp:
- Apr 25, 2022, 7:00:39 PM
- Branches:
- ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
- Children:
- c73213b
- Parents:
- e357efb
- File:
- 1 edited
Legend:
- Unmodified (no prefix)
- Added (+)
- Removed (-)
libcfa/src/heap.cfa
e357efb → 69ec0fb

@@ -10,6 +10,6 @@
 // Created On       : Tue Dec 19 21:58:35 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Sun Apr 24 09:58:01 2022
-// Update Count     : 1146
+// Last Modified On : Mon Apr 25 18:51:36 2022
+// Update Count     : 1147
 //
 
@@ -116,9 +116,10 @@
 
 // statically allocated variables => zero filled.
-size_t __page_size; // architecture pagesize
-int __map_prot; // common mmap/mprotect protection
 static size_t heapExpand; // sbrk advance
 static size_t mmapStart; // cross over point for mmap
 static unsigned int maxBucketsUsed; // maximum number of buckets in use
+// extern visibility, used by runtime kernel
+size_t __page_size; // architecture pagesize
+int __map_prot; // common mmap/mprotect protection
 
 
@@ -540,5 +541,5 @@
 	// If the size requested is bigger than the current remaining storage, increase the size of the heap.
 
-	size_t increase = ceiling2( size > heapExpand ? size : heapExpand, libAlign() );
+	size_t increase = ceiling2( size > heapExpand ? size : heapExpand, __page_size );
 	// Do not call abort or strerror( errno ) as they may call malloc.
 	if ( sbrk( increase ) == (void *)-1 ) { // failed, no memory ?
@@ -549,9 +550,9 @@
 
 	// Make storage executable for thunks.
-	//if ( mprotect( (char *)heapEnd + heapRemaining, increase, __map_prot ) ) {
-	//	unlock( extlock );
-	//	__cfaabi_bits_print_nolock( STDERR_FILENO, "extend() : internal error, mprotect failure, heapEnd:%p size:%zd, errno:%d.\n", heapEnd, increase, errno );
-	//	_exit( EXIT_FAILURE );
-	//} // if
+	if ( mprotect( (char *)heapEnd + heapRemaining, increase, __map_prot ) ) {
+		unlock( extlock );
+		__cfaabi_bits_print_nolock( STDERR_FILENO, "extend() : internal error, mprotect failure, heapEnd:%p size:%zd, errno:%d.\n", heapEnd, increase, errno );
+		_exit( EXIT_FAILURE );
+	} // if
 
 	#ifdef __STATISTICS__
@@ -580,4 +581,5 @@
 	// Look up size in the size list. Make sure the user request includes space for the header that must be allocated
 	// along with the block and is a multiple of the alignment size.
+
 	size_t tsize = size + sizeof(Heap.Storage);
 
@@ -855,5 +857,5 @@
 		// SKULLDUGGERY: insert the offset to the start of the actual storage block and remember alignment
 		fakeHeader->kind.fake.offset = (char *)fakeHeader - (char *)RealHeader;
-		// SKULLDUGGERY: odd alignment imples fake header
+		// SKULLDUGGERY: odd alignment implies fake header
 		fakeHeader->kind.fake.alignment = MarkAlignmentBit( alignment );
 
@@ -1135,5 +1137,5 @@
 // free(3).
 int posix_memalign( void ** memptr, size_t alignment, size_t size ) {
 	if ( unlikely( alignment < libAlign() || ! is_pow2( alignment ) ) ) return EINVAL; // check alignment
 	*memptr = memalign( alignment, size );
 	return 0;
@@ -1189,19 +1191,4 @@
 
 
-// Set the alignment for an the allocation and return previous alignment or 0 if no alignment.
-// size_t malloc_alignment_set$( void * addr, size_t alignment ) {
-// 	if ( unlikely( addr == 0p ) ) return libAlign(); // minimum alignment
-// 	size_t ret;
-// 	Heap.Storage.Header * header = HeaderAddr( addr );
-// 	if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
-// 		ret = header->kind.fake.alignment & -2; // remove flag from old value
-// 		header->kind.fake.alignment = alignment | 1; // add flag to new value
-// 	} else {
-// 		ret = 0; // => no alignment to change
-// 	} // if
-// 	return ret;
-// } // malloc_alignment_set$
-
-
 // Returns true if the allocation is zero filled, e.g., allocated by calloc().
 bool malloc_zero_fill( void * addr ) {
@@ -1214,16 +1201,4 @@
 } // malloc_zero_fill
 
-// Set allocation is zero filled and return previous zero filled.
-// bool malloc_zero_fill_set$( void * addr ) {
-// 	if ( unlikely( addr == 0p ) ) return false; // null allocation is not zero fill
-// 	Heap.Storage.Header * header = HeaderAddr( addr );
-// 	if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
-// 		header = RealHeader( header ); // backup from fake to real header
-// 	} // if
-// 	bool ret = (header->kind.real.blockSize & 2) != 0; // zero filled ?
-// 	header->kind.real.blockSize |= 2; // mark as zero filled
-// 	return ret;
-// } // malloc_zero_fill_set$
-
 
 // Returns original total allocation size (not bucket size) => array size is dimension * sizeof(T).
@@ -1236,16 +1211,4 @@
 	return header->kind.real.size;
 } // malloc_size
-
-// Set allocation size and return previous size.
-// size_t malloc_size_set$( void * addr, size_t size ) {
-// 	if ( unlikely( addr == 0p ) ) return 0; // null allocation has 0 size
-// 	Heap.Storage.Header * header = HeaderAddr( addr );
-// 	if ( (header->kind.fake.alignment & 1) == 1 ) { // fake header ?
-// 		header = RealHeader( header ); // backup from fake to real header
-// 	} // if
-// 	size_t ret = header->kind.real.size;
-// 	header->kind.real.size = size;
-// 	return ret;
-// } // malloc_size_set$
 
 
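The two functional changes in extend() are rounding the sbrk increase up to __page_size instead of libAlign(), and enabling the previously commented-out mprotect call so newly acquired storage is executable for thunks. Presumably the page-size rounding keeps the address passed to mprotect page-aligned across extensions, which mprotect requires. The following standalone C program is a minimal sketch of that pattern, assuming Linux/glibc; the ceiling2 helper, the PROT_READ | PROT_WRITE | PROT_EXEC flags, and the error handling are illustrative stand-ins for the heap's ceiling2, __map_prot, and abort-free error path, not the Cforall implementation.

// Minimal sketch (not the Cforall code): grow the break by whole pages, then
// make the new storage executable, mirroring the now-enabled mprotect in extend().
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>     // sbrk, sysconf
#include <sys/mman.h>   // mprotect, PROT_*

// round n up to a multiple of align, where align is a power of 2
static size_t ceiling2( size_t n, size_t align ) {
	return (n + align - 1) & ~(align - 1);
}

int main( void ) {
	size_t page_size = (size_t)sysconf( _SC_PAGESIZE );

	// One-time: pad the break to a page boundary so every later extension
	// starts on a page, keeping mprotect's address argument page-aligned.
	uintptr_t brk0 = (uintptr_t)sbrk( 0 );
	size_t pad = ceiling2( brk0, page_size ) - brk0;
	if ( pad != 0 && sbrk( pad ) == (void *)-1 ) { perror( "sbrk pad" ); return EXIT_FAILURE; }

	// Extension: round the request up to whole pages (the changeset switches
	// this rounding from libAlign() to __page_size) and advance the break.
	size_t request = 1000;                          // arbitrary demo request
	size_t increase = ceiling2( request, page_size );
	void * newStorage = sbrk( increase );
	if ( newStorage == (void *)-1 ) { perror( "sbrk" ); return EXIT_FAILURE; }

	// Make the new pages read/write/execute so code (e.g., thunks) placed in
	// them can run; extend() now does the equivalent with __map_prot.
	if ( mprotect( newStorage, increase, PROT_READ | PROT_WRITE | PROT_EXEC ) != 0 ) {
		fprintf( stderr, "mprotect failure, addr:%p size:%zu, errno:%d\n", newStorage, increase, errno );
		return EXIT_FAILURE;
	} // if

	memset( newStorage, 0, increase );              // storage is usable
	printf( "extended heap by %zu bytes at %p\n", increase, newStorage );
	return 0;
} // main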
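The "odd alignment implies fake header" comment corrected above, together with the removed malloc_*_set$ routines, relies on one tagging scheme: real alignments are powers of two and therefore even, so the low bit of the stored alignment value is free to flag a fake header, and masking the bit away recovers the alignment. The sketch below only illustrates that bit trick; MarkAlignmentBit is the name used in the diff, while the other helpers are hypothetical names for the example.

// Illustrative sketch of the low-bit tag used by the heap's fake headers:
// alignments are powers of two (even), so bit 0 can mark "fake header".
// MarkAlignmentBit matches the diff; FakeHeaderBit/ClearAlignmentBit are made up.
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static size_t MarkAlignmentBit( size_t alignment ) { return alignment | 1; }    // set tag
static bool FakeHeaderBit( size_t field ) { return (field & 1) == 1; }          // test tag
static size_t ClearAlignmentBit( size_t field ) { return field & ~(size_t)1; }  // remove tag (the diff writes "& -2")

int main( void ) {
	size_t stored = MarkAlignmentBit( 64 );          // remember a 64-byte alignment
	assert( FakeHeaderBit( stored ) );               // odd value => fake header present
	assert( ClearAlignmentBit( stored ) == 64 );     // original alignment recovered
	printf( "stored %zu => alignment %zu\n", stored, ClearAlignmentBit( stored ) );
	return 0;
} // main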