Changeset d5d3a90
- Timestamp:
- Aug 3, 2020, 10:34:10 PM
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 60062be
- Parents:
- 1a39a5a
- File:
- 1 edited
Legend:
- Unmodified (no marker)
- Added (+)
- Removed (-)
libcfa/src/heap.cfa
--- r1a39a5a
+++ rd5d3a90

  // Created On       : Tue Dec 19 21:58:35 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Mon Jul 27 23:16:18 2020
- // Update Count     : 815
+ // Last Modified On : Mon Aug 3 19:01:22 2020
+ // Update Count     : 828
  //
…
  // Bucket size must be multiple of 16.
- // Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size.
+ // Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size.
+ // malloc(0) returns 0p, so no bucket is necessary for 0 bytes returning an address that can be freed.
  static const unsigned int bucketSizes[] @= {      // different bucket sizes
-     16, 32, 48, 64 + sizeof(HeapManager.Storage), // 4
-     96, 112, 128 + sizeof(HeapManager.Storage), // 3
+     16 + sizeof(HeapManager.Storage), 32 + sizeof(HeapManager.Storage), 48 + sizeof(HeapManager.Storage), 64 + sizeof(HeapManager.Storage), // 4
+     96 + sizeof(HeapManager.Storage), 112 + sizeof(HeapManager.Storage), 128 + sizeof(HeapManager.Storage), // 3
      160, 192, 224, 256 + sizeof(HeapManager.Storage), // 4
      320, 384, 448, 512 + sizeof(HeapManager.Storage), // 4
…
      #endif // __CFA_DEBUG__
      header = realHeader( header );                // backup from fake to real header
+ } else {
+     alignment = 0;
  } // if
  } // fakeHeader
…
      unlock( extlock );
      errno = ENOMEM;
-     return 0p;
+     // return 0p;
+     abort( "no memory" );
  } // if
  #ifdef __STATISTICS__
…
  block = (HeapManager.Storage *)extend( tsize );   // mutual exclusion on call
- if ( unlikely( block == 0p ) ) return 0p;
+ // if ( unlikely( block == 0p ) ) return 0p;
  #if BUCKETLOCK == SPINLOCK
  } else {
…
  static inline void * mallocNoStats( size_t size ) { // necessary for malloc statistics
-     //assert( heapManager.heapBegin != 0 );
-     if ( unlikely( heapManager.heapBegin == 0p ) ) heapManager{}; // called before memory_startup ?
+     verify( heapManager.heapBegin != 0 );         // called before memory_startup ?
+     if ( size == 0 ) return 0p;                   // 0 BYTE ALLOCATION RETURNS NULL POINTER
+
      #if __SIZEOF_POINTER__ == 8
      verify( size < ((typeof(size_t))1 << 48) );
      #endif // __SIZEOF_POINTER__ == 8
-     void * addr = doMalloc( size );
-     if ( unlikely( addr == 0p ) ) errno = ENOMEM; // POSIX
-     return addr;
+     return doMalloc( size );
  } // mallocNoStats
…
  static inline void * callocNoStats( size_t dim, size_t elemSize ) {
      size_t size = dim * elemSize;
+     if ( size == 0 ) return 0p;                   // 0 BYTE ALLOCATION RETURNS NULL POINTER
      char * addr = (char *)mallocNoStats( size );
-     if ( unlikely( addr == 0p ) ) return 0p;

      HeapManager.Storage.Header * header;
      HeapManager.FreeHeader * freeElem;
      size_t bsize, alignment;
-     bool mapped __attribute__(( unused )) = headers( "calloc", addr, header, freeElem, bsize, alignment );
+     #ifndef __CFA_DEBUG__
+     bool mapped =
+     #endif // __CFA_DEBUG__
+         headers( "calloc", addr, header, freeElem, bsize, alignment );
      #ifndef __CFA_DEBUG__
      // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero.
      if ( ! mapped )
      #endif // __CFA_DEBUG__
-         // Zero entire data space even when > than size => realloc without a new allocation and zero fill works.
-         // <-------00000000000000000000000000000000000000000000000000000> bsize (bucket size)
+         // <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined
          //  `-header`-addr                      `-size
-         memset( addr, '\0', bsize - sizeof(HeapManager.Storage) ); // set to zeros
+         memset( addr, '\0', size );               // set to zeros

      header->kind.real.blockSize |= 2;             // mark as zero filled
…
  static inline void * memalignNoStats( size_t alignment, size_t size ) { // necessary for malloc statistics
+     if ( size == 0 ) return 0p;                   // 0 BYTE ALLOCATION RETURNS NULL POINTER
+
      #ifdef __CFA_DEBUG__
      checkAlign( alignment );                      // check alignment
…
      // add sizeof(Storage) for fake header
      char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(HeapManager.Storage) );
-     if ( unlikely( addr == 0p ) ) return addr;

      // address in the block of the "next" alignment address
…
  static inline void * cmemalignNoStats( size_t alignment, size_t dim, size_t elemSize ) {
      size_t size = dim * elemSize;
+     if ( size == 0 ) return 0p;                   // 0 BYTE ALLOCATION RETURNS NULL POINTER
      char * addr = (char *)memalignNoStats( alignment, size );
-     if ( unlikely( addr == 0p ) ) return 0p;

      HeapManager.Storage.Header * header;
      HeapManager.FreeHeader * freeElem;
…
      // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
-     if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases
+     if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
      if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );
…
      if ( oalign == 0 && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size
          header->kind.real.blockSize &= -2;        // no alignment and turn off 0 fill
-         if ( size != odsize ) header->kind.real.size = size; // reset allocation size
+         header->kind.real.size = size;            // reset allocation size
          return oaddr;
      } // if

      // change size, DO NOT preserve STICKY PROPERTIES.
      free( oaddr );
-     void * naddr = mallocNoStats( size );         // create new area
-     return naddr;
+     return mallocNoStats( size );                 // create new area
  } // resize
…
      // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
-     if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases
+     if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
      if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size );
…
      size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
-     if ( size <= odsize && odsize <= size * 2 ) { // allow up to 50% wasted storage in smaller size
-         if ( size != odsize ) header->kind.real.size = size; // reset allocation size
+     size_t osize = header->kind.real.size;        // old allocation size
+     bool ozfill = (header->kind.real.blockSize & 2) != 0; // old allocation zero filled
+     if ( unlikely( size <= odsize ) && size > odsize / 2 ) { // allow up to 50% wasted storage
+         header->kind.real.size = size;            // reset allocation size
+         if ( unlikely( ozfill ) && size > osize ) { // previous request zero fill and larger ?
+             memset( (char *)oaddr + osize, (int)'\0', size - osize ); // initialize added storage
+         } // if
          return oaddr;
      } // if
…
      void * naddr;
-     if ( unlikely( oalign != 0 ) ) {              // previous request memalign?
-         if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
-             naddr = cmemalignNoStats( oalign, 1, size ); // create new aligned area
-         } else {
-             naddr = memalignNoStats( oalign, size ); // create new aligned area
-         } // if
-     } else {
-         if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
-             naddr = callocNoStats( 1, size );     // create new area
-         } else {
-             naddr = mallocNoStats( size );        // create new area
-         } // if
-     } // if
-     if ( unlikely( naddr == 0p ) ) return 0p;
+     if ( likely( oalign == 0 ) ) {                // previous request memalign?
+         naddr = mallocNoStats( size );            // create new area
+     } else {
+         naddr = memalignNoStats( oalign, size );  // create new aligned area
+     } // if

      headers( "realloc", naddr, header, freeElem, bsize, oalign );
-     size_t ndsize = dataStorage( bsize, naddr, header ); // data storage available in bucket
-     // To preserve prior fill, the entire bucket must be copied versus the size.
-     memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes
+     memcpy( naddr, oaddr, MIN( osize, size ) );   // copy bytes
      free( oaddr );
+
+     if ( unlikely( ozfill ) ) {                   // previous request zero fill ?
+         header->kind.real.blockSize |= 2;         // mark new request as zero filled
+         if ( size > osize ) {                     // previous request larger ?
+             memset( (char *)naddr + osize, (int)'\0', size - osize ); // initialize added storage
+         } // if
+     } // if
      return naddr;
  } // realloc
…
      if ( alignment < sizeof(void *) || ! libPow2( alignment ) ) return EINVAL; // check alignment
      * memptr = memalign( alignment, size );
-     if ( unlikely( * memptr == 0p ) ) return ENOMEM;
      return 0;
  } // posix_memalign
…
      // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
-     if ( unlikely( size == 0 ) ) { free( oaddr ); return memalignNoStats( nalign, size ); } // special cases
+     if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
      if ( unlikely( oaddr == 0p ) ) return memalignNoStats( nalign, size );
-
      if ( unlikely( nalign == 0 ) ) nalign = libAlign(); // reset alignment to minimum
…
      // change size

-     void * naddr;
-     if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill
-         naddr = cmemalignNoStats( nalign, 1, size ); // create new aligned area
-     } else {
-         naddr = memalignNoStats( nalign, size );  // create new aligned area
-     } // if
-
+     void * naddr = memalignNoStats( nalign, size ); // create new aligned area
      free( oaddr );
      return naddr;
…
      size_t bsize, oalign = 0;
      headers( "realloc", oaddr, header, freeElem, bsize, oalign );
-     size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket

      if ( oalign <= nalign && (uintptr_t)oaddr % nalign == 0 ) { // <= alignment and new alignment happens to match
…
      #endif // __STATISTICS__

+     size_t osize = header->kind.real.size;        // old allocation size
+     bool ozfill = (header->kind.real.blockSize & 2) != 0; // old allocation zero filled
+
      // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
-     if ( unlikely( size == 0 ) ) { free( oaddr ); return memalignNoStats( nalign, size ); } // special cases
+     if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
      if ( unlikely( oaddr == 0p ) ) return memalignNoStats( nalign, size );
…
      headers( "realloc", naddr, header, freeElem, bsize, oalign );
-     size_t ndsize = dataStorage( bsize, naddr, header ); // data storage available in bucket
-     // To preserve prior fill, the entire bucket must be copied versus the size.
-     memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes
+     memcpy( naddr, oaddr, MIN( osize, size ) );   // copy bytes
      free( oaddr );
+
+     if ( unlikely( ozfill ) ) {                   // previous request zero fill ?
+         header->kind.real.blockSize |= 2;         // mark new request as zero filled
+         if ( size > osize ) {                     // previous request larger ?
+             memset( (char *)naddr + osize, (int)'\0', size - osize ); // initialize added storage
+         } // if
+     } // if
      return naddr;
  } // realloc
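The realloc rewrite above replaces the old "copy the entire bucket to preserve prior fill" approach with an explicit post-copy step: capture the old size (osize) and zero-fill bit (ozfill) before freeing, copy only MIN(osize, size) bytes, then zero the added tail when the old allocation was zero filled. A minimal standalone sketch of that pattern in plain C (grow_zero_filled is a hypothetical helper for illustration, not part of heap.cfa):

    #include <stdlib.h>
    #include <string.h>

    // Hypothetical helper mirroring the new realloc tail-zeroing pattern:
    // osize = old allocation size, ozfill = old allocation zero filled.
    static void * grow_zero_filled( void * oaddr, size_t osize, int ozfill, size_t size ) {
        void * naddr = malloc( size );              // create new area
        if ( naddr == NULL ) return NULL;
        memcpy( naddr, oaddr, osize < size ? osize : size ); // copy bytes
        free( oaddr );
        if ( ozfill && size > osize ) {             // previous request zero fill and larger ?
            memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage
        }
        return naddr;
    }

Copying MIN(osize, size) instead of the bucket size avoids touching bytes past the request, which is what the revised calloc picture (U => undefined) documents.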
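The new bucket comment says small multiples of 16 and powers of 2 should map to the minimum required bucket, which is why sizeof(HeapManager.Storage) is now folded into the first seven table entries. A hedged sketch of the lookup arithmetic (HEADER is an assumed stand-in for sizeof(HeapManager.Storage); the real table and search live in heap.cfa):

    #include <stddef.h>

    #define HEADER 16                               // assumed stand-in for sizeof(HeapManager.Storage)

    static const unsigned int buckets[] = {
        16 + HEADER, 32 + HEADER, 48 + HEADER, 64 + HEADER,   // 4
        96 + HEADER, 112 + HEADER, 128 + HEADER,              // 3
    };

    // First bucket large enough for "size" data bytes plus the per-block header.
    static unsigned int bucket_for( size_t size ) {
        size_t total = size + HEADER;
        for ( size_t i = 0; i < sizeof(buckets) / sizeof(buckets[0]); i += 1 ) {
            if ( buckets[i] >= total ) return buckets[i];
        }
        return 0;                                   // larger requests use bigger buckets (elided)
    }

With the header folded in, bucket_for( 64 ) returns 64 + HEADER exactly; under the old table, a 16-, 32-, or 48-byte request plus its header overflowed into the next larger bucket.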
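Several routines now short-circuit zero-byte requests: mallocNoStats, callocNoStats, memalignNoStats, and cmemalignNoStats return 0p, and realloc/resize with size 0 free the old storage and return 0p. A usage sketch of the resulting behaviour (the expected results in the comments assume the CFA heap from this changeset; glibc's malloc(0), for instance, returns a unique non-null pointer instead):

    #include <stdio.h>
    #include <stdlib.h>

    int main( void ) {
        void * p = malloc( 0 );                     // CFA heap: returns the null pointer, no bucket used
        printf( "malloc(0)     -> %p\n", p );       // expected under CFA: (nil)

        p = malloc( 100 );
        p = realloc( p, 0 );                        // frees the storage and returns the null pointer
        printf( "realloc(p, 0) -> %p\n", p );       // expected under CFA: (nil)

        free( p );                                  // free of a null pointer is always a no-op
        return 0;
    }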