Changeset 71dfe49
- Timestamp:
- Aug 4, 2020, 12:53:48 PM (3 years ago)
- Branches:
- ADT, arm-eh, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 954821df
- Parents:
- 8395152 (diff), 2ff42f4 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - Files:
-
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/heap.cfa
r8395152 r71dfe49 10 10 // Created On : Tue Dec 19 21:58:35 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jul 27 23:16:18202013 // Update Count : 8 1512 // Last Modified On : Mon Aug 3 19:01:22 2020 13 // Update Count : 828 14 14 // 15 15 … … 222 222 223 223 // Bucket size must be multiple of 16. 224 // Powers of 2 are common allocation sizes, so make powers of 2 generate the minimum required size. 224 // Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size. 225 // malloc(0) returns 0p, so no bucket is necessary for 0 bytes returning an address that can be freed. 225 226 static const unsigned int bucketSizes[] @= { // different bucket sizes 226 16 , 32, 48, 64 + sizeof(HeapManager.Storage), // 4227 96 , 112, 128 + sizeof(HeapManager.Storage), // 3227 16 + sizeof(HeapManager.Storage), 32 + sizeof(HeapManager.Storage), 48 + sizeof(HeapManager.Storage), 64 + sizeof(HeapManager.Storage), // 4 228 96 + sizeof(HeapManager.Storage), 112 + sizeof(HeapManager.Storage), 128 + sizeof(HeapManager.Storage), // 3 228 229 160, 192, 224, 256 + sizeof(HeapManager.Storage), // 4 229 230 320, 384, 448, 512 + sizeof(HeapManager.Storage), // 4 … … 434 435 #endif // __CFA_DEBUG__ 435 436 header = realHeader( header ); // backup from fake to real header 437 } else { 438 alignment = 0; 436 439 } // if 437 440 } // fakeHeader … … 481 484 unlock( extlock ); 482 485 errno = ENOMEM; 483 return 0p; 486 // return 0p; 487 abort( "no memory" ); 484 488 } // if 485 489 #ifdef __STATISTICS__ … … 550 554 551 555 block = (HeapManager.Storage *)extend( tsize ); // mutual exclusion on call 552 if ( unlikely( block == 0p ) ) return 0p;556 // if ( unlikely( block == 0p ) ) return 0p; 553 557 #if BUCKETLOCK == SPINLOCK 554 558 } else { … … 746 750 747 751 static inline void * mallocNoStats( size_t size ) { // necessary for malloc statistics 748 //assert( heapManager.heapBegin != 0 ); 749 if ( unlikely( 
heapManager.heapBegin == 0p ) ) heapManager{}; // called before memory_startup ? 752 verify( heapManager.heapBegin != 0 ); // called before memory_startup ? 753 if ( size == 0 ) return 0p; // 0 BYTE ALLOCATION RETURNS NULL POINTER 754 750 755 #if __SIZEOF_POINTER__ == 8 751 756 verify( size < ((typeof(size_t))1 << 48) ); 752 757 #endif // __SIZEOF_POINTER__ == 8 753 void * addr = doMalloc( size ); 754 if ( unlikely( addr == 0p ) ) errno = ENOMEM; // POSIX 755 return addr; 758 return doMalloc( size ); 756 759 } // mallocNoStats 757 760 … … 759 762 static inline void * callocNoStats( size_t dim, size_t elemSize ) { 760 763 size_t size = dim * elemSize; 764 if ( size == 0 ) return 0p; // 0 BYTE ALLOCATION RETURNS NULL POINTER 761 765 char * addr = (char *)mallocNoStats( size ); 762 if ( unlikely( addr == 0p ) ) return 0p;763 766 764 767 HeapManager.Storage.Header * header; 765 768 HeapManager.FreeHeader * freeElem; 766 769 size_t bsize, alignment; 767 bool mapped __attribute__(( unused )) = headers( "calloc", addr, header, freeElem, bsize, alignment ); 770 #ifndef __CFA_DEBUG__ 771 bool mapped = 772 #endif // __CFA_DEBUG__ 773 headers( "calloc", addr, header, freeElem, bsize, alignment ); 768 774 #ifndef __CFA_DEBUG__ 769 775 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. 770 776 if ( ! mapped ) 771 777 #endif // __CFA_DEBUG__ 772 // Zero entire data space even when > than size => realloc without a new allocation and zero fill works. 
773 // <-------00000000000000000000000000000000000000000000000000000> bsize (bucket size) 778 // <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined 774 779 // `-header`-addr `-size 775 memset( addr, '\0', bsize - sizeof(HeapManager.Storage) );// set to zeros780 memset( addr, '\0', size ); // set to zeros 776 781 777 782 header->kind.real.blockSize |= 2; // mark as zero filled … … 781 786 782 787 static inline void * memalignNoStats( size_t alignment, size_t size ) { // necessary for malloc statistics 788 if ( size == 0 ) return 0p; // 0 BYTE ALLOCATION RETURNS NULL POINTER 789 783 790 #ifdef __CFA_DEBUG__ 784 791 checkAlign( alignment ); // check alignment … … 798 805 // add sizeof(Storage) for fake header 799 806 char * addr = (char *)mallocNoStats( size + alignment - libAlign() + sizeof(HeapManager.Storage) ); 800 if ( unlikely( addr == 0p ) ) return addr;801 807 802 808 // address in the block of the "next" alignment address … … 819 825 static inline void * cmemalignNoStats( size_t alignment, size_t dim, size_t elemSize ) { 820 826 size_t size = dim * elemSize; 827 if ( size == 0 ) return 0p; // 0 BYTE ALLOCATION RETURNS NULL POINTER 821 828 char * addr = (char *)memalignNoStats( alignment, size ); 822 if ( unlikely( addr == 0p ) ) return 0p; 829 823 830 HeapManager.Storage.Header * header; 824 831 HeapManager.FreeHeader * freeElem; … … 890 897 891 898 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 
892 if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases899 if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases 893 900 if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size ); 894 901 … … 902 909 if ( oalign == 0 && size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size 903 910 header->kind.real.blockSize &= -2; // no alignment and turn off 0 fill 904 if ( size != odsize ) header->kind.real.size = size;// reset allocation size911 header->kind.real.size = size; // reset allocation size 905 912 return oaddr; 906 913 } // if … … 908 915 // change size, DO NOT preserve STICKY PROPERTIES. 909 916 free( oaddr ); 910 void * naddr = mallocNoStats( size ); // create new area 911 return naddr; 917 return mallocNoStats( size ); // create new area 912 918 } // resize 913 919 … … 922 928 923 929 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 924 if ( unlikely( size == 0 ) ) { free( oaddr ); return mallocNoStats( size ); } // special cases930 if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases 925 931 if ( unlikely( oaddr == 0p ) ) return mallocNoStats( size ); 926 932 … … 931 937 932 938 size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket 933 if ( size <= odsize && odsize <= size * 2 ) { // allow up to 50% wasted storage in smaller size 934 if ( size != odsize ) header->kind.real.size = size; // reset allocation size 939 size_t osize = header->kind.real.size; // old allocation size 940 bool ozfill = (header->kind.real.blockSize & 2) != 0; // old allocation zero filled 941 if ( unlikely( size <= odsize ) && size > odsize / 2 ) { // allow up to 50% wasted storage 942 header->kind.real.size = size; // reset allocation size 943 if ( unlikely( ozfill ) && size > osize ) { // previous request zero fill and larger ? 
944 memset( (char *)oaddr + osize, (int)'\0', size - osize ); // initialize added storage 945 } // if 935 946 return oaddr; 936 947 } // if … … 939 950 940 951 void * naddr; 941 if ( unlikely( oalign != 0 ) ) { // previous request memalign? 942 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill 943 naddr = cmemalignNoStats( oalign, 1, size ); // create new aligned area 944 } else { 945 naddr = memalignNoStats( oalign, size ); // create new aligned area 952 if ( likely( oalign == 0 ) ) { // previous request memalign? 953 naddr = mallocNoStats( size ); // create new area 954 } else { 955 naddr = memalignNoStats( oalign, size ); // create new aligned area 956 } // if 957 958 headers( "realloc", naddr, header, freeElem, bsize, oalign ); 959 memcpy( naddr, oaddr, MIN( osize, size ) ); // copy bytes 960 free( oaddr ); 961 962 if ( unlikely( ozfill ) ) { // previous request zero fill ? 963 header->kind.real.blockSize |= 2; // mark new request as zero filled 964 if ( size > osize ) { // previous request larger ? 965 memset( (char *)naddr + osize, (int)'\0', size - osize ); // initialize added storage 946 966 } // if 947 } else { 948 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill 949 naddr = callocNoStats( 1, size ); // create new area 950 } else { 951 naddr = mallocNoStats( size ); // create new area 952 } // if 953 } // if 954 if ( unlikely( naddr == 0p ) ) return 0p; 955 956 headers( "realloc", naddr, header, freeElem, bsize, oalign ); 957 size_t ndsize = dataStorage( bsize, naddr, header ); // data storage avilable in bucket 958 // To preserve prior fill, the entire bucket must be copied versus the size. 959 memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes 960 free( oaddr ); 967 } // if 961 968 return naddr; 962 969 } // realloc … … 1008 1015 if ( alignment < sizeof(void *) || ! 
libPow2( alignment ) ) return EINVAL; // check alignment 1009 1016 * memptr = memalign( alignment, size ); 1010 if ( unlikely( * memptr == 0p ) ) return ENOMEM;1011 1017 return 0; 1012 1018 } // posix_memalign … … 1206 1212 1207 1213 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 1208 if ( unlikely( size == 0 ) ) { free( oaddr ); return memalignNoStats( nalign, size ); } // special cases1214 if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases 1209 1215 if ( unlikely( oaddr == 0p ) ) return memalignNoStats( nalign, size ); 1210 1211 1216 1212 1217 if ( unlikely( nalign == 0 ) ) nalign = libAlign(); // reset alignment to minimum … … 1235 1240 // change size 1236 1241 1237 void * naddr; 1238 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill 1239 naddr = cmemalignNoStats( nalign, 1, size ); // create new aligned area 1240 } else { 1241 naddr = memalignNoStats( nalign, size ); // create new aligned area 1242 } // if 1243 1242 void * naddr = memalignNoStats( nalign, size ); // create new aligned area 1244 1243 free( oaddr ); 1245 1244 return naddr; … … 1258 1257 size_t bsize, oalign = 0; 1259 1258 headers( "realloc", oaddr, header, freeElem, bsize, oalign ); 1260 size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket1261 1259 1262 1260 if ( oalign <= nalign && (uintptr_t)oaddr % nalign == 0 ) { // <= alignment and new alignment happens to match … … 1274 1272 #endif // __STATISTICS__ 1275 1273 1274 size_t osize = header->kind.real.size; // old allocation size 1275 bool ozfill = (header->kind.real.blockSize & 2) != 0; // old allocation zero filled 1276 1276 1277 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 
1277 if ( unlikely( size == 0 ) ) { free( oaddr ); return memalignNoStats( nalign, size ); } // special cases1278 if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases 1278 1279 if ( unlikely( oaddr == 0p ) ) return memalignNoStats( nalign, size ); 1279 1280 … … 1286 1287 1287 1288 headers( "realloc", naddr, header, freeElem, bsize, oalign ); 1288 size_t ndsize = dataStorage( bsize, naddr, header ); // data storage available in bucket 1289 // To preserve prior fill, the entire bucket must be copied versus the size. 1290 memcpy( naddr, oaddr, MIN( odsize, ndsize ) ); // copy bytes 1289 memcpy( naddr, oaddr, MIN( osize, size ) ); // copy bytes 1291 1290 free( oaddr ); 1291 1292 if ( unlikely( ozfill ) ) { // previous request zero fill ? 1293 header->kind.real.blockSize |= 2; // mark new request as zero filled 1294 if ( size > osize ) { // previous request larger ? 1295 memset( (char *)naddr + osize, (int)'\0', size - osize ); // initialize added storage 1296 } // if 1297 } // if 1292 1298 return naddr; 1293 1299 } // realloc -
libcfa/src/stdlib.hfa
r8395152 r71dfe49 10 10 // Created On : Thu Jan 28 17:12:35 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T ue Jul 21 07:58:05202013 // Update Count : 4 7512 // Last Modified On : Thu Jul 30 16:14:58 2020 13 // Update Count : 490 14 14 // 15 15 … … 71 71 T * resize( T * ptr, size_t size ) { // CFA resize, eliminate return-type cast 72 72 $RE_SPECIALS( ptr, size, malloc, memalign ); 73 return (T *)(void *)resize( (void *)ptr, size ); // CFA resize 73 if ( _Alignof(T) <= libAlign() ) return (T *)(void *)resize( (void *)ptr, size ); // CFA resize 74 else return (T *)(void *)resize( (void *)ptr, _Alignof(T), size ); // CFA resize 74 75 } // resize 75 76 76 77 T * realloc( T * ptr, size_t size ) { // CFA realloc, eliminate return-type cast 77 78 $RE_SPECIALS( ptr, size, malloc, memalign ); 78 return (T *)(void *)realloc( (void *)ptr, size ); // C realloc 79 if ( _Alignof(T) <= libAlign() ) return (T *)(void *)realloc( (void *)ptr, size ); // C realloc 80 else return (T *)(void *)realloc( (void *)ptr, _Alignof(T), size ); // CFA realloc 79 81 } // realloc 80 82 … … 121 123 forall( dtype S | sized(S) ) 122 124 T * alloc( S ptr[], size_t dim = 1 ) { // singleton/array resize 123 size_t len = malloc_usable_size( ptr ); // current bucket size 124 if ( sizeof(T) * dim > len ) { // not enough space ? 
125 T * temp = alloc( dim ); // new storage 126 free( ptr ); // free old storage 127 return temp; 128 } else { 129 return (T *)ptr; 130 } // if 131 } // alloc 132 133 T * alloc( T ptr[], size_t dim, bool copy = true ) { 125 return resize( (T *)ptr, dim * sizeof(T) ); // CFA resize 126 } // alloc 127 128 T * alloc( T ptr[], size_t dim = 1, bool copy = true ) { 134 129 if ( copy ) { 135 130 return realloc( ptr, dim * sizeof(T) ); // CFA realloc … … 168 163 memset( (char *)nptr + osize, (int)fill, nsize - osize ); // initialize added storage 169 164 } // if 170 return (T *)nptr;165 return nptr; 171 166 } // alloc_set 172 167 … … 181 176 } // for 182 177 } // if 183 return (T *)nptr;178 return nptr; 184 179 } // alloc_align_set 185 180 } // distribution … … 195 190 196 191 T * alloc_align( T * ptr, size_t align ) { // aligned realloc array 197 return (T *)(void *)realloc( (void *)ptr, align, sizeof(T) ); // CFA realloc192 return (T *)(void *)realloc( (void *)ptr, align, sizeof(T) ); // CFA C realloc 198 193 } // alloc_align 199 194 … … 232 227 size_t osize = malloc_size( ptr ); // current allocation 233 228 size_t nsize = dim * sizeof(T); // new allocation 234 T * nptr = alloc_align( ptr, align, nsize ); // CFA alloc_align229 T * nptr = alloc_align( ptr, align, nsize ); 235 230 if ( nsize > osize ) { // larger ? 236 231 memset( (char *)nptr + osize, (int)fill, nsize - osize ); // initialize added storage 237 232 } // if 238 return (T *)nptr;233 return nptr; 239 234 } // alloc_align_set 240 235 … … 243 238 size_t nsize = dim * sizeof(T); // new allocation 244 239 size_t ndim = nsize / sizeof(T); // new dimension 245 T * nptr = alloc_align( ptr, align, nsize ); // CFA alloc_align240 T * nptr = alloc_align( ptr, align, nsize ); 246 241 if ( ndim > odim ) { // larger ? 247 242 for ( i; odim ~ ndim ) { … … 249 244 } // for 250 245 } // if 251 return (T *)nptr;246 return nptr; 252 247 } // alloc_align_set 253 248 } // distribution -
tests/heap.cfa
r8395152 r71dfe49 10 10 // Created On : Tue Nov 6 17:54:56 2018 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sun Nov 24 12:34:51 201913 // Update Count : 2812 // Last Modified On : Tue Aug 4 06:36:17 2020 13 // Update Count : 56 14 14 // 15 15 … … 75 75 size_t s = (i + 1) * 20; 76 76 char * area = (char *)malloc( s ); 77 if ( area == 0p ) abort( "malloc/free out of memory" );78 77 area[0] = '\345'; area[s - 1] = '\345'; // fill first/last 79 78 area[malloc_usable_size( area ) - 1] = '\345'; // fill ultimate byte … … 84 83 size_t s = i + 1; // +1 to make initialization simpler 85 84 locns[i] = (char *)malloc( s ); 86 if ( locns[i] == 0p ) abort( "malloc/free out of memory" );87 85 locns[i][0] = '\345'; locns[i][s - 1] = '\345'; // fill first/last 88 86 locns[i][malloc_usable_size( locns[i] ) - 1] = '\345'; // fill ultimate byte … … 100 98 size_t s = i + default_mmap_start(); // cross over point 101 99 char * area = (char *)malloc( s ); 102 if ( area == 0p ) abort( "malloc/free out of memory" );103 100 area[0] = '\345'; area[s - 1] = '\345'; // fill first/last 104 101 area[malloc_usable_size( area ) - 1] = '\345'; // fill ultimate byte … … 109 106 size_t s = i + default_mmap_start(); // cross over point 110 107 locns[i] = (char *)malloc( s ); 111 if ( locns[i] == 0p ) abort( "malloc/free out of memory" );112 108 locns[i][0] = '\345'; locns[i][s - 1] = '\345'; // fill first/last 113 109 locns[i][malloc_usable_size( locns[i] ) - 1] = '\345'; // fill ultimate byte … … 125 121 size_t s = (i + 1) * 20; 126 122 char * area = (char *)calloc( 5, s ); 127 if ( area == 0p ) abort( "calloc/free out of memory" );128 123 if ( area[0] != '\0' || area[s - 1] != '\0' || 129 area[malloc_ usable_size( area ) - 1] != '\0' ||124 area[malloc_size( area ) - 1] != '\0' || 130 125 ! 
malloc_zero_fill( area ) ) abort( "calloc/free corrupt storage1" ); 131 126 area[0] = '\345'; area[s - 1] = '\345'; // fill first/last … … 137 132 size_t s = i + 1; 138 133 locns[i] = (char *)calloc( 5, s ); 139 if ( locns[i] == 0p ) abort( "calloc/free out of memory" );140 134 if ( locns[i][0] != '\0' || locns[i][s - 1] != '\0' || 141 locns[i][malloc_ usable_size( locns[i] ) - 1] != '\0' ||135 locns[i][malloc_size( locns[i] ) - 1] != '\0' || 142 136 ! malloc_zero_fill( locns[i] ) ) abort( "calloc/free corrupt storage2" ); 143 137 locns[i][0] = '\345'; locns[i][s - 1] = '\345'; // fill first/last … … 156 150 size_t s = i + default_mmap_start(); // cross over point 157 151 char * area = (char *)calloc( 1, s ); 158 if ( area == 0p ) abort( "calloc/free out of memory" );159 152 if ( area[0] != '\0' || area[s - 1] != '\0' ) abort( "calloc/free corrupt storage4.1" ); 160 if ( area[malloc_ usable_size( area ) - 1] != '\0' ) abort( "calloc/free corrupt storage4.2" );153 if ( area[malloc_size( area ) - 1] != '\0' ) abort( "calloc/free corrupt storage4.2" ); 161 154 if ( ! malloc_zero_fill( area ) ) abort( "calloc/free corrupt storage4.3" ); 162 155 area[0] = '\345'; area[s - 1] = '\345'; // fill first/last … … 168 161 size_t s = i + default_mmap_start(); // cross over point 169 162 locns[i] = (char *)calloc( 1, s ); 170 if ( locns[i] == 0p ) abort( "calloc/free out of memory" );171 163 if ( locns[i][0] != '\0' || locns[i][s - 1] != '\0' || 172 locns[i][malloc_ usable_size( locns[i] ) - 1] != '\0' ||164 locns[i][malloc_size( locns[i] ) - 1] != '\0' || 173 165 ! 
malloc_zero_fill( locns[i] ) ) abort( "calloc/free corrupt storage5" ); 174 166 locns[i][0] = '\345'; locns[i][s - 1] = '\345'; // fill first/last … … 188 180 for ( s; 1 ~ NoOfAllocs ) { // allocation of size 0 can return null 189 181 char * area = (char *)memalign( a, s ); 190 if ( area == 0p ) abort( "memalign/free out of memory" );191 182 //sout | i | area; 192 183 if ( (size_t)area % a != 0 || malloc_alignment( area ) != a ) { // check for initial alignment … … 206 197 size_t s = i + default_mmap_start(); // cross over point 207 198 char * area = (char *)memalign( a, s ); 208 if ( area == 0p ) abort( "memalign/free out of memory" );209 199 //sout | i | area; 210 200 if ( (size_t)area % a != 0 || malloc_alignment( area ) != a ) { // check for initial alignment … … 222 212 // initial N byte allocation 223 213 char * area = (char *)calloc( 5, i ); 224 if ( area == 0p ) abort( "calloc/realloc/free out of memory" );225 214 if ( area[0] != '\0' || area[i - 1] != '\0' || 226 area[malloc_ usable_size( area ) - 1] != '\0' ||215 area[malloc_size( area ) - 1] != '\0' || 227 216 ! malloc_zero_fill( area ) ) abort( "calloc/realloc/free corrupt storage1" ); 228 217 … … 230 219 for ( s; i ~ 256 * 1024 ~ 26 ) { // start at initial memory request 231 220 area = (char *)realloc( area, s ); // attempt to reuse storage 232 if ( area == 0p ) abort( "calloc/realloc/free out of memory" );233 221 if ( area[0] != '\0' || area[s - 1] != '\0' || 234 area[malloc_ usable_size( area ) - 1] != '\0' ||222 area[malloc_size( area ) - 1] != '\0' || 235 223 ! 
malloc_zero_fill( area ) ) abort( "calloc/realloc/free corrupt storage2" ); 236 224 } // for … … 244 232 size_t s = i + default_mmap_start(); // cross over point 245 233 char * area = (char *)calloc( 1, s ); 246 if ( area == 0p ) abort( "calloc/realloc/free out of memory" );234 // if ( area == 0p ) abort( "calloc/realloc/free out of memory" ); 247 235 if ( area[0] != '\0' || area[s - 1] != '\0' || 248 area[malloc_usable_size( area ) - 1] != '\0' || 249 ! malloc_zero_fill( area ) ) abort( "calloc/realloc/free corrupt storage1" ); 236 area[malloc_size( area ) - 1] != '\0' || 237 ! malloc_zero_fill( area ) ) //abort( "calloc/realloc/free corrupt storage3" ); 238 printf( "C %zd %d %d %d %d\n", s, area[0] != '\0', area[s - 1] != '\0', area[malloc_size( area ) - 1] != '\0', ! malloc_zero_fill( area ) ); 250 239 251 240 // Do not start this loop index at 0 because realloc of 0 bytes frees the storage. 252 241 for ( r; i ~ 256 * 1024 ~ 26 ) { // start at initial memory request 253 242 area = (char *)realloc( area, r ); // attempt to reuse storage 254 if ( area == 0p ) abort( "calloc/realloc/free out of memory" );243 // if ( area == 0p ) abort( "calloc/realloc/free out of memory" ); 255 244 if ( area[0] != '\0' || area[r - 1] != '\0' || 256 area[malloc_ usable_size( area ) - 1] != '\0' ||257 ! malloc_zero_fill( area ) ) abort( "calloc/realloc/free corrupt storage 2" );245 area[malloc_size( area ) - 1] != '\0' || 246 ! malloc_zero_fill( area ) ) abort( "calloc/realloc/free corrupt storage4" ); 258 247 } // for 259 248 free( area ); … … 266 255 // initial N byte allocation 267 256 char * area = (char *)memalign( a, amount ); // aligned N-byte allocation 268 if ( area == 0p ) abort( "memalign/realloc/free out of memory" ); // no storage ?257 // if ( area == 0p ) abort( "memalign/realloc/free out of memory" ); // no storage ? 
269 258 //sout | alignments[a] | area; 270 259 if ( (size_t)area % a != 0 || malloc_alignment( area ) != a ) { // check for initial alignment … … 277 266 if ( area[0] != '\345' || area[s - 2] != '\345' ) abort( "memalign/realloc/free corrupt storage" ); 278 267 area = (char *)realloc( area, s ); // attempt to reuse storage 279 if ( area == 0p ) abort( "memalign/realloc/free out of memory" ); // no storage ?280 268 //sout | i | area; 281 269 if ( (size_t)area % a != 0 ) { // check for initial alignment … … 293 281 for ( s; 1 ~ limit ) { // allocation of size 0 can return null 294 282 char * area = (char *)cmemalign( a, 1, s ); 295 if ( area == 0p ) abort( "cmemalign/free out of memory" );296 283 //sout | i | area; 297 284 if ( (size_t)area % a != 0 || malloc_alignment( area ) != a ) { // check for initial alignment … … 299 286 } // if 300 287 if ( area[0] != '\0' || area[s - 1] != '\0' || 301 area[malloc_ usable_size( area ) - 1] != '\0' ||288 area[malloc_size( area ) - 1] != '\0' || 302 289 ! malloc_zero_fill( area ) ) abort( "cmemalign/free corrupt storage" ); 303 290 area[0] = '\345'; area[s - 1] = '\345'; // fill first/last byte … … 312 299 // initial N byte allocation 313 300 char * area = (char *)cmemalign( a, 1, amount ); // aligned N-byte allocation 314 if ( area == 0p ) abort( "cmemalign/realloc/free out of memory" ); // no storage ?315 301 //sout | alignments[a] | area; 316 302 if ( (size_t)area % a != 0 || malloc_alignment( area ) != a ) { // check for initial alignment … … 318 304 } // if 319 305 if ( area[0] != '\0' || area[amount - 1] != '\0' || 320 area[malloc_ usable_size( area ) - 1] != '\0' ||306 area[malloc_size( area ) - 1] != '\0' || 321 307 ! 
malloc_zero_fill( area ) ) abort( "cmemalign/realloc/free corrupt storage1" ); 322 308 area[0] = '\345'; area[amount - 2] = '\345'; // fill first/penultimate byte … … 326 312 if ( area[0] != '\345' || area[s - 2] != '\345' ) abort( "cmemalign/realloc/free corrupt storage2" ); 327 313 area = (char *)realloc( area, s ); // attempt to reuse storage 328 if ( area == 0p ) abort( "cmemalign/realloc/free out of memory" ); // no storage ?329 314 //sout | i | area; 330 315 if ( (size_t)area % a != 0 || malloc_alignment( area ) != a ) { // check for initial alignment 331 316 abort( "cmemalign/realloc/free bad alignment %p", area ); 332 317 } // if 333 if ( area[ s - 1] != '\0' || area[s - 1] != '\0' ||334 area[malloc_ usable_size( area ) - 1] != '\0' ||318 if ( area[0] != '\345' || area[s - 1] != '\0' || 319 area[malloc_size( area ) - 1] != '\0' || 335 320 ! malloc_zero_fill( area ) ) abort( "cmemalign/realloc/free corrupt storage3" ); 336 321 area[s - 1] = '\345'; // fill last byte … … 345 330 // initial N byte allocation 346 331 char * area = (char *)memalign( a, amount ); // aligned N-byte allocation 347 if ( area == 0p ) abort( "memalign/realloc with align/free out of memory" ); // no storage ?348 332 //sout | alignments[a] | area | endl; 349 333 if ( (size_t)area % a != 0 || malloc_alignment( area ) != a ) { // check for initial alignment … … 356 340 if ( area[0] != '\345' || area[s - 2] != '\345' ) abort( "memalign/realloc/free corrupt storage" ); 357 341 area = (char *)realloc( area, a * 2, s ); // attempt to reuse storage 358 if ( area == 0p ) abort( "memalign/realloc with align/free out of memory" ); // no storage ?359 342 //sout | i | area | endl; 360 343 if ( (size_t)area % a * 2 != 0 ) { // check for initial alignment … … 371 354 for ( size_t a = libAlign() + libAlign(); a <= limit; a += a ) { // generate powers of 2 372 355 // initial N byte allocation 373 char *area = (char *)cmemalign( a, 1, amount ); // aligned N-byte allocation 374 if ( area == 0p ) abort( 
"cmemalign/realloc with align/free out of memory" ); // no storage ? 356 char * area = (char *)cmemalign( a, 1, amount ); // aligned N-byte allocation 375 357 //sout | alignments[a] | area | endl; 376 358 if ( (size_t)area % a != 0 || malloc_alignment( area ) != a ) { // check for initial alignment … … 378 360 } // if 379 361 if ( area[0] != '\0' || area[amount - 1] != '\0' || 380 area[malloc_ usable_size( area ) - 1] != '\0' ||362 area[malloc_size( area ) - 1] != '\0' || 381 363 ! malloc_zero_fill( area ) ) abort( "cmemalign/realloc with align/free corrupt storage1" ); 382 364 area[0] = '\345'; area[amount - 2] = '\345'; // fill first/penultimate byte … … 386 368 if ( area[0] != '\345' || area[s - 2] != '\345' ) abort( "cmemalign/realloc with align/free corrupt storage2" ); 387 369 area = (char *)realloc( area, a * 2, s ); // attempt to reuse storage 388 if ( area == 0p ) abort( "cmemalign/realloc with align/free out of memory" ); // no storage ?389 370 //sout | i | area | endl; 390 371 if ( (size_t)area % a * 2 != 0 || malloc_alignment( area ) != a * 2 ) { // check for initial alignment 391 abort( "cmemalign/realloc with align/free bad alignment %p % jd %jd", area, malloc_alignment( area ), a * 2 );372 abort( "cmemalign/realloc with align/free bad alignment %p %zd %zd", area, malloc_alignment( area ), a * 2 ); 392 373 } // if 393 374 if ( area[s - 1] != '\0' || area[s - 1] != '\0' || 394 area[malloc_ usable_size( area ) - 1] != '\0' ||375 area[malloc_size( area ) - 1] != '\0' || 395 376 ! malloc_zero_fill( area ) ) abort( "cmemalign/realloc/free corrupt storage3" ); 396 377 area[s - 1] = '\345'; // fill last byte
Note: See TracChangeset
for help on using the changeset viewer.