Changeset 6c144d8
- Timestamp:
- Aug 6, 2020, 3:34:47 PM (4 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 1eb239e4, c8e4b23d
- Parents:
- d3a518c (diff), 74cfe054 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Files:
-
- 5 edited
- 2 moved
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/exception.hfa
rd3a518c r6c144d8 160 160 161 161 #define _FORALL_CTOR0_DECLARATION(exception_name, assertions, parameters) \ 162 forall(_UNPACK assertions | VTABLE_ASSERTION(exception_name, assertions, parameters) ) \ 163 /*| { VTABLE_TYPE(exception_name) parameters VTABLE_NAME(exception_name); } ) */ \ 162 forall(_UNPACK assertions | VTABLE_ASSERTION(exception_name, parameters) ) \ 164 163 void ?{}(exception_name parameters & this) 165 164 -
libcfa/src/heap.cfa
rd3a518c r6c144d8 10 10 // Created On : Tue Dec 19 21:58:35 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Aug 3 19:01:22202013 // Update Count : 8 2812 // Last Modified On : Wed Aug 5 22:21:27 2020 13 // Update Count : 853 14 14 // 15 15 … … 80 80 }; 81 81 82 size_t default_mmap_start() __attribute__(( weak )) { 83 return __CFA_DEFAULT_MMAP_START__; 84 } // default_mmap_start 85 82 86 size_t default_heap_expansion() __attribute__(( weak )) { 83 87 return __CFA_DEFAULT_HEAP_EXPANSION__; 84 88 } // default_heap_expansion 85 89 86 size_t default_mmap_start() __attribute__(( weak )) { 87 return __CFA_DEFAULT_MMAP_START__; 88 } // default_mmap_start 90 bool default_heap_exhausted() __attribute__(( weak )) { // find and free some storage 91 // Returning false prints "out of heap memory" message and aborts. 92 return false; 93 } // default_heap_exhausted 89 94 90 95 … … 473 478 } // headers 474 479 480 #define NO_MEMORY_MSG "no heap memory available for allocating %zd new bytes." 475 481 476 482 static inline void * extend( size_t size ) with( heapManager ) { … … 481 487 482 488 size_t increase = libCeiling( size > heapExpand ? size : heapExpand, libAlign() ); 483 if ( sbrk( increase ) == (void *)-1 ) { 489 Succeed: 490 { 491 if ( sbrk( increase ) != (void *)-1 ) break Succeed; // succeed ? 492 if ( default_heap_exhausted() ) { // try fix 493 if ( sbrk( increase ) != (void *)-1 ) break Succeed; // succeed ? 
494 } // if 484 495 unlock( extlock ); 485 errno = ENOMEM; 486 // return 0p; 487 abort( "no memory" ); 488 } // if 496 abort( NO_MEMORY_MSG, size ); // give up 497 } 489 498 #ifdef __STATISTICS__ 490 499 sbrk_calls += 1; … … 554 563 555 564 block = (HeapManager.Storage *)extend( tsize ); // mutual exclusion on call 556 // if ( unlikely( block == 0p ) ) return 0p;557 565 #if BUCKETLOCK == SPINLOCK 558 566 } else { … … 570 578 __atomic_add_fetch( &mmap_storage, tsize, __ATOMIC_SEQ_CST ); 571 579 #endif // __STATISTICS__ 572 block = (HeapManager.Storage *)mmap( 0, tsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 ); 573 if ( block == (HeapManager.Storage *)MAP_FAILED ) { 580 Succeed: 581 { 582 block = (HeapManager.Storage *)mmap( 0, tsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 ); 583 if ( block != (HeapManager.Storage *)MAP_FAILED ) break Succeed; // succeed ? 584 if ( errno == ENOMEM && default_heap_exhausted() ) { // out of memory and try again ? 585 block = (HeapManager.Storage *)mmap( 0, tsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 ); 586 if ( block != (HeapManager.Storage *)MAP_FAILED ) break Succeed; // succeed ? 587 } // if 588 if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); 574 589 // Do not call strerror( errno ) as it may call malloc. 575 590 abort( "(HeapManager &)0x%p.doMalloc() : internal error, mmap failure, size:%zu error:%d.", &heapManager, tsize, errno ); 576 } // if591 } 577 592 #ifdef __CFA_DEBUG__ 578 593 // Set new memory to garbage so subsequent uninitialized usages might fail. … … 751 766 static inline void * mallocNoStats( size_t size ) { // necessary for malloc statistics 752 767 verify( heapManager.heapBegin != 0 ); // called before memory_startup ? 
753 if ( size == 0 ) return 0p;// 0 BYTE ALLOCATION RETURNS NULL POINTER768 if ( unlikely( size ) == 0 ) return 0p; // 0 BYTE ALLOCATION RETURNS NULL POINTER 754 769 755 770 #if __SIZEOF_POINTER__ == 8 … … 762 777 static inline void * callocNoStats( size_t dim, size_t elemSize ) { 763 778 size_t size = dim * elemSize; 764 if ( size == 0 ) return 0p;// 0 BYTE ALLOCATION RETURNS NULL POINTER779 if ( unlikely( size ) == 0 ) return 0p; // 0 BYTE ALLOCATION RETURNS NULL POINTER 765 780 char * addr = (char *)mallocNoStats( size ); 766 781 … … 773 788 headers( "calloc", addr, header, freeElem, bsize, alignment ); 774 789 #ifndef __CFA_DEBUG__ 790 775 791 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. 776 792 if ( ! mapped ) … … 786 802 787 803 static inline void * memalignNoStats( size_t alignment, size_t size ) { // necessary for malloc statistics 788 if ( size == 0 ) return 0p;// 0 BYTE ALLOCATION RETURNS NULL POINTER804 if ( unlikely( size ) == 0 ) return 0p; // 0 BYTE ALLOCATION RETURNS NULL POINTER 789 805 790 806 #ifdef __CFA_DEBUG__ … … 825 841 static inline void * cmemalignNoStats( size_t alignment, size_t dim, size_t elemSize ) { 826 842 size_t size = dim * elemSize; 827 if ( size == 0 ) return 0p;// 0 BYTE ALLOCATION RETURNS NULL POINTER843 if ( unlikely( size ) == 0 ) return 0p; // 0 BYTE ALLOCATION RETURNS NULL POINTER 828 844 char * addr = (char *)memalignNoStats( alignment, size ); 829 845 … … 831 847 HeapManager.FreeHeader * freeElem; 832 848 size_t bsize; 833 bool mapped __attribute__(( unused )) = headers( "cmemalign", addr, header, freeElem, bsize, alignment );834 849 #ifndef __CFA_DEBUG__ 850 bool mapped = 851 #endif // __CFA_DEBUG__ 852 headers( "cmemalign", addr, header, freeElem, bsize, alignment ); 853 #ifndef __CFA_DEBUG__ 854 835 855 // Mapped storage is zero filled, but in debug mode mapped memory is scrubbed in doMalloc, so it has to be reset to zero. 836 856 if ( ! 
mapped ) 837 857 #endif // __CFA_DEBUG__ 838 memset( addr, '\0', dataStorage( bsize, addr, header ) ); // set to zeros 858 // <-------0000000000000000000000000000UUUUUUUUUUUUUUUUUUUUUUUUU> bsize (bucket size) U => undefined 859 // `-header`-addr `-size 860 memset( addr, '\0', size ); // set to zeros 839 861 840 862 header->kind.real.blockSize |= 2; // mark as zero filled … … 1135 1157 1136 1158 headers( "malloc_usable_size", addr, header, freeElem, bsize, alignment ); 1137 return dataStorage( bsize, addr, header ); // data storage in bucket1159 return dataStorage( bsize, addr, header ); // data storage in bucket 1138 1160 } // malloc_usable_size 1139 1161 … … 1233 1255 if ( size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size 1234 1256 header->kind.real.blockSize &= -2; // turn off 0 fill 1235 if ( size != odsize ) header->kind.real.size = size;// reset allocation size1257 header->kind.real.size = size; // reset allocation size 1236 1258 return oaddr; 1237 1259 } // if 1238 1260 } // if 1239 1261 1240 // change size 1241 1242 void * naddr = memalignNoStats( nalign, size ); // create new aligned area 1262 // change size, DO NOT preserve STICKY PROPERTIES. 1243 1263 free( oaddr ); 1244 return naddr;1264 return memalignNoStats( nalign, size ); // create new aligned area 1245 1265 } // resize 1246 1266 … … 1272 1292 #endif // __STATISTICS__ 1273 1293 1274 size_t osize = header->kind.real.size; // old allocation size1275 bool ozfill = (header->kind.real.blockSize & 2) != 0; // old allocation zero filled1276 1277 1294 // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned. 
1278 1295 if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases 1279 1296 if ( unlikely( oaddr == 0p ) ) return memalignNoStats( nalign, size ); 1280 1297 1281 void * naddr; 1282 if ( unlikely( header->kind.real.blockSize & 2 ) ) { // previous request zero fill 1283 naddr = cmemalignNoStats( nalign, 1, size ); // create new aligned area 1284 } else { 1285 naddr = memalignNoStats( nalign, size ); // create new aligned area 1286 } // if 1298 size_t osize = header->kind.real.size; // old allocation size 1299 bool ozfill = (header->kind.real.blockSize & 2) != 0; // old allocation zero filled 1300 1301 void * naddr = memalignNoStats( nalign, size ); // create new aligned area 1287 1302 1288 1303 headers( "realloc", naddr, header, freeElem, bsize, oalign ); -
libcfa/src/heap.hfa
rd3a518c r6c144d8 10 10 // Created On : Tue May 26 11:23:55 2020 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jul 20 18:52:31202013 // Update Count : 1 112 // Last Modified On : Wed Aug 5 14:55:55 2020 13 // Update Count : 14 14 14 // 15 15 … … 18 18 size_t default_mmap_start(); // CFA extras 19 19 size_t default_heap_expansion(); 20 bool default_heap_exhausted(); // unsafe to call printf! 20 21 21 22 bool traceHeap(); -
tests/Makefile.am
rd3a518c r6c144d8 19 19 20 20 include $(top_srcdir)/src/cfa.make 21 22 DEFAULT_INCLUDES = -I${abs_srcdir} 21 23 22 24 debug=yes -
tests/Makefile.in
rd3a518c r6c144d8 133 133 am__v_at_0 = @ 134 134 am__v_at_1 = 135 DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)136 135 depcomp = $(SHELL) $(top_srcdir)/automake/depcomp 137 136 am__depfiles_maybe = depfiles … … 394 393 am__v_JAVAC_0 = @echo " JAVAC " $@; 395 394 am__v_JAVAC_1 = 395 DEFAULT_INCLUDES = -I${abs_srcdir} 396 396 debug = yes 397 397 installed = no -
tests/exceptions/polymorphic.cfa
rd3a518c r6c144d8 3 3 #include <exception.hfa> 4 4 5 FORALL_TRIVIAL_EXCEPTION _(proxy, (otype U3), (U3));6 FORALL_TRIVIAL_INSTANCE _(proxy, (otype U4), (U4))5 FORALL_TRIVIAL_EXCEPTION(proxy, (otype T), (T)); 6 FORALL_TRIVIAL_INSTANCE(proxy, (otype U), (U)) 7 7 8 8 const char * msg(proxy(int) * this) { return "proxy(int)"; }
Note: See TracChangeset
for help on using the changeset viewer.