Changeset 7a70fb2 for libcfa/src
- Timestamp: Dec 17, 2020, 10:34:27 AM (4 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 852ae0ea
- Parents: 72a3aff (diff), 28e88d7 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: libcfa/src
- Files: 13 edited
Legend: unchanged lines are prefixed with a space, added lines with +, removed lines with -; "… …" marks elided context.
libcfa/src/bits/collection.hfa
--- r72a3aff
+++ r7a70fb2
 #pragma once
+#include <stdio.h>                              // REMOVE THIS AFTER DEBUGGING
+
 
 struct Colable {
-    Colable * next;                             // next node in the list
+    struct Colable * next;                      // next node in the list
     // invariant: (next != 0) <=> listed()
 };
-
-inline {
+#ifdef __cforall
+static inline {
     // PUBLIC
 
… …
     }
 
-    // wrappers to make Collection have T
-    forall( dtype T ) {
-        T *& Next( T * n ) {
-            return (T *)Next( (Colable *)n );
-        }
+    // // wrappers to make Collection have T
+    // forall( dtype T ) {
+    //     T *& Next( T * n ) {
+    //         return (T *)Next( (Colable *)n );
+    //     }
 
-    bool listed( T * n ) {
-        return Next( (Colable *)n ) != 0p;
-    }
-    } // distribution
+    // bool listed( T * n ) {
+    //     return Next( (Colable *)n ) != 0p;
+    // }
+    // } // distribution
 } // distribution
… …
 };
 
-inline {
+static inline {
     // class invariant: root == 0 & empty() | *root in *this
     void ?{}( Collection &, const Collection & ) = void;  // no copy
… …
 };
 
-inline {
+static inline {
     void ?{}( ColIter & colIter ) with( colIter ) {
         curr = 0p;
… …
     } // distribution
 } // distribution
+#endif
libcfa/src/bits/containers.hfa
--- r72a3aff
+++ r7a70fb2
 #define __small_array_t(T) __small_array(T)
 #else
-#define __small_array_t(T) struct __small_array
+#define __small_array_t(T) __small_array
 #endif
 
libcfa/src/bits/defs.hfa
--- r72a3aff
+++ r7a70fb2
 #define __cfa_anonymous_object(x) inline struct x
 #else
-#define __cfa_anonymous_object(x) x __cfa_anonymous_object
+#define __cfa_anonymous_object(x) struct x __cfa_anonymous_object
 #endif
 
libcfa/src/bits/queue.hfa
--- r72a3aff
+++ r7a70fb2
 #include "bits/collection.hfa"
 
-forall( dtype T ) {
+forall( dtype T | { T *& Next ( T * ); bool listed ( T * ); } ) {
     struct Queue {
         inline Collection;                      // Plan 9 inheritance
… …
         T & t = head( q );
         if ( root ) {
-            root = Next( root );
+            root = Next( (T *)root );
             if ( &head( q ) == &t ) {
                 root = last = 0p;               // only one element
… …
 } // distribution
 
-forall( dtype T ) {
+forall( dtype T | { T *& Next ( T * ); bool listed ( T * ); } ) {
     struct QueueIter {
         inline ColIter;                         // Plan 9 inheritance
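Because the generic Next/listed wrappers in bits/collection.hfa are now commented out, each element type must supply these operations itself for the strengthened assertions to resolve. A minimal sketch (Task is a hypothetical type, not part of this changeset), following the same pattern the changeset applies to $thread and info_thread(L):

#include "bits/queue.hfa"

struct Task {
    inline Colable;                             // Plan 9 inheritance: supplies the link field
    int id;                                     // hypothetical user data
};
static inline Task *& Next( Task * n ) {
    return (Task *)Next( (Colable *)n );        // delegate to the Colable link
}
static inline bool listed( Task * n ) {
    return Next( (Colable *)n ) != 0p;          // on a collection <=> link is non-null
}

Queue( Task ) ready;                            // assertions Next/listed now resolve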
libcfa/src/bits/sequence.hfa
--- r72a3aff
+++ r7a70fb2
 
 #include "bits/collection.hfa"
+#include "bits/defs.hfa"
 
 struct Seqable {
-    inline Colable;
-    Seqable * back;                             // pointer to previous node in the list
+    __cfa_anonymous_object(Colable);
+    struct Seqable * back;                      // pointer to previous node in the list
 };
 
-inline {
+#ifdef __cforall
+static inline {
     // PUBLIC
 
… …
     }
 
-    // wrappers to make Collection have T
-    forall( dtype T ) {
-        T *& Back( T * n ) {
-            return (T *)Back( (Seqable *)n );
-        }
-    } // distribution
+    // // wrappers to make Collection have T
+    // forall( dtype T ) {
+    //     T *& Back( T * n ) {
+    //         return (T *)Back( (Seqable *)n );
+    //     }
+    // } // distribution
 } // distribution
 
-forall( dtype T ) {
+forall( dtype T | { T *& Back ( T * ); T *& Next ( T * ); bool listed ( T * ); } ) {
     struct Sequence {
         inline Collection;                      // Plan 9 inheritance
     };
 
-    inline {
+    static inline {
         // wrappers to make Collection have T
         T & head( Sequence(T) & s ) with( s ) {
… …
             T * toEnd = Back( &head( s ) );
             T * fromEnd = Back( &head( from ) );
-            Back( root ) = fromEnd;
+            Back( (T *)root ) = fromEnd;
             Next( fromEnd ) = &head( s );
-            Back( from.root ) = toEnd;
+            Back( (T *)from.root ) = toEnd;
             Next( toEnd ) = &head( from );
         } // if
… …
 } // distribution
 
-forall( dtype T ) {
+forall( dtype T | { T *& Back ( T * ); T *& Next ( T * ); bool listed ( T * ); } ) {
     // SeqIter(T) is used to iterate over a Sequence(T) in head-to-tail order.
     struct SeqIter {
… …
     };
 
-    inline {
+    static inline {
         void ?{}( SeqIter(T) & si ) with( si ) {
             ((ColIter &)si){};
… …
     };
 
-    inline {
+    static inline {
         void ?{}( SeqIterRev(T) & si ) with( si ) {
             ((ColIter &)si){};
… …
     } // distribution
 } // distribution
+
+#endif
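An aside on the Seqable change: __cfa_anonymous_object relies on the bits/defs.hfa fix above, expanding differently under CFA and plain C so the header compiles both ways. A sketch of the two expansions of __cfa_anonymous_object(Colable):

#ifdef __cforall
struct Seqable {
    inline struct Colable;                      // CFA: Plan 9 anonymous inheritance
    struct Seqable * back;
};
#else
struct Seqable {
    struct Colable __cfa_anonymous_object;      // C: ordinary member named __cfa_anonymous_object
    struct Seqable * back;
};
#endif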
libcfa/src/bits/stack.hfa
--- r72a3aff
+++ r7a70fb2
 #include "bits/collection.hfa"
 
-forall( dtype T ) {
+forall( dtype T | { T *& Next ( T * ); bool listed ( T * ); } ) {
     struct Stack {
         inline Collection;                      // Plan 9 inheritance
… …
         T & t = head( s );
         if ( root ) {
-            root = (T *)Next( root );
+            root = (T *)Next( (T *)root );
             if ( &head( s ) == &t ) root = 0p;  // only one element ?
             Next( &t ) = 0p;
… …
 
 
-forall( dtype T ) {
+forall( dtype T | { T *& Next ( T * ); bool listed ( T * ); } ) {
     struct StackIter {
         inline ColIter;                         // Plan 9 inheritance
libcfa/src/concurrency/coroutine.cfa
--- r72a3aff
+++ r7a70fb2
 // Created On       : Mon Nov 28 12:27:26 2016
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Fri Oct 23 23:05:24 2020
-// Update Count     : 22
+// Last Modified On : Tue Dec 15 12:06:04 2020
+// Update Count     : 23
 //
 
… …
 static const size_t MinStackSize = 1000;
 extern size_t __page_size;                      // architecture pagesize HACK, should go in proper runtime singleton
+extern int __map_prot;
 
 void __stack_prepare( __stack_info_t * this, size_t create_size );
… …
 __cfaabi_dbg_debug_do(
     storage = (char*)(storage) - __page_size;
-    if ( mprotect( storage, __page_size, PROT_READ | PROT_WRITE ) == -1 ) {
+    if ( mprotect( storage, __page_size, __map_prot ) == -1 ) {
         abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
     }
libcfa/src/concurrency/invoke.h
--- r72a3aff
+++ r7a70fb2
     struct __monitor_group_t monitors;
 
+    // used to put threads on user data structures
+    struct {
+        struct $thread * next;
+        struct $thread * back;
+    } seqable;
+
     struct {
         struct $thread * next;
… …
     }
 
+    static inline $thread *& Back( $thread * this ) __attribute__((const)) {
+        return this->seqable.back;
+    }
+
+    static inline $thread *& Next( $thread * this ) __attribute__((const)) {
+        return this->seqable.next;
+    }
+
+    static inline bool listed( $thread * this ) {
+        return this->seqable.next != 0p;
+    }
+
     static inline void ?{}(__monitor_group_t & this) {
         (this.data){0p};
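These additions give $thread the Back/Next/listed operations that the strengthened Sequence(T) assertions in bits/sequence.hfa require, so Sequence($thread) now instantiates. A small usage sketch (hypothetical helper names; addTail/dropHead are the operations the locks.cfa diff below uses):

Sequence( $thread ) blocked;                    // assertions resolve to the inlines above

static void block_one( $thread & t ) {
    addTail( blocked, t );                      // links through t.seqable.next/back
}
static $thread & unblock_one() {
    return dropHead( blocked );                 // unlinks the head thread
}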
libcfa/src/concurrency/kernel/startup.cfa
--- r72a3aff
+++ r7a70fb2
 }
 
-size_t __page_size = 0;
+extern size_t __page_size;
 
 //-----------------------------------------------------------------------------
… …
     /* paranoid */ verify( ! __preemption_enabled() );
     __cfadbg_print_safe(runtime_core, "Kernel : Starting\n");
-
-    __page_size = sysconf( _SC_PAGESIZE );
 
     __cfa_dbg_global_clusters.list{ __get };
… …
     #if CFA_PROCESSOR_USE_MMAP
         stacksize = ceiling( stacksize, __page_size ) + __page_size;
-        stack = mmap(0p, stacksize, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+        stack = mmap(0p, stacksize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
         if(stack == ((void*)-1)) {
             abort( "pthread stack creation : internal error, mmap failure, error(%d) %s.", errno, strerror( errno ) );
libcfa/src/concurrency/locks.cfa
--- r72a3aff
+++ r7a70fb2
 
     void ^?{}( info_thread(L) & this ){ }
+
+    info_thread(L) *& Back( info_thread(L) * this ) {
+        return (info_thread(L) *)Back( (Seqable *)this );
+    }
+
+    info_thread(L) *& Next( info_thread(L) * this ) {
+        return (info_thread(L) *)Next( (Colable *)this );
+    }
+
+    bool listed( info_thread(L) * this ) {
+        return Next( (Colable *)this ) != 0p;
+    }
 }
 
… …
         abort("A single acquisition lock holder attempted to reacquire the lock resulting in a deadlock.");
     } else if ( owner != 0p && owner != active_thread() ) {
-        append( blocked_threads, active_thread() );
+        addTail( blocked_threads, *active_thread() );
         wait_count++;
         unlock( lock );
… …
 
 void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
-    $thread * t = pop_head( blocked_threads );
+    $thread * t = &dropHead( blocked_threads );
     owner = t;
     recursion_count = ( t ? 1 : 0 );
… …
     lock( lock __cfaabi_dbg_ctx2 );
     if ( owner != 0p ) {
-        append( blocked_threads, t );
+        addTail( blocked_threads, *t );
         wait_count++;
         unlock( lock );
… …
     size_t recursion_count = 0;
     if (i->lock) {
-        i->t->link.next = 1p;
         recursion_count = get_recursion_count(*i->lock);
         remove_( *i->lock );
libcfa/src/concurrency/locks.hfa
--- r72a3aff
+++ r7a70fb2
     void ?{}( info_thread(L) & this, $thread * t, uintptr_t info );
     void ^?{}( info_thread(L) & this );
+
+    info_thread(L) *& Back( info_thread(L) * this );
+    info_thread(L) *& Next( info_thread(L) * this );
+    bool listed( info_thread(L) * this );
 }
 
… …
 
     // List of blocked threads
-    __queue_t( $thread ) blocked_threads;
+    Sequence( $thread ) blocked_threads;
 
     // Count of current blocked threads
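This container swap, from the intrusive __queue_t($thread) to Sequence($thread), is what drives the call-site changes in locks.cfa above; the operations change from pointer-based to reference-based:

// Before: intrusive queue, pointer-based interface.
append( blocked_threads, active_thread() );
$thread * t = pop_head( blocked_threads );

// After: Sequence, reference-based interface.
addTail( blocked_threads, *active_thread() );
$thread * t = &dropHead( blocked_threads );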
libcfa/src/concurrency/thread.cfa
--- r72a3aff
+++ r7a70fb2
     canary = 0x0D15EA5E0D15EA5Ep;
     #endif
+
+    seqable.next = 0p;
+    seqable.back = 0p;
 
     node.next = 0p;
libcfa/src/heap.cfa
--- r72a3aff
+++ r7a70fb2
 // Created On       : Tue Dec 19 21:58:35 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Sun Dec 13 22:04:10 2020
-// Update Count     : 984
+// Last Modified On : Wed Dec 16 12:28:25 2020
+// Update Count     : 1023
 //
 
 #include <unistd.h>                             // sbrk, sysconf
+#include <stdlib.h>                             // EXIT_FAILURE
 #include <stdbool.h>                            // true, false
 #include <stdio.h>                              // snprintf, fileno
… …
 // Define the default extension heap amount in units of bytes. When the uC++ supplied heap reaches the brk address,
 // the brk address is extended by the extension amount.
-__CFA_DEFAULT_HEAP_EXPANSION__ = (1 * 1024 * 1024),
+__CFA_DEFAULT_HEAP_EXPANSION__ = (10 * 1024 * 1024),
 
 // Define the mmap crossover point during allocation. Allocations less than this amount are allocated from buckets;
… …
 
 // statically allocated variables => zero filled.
-static size_t pageSize;                         // architecture pagesize
+size_t __page_size;                             // architecture pagesize
+int __map_prot;                                 // common mmap/mprotect protection
 static size_t heapExpand;                       // sbrk advance
 static size_t mmapStart;                        // cross over point for mmap
… …
 #endif // FASTLOOKUP
 
-static int mmapFd = -1;                         // fake or actual fd for anonymous file
+static const off_t mmapFd = -1;                 // fake or actual fd for anonymous file
 #ifdef __CFA_DEBUG__
 static bool heapBoot = 0;                       // detect recursion during boot
… …
 
 static inline bool setMmapStart( size_t value ) { // true => mmapped, false => sbrk
-    if ( value < pageSize || bucketSizes[NoBucketSizes - 1] < value ) return false;
+    if ( value < __page_size || bucketSizes[NoBucketSizes - 1] < value ) return false;
     mmapStart = value;                          // set global
 
… …
     header = headerAddr( addr );
 
-    if ( unlikely( heapEnd < addr ) ) {         // mmapped ?
+    if ( unlikely( addr < heapBegin || heapEnd < addr ) ) { // mmapped ?
         fakeHeader( header, alignment );
         size = header->kind.real.blockSize & -3; // mmap size
 
     #ifdef __CFA_DEBUG__
-    checkHeader( addr < heapBegin, name, addr ); // bad low address ?
+    checkHeader( header < (HeapManager.Storage.Header *)heapBegin, name, addr ); // bad low address ?
     #endif // __CFA_DEBUG__
 
… …
 #endif // __CFA_DEBUG__
 
+
 #define NO_MEMORY_MSG "insufficient heap memory available for allocating %zd new bytes."
 
… …
     // If the size requested is bigger than the current remaining storage, increase the size of the heap.
 
-    size_t increase = ceiling2( size > heapExpand ? size : heapExpand, pageSize );
+    size_t increase = ceiling2( size > heapExpand ? size : heapExpand, __page_size );
+    // Do not call abort or strerror( errno ) as they may call malloc.
     if ( sbrk( increase ) == (void *)-1 ) {     // failed, no memory ?
         unlock( extlock );
-        abort( NO_MEMORY_MSG, size );           // give up
-    } // if
-    if ( mprotect( (char *)heapEnd + heapRemaining, increase, PROT_READ | PROT_WRITE | PROT_EXEC ) ) {
-        enum { BufferSize = 128 };
-        char helpText[BufferSize];
-        // Do not call strerror( errno ) as it may call malloc.
-        int len = snprintf( helpText, BufferSize, "internal error, extend(), mprotect failure, heapEnd:%p size:%zd, errno:%d.", heapEnd, increase, errno );
-        __cfaabi_bits_write( STDERR_FILENO, helpText, len );
+        __cfaabi_bits_print_nolock( STDERR_FILENO, NO_MEMORY_MSG, size );
+        _exit( EXIT_FAILURE );
+    } // if
+    if ( mprotect( (char *)heapEnd + heapRemaining, increase, __map_prot ) ) {
+        unlock( extlock );
+        __cfaabi_bits_print_nolock( STDERR_FILENO, "extend() : internal error, mprotect failure, heapEnd:%p size:%zd, errno:%d.\n", heapEnd, increase, errno );
+        _exit( EXIT_FAILURE );
     } // if
     #ifdef __STATISTICS__
… …
     #ifdef __CFA_DEBUG__
     // Set new memory to garbage so subsequent uninitialized usages might fail.
-    //memset( (char *)heapEnd + heapRemaining, '\377', increase );
-    Memset( (char *)heapEnd + heapRemaining, increase );
+    memset( (char *)heapEnd + heapRemaining, '\xde', increase );
+    //Memset( (char *)heapEnd + heapRemaining, increase );
     #endif // __CFA_DEBUG__
     rem = heapRemaining + increase - size;
… …
         block->header.kind.real.home = freeElem; // pointer back to free list of apropriate size
     } else {                                    // large size => mmap
-        if ( unlikely( size > ULONG_MAX - pageSize ) ) return 0p;
-        tsize = ceiling2( tsize, pageSize );    // must be multiple of page size
+        if ( unlikely( size > ULONG_MAX - __page_size ) ) return 0p;
+        tsize = ceiling2( tsize, __page_size ); // must be multiple of page size
         #ifdef __STATISTICS__
             __atomic_add_fetch( &mmap_calls, 1, __ATOMIC_SEQ_CST );
… …
         #endif // __STATISTICS__
 
-        block = (HeapManager.Storage *)mmap( 0, tsize, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
+        block = (HeapManager.Storage *)mmap( 0, tsize, __map_prot, MAP_PRIVATE | MAP_ANONYMOUS, mmapFd, 0 );
         if ( block == (HeapManager.Storage *)MAP_FAILED ) { // failed ?
             if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); // no memory
… …
         #ifdef __CFA_DEBUG__
         // Set new memory to garbage so subsequent uninitialized usages might fail.
-        //memset( block, '\377', tsize );
-        Memset( block, tsize );
+        memset( block, '\xde', tsize );
+        //Memset( block, tsize );
         #endif // __CFA_DEBUG__
         block->header.kind.real.blockSize = tsize; // storage size for munmap
… …
         #endif // __STATISTICS__
         if ( munmap( header, size ) == -1 ) {
-            #ifdef __CFA_DEBUG__
             abort( "Attempt to deallocate storage %p not allocated or with corrupt header.\n"
                    "Possible cause is invalid pointer.",
                    addr );
-            #endif // __CFA_DEBUG__
         } // if
     } else {
         #ifdef __CFA_DEBUG__
         // Set free memory to garbage so subsequent usages might fail.
-        //memset( ((HeapManager.Storage *)header)->data, '\377', freeElem->blockSize - sizeof( HeapManager.Storage ) );
-        Memset( ((HeapManager.Storage *)header)->data, freeElem->blockSize - sizeof( HeapManager.Storage ) );
+        memset( ((HeapManager.Storage *)header)->data, '\xde', freeElem->blockSize - sizeof( HeapManager.Storage ) );
+        //Memset( ((HeapManager.Storage *)header)->data, freeElem->blockSize - sizeof( HeapManager.Storage ) );
         #endif // __CFA_DEBUG__
 
… …
 
 static void ?{}( HeapManager & manager ) with( manager ) {
-    pageSize = sysconf( _SC_PAGESIZE );
+    __page_size = sysconf( _SC_PAGESIZE );
+    __map_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
 
     for ( unsigned int i = 0; i < NoBucketSizes; i += 1 ) { // initialize the free lists
… …
 
     char * end = (char *)sbrk( 0 );
-    heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, pageSize ) - end ); // move start of heap to multiple of alignment
+    heapBegin = heapEnd = sbrk( (char *)ceiling2( (long unsigned int)end, __page_size ) - end ); // move start of heap to multiple of alignment
 } // HeapManager
… …
     #ifdef __CFA_DEBUG__
     if ( heapBoot ) {                           // check for recursion during system boot
-        // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT.
         abort( "boot() : internal error, recursively invoked during system boot." );
     } // if
… …
 } // cmemalign
 
+
 // Same as memalign(), but ISO/IEC 2011 C11 Section 7.22.2 states: the value of size shall be an integral multiple
 // of alignment. This requirement is universally ignored.
… …
 } // posix_memalign
 
+
 // Allocates size bytes and returns a pointer to the allocated memory. The memory address shall be a multiple of the
 // page size. It is equivalent to memalign(sysconf(_SC_PAGESIZE),size).
 void * valloc( size_t size ) {
-    return memalign( pageSize, size );
+    return memalign( __page_size, size );
 } // valloc
 
 
 // Same as valloc but rounds size to multiple of page size.
 void * pvalloc( size_t size ) {
-    return memalign( pageSize, ceiling2( size, pageSize ) );
+    return memalign( __page_size, ceiling2( size, __page_size ) );
 } // pvalloc
… …
     choose( option ) {
     case M_TOP_PAD:
-        heapExpand = ceiling2( value, pageSize ); return 1;
+        heapExpand = ceiling2( value, __page_size ); return 1;
     case M_MMAP_THRESHOLD:
         if ( setMmapStart( value ) ) return 1;
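A behavioural note on the heap failure paths: the old code formatted a message with snprintf and called abort, but abort and strerror may themselves allocate and recurse into the failing heap. The replacement pattern, taken directly from the diff, prints without allocating and terminates with _exit, which skips atexit handlers that might allocate:

// Allocation-failure handling inside extend(): no malloc on this path.
unlock( extlock );
__cfaabi_bits_print_nolock( STDERR_FILENO, NO_MEMORY_MSG, size );
_exit( EXIT_FAILURE );                          // bypass atexit/global destructors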