Changeset 3e3f236 for libcfa/src
- Timestamp: Dec 10, 2020, 4:00:29 PM (5 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 97aca3df, b3a0df6
- Parents: 6a45bd78 (diff), 297cf18 (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Location: libcfa/src
- Files: 12 edited
Legend: unchanged context lines below are prefixed with a space, added lines with "+", and removed lines with "-".
libcfa/src/Makefile.am
r6a45bd78 → r3e3f236

 ## Created On : Sun May 31 08:54:01 2015
 ## Last Modified By : Peter A. Buhr
-## Last Modified On : Mon Jun  1 13:35:33 2020
-## Update Count : 248
+## Last Modified On : Wed Dec  9 22:46:14 2020
+## Update Count : 250
 ###############################################################################
 
…
     clock.hfa \
     exception.hfa \
+    exception.h \
     gmp.hfa \
     math.hfa \
…
     bits/defs.hfa \
     bits/locks.hfa \
+    bits/collection.hfa \
+    bits/stack.hfa \
+    bits/queue.hfa \
+    bits/sequence.hfa \
     concurrency/iofwd.hfa \
     containers/list.hfa \
…
     bits/debug.cfa \
     exception.c \
-    exception.h \
     interpose.cfa \
     lsda.h \
…
     -rm -rf ${CFA_INCDIR} ${CFA_LIBDIR}
 
+distclean-local:
+    find ${builddir} -path '*.Plo' -delete
+
 
 # $(AM_V_CFA)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\
libcfa/src/bits/collection.hfa
r6a45bd78 → r3e3f236

 
 inline {
+    // PUBLIC
+
     void ?{}( Colable & co ) with( co ) {
         next = 0p;
…
     }
 
-    Colable *getNext( Colable & co ) with( co ) {
-        return next;
+    Colable & getNext( Colable & co ) with( co ) {
+        return *next;
     }
+
+    // PRIVATE
 
     Colable *& Next( Colable * cp ) {
…
     }
 
+    // wrappers to make Collection have T
     forall( dtype T ) {
         T *& Next( T * n ) {
libcfa/src/bits/queue.hfa
r6a45bd78 → r3e3f236

 #pragma once
 
-#include "collection.hfa"
+#include "bits/collection.hfa"
 
 forall( dtype T ) {
…
 
     T * succ( Queue(T) & q, T * n ) with( q ) {      // pre: *n in *q
-        #ifdef __CFA_DEBUG__
+#ifdef __CFA_DEBUG__
         if ( ! listed( n ) ) abort( "(Queue &)%p.succ( %p ) : Node is not on a list.", &q, n );
-        #endif // __CFA_DEBUG__
+#endif // __CFA_DEBUG__
         return (Next( n ) == n) ? 0p : Next( n );
     } // post: n == tail() & succ(n) == 0 | n != tail() & *succ(n) in *q
 
     void addHead( Queue(T) & q, T & n ) with( q ) {
-        #ifdef __CFA_DEBUG__
+#ifdef __CFA_DEBUG__
         if ( listed( &n ) ) abort( "(Queue &)%p.addHead( %p ) : Node is already on another list.", &q, &n );
-        #endif // __CFA_DEBUG__
+#endif // __CFA_DEBUG__
         if ( last ) {
             Next( &n ) = &head( q );
…
         } else {
             root = last = &n;
-            Next( &n ) = &n;
+            Next( &n ) = &n;                          // last node points to itself
         }
     }
 
     void addTail( Queue(T) & q, T & n ) with( q ) {
-        #ifdef __CFA_DEBUG__
+#ifdef __CFA_DEBUG__
         if ( listed( &n ) ) abort( "(Queue &)%p.addTail( %p ) : Node is already on another list.", &q, &n );
-        #endif // __CFA_DEBUG__
+#endif // __CFA_DEBUG__
         if ( last ) Next( last ) = &n;
         else root = &n;
         last = &n;
-        Next( &n ) = &n;
+        Next( &n ) = &n;                              // last node points to itself
     }
 
…
 
     void remove( Queue(T) & q, T & n ) with( q ) {    // O(n)
-        #ifdef __CFA_DEBUG__
+#ifdef __CFA_DEBUG__
         if ( ! listed( (Colable &)n ) ) abort( "(Queue &)%p.remove( %p ) : Node is not on a list.", &q, &n );
-        #endif // __CFA_DEBUG__
+#endif // __CFA_DEBUG__
         T * prev = 0p;
         T * curr = (T *)root;
…
                 break;
             }
-            #ifdef __CFA_DEBUG__
             // not found => error
-            if (curr == last) abort( "(Queue &)%p.remove( %p ) : Node is not in list.", &q, &n );
-            #endif // __CFA_DEBUG__
+#ifdef __CFA_DEBUG__
+            if ( curr == last ) abort( "(Queue &)%p.remove( %p ) : Node is not in list.", &q, &n );
+#endif // __CFA_DEBUG__
             prev = curr;
             curr = Next( curr );
…
 
     // Node "n" must be in the "from" list.
     void split( Queue(T) & q, Queue(T) & from, T & n ) with( q ) {
-        #ifdef __CFA_DEBUG__
+#ifdef __CFA_DEBUG__
         if ( ! listed( (Colable &)n ) ) abort( "(Queue &)%p.split( %p ) : Node is not on a list.", &q, &n );
-        #endif // __CFA_DEBUG__
+#endif // __CFA_DEBUG__
         Queue(T) to;
         to.root = from.root;                          // start of "to" list
…
     } // distribution
 } // distribution
-
-// Local Variables: //
-// compile-command: "cfa queue.cfa" //
-// End: //
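The Queue changes above all rely on one invariant of these intrusive collections: a node's next pointer is null when it is not on a list, and the last node of a queue points to itself so the end can be detected without a null link. The following plain-C sketch is only an illustration of that convention (the names node, queue, add_tail, and succ are made up here, not libcfa identifiers):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

// Intrusive node: the link lives inside the element, as with Colable above.
struct node {
    struct node * next;                 // NULL => not on any list ("listed" test)
    int value;
};

struct queue {
    struct node * root;                 // first node, or NULL when the queue is empty
    struct node * last;                 // last node, or NULL when the queue is empty
};

static void add_tail( struct queue * q, struct node * n ) {
    assert( n->next == NULL );          // node must not already be on a list
    if ( q->last ) q->last->next = n;
    else q->root = n;
    q->last = n;
    n->next = n;                        // last node points to itself
}

static struct node * succ( struct node * n ) {
    return n->next == n ? NULL : n->next;   // self-link marks the tail
}

int main( void ) {
    struct queue q = { NULL, NULL };
    struct node a = { NULL, 1 }, b = { NULL, 2 };
    add_tail( &q, &a );
    add_tail( &q, &b );
    for ( struct node * p = q.root; p; p = succ( p ) )
        printf( "%d\n", p->value );     // prints 1 then 2
    return 0;
}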
libcfa/src/bits/sequence.hfa
r6a45bd78 → r3e3f236

 #pragma once
 
-#include "collection.hfa"
+#include "bits/collection.hfa"
 
 struct Seqable {
…
 
 inline {
+    // PUBLIC
+
     void ?{}( Seqable & sq ) with( sq ) {
-        ((Colable &)
+        ((Colable &)sq){};
         back = 0p;
     } // post: ! listed()
…
     }
 
+    // PRIVATE
+
     Seqable *& Back( Seqable * sq ) {
         return sq->back;
     }
+
+    // wrappers to make Collection have T
+    forall( dtype T ) {
+        T *& Back( T * n ) {
+            return (T *)Back( (Seqable *)n );
+        }
+    } // distribution
 } // distribution
 
…
     } // post: empty() & head() == 0 | !empty() & head() in *s
 
-    T *& Back( T * n ) {
-        return (T *)Back( (Seqable *)n );
-    }
-
     void ?{}( Sequence(T) &, const Sequence(T) & ) = void; // no copy
     Sequence(T) & ?=?( const Sequence(T) & ) = void;       // no assignment
 
     void ?{}( Sequence(T) & s ) with( s ) {
-        ((Collection &)
+        ((Collection &)s){};
     } // post: isEmpty().
…
     } // post: empty() & tail() == 0 | !empty() & tail() in *s
 
-    // Return a pointer to the element after *n, or 0p if there isn't one.
+    // Return a pointer to the element after *n, or 0p if list empty.
     T * succ( Sequence(T) & s, T * n ) with( s ) {    // pre: *n in *s
-        #ifdef __CFA_DEBUG__
+#ifdef __CFA_DEBUG__
         if ( ! listed( n ) ) abort( "(Sequence &)%p.succ( %p ) : Node is not on a list.", &s, n );
-        #endif // __CFA_DEBUG__
+#endif // __CFA_DEBUG__
         return Next( n ) == &head( s ) ? 0p : Next( n );
     } // post: n == tail() & succ(n) == 0 | n != tail() & *succ(n) in *s
 
     // Return a pointer to the element before *n, or 0p if there isn't one.
     T * pred( Sequence(T) & s, T * n ) with( s ) {    // pre: *n in *s
-        #ifdef __CFA_DEBUG__
+#ifdef __CFA_DEBUG__
         if ( ! listed( n ) ) abort( "(Sequence &)%p.pred( %p ) : Node is not on a list.", &s, n );
-        #endif // __CFA_DEBUG__
+#endif // __CFA_DEBUG__
         return n == &head( s ) ? 0p : Back( n );
     } // post: n == head() & head(n) == 0 | n != head() & *pred(n) in *s
…
     // Insert *n into the sequence before *bef, or at the end if bef == 0.
     void insertBef( Sequence(T) & s, T & n, T & bef ) with( s ) { // pre: !n->listed() & *bef in *s
-        #ifdef __CFA_DEBUG__
+#ifdef __CFA_DEBUG__
         if ( listed( &n ) ) abort( "(Sequence &)%p.insertBef( %p, %p ) : Node is already on another list.", &s, n, &bef );
-        #endif // __CFA_DEBUG__
+#endif // __CFA_DEBUG__
         if ( &bef == &head( s ) ) {                   // must change root
             if ( root ) {
…
     // Insert *n into the sequence after *aft, or at the beginning if aft == 0.
     void insertAft( Sequence(T) & s, T & aft, T & n ) with( s ) { // pre: !n->listed() & *aft in *s
-        #ifdef __CFA_DEBUG__
+#ifdef __CFA_DEBUG__
         if ( listed( &n ) ) abort( "(Sequence &)%p.insertAft( %p, %p ) : Node is already on another list.", &s, &aft, &n );
-        #endif // __CFA_DEBUG__
+#endif // __CFA_DEBUG__
         if ( ! &aft ) {                               // must change root
             if ( root ) {
…
     // pre: n->listed() & *n in *s
     void remove( Sequence(T) & s, T & n ) with( s ) { // O(1)
-        #ifdef __CFA_DEBUG__
+#ifdef __CFA_DEBUG__
         if ( ! listed( &n ) ) abort( "(Sequence &)%p.remove( %p ) : Node is not on a list.", &s, &n );
-        #endif // __CFA_DEBUG__
+#endif // __CFA_DEBUG__
         if ( &n == &head( s ) ) {
             if ( Next( &head( s ) ) == &head( s ) ) root = 0p;
…
     // Node "n" must be in the "from" list.
     void split( Sequence(T) & s, Sequence(T) & from, T & n ) with( s ) {
-        #ifdef __CFA_DEBUG__
+#ifdef __CFA_DEBUG__
         if ( ! listed( &n ) ) abort( "(Sequence &)%p.split( %p ) : Node is not on a list.", &s, &n );
-        #endif // __CFA_DEBUG__
+#endif // __CFA_DEBUG__
         Sequence(T) to;
         to.root = from.root;                          // start of "to" list
…
             Back( &head( from ) ) = Back( &head( to ) );      // fix "from" list
             Next( Back( &head( to ) ) ) = &head( from );
-            Next( &n ) = &head( to );
+            Next( &n ) = &head( to );                 // fix "to" list
             Back( &head( to ) ) = &n;
         } // if
…
     // passing the sequence, traversing would require its length. Thus the iterator needs a pointer to the sequence
     // to pass to succ/pred. Both stack and queue just encounter 0p since the lists are not circular.
-    Sequence(T) * seq;
+    Sequence(T) * seq;                                // FIX ME: cannot be reference
 };
 
…
 
     void ?{}( SeqIter(T) & si, Sequence(T) & s ) with( si ) {
-        ((ColIter &)
+        ((ColIter &)si){};
         seq = &s;
         curr = &head( s );
…
 
     void ?{}( SeqIter(T) & si, Sequence(T) & s, T & start ) with( si ) {
-        ((ColIter &)
+        ((ColIter &)si){};
         seq = &s;
         curr = &start;
…
     inline ColIter;
     // See above for explanation.
-    Sequence(T) * seq;
+    Sequence(T) * seq;                                // FIX ME: cannot be reference
 };
 
 inline {
     void ?{}( SeqIterRev(T) & si ) with( si ) {
-        ((ColIter &)
+        ((ColIter &)si){};
         seq = 0p;
     } // post: elts = null.
 
     void ?{}( SeqIterRev(T) & si, Sequence(T) & s ) with( si ) {
-        ((ColIter &)
+        ((ColIter &)si){};
         seq = &s;
         curr = &tail( s );
…
 
     void ?{}( SeqIterRev(T) & si, Sequence(T) & s, T & start ) with( si ) {
-        ((ColIter &)
+        ((ColIter &)si){};
         seq = &s;
         curr = &start;
…
     } // distribution
 } // distribution
-
-// Local Variables: //
-// compile-command: "cfa sequence.hfa" //
-// End: //
libcfa/src/bits/stack.hfa
r6a45bd78 → r3e3f236

 #pragma once
 
-#include "collection.hfa"
+#include "bits/collection.hfa"
 
 forall( dtype T ) {
…
 
     void addHead( Stack(T) & s, T & n ) with( s ) {
-        #ifdef __CFA_DEBUG__
+#ifdef __CFA_DEBUG__
         if ( listed( (Colable &)(n) ) ) abort( "(Stack &)%p.addHead( %p ) : Node is already on another list.", &s, n );
-        #endif // __CFA_DEBUG__
+#endif // __CFA_DEBUG__
         Next( &n ) = &head( s ) ? &head( s ) : &n;
         root = &n;
…
         T & t = head( s );
         if ( root ) {
-            root = ( T *)Next( root);
+            root = ( T *)Next( root );
             if ( &head( s ) == &t ) root = 0p;        // only one element ?
             Next( &t ) = 0p;
…
     } // distribution
 } // distribution
-
-// Local Variables: //
-// compile-command: "make install" //
-// End: //
libcfa/src/concurrency/coroutine.cfa
r6a45bd78 → r3e3f236

 #include "kernel_private.hfa"
 #include "exception.hfa"
+#include "math.hfa"
 
 #define __CFA_INVOKE_PRIVATE__
…
 
 void __stack_prepare( __stack_info_t * this, size_t create_size );
+void __stack_clean  ( __stack_info_t * this );
 
 //-----------------------------------------------------------------------------
…
     bool userStack = ((intptr_t)this.storage & 0x1) != 0;
     if ( ! userStack && this.storage ) {
-        __attribute__((may_alias)) intptr_t * istorage = (intptr_t *)&this.storage;
-        *istorage &= (intptr_t)-1;
-
-        void * storage = this.storage->limit;
-        __cfaabi_dbg_debug_do(
-            storage = (char*)(storage) - __page_size;
-            if ( mprotect( storage, __page_size, PROT_READ | PROT_WRITE ) == -1 ) {
-                abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
-            }
-        );
-        __cfaabi_dbg_print_safe("Kernel : Deleting stack %p\n", storage);
-        free( storage );
+        __stack_clean( &this );
+        // __attribute__((may_alias)) intptr_t * istorage = (intptr_t *)&this.storage;
+        // *istorage &= (intptr_t)-1;
+
+        // void * storage = this.storage->limit;
+        // __cfaabi_dbg_debug_do(
+        //     storage = (char*)(storage) - __page_size;
+        //     if ( mprotect( storage, __page_size, PROT_READ | PROT_WRITE ) == -1 ) {
+        //         abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
+        //     }
+        // );
+        // __cfaabi_dbg_print_safe("Kernel : Deleting stack %p\n", storage);
+        // free( storage );
     }
 }
…
     assert(__page_size != 0l);
     size_t size = libCeiling( storageSize, 16 ) + stack_data_size;
+    size = ceiling(size, __page_size);
 
     // If we are running debug, we also need to allocate a guardpage to catch stack overflows.
     void * storage;
-    __cfaabi_dbg_debug_do(
-        storage = memalign( __page_size, size + __page_size );
-    );
-    __cfaabi_dbg_no_debug_do(
-        storage = (void*)malloc(size);
-    );
-
-    __cfaabi_dbg_print_safe("Kernel : Created stack %p of size %zu\n", storage, size);
-    __cfaabi_dbg_debug_do(
-        if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
-            abort( "__stack_alloc : internal error, mprotect failure, error(%d) %s.", (int)errno, strerror( (int)errno ) );
-        }
-        storage = (void *)(((intptr_t)storage) + __page_size);
-    );
+    // __cfaabi_dbg_debug_do(
+    //     storage = memalign( __page_size, size + __page_size );
+    // );
+    // __cfaabi_dbg_no_debug_do(
+    //     storage = (void*)malloc(size);
+    // );
+
+    // __cfaabi_dbg_print_safe("Kernel : Created stack %p of size %zu\n", storage, size);
+    // __cfaabi_dbg_debug_do(
+    //     if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
+    //         abort( "__stack_alloc : internal error, mprotect failure, error(%d) %s.", (int)errno, strerror( (int)errno ) );
+    //     }
+    //     storage = (void *)(((intptr_t)storage) + __page_size);
+    // );
+    storage = mmap(0p, size + __page_size, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+    if(storage == ((void*)-1)) {
+        abort( "coroutine stack creation : internal error, mmap failure, error(%d) %s.", errno, strerror( errno ) );
+    }
+    if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
+        abort( "coroutine stack creation : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
+    } // if
+    storage = (void *)(((intptr_t)storage) + __page_size);
 
     verify( ((intptr_t)storage & (libAlign() - 1)) == 0ul );
     return [storage, size];
+}
+
+void __stack_clean  ( __stack_info_t * this ) {
+    size_t size = ((intptr_t)this->storage->base) - ((intptr_t)this->storage->limit) + sizeof(__stack_t);
+    void * storage = this->storage->limit;
+
+    storage = (void *)(((intptr_t)storage) - __page_size);
+    if(munmap(storage, size + __page_size) == -1) {
+        abort( "coroutine stack destruction : internal error, munmap failure, error(%d) %s.", errno, strerror( errno ) );
+    }
 }
…
     assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %zd bytes for a stack.", size, MinStackSize );
 
-    this->storage = (__stack_t *)((intptr_t)storage + size);
+    this->storage = (__stack_t *)((intptr_t)storage + size - sizeof(__stack_t));
     this->storage->limit = storage;
-    this->storage->base = (void*)((intptr_t)storage + size);
+    this->storage->base = (void*)((intptr_t)storage + size - sizeof(__stack_t));
     this->storage->exception_context.top_resume = 0p;
     this->storage->exception_context.current_exception = 0p;
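The net effect of the coroutine.cfa change is that coroutine stacks are now carved out of an anonymous mmap region whose lowest page is made inaccessible, so a stack overflow faults immediately, and the new __stack_clean unmaps the whole region. Below is a minimal stand-alone C sketch of that guard-page pattern; the names round_up, guarded_alloc, and guarded_free are illustrative, not libcfa functions:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

// Round "n" up to a multiple of "align" (align must be non-zero).
static size_t round_up( size_t n, size_t align ) {
    return ( ( n + align - 1 ) / align ) * align;
}

// Allocate "request" usable bytes backed by mmap, with one inaccessible page
// below the usable region so an overflow faults instead of corrupting memory.
static void * guarded_alloc( size_t request, size_t * usable ) {
    size_t page = (size_t)sysconf( _SC_PAGESIZE );
    size_t size = round_up( request, page );
    void * storage = mmap( NULL, size + page, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 );
    if ( storage == MAP_FAILED ) { fprintf( stderr, "mmap: %s\n", strerror( errno ) ); exit( EXIT_FAILURE ); }
    if ( mprotect( storage, page, PROT_NONE ) == -1 ) {   // lowest page becomes the guard
        fprintf( stderr, "mprotect: %s\n", strerror( errno ) ); exit( EXIT_FAILURE );
    }
    *usable = size;
    return (char *)storage + page;          // usable region starts just above the guard page
}

// Matching cleanup: step back over the guard page and unmap the whole region.
static void guarded_free( void * region, size_t usable ) {
    size_t page = (size_t)sysconf( _SC_PAGESIZE );
    if ( munmap( (char *)region - page, usable + page ) == -1 ) {
        fprintf( stderr, "munmap: %s\n", strerror( errno ) ); exit( EXIT_FAILURE );
    }
}

int main( void ) {
    size_t usable;
    void * region = guarded_alloc( 60 * 1024, &usable );
    printf( "allocated %zu usable bytes at %p\n", usable, region );
    guarded_free( region, usable );
    return 0;
}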
libcfa/src/concurrency/coroutine.hfa
r6a45bd78 → r3e3f236

 }
 
-extern void __stack_prepare ( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
+extern void __stack_prepare( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
+extern void __stack_clean  ( __stack_info_t * this );
+
 
 // Suspend implementation inlined for performance
libcfa/src/concurrency/io/setup.cfa
r6a45bd78 → r3e3f236

     // Wait for the io poller thread to finish
 
-    pthread_join( iopoll.thrd, 0p );
-    free( iopoll.stack );
+    __destroy_pthread( iopoll.thrd, iopoll.stack, 0p );
 
     int ret = close(iopoll.epollfd);
libcfa/src/concurrency/kernel/startup.cfa
r6a45bd78 → r3e3f236

 #include "kernel_private.hfa"
 #include "startup.hfa"                     // STARTUP_PRIORITY_XXX
+#include "math.hfa"
 
 //-----------------------------------------------------------------------------
…
 }
 
+extern size_t __page_size;
 void ^?{}(processor & this) with( this ){
     if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
…
     }
 
-    int err = pthread_join( kernel_thread, 0p );
-    if( err != 0 ) abort("KERNEL ERROR: joining processor %p caused error %s\n", &this, strerror(err));
-
-    free( this.stack );
+    __destroy_pthread( kernel_thread, this.stack, 0p );
 
     disable_interrupts();
…
 
     void * stack;
-    __cfaabi_dbg_debug_do(
-        stack = memalign( __page_size, stacksize + __page_size );
-        // pthread has no mechanism to create the guard page in user supplied stack.
-        if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
-            abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
-        } // if
-    );
-    __cfaabi_dbg_no_debug_do(
-        stack = malloc( stacksize );
-    );
+    #warning due to the thunk problem, stack creation uses mmap, revert to malloc once this goes away
+    // __cfaabi_dbg_debug_do(
+    //     stack = memalign( __page_size, stacksize + __page_size );
+    //     // pthread has no mechanism to create the guard page in user supplied stack.
+    //     if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
+    //         abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
+    //     } // if
+    // );
+    // __cfaabi_dbg_no_debug_do(
+    //     stack = malloc( stacksize );
+    // );
+    stacksize = ceiling( stacksize, __page_size ) + __page_size;
+    stack = mmap(0p, stacksize, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+    if(stack == ((void*)-1)) {
+        abort( "pthread stack creation : internal error, mmap failure, error(%d) %s.", errno, strerror( errno ) );
+    }
+    if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
+        abort( "pthread stack creation : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
+    } // if
 
     check( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" );
…
     return stack;
 }
+
+void __destroy_pthread( pthread_t pthread, void * stack, void ** retval ) {
+    int err = pthread_join( pthread, retval );
+    if( err != 0 ) abort("KERNEL ERROR: joining pthread %p caused error %s\n", (void*)pthread, strerror(err));
+
+    pthread_attr_t attr;
+
+    check( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute
+
+    size_t stacksize;
+    // default stack size, normally defined by shell limit
+    check( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" );
+    assert( stacksize >= PTHREAD_STACK_MIN );
+    stacksize += __page_size;
+
+    if(munmap(stack, stacksize) == -1) {
+        abort( "pthread stack destruction : internal error, munmap failure, error(%d) %s.", errno, strerror( errno ) );
+    }
+}
+
 
 #if defined(__CFA_WITH_VERIFY__)
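__create_pthread and the new __destroy_pthread pair an mmap-backed, user-supplied stack with the kernel thread that runs on it: the stack is installed with pthread_attr_setstack, and it may only be unmapped after pthread_join. The following self-contained C sketch shows that pairing under assumed sizes (the worker function and the 64-page stack are examples, not the values used above):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static void * worker( void * arg ) {        // trivial thread body for the example
    (void)arg;
    return NULL;
}

int main( void ) {
    size_t page = (size_t)sysconf( _SC_PAGESIZE );
    size_t stacksize = 64 * page + page;    // 64 usable pages plus 1 guard page (example size)

    // Create: map the stack ourselves so its guard page and lifetime are under our control.
    void * stack = mmap( NULL, stacksize, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 );
    if ( stack == MAP_FAILED ) { perror( "mmap" ); return EXIT_FAILURE; }
    if ( mprotect( stack, page, PROT_NONE ) == -1 ) { perror( "mprotect" ); return EXIT_FAILURE; }

    pthread_attr_t attr;
    pthread_attr_init( &attr );
    pthread_attr_setstack( &attr, stack, stacksize );   // thread runs on our mapping

    pthread_t thrd;
    if ( pthread_create( &thrd, &attr, worker, NULL ) != 0 ) {
        fprintf( stderr, "pthread_create failed\n" );
        return EXIT_FAILURE;
    }
    pthread_attr_destroy( &attr );

    // Destroy: join first, then unmap; the stack must outlive the thread running on it.
    pthread_join( thrd, NULL );
    if ( munmap( stack, stacksize ) == -1 ) { perror( "munmap" ); return EXIT_FAILURE; }
    return EXIT_SUCCESS;
}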
libcfa/src/concurrency/kernel_private.hfa
r6a45bd78 → r3e3f236

 
 void * __create_pthread( pthread_t *, void * (*)(void *), void * );
+void __destroy_pthread( pthread_t pthread, void * stack, void ** retval );
 
libcfa/src/concurrency/preemption.cfa
r6a45bd78 → r3e3f236

 }
 
+//-----------------------------------------------------------------------------
+// Kernel Signal Debug
+void __cfaabi_check_preemption() {
+    bool ready = __preemption_enabled();
+    if(!ready) { abort("Preemption should be ready"); }
+
+    __cfaasm_label(debug, before);
+
+    sigset_t oldset;
+    int ret;
+    ret = pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset);  // workaround trac#208: cast should be unnecessary
+    if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
+
+    ret = sigismember(&oldset, SIGUSR1);
+    if(ret < 0) { abort("ERROR sigismember returned %d", ret); }
+    if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
+
+    ret = sigismember(&oldset, SIGALRM);
+    if(ret < 0) { abort("ERROR sigismember returned %d", ret); }
+    if(ret == 0) { abort("ERROR SIGALRM is enabled"); }
+
+    ret = sigismember(&oldset, SIGTERM);
+    if(ret < 0) { abort("ERROR sigismember returned %d", ret); }
+    if(ret == 1) { abort("ERROR SIGTERM is disabled"); }
+
+    __cfaasm_label(debug, after);
+}
+
+#ifdef __CFA_WITH_VERIFY__
+bool __cfaabi_dbg_in_kernel() {
+    return !__preemption_enabled();
+}
+#endif
+
 #undef __cfaasm_label
+
+//-----------------------------------------------------------------------------
+// Signal handling
 
 // sigprocmask wrapper : unblock a single signal
…
         #define RELOC_SUFFIX ""
     #endif
-    #define __cfaasm_label( label ) static struct asm_region label = \
+    #define __cfaasm_label( label ) struct asm_region label = \
     ({ \
         struct asm_region region; \
…
         #define RELOC_SUFFIX ""
     #endif
-    #define __cfaasm_label( label ) static struct asm_region label = \
+    #define __cfaasm_label( label ) struct asm_region label = \
     ({ \
         struct asm_region region; \
…
     #ifdef __PIC__
         // Note that this works only for gcc
-        #define __cfaasm_label( label ) static struct asm_region label = \
+        #define __cfaasm_label( label ) struct asm_region label = \
         ({ \
             struct asm_region region; \
…
     #error this is not the right thing to do
     /*
-    #define __cfaasm_label( label ) static struct asm_region label = \
+    #define __cfaasm_label( label ) struct asm_region label = \
     ({ \
         struct asm_region region; \
…
     __cfaasm_label( check );
     __cfaasm_label( dsable );
+    __cfaasm_label( debug );
 
     // Check if preemption is safe
…
     if( __cfaasm_in( ip, check  ) ) { ready = false; goto EXIT; };
     if( __cfaasm_in( ip, dsable ) ) { ready = false; goto EXIT; };
+    if( __cfaasm_in( ip, debug  ) ) { ready = false; goto EXIT; };
     if( !__cfaabi_tls.preemption_state.enabled) { ready = false; goto EXIT; };
     if( __cfaabi_tls.preemption_state.in_progress ) { ready = false; goto EXIT; };
…
     // Wait for the preemption thread to finish
 
-    pthread_join( alarm_thread, 0p );
-    free( alarm_stack );
+    __destroy_pthread( alarm_thread, alarm_stack, 0p );
 
     // Preemption is now fully stopped
…
 }
 
-//=============================================================================================
-// Kernel Signal Debug
-//=============================================================================================
-
-void __cfaabi_check_preemption() {
-    bool ready = __preemption_enabled();
-    if(!ready) { abort("Preemption should be ready"); }
-
-    sigset_t oldset;
-    int ret;
-    ret = pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset);  // workaround trac#208: cast should be unnecessary
-    if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
-
-    ret = sigismember(&oldset, SIGUSR1);
-    if(ret < 0) { abort("ERROR sigismember returned %d", ret); }
-    if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
-
-    ret = sigismember(&oldset, SIGALRM);
-    if(ret < 0) { abort("ERROR sigismember returned %d", ret); }
-    if(ret == 0) { abort("ERROR SIGALRM is enabled"); }
-
-    ret = sigismember(&oldset, SIGTERM);
-    if(ret < 0) { abort("ERROR sigismember returned %d", ret); }
-    if(ret == 1) { abort("ERROR SIGTERM is disabled"); }
-}
-
-#ifdef __CFA_WITH_VERIFY__
-bool __cfaabi_dbg_in_kernel() {
-    return !__preemption_enabled();
-}
-#endif
-
 // Local Variables: //
 // mode: c //
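The relocated __cfaabi_check_preemption verifies the current signal-mask state without modifying it, by passing a null new set to pthread_sigmask and probing the result with sigismember. A small C sketch of that read-only query; report_mask and the chosen signals are examples for illustration only:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

// Query the calling thread's signal mask without changing it: with a NULL new
// set, pthread_sigmask only writes the current mask into "current".
static void report_mask( void ) {
    sigset_t current;
    if ( pthread_sigmask( SIG_SETMASK, NULL, &current ) != 0 ) {
        fprintf( stderr, "pthread_sigmask failed\n" );
        return;
    }
    // sigismember returns 1 if the signal is in the blocked mask, 0 if not, -1 on error.
    printf( "SIGUSR1 blocked: %d\n", sigismember( &current, SIGUSR1 ) );
    printf( "SIGALRM blocked: %d\n", sigismember( &current, SIGALRM ) );
    printf( "SIGTERM blocked: %d\n", sigismember( &current, SIGTERM ) );
}

int main( void ) {
    report_mask();
    return 0;
}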
libcfa/src/stdlib.hfa
r6a45bd78 → r3e3f236

 // Created On       : Thu Jan 28 17:12:35 2016
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Thu Nov 12 20:58:48 2020
-// Update Count     : 520
+// Last Modified On : Tue Dec  8 18:27:22 2020
+// Update Count     : 524
 //
…
 static inline forall( dtype T | { void ^?{}( T & ); } )
 void delete( T * ptr ) {
-    if ( ptr ) {                                      // ignore null
+    // special case for 0-sized object => always call destructor
+    if ( ptr || sizeof(ptr) == 0 ) {                  // ignore null but not 0-sized objects
         ^(*ptr){};                                    // run destructor
-        free( ptr );
     } // if
+    free( ptr );
 } // delete
 
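The revised delete relies on free being defined as a no-op for a null pointer, which is why the free call can be hoisted out of the if; the destructor branch is additionally taken for Cforall's zero-sized objects. A rough C analogue of the control flow follows; widget and its stand-in destructor are made up for illustration and are not part of libcfa:

#include <stdio.h>
#include <stdlib.h>

struct widget { int refs; };                    // made-up type for illustration

static void widget_dtor( struct widget * w ) { // stand-in for the CFA destructor ^?{}
    printf( "destroying widget with %d refs\n", w->refs );
}

// C analogue of the revised delete(): run cleanup only when there is an object,
// but call free() unconditionally - free(NULL) is defined to do nothing.
static void widget_delete( struct widget * w ) {
    if ( w ) {                                  // the CFA version also takes this branch for 0-sized objects
        widget_dtor( w );
    }
    free( w );                                  // safe even when w is NULL
}

int main( void ) {
    struct widget * w = malloc( sizeof( *w ) );
    if ( w ) { w->refs = 1; }
    widget_delete( w );
    widget_delete( NULL );                      // no destructor call, free(NULL) is a no-op
    return 0;
}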