Changeset 54f89d5 for libcfa/src
- Timestamp: Dec 4, 2020, 11:39:23 AM
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 3c6480b7
- Parents: ab0257b9 (diff), a32cbac2 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: libcfa/src
- Files: 1 added, 15 edited
libcfa/src/bits/containers.hfa
rab0257b9 → r54f89d5

  #include "bits/align.hfa"
  #include "bits/defs.hfa"
+ #include <stdio.h>
  //-----------------------------------------------------------------------------
  // Array
…
  static inline forall( dtype T | is_node(T) ) {
  	void ?{}( __queue(T) & this ) with( this ) {
- 		head{ 1p };
- 		tail{ &head };
- 		verify(*tail == 1p);
+ 		(this.head){ 1p };
+ 		(this.tail){ &this.head };
+ 		verify(*this.tail == 1p);
  	}

  	void append( __queue(T) & this, T * val ) with( this ) {
- 		verify(tail != 0p);
- 		verify(*tail == 1p);
- 		*tail = val;
- 		tail = &get_next( *val );
- 		*tail = 1p;
+ 		verify(this.tail != 0p);
+ 		verify(*this.tail == 1p);
+ 		*this.tail = val;
+ 		this.tail = &get_next( *val );
+ 		*this.tail = 1p;
  	}

  	T * peek( __queue(T) & this ) {
  		verify(*this.tail == 1p);
- 		T * head = this.head;
- 		if( head != 1p ) {
+ 		T * front = this.head;
+ 		if( front != 1p ) {
  			verify(*this.tail == 1p);
- 			return head;
+ 			return front;
  		}
  		verify(*this.tail == 1p);
…
  	T * pop_head( __queue(T) & this ) {
  		verify(*this.tail == 1p);
- 		T * head = this.head;
- 		if( head != 1p ) {
- 			this.head = get_next( *head );
- 			if( get_next( *head ) == 1p ) {
+ 		T * _head = this.head;
+ 		if( _head != 1p ) {
+ 			this.head = get_next( *_head );
+ 			if( get_next( *_head ) == 1p ) {
  				this.tail = &this.head;
  			}
- 			get_next( *head ) = 0p;
+ 			get_next( *_head ) = 0p;
  			verify(*this.tail == 1p);
- 			verify( get_next(*head) == 0p );
- 			return head;
+ 			verify( get_next(*_head) == 0p );
+ 			return _head;
  		}
  		verify(*this.tail == 1p);
…
  		(*it) = get_next( *val );

- 		if( tail == &get_next( *val ) ) {
- 			tail = it;
+ 		if( this.tail == &get_next( *val ) ) {
+ 			this.tail = it;
  		}

  		get_next( *val ) = 0p;

- 		verify( (head == 1p) == (&head == tail) );
- 		verify( *tail == 1p );
+ 		verify( (this.head == 1p) == (&this.head == this.tail) );
+ 		verify( *this.tail == 1p );
  		return val;
  	}
…
  forall(dtype T )
  static inline [void] ?{}( __dllist(T) & this, * [T * & next, T * & prev] ( T & ) __get ) {
- 	this.head{ 0p };
+ 	(this.head){ 0p };
  	this.__get = __get;
  }
…
  	void push_front( __dllist(T) & this, T & node ) with( this ) {
  		verify(__get);
- 		if ( head ) {
- 			__get( node ).next = head;
- 			__get( node ).prev = __get( *head ).prev;
+ 		if ( this.head ) {
+ 			__get( node ).next = this.head;
+ 			__get( node ).prev = __get( *this.head ).prev;
  			// inserted node must be consistent before it is seen
  			// prevent code movement across barrier
  			asm( "" : : : "memory" );
- 			__get( *head ).prev = &node;
+ 			__get( *this.head ).prev = &node;
  			T & _prev = *__get( node ).prev;
  			__get( _prev ).next = &node;
…
  		// prevent code movement across barrier
  		asm( "" : : : "memory" );
- 		head = &node;
+ 		this.head = &node;
  	}

  	void remove( __dllist(T) & this, T & node ) with( this ) {
  		verify(__get);
- 		if ( &node == head ) {
- 			if ( __get( *head ).next == head ) {
- 				head = 0p;
+ 		if ( &node == this.head ) {
+ 			if ( __get( *this.head ).next == this.head ) {
+ 				this.head = 0p;
  			} else {
- 				head = __get( *head ).next;
+ 				this.head = __get( *this.head ).next;
  			}
  		}
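Note: the containers.hfa hunks above are mechanical renamings: inside with( this ) blocks the members head and tail are now written as this.head and this.tail, and locals that used to reuse the name head are renamed (front, _head). A minimal sketch of the shadowing hazard this avoids, using a hypothetical type:

	struct Box { int head; };

	void poke( Box & this ) with( this ) {
		int head = 42;          // a local named head now hides the member opened by with( this )
		this.head = head;       // explicit qualification still reaches the member unambiguously
	}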
libcfa/src/bits/queue.hfa
rab0257b9 → r54f89d5

  inline {
  	// wrappers to make Collection have T
- 	T * head( Queue(T) & q ) with( q ) {
- 		return (T *)head( (Collection &)q );
+ 	T & head( Queue(T) & q ) with( q ) {
+ 		return *(T *)head( (Collection &)q );
  	} // post: empty() & head() == 0 | !empty() & head() in *q
…
  	} // post: empty()

- 	T * tail( Queue(T) & q ) with( q ) {
- 		return last;
+ 	T & tail( Queue(T) & q ) with( q ) {
+ 		return *last;
  	}
…
  	} // post: n == tail() & succ(n) == 0 | n != tail() & *succ(n) in *q

- 	void addHead( Queue(T) & q, T * n ) with( q ) {
+ 	void addHead( Queue(T) & q, T & n ) with( q ) {
  #ifdef __CFA_DEBUG__
- 		if ( listed( n ) ) abort( "(Queue &)%p.addHead( %p ) : Node is already on another list.", &q, n );
+ 		if ( listed( &n ) ) abort( "(Queue &)%p.addHead( %p ) : Node is already on another list.", &q, &n );
  #endif // __CFA_DEBUG__
  		if ( last ) {
- 			Next( n ) = head( q );
- 			q.root = n;
+ 			Next( &n ) = &head( q );
+ 			q.root = &n;
  		} else {
- 			root = last = n;
- 			Next( n ) = n;			// last node points to itself
+ 			root = last = &n;
+ 			Next( &n ) = &n;		// last node points to itself
  		}
  	}

- 	void addTail( Queue(T) & q, T * n ) with( q ) {
+ 	void addTail( Queue(T) & q, T & n ) with( q ) {
  #ifdef __CFA_DEBUG__
- 		if ( listed( n ) ) abort( "(Queue &)%p.addTail( %p ) : Node is already on another list.", &q, n );
+ 		if ( listed( &n ) ) abort( "(Queue &)%p.addTail( %p ) : Node is already on another list.", &q, &n );
  #endif // __CFA_DEBUG__
- 		if ( last ) Next( last ) = n;
- 		else root = n;
- 		last = n;
- 		Next( n ) = n;				// last node points to itself
+ 		if ( last ) Next( last ) = &n;
+ 		else root = &n;
+ 		last = &n;
+ 		Next( &n ) = &n;			// last node points to itself
  	}

- 	void add( Queue(T) & q, T * n ) with( q ) {
+ 	void add( Queue(T) & q, T & n ) with( q ) {
  		addTail( q, n );
  	}

- 	T * dropHead( Queue(T) & q ) with( q ) {
- 		T * t = head( q );
+ 	T & dropHead( Queue(T) & q ) with( q ) {
+ 		T & t = head( q );
  		if ( root ) {
  			root = Next( root );
- 			if ( head( q ) == t ) {
+ 			if ( &head( q ) == &t ) {
  				root = last = 0p;	// only one element
  			}
- 			Next( t ) = 0p;
+ 			Next( &t ) = 0p;
  		}
  		return t;
  	}

- 	T * drop( Queue(T) & q ) with( q ) {
+ 	T & drop( Queue(T) & q ) with( q ) {
  		return dropHead( q );
  	}

- 	void remove( Queue(T) & q, T * n ) with( q ) {	// O(n)
+ 	void remove( Queue(T) & q, T & n ) with( q ) {	// O(n)
  #ifdef __CFA_DEBUG__
- 		if ( ! listed( (Colable &)(*n) ) ) abort( "(Queue &)%p.remove( %p ) : Node is not on a list.", &q, n );
+ 		if ( ! listed( (Colable &)n ) ) abort( "(Queue &)%p.remove( %p ) : Node is not on a list.", &q, &n );
  #endif // __CFA_DEBUG__
- 		T * prev = 0;
+ 		T * prev = 0p;
  		T * curr = (T *)root;
  		for ( ;; ) {
- 			if ( n == curr ) {		// found => remove
- 				if ( (T *)root == n ) {
+ 			if ( &n == curr ) {		// found => remove
+ 				if ( (T *)root == &n ) {
  					dropHead( q );
- 				} else if ( last == n ) {
+ 				} else if ( last == &n ) {
  					last = prev;
  					Next( last ) = last;
…
  					Next( prev ) = Next( curr );
  				}
- 				Next( n ) = 0p;
+ 				Next( &n ) = 0p;
  				break;
  			}
  #ifdef __CFA_DEBUG__
  			// not found => error
- 			if (curr == last) abort( "(Queue &)%p.remove( %p ) : Node is not in list.", &q, n );
+ 			if (curr == last) abort( "(Queue &)%p.remove( %p ) : Node is not in list.", &q, &n );
  #endif // __CFA_DEBUG__
  			prev = curr;
…
  	} // post: ! listed( n )

- 	T * dropTail( Queue(T) & q ) with( q ) {	// O(n)
- 		T * n = tail( q );
- 		return n ? remove( q, n ), n : 0p;
+ 	T & dropTail( Queue(T) & q ) with( q ) {	// O(n)
+ 		T & n = tail( q );
+ 		return &n ? remove( q, n ), n : *0p;
  	}
…
  			root = from.root;
  		} else {				// "to" list not empty
- 			Next( last ) = head( from );
+ 			Next( last ) = &head( from );
  		}
  		last = from.last;
…
  	// Transfer the "from" list up to node "n" to the end of queue list; the "from" list becomes the list after node "n".
  	// Node "n" must be in the "from" list.
- 	void split( Queue(T) & q, Queue(T) & from, T * n ) with( q ) {
+ 	void split( Queue(T) & q, Queue(T) & from, T & n ) with( q ) {
  #ifdef __CFA_DEBUG__
- 		if ( ! listed( (Colable &)(*n) ) ) abort( "(Queue &)%p.split( %p ) : Node is not on a list.", &q, n );
+ 		if ( ! listed( (Colable &)n ) ) abort( "(Queue &)%p.split( %p ) : Node is not on a list.", &q, &n );
  #endif // __CFA_DEBUG__
  		Queue(T) to;
  		to.root = from.root;			// start of "to" list
- 		to.last = n;				// end of "to" list
- 		from.root = Next( n );			// start of "from" list
- 		if ( n == head( from ) ) {		// last node in list ?
+ 		to.last = &n;				// end of "to" list
+ 		from.root = Next( &n );			// start of "from" list
+ 		if ( &n == &head( from ) ) {		// last node in list ?
  			from.root = from.last = 0p;	// mark "from" list empty
  		} else {
- 			Next( n ) = n;			// fix end of "to" list
+ 			Next( &n ) = &n;		// fix end of "to" list
  		}
  		transfer( q, to );
…
  	// create an iterator active in Queue q
  	void ?{}( QueueIter(T) & qi, Queue(T) & q ) with( qi ) {
- 		curr = head( q );
+ 		curr = &head( q );
  	} // post: curr = {e in q}

- 	void ?{}( QueueIter(T) & qi, T * start ) with( qi ) {
- 		curr = start;
+ 	void ?{}( QueueIter(T) & qi, T & start ) with( qi ) {
+ 		curr = &start;
  	} // post: curr = {e in q}

  	// make existing iterator active in Queue q
  	void over( QueueIter(T) & qi, Queue(T) & q ) with( qi ) {
- 		curr = head( q );
+ 		curr = &head( q );
  	} // post: curr = {e in q}
…

  // Local Variables: //
- // compile-command: "make install" //
+ // compile-command: "cfa queue.cfa" //
  // End: //
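Note: the practical effect of moving this interface from T * to T & is what the queue_example.cfa hunks below show: elements are passed and returned by reference. A minimal sketch of the new call shape, where Fred stands for the element type used by that example:

	Queue(Fred) fred;
	add( fred, *new( 2 ) );		// new returns Fred *, so dereference it for add( Queue(Fred) &, Fred & )
	Fred & f = drop( fred );	// drop now returns Fred &
	delete( &f );			// delete still takes a pointer, so take the address again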
libcfa/src/bits/queue_example.cfa
rab0257b9 → r54f89d5

  	for ( i; 10 ) {
- 		add( fred, new( 2 * i ) );
+ 		add( fred, *new( 2 * i ) );
  	}
…
  	for ( i; 9 ) {
- 		delete( drop( fred ) );
+ 		delete( &drop( fred ) );
  	}
…
  	for ( i; 10 ) {
- 		add( fred, new( 2 * i + 1 ) );
+ 		add( fred, *new( 2 * i + 1 ) );
  	}
  	for ( over( fredIter, fred ); fredIter >> f; ) {
…
  	for ( i; 10 ) {
- 		add( mary, new( 2 * i ) );
+ 		add( mary, *new( 2 * i ) );
  	}
…
  	for ( i; 9 ) {
- 		delete( drop( mary ) );
+ 		delete( &drop( mary ) );
  	}
…
  	for ( i; 10 ) {
- 		add( mary, new( 2 * i + 1 ) );
+ 		add( mary, *new( 2 * i + 1 ) );
  	}
  	for ( over( maryIter, mary ); maryIter >> m; ) {
libcfa/src/bits/sequence.hfa
rab0257b9 → r54f89d5

  } // post: ! listed()

- Seqable * getBack( Seqable & sq ) with( sq ) {
- 	return back;
+ Seqable & getBack( Seqable & sq ) with( sq ) {
+ 	return *back;
  }
…
  inline {
  	// wrappers to make Collection have T
- 	T * head( Sequence(T) & s ) with( s ) {
- 		return (T *)head( (Collection &)s );
+ 	T & head( Sequence(T) & s ) with( s ) {
+ 		return *(T *)head( (Collection &)s );
  	} // post: empty() & head() == 0 | !empty() & head() in *s
…
  	// Return a pointer to the last sequence element, without removing it.
  	T & tail( Sequence(T) & s ) with( s ) {
- 		return root ? (T &)Back( head( s ) ) : *0p;	// needs cast?
+ 		return root ? (T &)*Back( &head( s ) ) : *0p;
  	} // post: empty() & tail() == 0 | !empty() & tail() in *s
…
  		if ( ! listed( n ) ) abort( "(Sequence &)%p.succ( %p ) : Node is not on a list.", &s, n );
  #endif // __CFA_DEBUG__
- 		return Next( n ) == head( s ) ? 0p : Next( n );
- 	}
+ 		return Next( n ) == &head( s ) ? 0p : Next( n );
+ 	} // post: n == tail() & succ(n) == 0 | n != tail() & *succ(n) in *s

  	// Return a pointer to the element before *n, or 0p if there isn't one.
…
  		if ( ! listed( n ) ) abort( "(Sequence &)%p.pred( %p ) : Node is not on a list.", &s, n );
  #endif // __CFA_DEBUG__
- 		return n == head( s ) ? 0p : Back( n );
+ 		return n == &head( s ) ? 0p : Back( n );
  	} // post: n == head() & head(n) == 0 | n != head() & *pred(n) in *s
…
  		if ( listed( &n ) ) abort( "(Sequence &)%p.insertBef( %p, %p ) : Node is already on another list.", &s, n, &bef );
  #endif // __CFA_DEBUG__
- 		if ( &bef == head( s ) ) {		// must change root
+ 		if ( &bef == &head( s ) ) {		// must change root
  			if ( root ) {
- 				Next( &n ) = head( s );
- 				Back( &n ) = Back( head( s ) );
+ 				Next( &n ) = &head( s );
+ 				Back( &n ) = Back( &head( s ) );
  				// inserted node must be consistent before it is seen
  				asm( "" : : : "memory" );	// prevent code movement across barrier
- 				Back( head( s ) ) = &n;
+ 				Back( &head( s ) ) = &n;
  				Next( Back( &n ) ) = &n;
  			} else {
…
  			root = &n;
  		} else {
- 			if ( ! &bef ) &bef = head( s );
+ 			if ( ! &bef ) &bef = &head( s );
  			Next( &n ) = &bef;
  			Back( &n ) = Back( &bef );
…
  		if ( ! &aft ) {				// must change root
  			if ( root ) {
- 				Next( &n ) = head( s );
- 				Back( &n ) = Back( head( s ) );
+ 				Next( &n ) = &head( s );
+ 				Back( &n ) = Back( &head( s ) );
  				// inserted node must be consistent before it is seen
  				asm( "" : : : "memory" );	// prevent code movement across barrier
- 				Back( head( s ) ) = &n;
+ 				Back( &head( s ) ) = &n;
  				Next( Back( &n ) ) = &n;
  			} else {
…
  		if ( ! listed( &n ) ) abort( "(Sequence &)%p.remove( %p ) : Node is not on a list.", &s, &n );
  #endif // __CFA_DEBUG__
- 		if ( &n == head( s ) ) {
- 			if ( Next( head( s ) ) == head( s ) ) root = 0p;
- 			else root = Next( head( s ) );
+ 		if ( &n == &head( s ) ) {
+ 			if ( Next( &head( s ) ) == &head( s ) ) root = 0p;
+ 			else root = Next( &head( s ) );
  		} // if
  		Back( Next( &n ) ) = Back( &n );
…
  	// Remove and return the head element in the sequence.
  	T & dropHead( Sequence(T) & s ) {
- 		T * n = head( s );
- 		return n ? remove( s, *n ), *n : *0p;
+ 		T & n = head( s );
+ 		return &n ? remove( s, n ), n : *0p;
  	}
  	// Remove and return the head element in the sequence.
…
  			root = from.root;
  		} else {				// "to" list not empty
- 			T * toEnd = Back( head( s ) );
- 			T * fromEnd = Back( head( from ) );
+ 			T * toEnd = Back( &head( s ) );
+ 			T * fromEnd = Back( &head( from ) );
  			Back( root ) = fromEnd;
- 			Next( fromEnd ) = head( s );
+ 			Next( fromEnd ) = &head( s );
  			Back( from.root ) = toEnd;
- 			Next( toEnd ) = head( from );
+ 			Next( toEnd ) = &head( from );
  		} // if
  		from.root = 0p;				// mark "from" list empty
…
  	// Transfer the "from" list up to node "n" to the end of s list; the "from" list becomes the sequence after node "n".
  	// Node "n" must be in the "from" list.
- 	void split( Sequence(T) & s, Sequence(T) & from, T * n ) with( s ) {
+ 	void split( Sequence(T) & s, Sequence(T) & from, T & n ) with( s ) {
  #ifdef __CFA_DEBUG__
- 		if ( ! listed( n ) ) abort( "(Sequence &)%p.split( %p ) : Node is not on a list.", &s, n );
+ 		if ( ! listed( &n ) ) abort( "(Sequence &)%p.split( %p ) : Node is not on a list.", &s, &n );
  #endif // __CFA_DEBUG__
  		Sequence(T) to;
  		to.root = from.root;			// start of "to" list
- 		from.root = Next( n );			// start of "from" list
+ 		from.root = Next( &n );			// start of "from" list
  		if ( to.root == from.root ) {		// last node in list ?
  			from.root = 0p;			// mark "from" list empty
  		} else {
- 			Back( head( from ) ) = Back( head( to ) );	// fix "from" list
- 			Next( Back( head( to ) ) ) = head( from );
- 			Next( n ) = head( to );		// fix "to" list
- 			Back( head( to ) ) = n;
+ 			Back( &head( from ) ) = Back( &head( to ) );	// fix "from" list
+ 			Next( Back( &head( to ) ) ) = &head( from );
+ 			Next( &n ) = &head( to );	// fix "to" list
+ 			Back( &head( to ) ) = &n;
  		} // if
  		transfer( s, to );
…
  struct SeqIter {
  	inline ColIter;
+ 	// The Sequence must be passed to pred and succ to check for the end of the Sequence and return 0p. Without
+ 	// passing the sequence, traversing would require its length. Thus the iterator needs a pointer to the sequence
+ 	// to pass to succ/pred. Both stack and queue just encounter 0p since the lists are not circular.
  	Sequence(T) * seq;
  };
…
  		((ColIter &) si){};
  		seq = &s;
- 		curr = head( s );
+ 		curr = &head( s );
  	} // post: elts = null.

+ 	void ?{}( SeqIter(T) & si, Sequence(T) & s, T & start ) with( si ) {
+ 		((ColIter &) si){};
+ 		seq = &s;
+ 		curr = &start;
+ 	} // post: elts = null.
+
  	void over( SeqIter(T) & si, Sequence(T) & s ) with( si ) {
  		seq = &s;
- 		curr = head( s );
+ 		curr = &head( s );
  	} // post: elts = {e in s}.
…
  			&tp = Curr( si );
  			T * n = succ( *seq, Curr( si ) );
- 			curr = n == head( *seq ) ? 0p : n;
+ 			curr = n == &head( *seq ) ? 0p : n;
  		} else &tp = 0p;
  		return &tp != 0p;
…
  struct SeqIterRev {
  	inline ColIter;
+ 	// See above for explanation.
  	Sequence(T) * seq;
  };
…
  		curr = &tail( s );
  	} // post: elts = null.

+ 	void ?{}( SeqIterRev(T) & si, Sequence(T) & s, T & start ) with( si ) {
+ 		((ColIter &) si){};
+ 		seq = &s;
+ 		curr = &start;
+ 	} // post: elts = null.
+
  	void over( SeqIterRev(T) & si, Sequence(T) & s ) with( si ) {
  		seq = &s;
…

  // Local Variables: //
- // compile-command: "make install" //
+ // compile-command: "cfa sequence.hfa" //
  // End: //
libcfa/src/bits/stack.hfa
rab0257b9 → r54f89d5

  inline {
  	// wrappers to make Collection have T
- 	T * head( Stack(T) & s ) with( s ) {
- 		return (T *)head( (Collection &)s );
+ 	T & head( Stack(T) & s ) with( s ) {
+ 		return *(T *)head( (Collection &)s );
  	} // post: empty() & head() == 0 | !empty() & head() in *this
…
  	T & top( Stack(T) & s ) with( s ) {
- 		return *head( s );
+ 		return head( s );
  	}
…
  		if ( listed( (Colable &)(n) ) ) abort( "(Stack &)%p.addHead( %p ) : Node is already on another list.", &s, n );
  #endif // __CFA_DEBUG__
- 		Next( &n ) = head( s ) ? head( s ) : &n;
+ 		Next( &n ) = &head( s ) ? &head( s ) : &n;
  		root = &n;
  	}
…
  	T & drop( Stack(T) & s ) with( s ) {
- 		T & t = *head( s );
+ 		T & t = head( s );
  		if ( root ) {
  			root = (T *)Next(root);
- 			if ( head( s ) == &t ) root = 0p;	// only one element ?
+ 			if ( &head( s ) == &t ) root = 0p;	// only one element ?
  			Next( &t ) = 0p;
  		} // if
…
  	// create an iterator active in Stack s
  	void ?{}( StackIter(T) & si, Stack(T) & s ) with( si ) {
- 		curr = head( s );
+ 		curr = &head( s );
  	} // post: curr = {e in s}
…
  	// make existing iterator active in Stack q
  	void over( StackIter(T) & si, Stack(T) & s ) with( si ) {
- 		curr = head( s );
+ 		curr = &head( s );
  	} // post: curr = {e in s}
libcfa/src/concurrency/alarm.cfa
rab0257b9 → r54f89d5

  	type = Kernel;
  }
- void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period, Alarm_Callback callback ) with( this ) {
- 	this.thrd = thrd;
+ void ?{}( alarm_node_t & this, Alarm_Callback callback, Time alarm, Duration period ) with( this ) {
  	this.alarm = alarm;
  	this.period = period;
libcfa/src/concurrency/alarm.hfa
rab0257b9 → r54f89d5

  	union {
  		$thread * thrd;			// thrd who created event
  		processor * proc;		// proc who created event
+ 		Alarm_Callback callback;	// callback to handle event
  	};
-
- 	Alarm_Callback callback;

  	bool set :1;				// whether or not the alarm has be registered
…
  void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period );
  void ?{}( alarm_node_t & this, processor * proc, Time alarm, Duration period );
- void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period, Alarm_Callback callback );
+ void ?{}( alarm_node_t & this, Alarm_Callback callback, Time alarm, Duration period );
  void ^?{}( alarm_node_t & this );
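Note: after this reordering the callback is the leading argument and the creating $thread/processor is no longer stored for callback-style alarms. A sketch of the new call shape, mirroring how locks.cfa builds its timeout node below; my_handler and when are hypothetical placeholders:

	void my_handler( alarm_node_t & node ) { /* handle the timeout */ }	// hypothetical Alarm_Callback

	Time when;						// expiry time, computed elsewhere
	alarm_node_t node = { my_handler, when, 0`s };		// callback first, then alarm time, then period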
libcfa/src/concurrency/io.cfa
rab0257b9 → r54f89d5

  static inline void process(struct io_uring_cqe & cqe ) {
  	struct io_future_t * future = (struct io_future_t *)(uintptr_t)cqe.user_data;
- 	__cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", future, cqe.res, data->thrd );
+ 	__cfadbg_print_safe( io, "Kernel I/O : Syscall completed : cqe %p, result %d for %p\n", &cqe, cqe.res, future );

  	fulfil( *future, cqe.res );
…
  	__u32 mask = *ring.submit_q.mask;

- 	disable_interrupts();
- 	__u32 off = __tls_rand();
- 	enable_interrupts( __cfaabi_dbg_ctx );
+ 	__u32 off = thread_rand();

  	// Loop around looking for an available spot
…
  	__u32 ready_mask = ring.submit_q.ready_cnt - 1;

- 	disable_interrupts();
- 	__u32 off = __tls_rand();
- 	enable_interrupts( __cfaabi_dbg_ctx );
+ 	__u32 off = thread_rand();

  	__u32 picked;
libcfa/src/concurrency/io/call.cfa.in
rab0257b9 → r54f89d5

  	/* paranoid */ verifyf( cltr->io.ctxs, "default io contexts for cluster %p are missing\\n", cltr);
- 	return &cltr->io.ctxs[ __tls_rand() % cltr->io.cnt ];
+ 	return &cltr->io.ctxs[ thread_rand() % cltr->io.cnt ];
  }
  #endif
libcfa/src/concurrency/kernel.cfa
rab0257b9 → r54f89d5

  	lock( kernel_abort_lock __cfaabi_dbg_ctx2 );

+ 	// disable interrupts, it no longer makes sense to try to interrupt this processor
+ 	disable_interrupts();
+
  	// first task to abort ?
  	if ( kernel_abort_called ) {		// not first task to abort ?
libcfa/src/concurrency/kernel/fwd.hfa
rab0257b9 → r54f89d5

  }

+ extern uint64_t thread_rand();
+
  //-----------------------------------------------------------------------
  // Statics call at the end of each thread to register statistics
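Note: thread_rand() is the interrupt-safe wrapper defined in thread.cfa below; declaring it here lets io.cfa and io/call.cfa.in above replace the disable_interrupts()/__tls_rand()/enable_interrupts() triple with a single call, e.g.:

	__u32 off = thread_rand();				// random starting offset for the ring scan, as in io.cfa
	// &cltr->io.ctxs[ thread_rand() % cltr->io.cnt ]	// random io context, as in io/call.cfa.in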
libcfa/src/concurrency/locks.cfa
rab0257b9 → r54f89d5

  //// info_thread
  ///////////////////////////////////////////////////////////////////

+ forall(dtype L | is_blocking_lock(L)) {
  	void ?{}( info_thread(L) & this, $thread * t ) {
+ 		((Seqable &) this){};
  		this.t = t;
  		this.lock = 0p;
…
  	void ?{}( info_thread(L) & this, $thread * t, uintptr_t info ) {
+ 		((Seqable &) this){};
  		this.t = t;
  		this.info = info;
…
  	}

- 	void ^?{}( info_thread(L) & this ){
- 		// default
- 	}
-
- 	info_thread(L) *& get_next( info_thread(L) & this ) {
- 		return this.next;
- 	}
- }
+ 	void ^?{}( info_thread(L) & this ){ }
+ }
+
  ///////////////////////////////////////////////////////////////////
  //// Blocking Locks
…
  }

- void ^?{}( blocking_lock & this ) {
- 	// default
- }
-
- void ?{}( single_acquisition_lock & this ) {
- 	((blocking_lock &)this){ false, false };
- }
-
- void ^?{}( single_acquisition_lock & this ) {
- 	// default
- }
-
- void ?{}( owner_lock & this ) {
- 	((blocking_lock &)this){ true, true };
- }
-
- void ^?{}( owner_lock & this ) {
- 	// default
- }
-
- void ?{}( multiple_acquisition_lock & this ) {
- 	((blocking_lock &)this){ true, false };
- }
-
- void ^?{}( multiple_acquisition_lock & this ) {
- 	// default
- }
+ void ^?{}( blocking_lock & this ) {}
+ void ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
+ void ^?{}( single_acquisition_lock & this ) {}
+ void ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
+ void ^?{}( owner_lock & this ) {}
+ void ?{}( multiple_acquisition_lock & this ) {((blocking_lock &)this){ true, false };}
+ void ^?{}( multiple_acquisition_lock & this ) {}

  void lock( blocking_lock & this ) with( this ) {
  	lock( lock __cfaabi_dbg_ctx2 );
  	if ( owner == active_thread() && !multi_acquisition) {
- 		fprintf(stderr, "A single acquisition lock holder attempted to reacquire the lock resulting in a deadlock."); // Possibly throw instead
- 		exit(EXIT_FAILURE);
+ 		abort("A single acquisition lock holder attempted to reacquire the lock resulting in a deadlock.");
  	} else if ( owner != 0p && owner != active_thread() ) {
  		append( blocked_threads, active_thread() );
…
  }

+ void unlock_error_check( blocking_lock & this ) with( this ) {
+ 	if ( owner == 0p ){ // no owner implies lock isn't held
+ 		abort( "There was an attempt to release a lock that isn't held" );
+ 	} else if ( strict_owner && owner != active_thread() ) {
+ 		abort( "A thread other than the owner attempted to release an owner lock" );
+ 	}
+ }
+
+ void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
+ 	$thread * t = pop_head( blocked_threads );
+ 	owner = t;
+ 	recursion_count = ( t ? 1 : 0 );
+ 	wait_count--;
+ 	unpark( t );
+ }
+
  void unlock( blocking_lock & this ) with( this ) {
  	lock( lock __cfaabi_dbg_ctx2 );
- 	if ( owner == 0p ){ // no owner implies lock isn't held
- 		fprintf( stderr, "There was an attempt to release a lock that isn't held" );
- 		return;
- 	} else if ( strict_owner && owner != active_thread() ) {
- 		fprintf( stderr, "A thread other than the owner attempted to release an owner lock" );
- 		return;
- 	}
+ 	unlock_error_check( this );
  	recursion_count--;
  	if ( recursion_count == 0 ) {
- 		$thread * thrd = pop_head( blocked_threads );
- 		owner = thrd;
- 		recursion_count = ( thrd ? 1 : 0 );
- 		wait_count--;
- 		unpark( thrd );
+ 		pop_and_set_new_owner( this );
  	}
  	unlock( lock );
…
  	return wait_count;
  }
-

  void set_recursion_count( blocking_lock & this, size_t recursion ) with( this ) {
…
  	owner = t;
  	recursion_count = 1;
- 	#if !defined( __CFA_NO_STATISTICS__ )
- 		//kernelTLS.this_stats = t->curr_cluster->stats;
- 	#endif
  	unpark( t );
  	unlock( lock );
…
  void remove_( blocking_lock & this ) with( this ) {
  	lock( lock __cfaabi_dbg_ctx2 );
- 	if ( owner == 0p ){ // no owner implies lock isn't held
- 		fprintf( stderr, "A lock that is not held was passed to a synchronization lock" );
- 	} else if ( strict_owner && owner != active_thread() ) {
- 		fprintf( stderr, "A thread other than the owner of a lock passed it to a synchronization lock" );
- 	} else {
- 		$thread * thrd = pop_head( blocked_threads );
- 		owner = thrd;
- 		recursion_count = ( thrd ? 1 : 0 );
- 		wait_count--;
- 		unpark( thrd );
- 	}
+ 	unlock_error_check( this );
+ 	pop_and_set_new_owner( this );
  	unlock( lock );
…
  // This is temporary until an inheritance bug is fixed

  [… the previous three-line versions of the eighteen forwarding wrappers below were removed here …]
+ void lock( single_acquisition_lock & this ){ lock( (blocking_lock &)this ); }
+ void unlock( single_acquisition_lock & this ){ unlock( (blocking_lock &)this ); }
+ void add_( single_acquisition_lock & this, struct $thread * t ){ add_( (blocking_lock &)this, t ); }
+ void remove_( single_acquisition_lock & this ){ remove_( (blocking_lock &)this ); }
+ void set_recursion_count( single_acquisition_lock & this, size_t recursion ){ set_recursion_count( (blocking_lock &)this, recursion ); }
+ size_t get_recursion_count( single_acquisition_lock & this ){ return get_recursion_count( (blocking_lock &)this ); }
+
+ void lock( owner_lock & this ){ lock( (blocking_lock &)this ); }
+ void unlock( owner_lock & this ){ unlock( (blocking_lock &)this ); }
+ void add_( owner_lock & this, struct $thread * t ){ add_( (blocking_lock &)this, t ); }
+ void remove_( owner_lock & this ){ remove_( (blocking_lock &)this ); }
+ void set_recursion_count( owner_lock & this, size_t recursion ){ set_recursion_count( (blocking_lock &)this, recursion ); }
+ size_t get_recursion_count( owner_lock & this ){ return get_recursion_count( (blocking_lock &)this ); }
+
+ void lock( multiple_acquisition_lock & this ){ lock( (blocking_lock &)this ); }
+ void unlock( multiple_acquisition_lock & this ){ unlock( (blocking_lock &)this ); }
+ void add_( multiple_acquisition_lock & this, struct $thread * t ){ add_( (blocking_lock &)this, t ); }
+ void remove_( multiple_acquisition_lock & this ){ remove_( (blocking_lock &)this ); }
+ void set_recursion_count( multiple_acquisition_lock & this, size_t recursion ){ set_recursion_count( (blocking_lock &)this, recursion ); }
+ size_t get_recursion_count( multiple_acquisition_lock & this ){ return get_recursion_count( (blocking_lock &)this ); }

  ///////////////////////////////////////////////////////////////////
…
  	// This condition_variable member is called from the kernel, and therefore, cannot block, but it can spin.
  	lock( cond->lock __cfaabi_dbg_ctx2 );
- 	if ( (*i)->listed ) {			// is thread on queue
- 		info_thread(L) * copy = *i;
- 		remove( cond->blocked_threads, i );		//remove this thread O(1)
+
+ 	if ( i->listed ) {			// is thread on queue
+ 		cond->last_thread = i;		// REMOVE THIS AFTER DEBUG
+ 		remove( cond->blocked_threads, *i );		//remove this thread O(1)
  		cond->count--;
- 		if( !copy->lock ) {
- 			#if !defined( __CFA_NO_STATISTICS__ )
- 				//kernelTLS.this_stats = copy->t->curr_cluster->stats;
- 			#endif
- 			unpark( copy->t );
+ 		if( !i->lock ) {
+ 			unpark( i->t );
  		} else {
- 			add_(*copy->lock, copy->t); // call lock's add_
+ 			add_(*i->lock, i->t); // call lock's add_
  		}
  	}
…
  }

- void alarm_node_wrap_cast( alarm_node_t & a ) {
- 	timeout_handler( (alarm_node_wrap(L) &)a );
- }
+ void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }

  void ?{}( condition_variable(L) & this ){
…
  	this.blocked_threads{};
  	this.count = 0;
- }
-
- void ^?{}( condition_variable(L) & this ){
- 	// default
- }
-
- void ?{}( alarm_node_wrap(L) & this, $thread * thrd, Time alarm, Duration period, Alarm_Callback callback ) {
- 	this.alarm_node{ thrd, alarm, period, callback };
- }
-
- void ^?{}( alarm_node_wrap(L) & this ) {
- 	// default
+ 	this.last_thread = 0p; // REMOVE AFTER DEBUG
+ }
+
+ void ^?{}( condition_variable(L) & this ){ }
+
+ void ?{}( alarm_node_wrap(L) & this, Time alarm, Duration period, Alarm_Callback callback ) {
+ 	this.alarm_node{ callback, alarm, period };
+ }
+
+ void ^?{}( alarm_node_wrap(L) & this ) { }
+
+ void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
+ 	if(&popped != 0p) {
+ 		popped.listed = false;
+ 		count--;
+ 		if (popped.lock) {
+ 			add_(*popped.lock, popped.t);
+ 		} else {
+ 			unpark(popped.t);
+ 		}
+ 	}
+ }

  bool notify_one( condition_variable(L) & this ) with( this ) {
  	lock( lock __cfaabi_dbg_ctx2 );
- 	bool ret = !!blocked_threads;
- 	info_thread(L) * popped = pop_head( blocked_threads );
- 	if(popped != 0p) {
- 		popped->listed = false;
- 		count--;
- 		if (popped->lock) {
- 			add_(*popped->lock, popped->t);
- 		} else {
- 			unpark(popped->t);
- 		}
- 	}
+ 	bool ret = !empty(blocked_threads);
+ 	process_popped(this, dropHead( blocked_threads ));
  	unlock( lock );
  	return ret;
…
  bool notify_all( condition_variable(L) & this ) with(this) {
  	lock( lock __cfaabi_dbg_ctx2 );
- 	bool ret = blocked_threads ? true : false;
- 	while( blocked_threads ) {
- 		info_thread(L) * popped = pop_head( blocked_threads );
- 		if(popped != 0p){
- 			popped->listed = false;
- 			count--;
- 			if (popped->lock) {
- 				add_(*popped->lock, popped->t);
- 			} else {
- 				unpark(popped->t);
- 			}
- 		}
+ 	bool ret = !empty(blocked_threads);
+ 	while( !empty(blocked_threads) ) {
+ 		process_popped(this, dropHead( blocked_threads ));
  	}
  	unlock( lock );
…
  uintptr_t front( condition_variable(L) & this ) with(this) {
- 	if(!blocked_threads) return NULL;
- 	return peek(blocked_threads)->info;
- }
-
- bool empty( condition_variable(L) & this ) with(this) {
- 	return blocked_threads ? false : true;
- }
-
- int counter( condition_variable(L) & this ) with(this) {
- 	return count;
- }
-
- // helper for wait()'s' without a timeout
+ 	return empty(blocked_threads) ? NULL : head(blocked_threads).info;
+ }
+
+ bool empty( condition_variable(L) & this ) with(this) { return empty(blocked_threads); }
+
+ int counter( condition_variable(L) & this ) with(this) { return count; }
+
+ size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
+ 	addTail( blocked_threads, *i );
+ 	count++;
+ 	i->listed = true;
+ 	size_t recursion_count = 0;
+ 	if (i->lock) {
+ 		i->t->link.next = 1p;
+ 		recursion_count = get_recursion_count(*i->lock);
+ 		remove_( *i->lock );
+ 	}
+ 	return recursion_count;
+ }
+
+ // helper for wait()'s' with no timeout
  void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
  	lock( lock __cfaabi_dbg_ctx2 );
- 	append( this.blocked_threads, &i );
- 	count++;
- 	i.listed = true;
- 	size_t recursion_count;
- 	if (i.lock) {
- 		recursion_count = get_recursion_count(*i.lock);
- 		remove_( *i.lock );
- 	}
-
+ 	size_t recursion_count = queue_and_get_recursion(this, &i);
  	unlock( lock );
  	park( ); // blocks here
-
  	if (i.lock) set_recursion_count(*i.lock, recursion_count); // resets recursion count here after waking
…
  void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Time t ) with(this) {
  	lock( lock __cfaabi_dbg_ctx2 );
-
- 	info_thread(L) * queue_ptr = &info;
-
- 	alarm_node_wrap(L) node_wrap = { info.t, t, 0`s, alarm_node_wrap_cast };
+ 	size_t recursion_count = queue_and_get_recursion(this, &info);
+ 	alarm_node_wrap(L) node_wrap = { t, 0`s, alarm_node_wrap_cast };
  	node_wrap.cond = &this;
- 	node_wrap.i = &queue_ptr;
-
+ 	node_wrap.i = &info;
  	register_self( &node_wrap.alarm_node );
-
- 	append( blocked_threads, queue_ptr );
- 	info.listed = true;
- 	count++;
-
- 	size_t recursion_count;
- 	if (info.lock) {
- 		recursion_count = get_recursion_count(*info.lock);
- 		remove_( *info.lock );
- 	}
-
  	unlock( lock );
  	park();
  	unregister_self( &node_wrap.alarm_node );
  	if (info.lock) set_recursion_count(*info.lock, recursion_count);
…
  	}
  }
-
- [… a commented-out T1/T2 test driver for the condition variable (lock/wait(c,m,12)/notify_one/front/empty/counter, a timed wait( c, m, 2`s ) on a multiple_acquisition_lock, and a main with two processors) was removed here …]
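Note: the commented-out test harness removed at the end of locks.cfa doubled as the only usage example for the lock-aware condition_variable; a trimmed sketch of that usage (Waiter is a hypothetical thread type):

	multiple_acquisition_lock m;
	condition_variable( multiple_acquisition_lock ) c;

	thread Waiter {};
	void main( Waiter & this ) {
		lock( m );
		if ( empty( c ) ) wait( c, m, 12 );	// block on c with info value 12, releasing m while blocked
		else notify_one( c );			// wake one waiter; it gets m back before its wait returns
		unlock( m );
	}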
libcfa/src/concurrency/locks.hfa
rab0257b9 → r54f89d5

  #include "bits/algorithm.hfa"
  #include "bits/locks.hfa"
+ #include "bits/sequence.hfa"
  #include "bits/containers.hfa"
…
  forall(dtype L | is_blocking_lock(L)) {
  	struct info_thread {
+ 		inline Seqable;
  		struct $thread * t;
  		uintptr_t info;
- 		info_thread(L) * next;
  		L * lock;
  		bool listed;			// true if info_thread is on queue, false otherwise;
…
  	void ?{}( info_thread(L) & this, $thread * t, uintptr_t info );
  	void ^?{}( info_thread(L) & this );
-
- 	info_thread(L) *& get_next( info_thread(L) & this );
  }
…
  ///////////////////////////////////////////////////////////////////

+ // struct lock_thread {
+ //	struct $thread * t;
+ //	lock_thread * next;
+ // };
+
+ // void ?{}( lock_thread & this, struct $thread * thd );
+ // void ^?{}( lock_thread & this );
+
+ // lock_thread *& get_next( lock_thread & );
+
  struct blocking_lock {
  	// Spin lock used for mutual exclusion
…
  	// List of blocked threads
- 	__queue_t( struct $thread ) blocked_threads;
+ 	__queue_t( $thread ) blocked_threads;

  	// Count of current blocked threads
…
  	__spinlock_t lock;

+ 	info_thread(L) * last_thread;
+
  	// List of blocked threads
- 	__queue_t( info_thread(L) ) blocked_threads;
+ 	Sequence( info_thread(L) ) blocked_threads;

  	// Count of current blocked threads
…
  	condition_variable(L) * cond;

- 	info_thread(L) ** i;
+ 	info_thread(L) * i;
  };

- void ?{}( alarm_node_wrap(L) & this, $thread * thrd, Time alarm, Duration period, Alarm_Callback callback );
+ void ?{}( alarm_node_wrap(L) & this, Time alarm, Duration period, Alarm_Callback callback );
  void ^?{}( alarm_node_wrap(L) & this );
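Note: storing the waiters in a Sequence requires the element type to carry the intrusive links itself, which is why info_thread now starts with inline Seqable and drops its hand-written next pointer and get_next. A minimal sketch of that pattern for a hypothetical element type:

	struct Item {
		inline Seqable;		// intrusive links used by Sequence
		uintptr_t payload;
	};

	Sequence( Item ) items;		// an Item can now be linked onto such a sequence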
libcfa/src/concurrency/thread.cfa
rab0257b9 → r54f89d5

  }

+ uint64_t thread_rand() {
+ 	disable_interrupts();
+ 	uint64_t ret = __tls_rand();
+ 	enable_interrupts( __cfaabi_dbg_ctx );
+ 	return ret;
+ }
+
  // Local Variables: //
  // mode: c //
libcfa/src/interpose.cfa
rab0257b9 → r54f89d5

  }

+ static volatile int __abort_stage = 0;
+
  // Cannot forward va_list.
  void __abort( bool signalAbort, const char fmt[], va_list args ) {
- 	void * kernel_data = kernel_abort();	// must be done here to lock down kernel
- 	int len;
-
- 	signal( SIGABRT, SIG_DFL );		// prevent final "real" abort from recursing to handler
-
- 	len = snprintf( abort_text, abort_text_size, "Cforall Runtime error (UNIX pid:%ld) ", (long int)getpid() ); // use UNIX pid (versus getPid)
- 	__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
-
- 	assert( fmt );
- 	len = vsnprintf( abort_text, abort_text_size, fmt, args );
- 	__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
-
- 	if ( fmt[strlen( fmt ) - 1] != '\n' ) {	// add optional newline if missing at the end of the format text
- 		__cfaabi_bits_write( STDERR_FILENO, "\n", 1 );
- 	} // if
- 	kernel_abort_msg( kernel_data, abort_text, abort_text_size );
-
- 	__cfaabi_backtrace( signalAbort ? 4 : 2 );
-
- 	__cabi_libc.abort();			// print stack trace in handler
+ 	int stage = __atomic_add_fetch( &__abort_stage, 1, __ATOMIC_SEQ_CST );
+
+ 	// First stage: stop the cforall kernel and print
+ 	if(stage == 1) {
+ 		// increment stage
+ 		stage = __atomic_add_fetch( &__abort_stage, 1, __ATOMIC_SEQ_CST );
+
+ 		// must be done here to lock down kernel
+ 		void * kernel_data = kernel_abort();
+ 		int len;
+
+ 		signal( SIGABRT, SIG_DFL );	// prevent final "real" abort from recursing to handler
+
+ 		len = snprintf( abort_text, abort_text_size, "Cforall Runtime error (UNIX pid:%ld) ", (long int)getpid() ); // use UNIX pid (versus getPid)
+ 		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
+
+ 		assert( fmt );
+ 		len = vsnprintf( abort_text, abort_text_size, fmt, args );
+ 		__cfaabi_bits_write( STDERR_FILENO, abort_text, len );
+
+ 		// add optional newline if missing at the end of the format text
+ 		if ( fmt[strlen( fmt ) - 1] != '\n' ) {
+ 			__cfaabi_bits_write( STDERR_FILENO, "\n", 1 );
+ 		} // if
+ 		kernel_abort_msg( kernel_data, abort_text, abort_text_size );
+ 	}
+
+ 	// Second stage: print the backtrace
+ 	if(stage == 2) {
+ 		// increment stage
+ 		stage = __atomic_add_fetch( &__abort_stage, 1, __ATOMIC_SEQ_CST );
+
+ 		// print stack trace in handler
+ 		__cfaabi_backtrace( signalAbort ? 4 : 2 );
+ 	}
+
+ 	do {
+ 		// Finally call abort
+ 		__cabi_libc.abort();
+
+ 		// Loop so that we never return
+ 	} while(true);
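Note: the new __abort body is a staged shutdown: an atomic counter records how far a previous (possibly recursive or concurrent) abort already got, so each stage runs at most once and every path ends in a loop around the libc abort. A stripped-down sketch of the pattern, with hypothetical helper names:

	static volatile int stage = 0;

	void fail_fast( void ) {
		int s = __atomic_add_fetch( &stage, 1, __ATOMIC_SEQ_CST );
		if ( s == 1 ) {					// first caller: report the error
			s = __atomic_add_fetch( &stage, 1, __ATOMIC_SEQ_CST );
			write_error_message();			// hypothetical
		}
		if ( s == 2 ) {					// next stage: dump a backtrace
			s = __atomic_add_fetch( &stage, 1, __ATOMIC_SEQ_CST );
			write_backtrace();			// hypothetical
		}
		for ( ;; ) abort();				// never return, even if abort itself is interposed
	}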