Changeset 73a3be5
- Timestamp:
- Dec 11, 2020, 11:26:18 AM (4 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 4af1021
- Parents:
- 98168b9 (diff), e4b6b7d3 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - Files:
-
- 16 edited
Legend:
- Unmodified
- Added
- Removed
-
benchmark/io/http/main.cfa
r98168b9 r73a3be5 75 75 address.sin_port = htons( options.socket.port ); 76 76 77 ret = bind( server_fd, (struct sockaddr *)&address, sizeof(address) ); 78 if(ret < 0) { 79 abort( "bind error: (%d) %s\n", (int)errno, strerror(errno) ); 77 int waited = 0; 78 for() { 79 ret = bind( server_fd, (struct sockaddr *)&address, sizeof(address) ); 80 if(ret < 0) { 81 if(errno == 98) { 82 if(waited == 0) { 83 printf("Waiting for port\n"); 84 } else { 85 printf("\r%d", waited); 86 fflush(stdout); 87 } 88 waited ++; 89 sleep( 1`s ); 90 continue; 91 } 92 abort( "bind error: (%d) %s\n", (int)errno, strerror(errno) ); 93 } 94 break; 80 95 } 81 96 -
libcfa/src/concurrency/coroutine.cfa
r98168b9 r73a3be5 28 28 #include "kernel_private.hfa" 29 29 #include "exception.hfa" 30 #include "math.hfa" 30 31 31 32 #define __CFA_INVOKE_PRIVATE__ … … 87 88 88 89 void __stack_prepare( __stack_info_t * this, size_t create_size ); 90 void __stack_clean ( __stack_info_t * this ); 89 91 90 92 //----------------------------------------------------------------------------- … … 107 109 bool userStack = ((intptr_t)this.storage & 0x1) != 0; 108 110 if ( ! userStack && this.storage ) { 109 __attribute__((may_alias)) intptr_t * istorage = (intptr_t *)&this.storage; 110 *istorage &= (intptr_t)-1; 111 112 void * storage = this.storage->limit; 113 __cfaabi_dbg_debug_do( 114 storage = (char*)(storage) - __page_size; 115 if ( mprotect( storage, __page_size, PROT_READ | PROT_WRITE ) == -1 ) { 116 abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) ); 117 } 118 ); 119 __cfaabi_dbg_print_safe("Kernel : Deleting stack %p\n", storage); 120 free( storage ); 111 __stack_clean( &this ); 112 // __attribute__((may_alias)) intptr_t * istorage = (intptr_t *)&this.storage; 113 // *istorage &= (intptr_t)-1; 114 115 // void * storage = this.storage->limit; 116 // __cfaabi_dbg_debug_do( 117 // storage = (char*)(storage) - __page_size; 118 // if ( mprotect( storage, __page_size, PROT_READ | PROT_WRITE ) == -1 ) { 119 // abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) ); 120 // } 121 // ); 122 // __cfaabi_dbg_print_safe("Kernel : Deleting stack %p\n", storage); 123 // free( storage ); 121 124 } 122 125 } … … 167 170 assert(__page_size != 0l); 168 171 size_t size = libCeiling( storageSize, 16 ) + stack_data_size; 172 size = ceiling(size, __page_size); 169 173 170 174 // If we are running debug, we also need to allocate a guardpage to catch stack overflows. 
171 175 void * storage; 172 __cfaabi_dbg_debug_do( 173 storage = memalign( __page_size, size + __page_size ); 174 ); 175 __cfaabi_dbg_no_debug_do( 176 storage = (void*)malloc(size); 177 ); 178 179 __cfaabi_dbg_print_safe("Kernel : Created stack %p of size %zu\n", storage, size); 180 __cfaabi_dbg_debug_do( 181 if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) { 182 abort( "__stack_alloc : internal error, mprotect failure, error(%d) %s.", (int)errno, strerror( (int)errno ) ); 183 } 184 storage = (void *)(((intptr_t)storage) + __page_size); 185 ); 176 // __cfaabi_dbg_debug_do( 177 // storage = memalign( __page_size, size + __page_size ); 178 // ); 179 // __cfaabi_dbg_no_debug_do( 180 // storage = (void*)malloc(size); 181 // ); 182 183 // __cfaabi_dbg_print_safe("Kernel : Created stack %p of size %zu\n", storage, size); 184 // __cfaabi_dbg_debug_do( 185 // if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) { 186 // abort( "__stack_alloc : internal error, mprotect failure, error(%d) %s.", (int)errno, strerror( (int)errno ) ); 187 // } 188 // storage = (void *)(((intptr_t)storage) + __page_size); 189 // ); 190 storage = mmap(0p, size + __page_size, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); 191 if(storage == ((void*)-1)) { 192 abort( "coroutine stack creation : internal error, mmap failure, error(%d) %s.", errno, strerror( errno ) ); 193 } 194 if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) { 195 abort( "coroutine stack creation : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) ); 196 } // if 197 storage = (void *)(((intptr_t)storage) + __page_size); 186 198 187 199 verify( ((intptr_t)storage & (libAlign() - 1)) == 0ul ); 188 200 return [storage, size]; 201 } 202 203 void __stack_clean ( __stack_info_t * this ) { 204 size_t size = ((intptr_t)this->storage->base) - ((intptr_t)this->storage->limit) + sizeof(__stack_t); 205 void * storage = this->storage->limit; 206 207 storage = (void 
*)(((intptr_t)storage) - __page_size); 208 if(munmap(storage, size + __page_size) == -1) { 209 abort( "coroutine stack destruction : internal error, munmap failure, error(%d) %s.", errno, strerror( errno ) ); 210 } 189 211 } 190 212 … … 210 232 assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %zd bytes for a stack.", size, MinStackSize ); 211 233 212 this->storage = (__stack_t *)((intptr_t)storage + size );234 this->storage = (__stack_t *)((intptr_t)storage + size - sizeof(__stack_t)); 213 235 this->storage->limit = storage; 214 this->storage->base = (void*)((intptr_t)storage + size );236 this->storage->base = (void*)((intptr_t)storage + size - sizeof(__stack_t)); 215 237 this->storage->exception_context.top_resume = 0p; 216 238 this->storage->exception_context.current_exception = 0p; -
libcfa/src/concurrency/coroutine.hfa
r98168b9 r73a3be5 102 102 } 103 103 104 extern void __stack_prepare ( __stack_info_t * this, size_t size /* ignored if storage already allocated */); 104 extern void __stack_prepare( __stack_info_t * this, size_t size /* ignored if storage already allocated */); 105 extern void __stack_clean ( __stack_info_t * this ); 106 105 107 106 108 // Suspend implementation inlined for performance -
libcfa/src/concurrency/io/setup.cfa
r98168b9 r73a3be5 17 17 #define _GNU_SOURCE /* See feature_test_macros(7) */ 18 18 19 #if defined(__CFA_DEBUG__) 20 // #define __CFA_DEBUG_PRINT_IO__ 21 // #define __CFA_DEBUG_PRINT_IO_CORE__ 22 #endif 23 19 24 #include "io/types.hfa" 20 25 #include "kernel.hfa" … … 111 116 112 117 void __kernel_io_startup(void) { 113 __cfa abi_dbg_print_safe("Kernel : Creating EPOLL instance\n" );118 __cfadbg_print_safe(io_core, "Kernel : Creating EPOLL instance\n" ); 114 119 115 120 iopoll.epollfd = epoll_create1(0); … … 118 123 } 119 124 120 __cfa abi_dbg_print_safe("Kernel : Starting io poller thread\n" );125 __cfadbg_print_safe(io_core, "Kernel : Starting io poller thread\n" ); 121 126 122 127 iopoll.run = true; … … 132 137 // Wait for the io poller thread to finish 133 138 134 pthread_join( iopoll.thrd, 0p ); 135 free( iopoll.stack ); 139 __destroy_pthread( iopoll.thrd, iopoll.stack, 0p ); 136 140 137 141 int ret = close(iopoll.epollfd); … … 142 146 // Io polling is now fully stopped 143 147 144 __cfa abi_dbg_print_safe("Kernel : IO poller stopped\n" );148 __cfadbg_print_safe(io_core, "Kernel : IO poller stopped\n" ); 145 149 } 146 150 … … 150 154 id.id = doregister(&id); 151 155 __cfaabi_tls.this_proc_id = &id; 152 __cfa abi_dbg_print_safe("Kernel : IO poller thread starting\n" );156 __cfadbg_print_safe(io_core, "Kernel : IO poller thread starting\n" ); 153 157 154 158 // Block signals to control when they arrive … … 185 189 } 186 190 187 __cfa abi_dbg_print_safe("Kernel : IO poller thread stopping\n" );191 __cfadbg_print_safe(io_core, "Kernel : IO poller thread stopping\n" ); 188 192 unregister(&id); 189 193 return 0p; -
libcfa/src/concurrency/kernel/startup.cfa
r98168b9 r73a3be5 29 29 #include "kernel_private.hfa" 30 30 #include "startup.hfa" // STARTUP_PRIORITY_XXX 31 #include "math.hfa" 31 32 32 33 //----------------------------------------------------------------------------- … … 539 540 } 540 541 542 extern size_t __page_size; 541 543 void ^?{}(processor & this) with( this ){ 542 544 if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) { … … 550 552 } 551 553 552 int err = pthread_join( kernel_thread, 0p ); 553 if( err != 0 ) abort("KERNEL ERROR: joining processor %p caused error %s\n", &this, strerror(err)); 554 555 free( this.stack ); 554 __destroy_pthread( kernel_thread, this.stack, 0p ); 556 555 557 556 disable_interrupts(); … … 678 677 679 678 void * stack; 680 __cfaabi_dbg_debug_do( 681 stack = memalign( __page_size, stacksize + __page_size ); 682 // pthread has no mechanism to create the guard page in user supplied stack. 683 if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) { 684 abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) ); 685 } // if 686 ); 687 __cfaabi_dbg_no_debug_do( 688 stack = malloc( stacksize ); 689 ); 679 #warning due to the thunk problem, stack creation uses mmap, revert to malloc once this goes away 680 // __cfaabi_dbg_debug_do( 681 // stack = memalign( __page_size, stacksize + __page_size ); 682 // // pthread has no mechanism to create the guard page in user supplied stack. 
683 // if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) { 684 // abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) ); 685 // } // if 686 // ); 687 // __cfaabi_dbg_no_debug_do( 688 // stack = malloc( stacksize ); 689 // ); 690 stacksize = ceiling( stacksize, __page_size ) + __page_size; 691 stack = mmap(0p, stacksize, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); 692 if(stack == ((void*)-1)) { 693 abort( "pthread stack creation : internal error, mmap failure, error(%d) %s.", errno, strerror( errno ) ); 694 } 695 if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) { 696 abort( "pthread stack creation : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) ); 697 } // if 690 698 691 699 check( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" ); … … 694 702 return stack; 695 703 } 704 705 void __destroy_pthread( pthread_t pthread, void * stack, void ** retval ) { 706 int err = pthread_join( pthread, retval ); 707 if( err != 0 ) abort("KERNEL ERROR: joining pthread %p caused error %s\n", (void*)pthread, strerror(err)); 708 709 pthread_attr_t attr; 710 711 check( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute 712 713 size_t stacksize; 714 // default stack size, normally defined by shell limit 715 check( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" ); 716 assert( stacksize >= PTHREAD_STACK_MIN ); 717 stacksize += __page_size; 718 719 if(munmap(stack, stacksize) == -1) { 720 abort( "pthread stack destruction : internal error, munmap failure, error(%d) %s.", errno, strerror( errno ) ); 721 } 722 } 723 696 724 697 725 #if defined(__CFA_WITH_VERIFY__) -
libcfa/src/concurrency/kernel_private.hfa
r98168b9 r73a3be5 49 49 50 50 void * __create_pthread( pthread_t *, void * (*)(void *), void * ); 51 void __destroy_pthread( pthread_t pthread, void * stack, void ** retval ); 51 52 52 53 -
libcfa/src/concurrency/preemption.cfa
r98168b9 r73a3be5 575 575 // Wait for the preemption thread to finish 576 576 577 pthread_join( alarm_thread, 0p ); 578 free( alarm_stack ); 577 __destroy_pthread( alarm_thread, alarm_stack, 0p ); 579 578 580 579 // Preemption is now fully stopped -
libcfa/src/heap.cfa
r98168b9 r73a3be5 10 10 // Created On : Tue Dec 19 21:58:35 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Sep 7 22:17:46202013 // Update Count : 9 5712 // Last Modified On : Fri Dec 11 07:36:34 2020 13 // Update Count : 970 14 14 // 15 15 … … 464 464 } // headers 465 465 466 #ifdef __CFA_DEBUG__ 467 #if __SIZEOF_POINTER__ == 4 468 #define MASK 0xdeadbeef 469 #else 470 #define MASK 0xdeadbeefdeadbeef 471 #endif 472 #define STRIDE size_t 473 474 static void * Memset( void * addr, STRIDE size ) { // debug only 475 if ( size % sizeof(STRIDE) != 0 ) abort( "Memset() : internal error, size %zd not multiple of %zd.", size, sizeof(STRIDE) ); 476 if ( (STRIDE)addr % sizeof(STRIDE) != 0 ) abort( "Memset() : internal error, addr %p not multiple of %zd.", addr, sizeof(STRIDE) ); 477 478 STRIDE * end = (STRIDE *)addr + size / sizeof(STRIDE); 479 for ( STRIDE * p = (STRIDE *)addr; p < end; p += 1 ) *p = MASK; 480 return addr; 481 } // Memset 482 #endif // __CFA_DEBUG__ 483 466 484 #define NO_MEMORY_MSG "insufficient heap memory available for allocating %zd new bytes." 467 485 … … 483 501 #ifdef __CFA_DEBUG__ 484 502 // Set new memory to garbage so subsequent uninitialized usages might fail. 485 memset( (char *)heapEnd + heapRemaining, '\377', increase ); 503 //memset( (char *)heapEnd + heapRemaining, '\377', increase ); 504 Memset( (char *)heapEnd + heapRemaining, increase ); 486 505 #endif // __CFA_DEBUG__ 487 506 rem = heapRemaining + increase - size; … … 557 576 #ifdef __CFA_DEBUG__ 558 577 // Set new memory to garbage so subsequent uninitialized usages might fail. 559 memset( block, '\377', tsize ); 578 //memset( block, '\377', tsize ); 579 Memset( block, tsize ); 560 580 #endif // __CFA_DEBUG__ 561 581 block->header.kind.real.blockSize = tsize; // storage size for munmap … … 606 626 #ifdef __CFA_DEBUG__ 607 627 // Set free memory to garbage so subsequent usages might fail. 
608 memset( ((HeapManager.Storage *)header)->data, '\377', freeElem->blockSize - sizeof( HeapManager.Storage ) ); 628 //memset( ((HeapManager.Storage *)header)->data, '\377', freeElem->blockSize - sizeof( HeapManager.Storage ) ); 629 Memset( ((HeapManager.Storage *)header)->data, freeElem->blockSize - sizeof( HeapManager.Storage ) ); 609 630 #endif // __CFA_DEBUG__ 610 631 … … 935 956 header->kind.real.size = size; // reset allocation size 936 957 if ( unlikely( ozfill ) && size > osize ) { // previous request zero fill and larger ? 937 memset( (char *)oaddr + osize, (int)'\0', size - osize ); // initialize added storage958 memset( (char *)oaddr + osize, '\0', size - osize ); // initialize added storage 938 959 } // if 939 960 return oaddr; … … 960 981 header->kind.real.blockSize |= 2; // mark new request as zero filled 961 982 if ( size > osize ) { // previous request larger ? 962 memset( (char *)naddr + osize, (int)'\0', size - osize ); // initialize added storage983 memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage 963 984 } // if 964 985 } // if … … 1327 1348 header->kind.real.blockSize |= 2; // mark new request as zero filled 1328 1349 if ( size > osize ) { // previous request larger ? 1329 memset( (char *)naddr + osize, (int)'\0', size - osize ); // initialize added storage1350 memset( (char *)naddr + osize, '\0', size - osize ); // initialize added storage 1330 1351 } // if 1331 1352 } // if -
src/AST/Convert.cpp
r98168b9 r73a3be5 233 233 const ast::Decl * namedTypePostamble( NamedTypeDecl * decl, const ast::NamedTypeDecl * node ) { 234 234 // base comes from constructor 235 decl->parameters = get<TypeDecl>().acceptL( node->params );236 235 decl->assertions = get<DeclarationWithType>().acceptL( node->assertions ); 237 236 declPostamble( decl, node ); … … 1704 1703 cache.emplace( old, decl ); 1705 1704 decl->assertions = GET_ACCEPT_V(assertions, DeclWithType); 1706 decl->params = GET_ACCEPT_V(parameters, TypeDecl);1707 1705 decl->extension = old->extension; 1708 1706 decl->uniqueId = old->uniqueId; … … 1720 1718 ); 1721 1719 decl->assertions = GET_ACCEPT_V(assertions, DeclWithType); 1722 decl->params = GET_ACCEPT_V(parameters, TypeDecl);1723 1720 decl->extension = old->extension; 1724 1721 decl->uniqueId = old->uniqueId; -
src/AST/Decl.hpp
r98168b9 r73a3be5 154 154 public: 155 155 ptr<Type> base; 156 std::vector<ptr<TypeDecl>> params;157 156 std::vector<ptr<DeclWithType>> assertions; 158 157 … … 160 159 const CodeLocation & loc, const std::string & name, Storage::Classes storage, 161 160 const Type * b, Linkage::Spec spec = Linkage::Cforall ) 162 : Decl( loc, name, storage, spec ), base( b ), params(),assertions() {}161 : Decl( loc, name, storage, spec ), base( b ), assertions() {} 163 162 164 163 /// Produces a name for the kind of alias -
src/AST/Pass.impl.hpp
r98168b9 r73a3be5 609 609 VISIT({ 610 610 guard_symtab guard { *this }; 611 maybe_accept( node, &TypeDecl::params );612 611 maybe_accept( node, &TypeDecl::base ); 613 612 }) … … 638 637 VISIT({ 639 638 guard_symtab guard { *this }; 640 maybe_accept( node, &TypedefDecl::params );641 639 maybe_accept( node, &TypedefDecl::base ); 642 640 }) -
src/AST/Print.cpp
r98168b9 r73a3be5 221 221 ++indent; 222 222 node->base->accept( *this ); 223 --indent;224 }225 226 if ( ! node->params.empty() ) {227 os << endl << indent << "... with parameters" << endl;228 ++indent;229 printAll( node->params );230 223 --indent; 231 224 } -
src/Common/PassVisitor.impl.h
r98168b9 r73a3be5 835 835 { 836 836 auto guard = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } ); 837 maybeAccept_impl( node->parameters, *this );838 837 maybeAccept_impl( node->base , *this ); 839 838 } … … 858 857 { 859 858 auto guard = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } ); 860 maybeAccept_impl( node->parameters, *this );861 859 maybeAccept_impl( node->base , *this ); 862 860 } … … 880 878 { 881 879 auto guard = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } ); 882 maybeMutate_impl( node->parameters, *this );883 880 maybeMutate_impl( node->base , *this ); 884 881 } … … 904 901 { 905 902 auto guard = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } ); 906 maybeAccept_impl( node->parameters, *this );907 903 maybeAccept_impl( node->base , *this ); 908 904 } … … 921 917 { 922 918 auto guard = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } ); 923 maybeAccept_impl( node->parameters, *this );924 919 maybeAccept_impl( node->base , *this ); 925 920 } … … 938 933 { 939 934 auto guard = makeFuncGuard( [this]() { indexerScopeEnter(); }, [this]() { indexerScopeLeave(); } ); 940 maybeMutate_impl( node->parameters, *this );941 935 maybeMutate_impl( node->base , *this ); 942 936 } -
src/Parser/TypeData.cc
r98168b9 r73a3be5 900 900 ret = new TypeDecl( name, scs, typebuild( td->base ), TypeDecl::Dtype, true ); 901 901 } // if 902 buildList( td->symbolic.params, ret->get_parameters() );903 902 buildList( td->symbolic.assertions, ret->get_assertions() ); 904 903 ret->base->attributes.splice( ret->base->attributes.end(), attributes ); -
src/SynTree/Declaration.h
r98168b9 r73a3be5 181 181 public: 182 182 Type * base; 183 std::list< TypeDecl * > parameters;184 183 std::list< DeclarationWithType * > assertions; 185 184 … … 190 189 Type * get_base() const { return base; } 191 190 void set_base( Type * newValue ) { base = newValue; } 192 std::list< TypeDecl* > & get_parameters() { return parameters; }193 191 std::list< DeclarationWithType * >& get_assertions() { return assertions; } 194 192 -
src/SynTree/NamedTypeDecl.cc
r98168b9 r73a3be5 29 29 NamedTypeDecl::NamedTypeDecl( const NamedTypeDecl &other ) 30 30 : Parent( other ), base( maybeClone( other.base ) ) { 31 cloneAll( other.parameters, parameters );32 31 cloneAll( other.assertions, assertions ); 33 32 } … … 35 34 NamedTypeDecl::~NamedTypeDecl() { 36 35 delete base; 37 deleteAll( parameters );38 36 deleteAll( assertions ); 39 37 } … … 56 54 base->print( os, indent+1 ); 57 55 } // if 58 if ( ! parameters.empty() ) {59 os << endl << indent << "... with parameters" << endl;60 printAll( parameters, os, indent+1 );61 } // if62 56 if ( ! assertions.empty() ) { 63 57 os << endl << indent << "... with assertions" << endl; … … 76 70 base->print( os, indent+1 ); 77 71 } // if 78 if ( ! parameters.empty() ) {79 os << endl << indent << "... with parameters" << endl;80 printAll( parameters, os, indent+1 );81 } // if82 72 } 83 73
Note: See TracChangeset
for help on using the changeset viewer.