- Timestamp: May 15, 2018, 4:17:15 PM (7 years ago)
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
- Children: 2e5fa345
- Parents: 7d0a3ba (diff), a61fa0bb (diff)
- Location: src
- Files: 2 deleted, 27 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
src/CodeGen/CodeGenerator.cc (r7d0a3ba → r358cba0)

 	// Created On       : Mon May 18 07:44:20 2015
 	// Last Modified By : Peter A. Buhr
-	// Last Modified On : Sun Sep  3 20:42:52 2017
-	// Update Count     : 490
+	// Last Modified On : Sat May  5 09:08:32 2018
+	// Update Count     : 494
 	//
 	#include "CodeGenerator.h"
…
 	void CodeGenerator::postvisit( Constant * constant ) {
-		output << constant->get_value()
+		output << constant->get_value();
 	}
…
 	output << "(";
 	if ( castExpr->get_result()->isVoid() ) {
-		output << "(void)"
+		output << "(void)";
 	} else {
 		// at least one result type of cast.
…
 		} // for
 	} // if
-	output << " );"
+	output << " );";
 }
…
 	output << "( ";
 	if ( asmStmt->get_instruction() ) asmStmt->get_instruction()->accept( *visitor );
-	output << " )"
+	output << " )";
 }

 void CodeGenerator::postvisit( DirectiveStmt * dirStmt ) {
-	output << dirStmt->directive;
+	output << endl << dirStmt->directive;			// endl prevents spaces before directive
 }
…
 void CodeGenerator::postvisit( SwitchStmt * switchStmt ) {
-	output << "switch ( "
+	output << "switch ( ";
 	switchStmt->get_condition()->accept( *visitor );
 	output << " ) ";
…
 	++indent;
 	for ( std::list<Statement *>::iterator i = sts.begin(); i != sts.end(); i++) {
-		output << indent << printLabels( (*i)->get_labels() )
+		output << indent << printLabels( (*i)->get_labels() ) ;
 		(*i)->accept( *visitor );
 		output << endl;
…
 void CodeGenerator::postvisit( WhileStmt * whileStmt ) {
 	if ( whileStmt->get_isDoWhile() ) {
-		output << "do"
-	} else {
-		output << "while ("
+		output << "do";
+	} else {
+		output << "while (";
 		whileStmt->get_condition()->accept( *visitor );
 		output << ")";
…
 	if ( whileStmt->get_isDoWhile() ) {
-		output << " while ("
+		output << " while (";
 		whileStmt->get_condition()->accept( *visitor );
 		output << ");";
src/CodeGen/FixNames.cc (r7d0a3ba → r358cba0)

 		auto && name = SymTab::Mangler::mangle( mainDecl.get() );
 		// std::cerr << name << std::endl;
-		return name;
+		return std::move(name);
 	}
 	std::string mangle_main_args() {
…
 		auto&& name = SymTab::Mangler::mangle( mainDecl.get() );
 		// std::cerr << name << std::endl;
-		return name;
+		return std::move(name);
 	}
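For context on why `std::move` appears here (this sketch is not part of the changeset; `make_name` is a hypothetical stand-in for `SymTab::Mangler::mangle`): a local declared `auto && name = f();` has reference type, so a plain `return name;` copies the underlying string, while `std::move(name)` lets the return move it instead.

#include <string>
#include <utility>

// hypothetical stand-in for SymTab::Mangler::mangle
static std::string make_name() { return std::string( "_X4mainFi___1" ); }

std::string mangled() {
	auto && name = make_name();     // binds an rvalue reference to the temporary
	// "name" is an lvalue of reference type; returning it directly would copy,
	// std::move casts it back to an rvalue so the string buffer is moved out
	return std::move( name );
}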
src/Common/Heap.cc (r7d0a3ba → r358cba0)

 	// Heap.cc --
 	//
-	// Author           : Peter A. Buhr
-	// Created On       :
+	// Author           : Thierry Delisle
+	// Created On       : Thu May  3 16:16:10 2018
 	// Last Modified By : Peter A. Buhr
-	// Last Modified On : Thu May  3 16:16:10 2018
-	// Update Count     : 2
+	// Last Modified On : Fri May  4 17:27:31 2018
+	// Update Count     : 28
 	//

-namespace HeapStats {
-	void newPass( const char * const name ) {}
-	void printStats() {}
-}
+#include <cassert>
+#include <cmath>
+#include <cstddef>
+#include <cstring>
+#include <iomanip>
+#include <iostream>
+
+//#define WITH_HEAP_STATISTICS
+
+namespace HeapStats {
+#if !defined( WITH_HEAP_STATISTICS )
+	void newPass( const char * const ) {}
+
+	void printStats() {}
+#else
+	struct StatBlock {
+		const char * name = nullptr;
+		size_t mallocs = 0;
+		size_t frees   = 0;
+	};
+
+	StatBlock passes[100] = {{ "Pre-Parse", 0, 0 }};
+	const size_t passes_size = sizeof(passes) / sizeof(passes[0]);
+	size_t passes_cnt = 1;
+
+	void newPass( const char * const name ) {
+		passes[passes_cnt].name = name;
+		passes[passes_cnt].mallocs = 0;
+		passes[passes_cnt].frees = 0;
+		passes_cnt++;
+
+		assertf(passes_cnt < passes_size, "Too many passes for HeapStats, increase the size of the array in Heap.h");
+	}
+
+	void print(size_t value, size_t total) {
+		std::cerr << std::setw(12) << value;
+		std::cerr << "(" << std::setw(3);
+		std::cerr << (value == 0 ? 0 : value * 100 / total);
+		std::cerr << "%) | ";
+	}
+
+	void print(const StatBlock& stat, size_t nc, size_t total_mallocs, size_t total_frees) {
+		std::cerr << std::setw(nc) << stat.name;
+		std::cerr << " | ";
+
+		print(stat.mallocs, total_mallocs);
+		print(stat.frees  , total_frees  );
+		std::cerr << "\n";
+	}
+
+	void print(char c, size_t nc) {
+		for(size_t i = 0; i < nc; i++) {
+			std::cerr << c;
+		}
+		std::cerr << '\n';
+	}
+
+	void printStats() {
+		size_t nc = 0;
+		size_t total_mallocs = 0;
+		size_t total_frees   = 0;
+		for(size_t i = 0; i < passes_cnt; i++) {
+			nc = std::max(nc, std::strlen(passes[i].name));
+			total_mallocs += passes[i].mallocs;
+			total_frees   += passes[i].frees;
+		}
+		size_t nct = nc + 44;
+
+		const char * const title = "Heap Usage Statistic";
+		print('=', nct);
+		for(size_t i = 0; i < (nct - std::strlen(title)) / 2; i++) std::cerr << ' ';
+		std::cerr << title << std::endl;
+		print('-', nct);
+		std::cerr << std::setw(nc) << "Pass";
+		std::cerr << " | Malloc Count | Free Count |" << std::endl;
+
+		print('-', nct);
+		for(size_t i = 0; i < passes_cnt; i++) {
+			print(passes[i], nc, total_mallocs, total_frees);
+		}
+		print('-', nct);
+		print({"Sum", total_mallocs, total_frees}, nc, total_mallocs, total_frees);
+	}
+
+	#include <stdarg.h>
+	#include <stddef.h>
+	#include <stdio.h>
+	#include <string.h>
+	#include <unistd.h>
+	#include <signal.h>
+	extern "C" {
+		#include <dlfcn.h>
+		#include <execinfo.h>
+	}
+
+	//=============================================================================================
+	// Interposing helpers
+	//=============================================================================================
+
+	typedef void (* generic_fptr_t)(void);
+	generic_fptr_t interpose_symbol( const char * symbol, const char * version ) {
+		const char * error;
+
+		static void * library;
+		if ( ! library ) {
+#if defined( RTLD_NEXT )
+			library = RTLD_NEXT;
+#else
+			// missing RTLD_NEXT => must hard-code library name, assuming libstdc++
+			library = dlopen( "libc.so.6", RTLD_LAZY );
+			error = dlerror();
+			if ( error ) {
+				std::cerr << "interpose_symbol : failed to open libc, " << error << std::endl;
+				abort();
+			}
+#endif // RTLD_NEXT
+		} // if
+
+		generic_fptr_t fptr;
+
+#if defined( _GNU_SOURCE )
+		if ( version ) {
+			fptr = (generic_fptr_t)dlvsym( library, symbol, version );
+		} else {
+			fptr = (generic_fptr_t)dlsym( library, symbol );
+		}
+#else
+		fptr = (generic_fptr_t)dlsym( library, symbol );
+#endif // _GNU_SOURCE
+
+		error = dlerror();
+		if ( error ) {
+			std::cerr << "interpose_symbol : internal error, " << error << std::endl;
+			abort();
+		}
+
+		return fptr;
+	}
+
+	extern "C" {
+		void * malloc( size_t size ) __attribute__((malloc));
+		void * malloc( size_t size ) {
+			static auto __malloc = reinterpret_cast<void * (*)(size_t)>(interpose_symbol( "malloc", nullptr ));
+			if( passes_cnt > 0 ) passes[passes_cnt - 1].mallocs++;
+			return __malloc( size );
+		}
+
+		void free( void * ptr ) {
+			static auto __free = reinterpret_cast<void (*)(void *)>(interpose_symbol( "free", nullptr ));
+			if( passes_cnt > 0 ) passes[passes_cnt - 1].frees++;
+			return __free( ptr );
+		}
+
+		void * calloc( size_t nelem, size_t size ) {
+			static auto __calloc = reinterpret_cast<void * (*)(size_t, size_t)>(interpose_symbol( "calloc", nullptr ));
+			if( passes_cnt > 0 ) passes[passes_cnt - 1].mallocs++;
+			return __calloc( nelem, size );
+		}
+
+		void * realloc( void * ptr, size_t size ) {
+			static auto __realloc = reinterpret_cast<void * (*)(void *, size_t)>(interpose_symbol( "realloc", nullptr ));
+			void * s = __realloc( ptr, size );
+			if ( s != ptr && passes_cnt > 0 ) {		// did realloc get new storage ?
+				passes[passes_cnt - 1].mallocs++;
+				passes[passes_cnt - 1].frees++;
+			} // if
+			return s;
+		}
+	}
+#endif
+}
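A minimal sketch of how these hooks are meant to be driven (the pass names here are invented for illustration, and it assumes Heap.cc is built with WITH_HEAP_STATISTICS defined): the compiler brackets each phase with newPass, the interposed allocator wrappers count against the most recent pass, and printStats dumps the table at the end.

#include "Common/Heap.h"

int main() {
	// each pass records malloc/free counts against the most recently named pass
	HeapStats::newPass( "Parse" );      // hypothetical pass name
	// ... run the parser ...
	HeapStats::newPass( "Resolve" );    // hypothetical pass name
	// ... run the resolver ...
	HeapStats::printStats();            // per-pass Malloc Count / Free Count table on stderr
}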
src/Common/Heap.h (r7d0a3ba → r358cba0)

 	// Heap.h --
 	//
-	// Author           : Peter A. Buhr
-	// Created On       :
+	// Author           : Thierry Delisle
+	// Created On       : Thu May  3 16:16:10 2018
 	// Last Modified By : Peter A. Buhr
-	// Last Modified On : Thu May  3 16:16:10 2018
-	// Update Count     : 2
+	// Last Modified On : Fri May  4 14:34:08 2018
+	// Update Count     : 3
 	//
…
 namespace HeapStats {
-	void newPass( const char * const name );
-	void printStats();
+	void newPass( const char * const name );
+	void printStats();
 }
src/Common/PassVisitor.proto.h (r7d0a3ba → r358cba0)

 	operator bool() { return m_ref ? *m_ref : true; }
-	bool operator=( bool val ) { return *m_ref = val; }
+	bool operator=( bool val ) { assert(m_ref); return *m_ref = val; }

 private:
…
 	friend class ChildrenGuard;

-	bool * set( bool & val ) {
+	bool * set( bool * val ) {
 		bool * prev = m_ref;
-		m_ref = &val;
+		m_ref = val;
 		return prev;
 	}
…
 	ChildrenGuard( bool_ref * ref )
 		: m_val ( true )
-		, m_prev( ref ? ref->set( m_val ) : nullptr )
+		, m_prev( ref ? ref->set( &m_val ) : nullptr )
 		, m_ref ( ref )
 	{}

 	~ChildrenGuard() {
 		if( m_ref ) {
-			m_ref->set( *m_prev );
+			m_ref->set( m_prev );
 		}
 	}
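Why the signature change matters (a reconstruction of the bug, not from the changeset): the guard saves the previous `bool *` held by `bool_ref`, and for the outermost guard that saved pointer is null. The old destructor dereferenced it (`*m_prev`) before handing it back, which is undefined behaviour for the outermost guard; passing the pointer through restores it safely.

#include <cassert>
#include <cstdio>

struct bool_ref {
	bool * m_ref = nullptr;
	bool * set( bool * val ) { bool * prev = m_ref; m_ref = val; return prev; }
};

struct ChildrenGuard {
	bool       m_val = true;
	bool *     m_prev;
	bool_ref * m_ref;
	ChildrenGuard( bool_ref * ref )
		: m_prev( ref ? ref->set( &m_val ) : nullptr ), m_ref( ref ) {}
	~ChildrenGuard() { if( m_ref ) m_ref->set( m_prev ); }   // restore the pointer, even if null
};

int main() {
	bool_ref flag;
	{
		ChildrenGuard outer( &flag );       // saved pointer is null here
		{ ChildrenGuard inner( &flag ); }   // inner restores outer's m_val
	}                                       // outer restores null without dereferencing it
	assert( flag.m_ref == nullptr );
	puts( "ok" );
}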
src/Common/utility.h (r7d0a3ba → r358cba0)

 	// Created On       : Mon May 18 07:44:20 2015
 	// Last Modified By : Peter A. Buhr
-	// Last Modified On : Thu May  3 12:18:31 2018
-	// Update Count     : 39
+	// Last Modified On : Sun May  6 22:24:16 2018
+	// Update Count     : 40
 	//
…
 template<typename T>
 inline
-#if __GNUC__ > 4
+#if defined(__GNUC__) && __GNUC__ > 4
 constexpr
 #endif
src/Parser/parser.yy (r7d0a3ba → r358cba0)

 	// Created On       : Sat Sep  1 20:22:55 2001
 	// Last Modified By : Peter A. Buhr
-	// Last Modified On : Thu May  3 08:20:09 2018
-	// Update Count     : 3225
+	// Last Modified On : Fri May 11 17:51:38 2018
+	// Update Count     : 3261
 	//
…
 %type<decl> field_declaration field_declaration_list field_declarator field_declaring_list
-%type<en> field field_list field_name fraction_constants
+%type<en> field field_list field_name fraction_constants_opt

 %type<decl> external_function_definition function_definition function_array function_declarator function_no_ptr function_ptr
…
 	| type_name '.' no_attr_identifier					// CFA, nested type
 		{ SemanticError( yylloc, "Qualified names are currently unimplemented." ); $$ = nullptr; }
+		// { $$ = nullptr; }
 	| type_name '.' '[' push field_list pop ']'			// CFA, nested type / tuple field selector
 		{ SemanticError( yylloc, "Qualified names are currently unimplemented." ); $$ = nullptr; }
+		// { $$ = nullptr; }
 	| GENERIC '(' assignment_expression ',' generic_assoc_list ')' // C11
 		{ SemanticError( yylloc, "_Generic is currently unimplemented." ); $$ = nullptr; }
…
 	| postfix_expression '.' no_attr_identifier
 		{ $$ = new ExpressionNode( build_fieldSel( $1, build_varref( $3 ) ) ); }
+	| postfix_expression '.' INTEGERconstant			// CFA, tuple index
+		{ $$ = new ExpressionNode( build_fieldSel( $1, build_constantInteger( *$3 ) ) ); }
+	| postfix_expression FLOATING_FRACTIONconstant		// CFA, tuple index
+		{ $$ = new ExpressionNode( build_fieldSel( $1, build_field_name_FLOATING_FRACTIONconstant( *$2 ) ) ); }
 	| postfix_expression '.' '[' push field_list pop ']' // CFA, tuple field selector
 		{ $$ = new ExpressionNode( build_fieldSel( $1, build_tuple( $5 ) ) ); }
-	| postfix_expression FLOATING_FRACTIONconstant		// CFA, tuple index
-		{ $$ = new ExpressionNode( build_fieldSel( $1, build_field_name_FLOATING_FRACTIONconstant( *$2 ) ) ); }
 	| postfix_expression ARROW no_attr_identifier
 		{
 			$$ = new ExpressionNode( build_pfieldSel( $1, *$3 == "0" || *$3 == "1" ? build_constantInteger( *$3 ) : build_varref( $3 ) ) );
 		}
-	| postfix_expression ARROW '[' push field_list pop ']' // CFA, tuple field selector
-		{ $$ = new ExpressionNode( build_pfieldSel( $1, build_tuple( $5 ) ) ); }
 	| postfix_expression ARROW INTEGERconstant			// CFA, tuple index
 		{ $$ = new ExpressionNode( build_pfieldSel( $1, build_constantInteger( *$3 ) ) ); }
+	| postfix_expression ARROW '[' push field_list pop ']' // CFA, tuple field selector
+		{ $$ = new ExpressionNode( build_pfieldSel( $1, build_tuple( $5 ) ) ); }
 	| postfix_expression ICR
 		{ $$ = new ExpressionNode( build_unary_ptr( OperKinds::IncrPost, $1 ) ); }
…
 field_name:
-	INTEGERconstant fraction_constants
+	INTEGERconstant fraction_constants_opt
 		{ $$ = new ExpressionNode( build_field_name_fraction_constants( build_constantInteger( *$1 ), $2 ) ); }
-	| FLOATINGconstant fraction_constants
+	| FLOATINGconstant fraction_constants_opt
 		{ $$ = new ExpressionNode( build_field_name_fraction_constants( build_field_name_FLOATINGconstant( *$1 ), $2 ) ); }
-	| no_attr_identifier fraction_constants
+	| no_attr_identifier fraction_constants_opt
 		{
 			$$ = new ExpressionNode( build_field_name_fraction_constants( build_varref( $1 ), $2 ) );
 		}
 	;

-fraction_constants:
+fraction_constants_opt:
 	// empty
 		{ $$ = nullptr; }
-	| fraction_constants FLOATING_FRACTIONconstant
+	| fraction_constants_opt FLOATING_FRACTIONconstant
 		{
 			Expression * constant = build_field_name_FLOATING_FRACTIONconstant( *$2 );
…
 external_definition_list:
 	external_definition
-	| external_definition_list { forall = xxx; } push external_definition
+	| external_definition_list
+		{ forall = xxx; }
+	  push external_definition
 		{ $$ = $1 ? $1->appendList( $4 ) : $4; }
 	;
…
 		{
 			for ( DeclarationNode * iter = $5; iter != nullptr; iter = (DeclarationNode *)iter->get_next() ) {
-				iter->addQualifiers( $1->clone() );
+				if ( isMangled( iter->linkage ) ) {		// ignore extern "C"
+					iter->addQualifiers( $1->clone() );
+				} // if
 			} // for
 			xxx = false;
…
 		{
 			for ( DeclarationNode * iter = $5; iter != nullptr; iter = (DeclarationNode *)iter->get_next() ) {
-				iter->addQualifiers( $1->clone() );
+				if ( isMangled( iter->linkage ) ) {		// ignore extern "C"
+					iter->addQualifiers( $1->clone() );
+				} // if
 			} // for
 			xxx = false;
…
 		{
 			for ( DeclarationNode * iter = $6; iter != nullptr; iter = (DeclarationNode *)iter->get_next() ) {
-				iter->addQualifiers( $1->clone() );
-				iter->addQualifiers( $2->clone() );
+				if ( isMangled( iter->linkage ) && isMangled( $2->linkage ) ) {	// ignore extern "C"
+					iter->addQualifiers( $1->clone() );
+					iter->addQualifiers( $2->clone() );
+				} // if
 			} // for
 			xxx = false;
src/benchmark/Makefile.am (r7d0a3ba → r358cba0)

 %.runquiet :
-	@+make $(basename $@)
+	@+make $(basename $@) CFLAGS="-w"
 	@taskset -c 1 ./a.out
 	@rm -f a.out
src/benchmark/Makefile.in (r7d0a3ba → r358cba0)

 %.runquiet :
-	@+make $(basename $@)
+	@+make $(basename $@) CFLAGS="-w"
 	@taskset -c 1 ./a.out
 	@rm -f a.out
src/driver/cc1.cc (r7d0a3ba → r358cba0)

 	// Created On       : Fri Aug 26 14:23:51 2005
 	// Last Modified By : Peter A. Buhr
-	// Last Modified On : Wed Jan 18 08:14:21 2017
-	// Update Count     : 81
+	// Last Modified On : Sat May 12 16:11:53 2018
+	// Update Count     : 94
 	//
…
 string compiler_name( CFA_BACKEND_CC );			// path/name of C compiler

+string D__GCC_X__( "-D__GCC_X__=" );
 string D__GCC_BPREFIX__( "-D__GCC_BPREFIX__=" );
 string D__CFA_FLAGPREFIX__( "-D__CFA_FLAG__=" );
…
 			cargs[ncargs] = ( *new string( string( argv[i + 1] ).substr( D__CFA_FLAGPREFIX__.size() - 2 ) ) ).c_str();
 			ncargs += 1;
+			i += 1;										// and the argument
+		} else if ( prefix( arg, D__GCC_X__ ) ) {
+			args[nargs] = "-x";
+			nargs += 1;
+			args[nargs] = ( *new string( arg.substr( D__GCC_X__.size() ) ) ).c_str(); // pass the flag along
+			nargs += 1;
+		} else if ( arg == "-D" && prefix( argv[i + 1], D__GCC_X__.substr(2) ) ) {
+			args[nargs] = "-x";
+			nargs += 1;
+			args[nargs] = ( *new string( string( argv[i + 1] ).substr( D__GCC_X__.size() - 2 ) ) ).c_str(); // pass the flag along
+			nargs += 1;
 			i += 1;										// and the argument
 		} else if ( prefix( arg, D__GCC_BPREFIX__ ) ) {
src/driver/cfa.cc (r7d0a3ba → r358cba0)

 	// Created On       : Tue Aug 20 13:44:49 2002
 	// Last Modified By : Peter A. Buhr
-	// Last Modified On : Wed May  2 17:57:43 2018
-	// Update Count     : 224
+	// Last Modified On : Mon May 14 07:52:50 2018
+	// Update Count     : 243
 	//
…
 			args[nargs] = argv[i];					// pass the argument along
 			nargs += 1;
-		} else if ( prefix( arg, "-std=" ) ) {
+		} else if ( prefix( arg, "-std=" ) || prefix( arg, "--std=" ) ) {
 			std_flag = true;						// -std=XX provided
 			args[nargs] = argv[i];					// pass the argument along
 			nargs += 1;
+		} else if ( arg == "-x" ) {					// lost so force along
+			args[nargs] = argv[i];					// pass the argument along
+			nargs += 1;
+			i += 1;									// advance to argument
+			args[nargs] = argv[i];					// pass the argument along
+			nargs += 1;
+			args[nargs] = ( *new string( string("-D__GCC_X__=") + argv[i] ) ).c_str(); // add the argument for -x
+			nargs += 1;
+		} else if ( prefix( arg, "-x" ) ) {			// lost so force along
+			args[nargs] = argv[i];					// pass the argument along
+			nargs += 1;
+			args[nargs] = ( *new string( string("-D__GCC_X__=") + arg.substr(2) ) ).c_str(); // add the argument for -x
+			nargs += 1;
 		} else if ( arg == "-w" ) {
…
 	} // for

+#ifdef __x86_64__
+	args[nargs] = "-mcx16";							// allow double-wide CAA
+	nargs += 1;
+#endif // __x86_64__
+
 #ifdef __DEBUG_H__
 	cerr << "args:";
…
 	if ( link ) {
 #if ! defined(HAVE_LIBCFA_RELEASE)
-		if ( !debug ) {
+		if ( ! debug ) {
 			cerr << "error: Option -nodebug is unavailable, libcfa was not installed." << endl;
 			exit( EXIT_FAILURE );
-		}
+		} // if
 #endif
 #if ! defined(HAVE_LIBCFA_DEBUG)
-		if ( debug ) {
+		if ( debug ) {
 			cerr << "error: Option -debug is unavailable, libcfa-d was not installed." << endl;
 			exit( EXIT_FAILURE );
-		}
+		} // if
 #endif
…
 		args[nargs] = "-L" CFA_LIBDIR;
 		nargs += 1;
-		if ( debug ) {
+		if ( debug ) {
 			args[nargs] = "-lcfa-d";
 		} else {
 			args[nargs] = "-lcfa";
-		}
+		} // if
 		nargs += 1;
 		args[nargs] = "-lpthread";
src/libcfa/bits/containers.h (r7d0a3ba → r358cba0)

 #endif

+
+//-----------------------------------------------------------------------------
+// Doubly Linked List
+//-----------------------------------------------------------------------------
+#ifdef __cforall
+	forall(dtype TYPE | sized(TYPE))
+	#define T TYPE
+	#define __getter_t * [T * & next, T * & prev] ( T & )
+#else
+	typedef void (*__generit_c_getter_t)();
+	#define T void
+	#define __getter_t __generit_c_getter_t
+#endif
+struct __dllist {
+	T * head;
+	__getter_t __get;
+};
+#undef T
+#undef __getter_t
+
+#ifdef __cforall
+#define __dllist_t(T) __dllist(T)
+#else
+#define __dllist_t(T) struct __dllist
+#endif
+
+#ifdef __cforall
+
+forall(dtype T | sized(T))
+static inline [void] ?{}( __dllist(T) & this, * [T * & next, T * & prev] ( T & ) __get ) {
+	this.head{ NULL };
+	this.__get = __get;
+}
+
+#define _next .0
+#define _prev .1
+forall(dtype T | sized(T))
+static inline void push_front( __dllist(T) & this, T & node ) with( this ) {
+	if ( head ) {
+		__get( node )_next = head;
+		__get( node )_prev = __get( *head )_prev;
+		// inserted node must be consistent before it is seen
+		// prevent code movement across barrier
+		asm( "" : : : "memory" );
+		__get( *head )_prev = &node;
+		T & prev = *__get( node )_prev;
+		__get( prev )_next = &node;
+	}
+	else {
+		__get( node )_next = &node;
+		__get( node )_prev = &node;
+	}
+
+	// prevent code movement across barrier
+	asm( "" : : : "memory" );
+	head = &node;
+}
+
+forall(dtype T | sized(T))
+static inline void remove( __dllist(T) & this, T & node ) with( this ) {
+	if ( &node == head ) {
+		if ( __get( *head )_next == head ) {
+			head = NULL;
+		}
+		else {
+			head = __get( *head )_next;
+		}
+	}
+	__get( *__get( node )_next )_prev = __get( node )_prev;
+	__get( *__get( node )_prev )_next = __get( node )_next;
+	__get( node )_next = NULL;
+	__get( node )_prev = NULL;
+}
+#undef _next
+#undef _prev
+#endif
+
 //-----------------------------------------------------------------------------
 // Tools
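For readers less familiar with CFA syntax, here is a rough C++ analogue of this intrusive circular list (names invented; the real version above is polymorphic over any sized type, reaches the links through the stored getter, and uses compiler barriers so concurrent readers never see a half-linked node, which this sketch omits):

#include <cassert>

// each element embeds its own links; the list stores only the head
struct Node {
	Node * next = nullptr;
	Node * prev = nullptr;
};

struct DList {
	Node * head = nullptr;

	void push_front( Node & n ) {
		if ( head ) {                   // circular insert before the current head
			n.next = head;
			n.prev = head->prev;
			head->prev = &n;
			n.prev->next = &n;
		} else {                        // first node points at itself
			n.next = n.prev = &n;
		}
		head = &n;
	}

	void remove( Node & n ) {
		if ( &n == head )
			head = ( n.next == head ) ? nullptr : n.next;
		n.next->prev = n.prev;
		n.prev->next = n.next;
		n.next = n.prev = nullptr;
	}
};

int main() {
	DList l; Node a, b;
	l.push_front( a ); l.push_front( b );
	l.remove( a ); l.remove( b );
	assert( l.head == nullptr );
}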
src/libcfa/concurrency/coroutine (r7d0a3ba → r358cba0)

 // Suspend implementation inlined for performance
 static inline void suspend() {
-	coroutine_desc * src = TL_GET( this_coroutine );		// optimization
+	// optimization : read TLS once and reuse it
+	// Safety note: this is preemption safe since if
+	// preemption occurs after this line, the pointer
+	// will also migrate which means this value will
+	// stay in syn with the TLS
+	coroutine_desc * src = TL_GET( this_coroutine );

 	assertf( src->last != 0,
…
 forall(dtype T | is_coroutine(T))
 static inline void resume(T & cor) {
-	coroutine_desc * src = TL_GET( this_coroutine );		// optimization
+	// optimization : read TLS once and reuse it
+	// Safety note: this is preemption safe since if
+	// preemption occurs after this line, the pointer
+	// will also migrate which means this value will
+	// stay in syn with the TLS
+	coroutine_desc * src = TL_GET( this_coroutine );
 	coroutine_desc * dst = get_coroutine(cor);
…
 		dst->last = src;
 		dst->starter = dst->starter ? dst->starter : src;
-	} // if
+	}

 	// always done for performance testing
…
 static inline void resume(coroutine_desc * dst) {
-	coroutine_desc * src = TL_GET( this_coroutine );		// optimization
+	// optimization : read TLS once and reuse it
+	// Safety note: this is preemption safe since if
+	// preemption occurs after this line, the pointer
+	// will also migrate which means this value will
+	// stay in syn with the TLS
+	coroutine_desc * src = TL_GET( this_coroutine );

 	// not resuming self ?
…
 		// set last resumer
 		dst->last = src;
-	} // if
+	}

 	// always done for performance testing
src/libcfa/concurrency/coroutine.c (r7d0a3ba → r358cba0)

 // Wrapper for co
 void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
-	verify( TL_GET( preemption_state ).enabled || TL_GET( this_processor )->do_terminate );
+	// Safety note : This could cause some false positives due to preemption
+	verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
 	disable_interrupts();
…
 	// set new coroutine that task is executing
-	TL_SET( this_coroutine, dst );
+	kernelTLS.this_coroutine = dst;

 	// context switch to specified coroutine
…
 	enable_interrupts( __cfaabi_dbg_ctx );
-	verify( TL_GET( preemption_state ).enabled || TL_GET( this_processor )->do_terminate );
+	// Safety note : This could cause some false positives due to preemption
+	verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
 } //ctxSwitchDirect
src/libcfa/concurrency/invoke.c (r7d0a3ba → r358cba0)

 	// Fetch the thread handle from the user defined thread structure
 	struct thread_desc* thrd = get_thread( this );
+	thrd->self_cor.last = NULL;

 	// Officially start the thread by enabling preemption
src/libcfa/concurrency/invoke.h (r7d0a3ba → r358cba0)

 #include "bits/locks.h"

-#define TL_GET( member ) kernelThreadData.member
-#define TL_SET( member, value ) kernelThreadData.member = value;
+#define TL_GET( member ) kernelTLS.member
+#define TL_SET( member, value ) kernelTLS.member = value;

 #ifdef __cforall
…
 			volatile bool in_progress;
 		} preemption_state;
-	} kernelThreadData;
+	} kernelTLS;
 }

 static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_coroutine ); }
-static inline struct thread_desc * volatile active_thread() { return TL_GET( this_thread ); }
-static inline struct processor * volatile active_processor() { return TL_GET( this_processor ); }
+static inline struct thread_desc    * volatile active_thread   () { return TL_GET( this_thread    ); }
+static inline struct processor      * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
 #endif
…
 	struct thread_desc * next;

-	__cfaabi_dbg_debug_do(
-		// instrusive link field for debugging
-		struct thread_desc * dbg_next;
-		struct thread_desc * dbg_prev;
-	)
+	struct {
+		struct thread_desc * next;
+		struct thread_desc * prev;
+	} node;
 };
…
 static inline thread_desc * & get_next( thread_desc & this ) {
 	return this.next;
 }

+static inline [thread_desc *&, thread_desc *& ] __get( thread_desc & this ) {
+	return this.node.[next, prev];
+}
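A loose C++ illustration of the pattern behind the rename (this mirrors the header but is not the CFA runtime): all per-kernel-thread state lives in one thread_local aggregate, the accessor macro is just a field read, and call sites can cache the result of a single TLS lookup in a local.

#include <cstddef>

struct coroutine_desc; struct thread_desc; struct processor;

// one TLS block per kernel thread, mirroring struct KernelThreadData
thread_local struct KernelTLS {
	coroutine_desc * this_coroutine = nullptr;
	thread_desc    * this_thread    = nullptr;
	processor      * this_processor = nullptr;
	struct {
		volatile bool enabled = false;
		volatile unsigned short disable_count = 0;
		volatile bool in_progress = false;
	} preemption_state;
} kernelTLS;

#define TL_GET( member ) kernelTLS.member

static inline thread_desc * active_thread() { return TL_GET( this_thread ); }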
src/libcfa/concurrency/kernel (r7d0a3ba → r358cba0)

 //-----------------------------------------------------------------------------
-// Cluster
-struct cluster {
-	// Ready queue locks
-	__spinlock_t ready_queue_lock;
-
-	// Ready queue for threads
-	__queue_t(thread_desc) ready_queue;
-
-	// Name of the cluster
-	const char * name;
-
-	// Preemption rate on this cluster
-	Duration preemption_rate;
-};
-
-extern struct cluster * mainCluster;
-extern Duration default_preemption();
-
-void ?{} (cluster & this, const char * name, Duration preemption_rate);
-void ^?{}(cluster & this);
-
-static inline void ?{} (cluster & this)                           { this{"Anonymous Cluster", default_preemption()}; }
-static inline void ?{} (cluster & this, Duration preemption_rate) { this{"Anonymous Cluster", preemption_rate}; }
-static inline void ?{} (cluster & this, const char * name)        { this{name, default_preemption()}; }
-
-//-----------------------------------------------------------------------------
 // Processor
+extern struct cluster * mainCluster;
+
 enum FinishOpCode { No_Action, Release, Schedule, Release_Schedule, Release_Multi, Release_Multi_Schedule };
…
 	// Cluster from which to get threads
-	cluster * cltr;
+	struct cluster * cltr;

 	// Name of the processor
…
 	bool pending_preemption;

+	// Idle lock
+
+	// Link lists fields
+	struct {
+		struct processor * next;
+		struct processor * prev;
+	} node;
+
 #ifdef __CFA_DEBUG__
 	// Last function to enable preemption on this processor
…
 };

-void  ?{}(processor & this, const char * name, cluster & cltr);
+void  ?{}(processor & this, const char * name, struct cluster & cltr);
 void ^?{}(processor & this);

 static inline void ?{}(processor & this)                        { this{ "Anonymous Processor", *mainCluster}; }
-static inline void ?{}(processor & this, cluster & cltr)        { this{ "Anonymous Processor", cltr}; }
+static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr}; }
 static inline void ?{}(processor & this, const char * name)     { this{name, *mainCluster }; }

+static inline [processor *&, processor *& ] __get( processor & this ) {
+	return this.node.[next, prev];
+}
+
+//-----------------------------------------------------------------------------
+// Cluster
+struct cluster {
+	// Ready queue locks
+	__spinlock_t ready_queue_lock;
+
+	// Ready queue for threads
+	__queue_t(thread_desc) ready_queue;
+
+	// Name of the cluster
+	const char * name;
+
+	// Preemption rate on this cluster
+	Duration preemption_rate;
+
+	// List of processors
+	__spinlock_t proc_list_lock;
+	__dllist_t(struct processor) procs;
+	__dllist_t(struct processor) idles;
+
+	// Link lists fields
+	struct {
+		cluster * next;
+		cluster * prev;
+	} node;
+};
+extern Duration default_preemption();
+
+void ?{} (cluster & this, const char * name, Duration preemption_rate);
+void ^?{}(cluster & this);
+
+static inline void ?{} (cluster & this)                           { this{"Anonymous Cluster", default_preemption()}; }
+static inline void ?{} (cluster & this, Duration preemption_rate) { this{"Anonymous Cluster", preemption_rate}; }
+static inline void ?{} (cluster & this, const char * name)        { this{name, default_preemption()}; }
+
+static inline [cluster *&, cluster *& ] __get( cluster & this ) {
+	return this.node.[next, prev];
+}

 // Local Variables: //
src/libcfa/concurrency/kernel.c (r7d0a3ba → r358cba0)

 thread_desc * mainThread;

+struct { __dllist_t(thread_desc) list; __spinlock_t lock; } global_threads ;
+struct { __dllist_t(cluster    ) list; __spinlock_t lock; } global_clusters;
+
 //-----------------------------------------------------------------------------
 // Global state
…
 // volatile thread_local unsigned short disable_preempt_count = 1;

-thread_local struct KernelThreadData kernelThreadData = {
+thread_local struct KernelThreadData kernelTLS = {
 	NULL,
 	NULL,
…
 	self_mon_p = &self_mon;
 	next = NULL;
-	__cfaabi_dbg_debug_do(
-		dbg_next = NULL;
-		dbg_prev = NULL;
-		__cfaabi_dbg_thread_register(&this);
-	)
+
+	node.next = NULL;
+	node.prev = NULL;
+	doregister(this);

 	monitors{ &self_mon_p, 1, (fptr_t)0 };
…
 	terminate(&this);
 	verify(this.do_terminate);
-	verify(TL_GET( this_processor ) != &this);
+	verify( kernelTLS.this_processor != &this);
 	P( terminated );
-	verify(TL_GET( this_processor ) != &this);
+	verify( kernelTLS.this_processor != &this);
 	pthread_join( kernel_thread, NULL );
…
 	ready_queue{};
 	ready_queue_lock{};
+
+	procs{ __get };
+	idles{ __get };
+
+	doregister(this);
 }

 void ^?{}(cluster & this) {
+	unregister(this);
 }
…
 	__cfaabi_dbg_print_safe("Kernel : core %p starting\n", this);

+	doregister(this->cltr, this);
+
 	{
 		// Setup preemption data
…
 		if(readyThread)
 		{
-			verify( ! TL_GET( preemption_state ).enabled );
+			verify( ! kernelTLS.preemption_state.enabled );

 			runThread(this, readyThread);

-			verify( ! TL_GET( preemption_state ).enabled );
+			verify( ! kernelTLS.preemption_state.enabled );

 			//Some actions need to be taken from the kernel
…
 	}

+	unregister(this->cltr, this);
+
 	V( this->terminated );
…
 }

+// KERNEL ONLY
 // runThread runs a thread by context switching
 // from the processor coroutine to the target thread
…
 	coroutine_desc * thrd_cor = dst->curr_cor;

-	//Reset the terminating actions here
+	// Reset the terminating actions here
 	this->finish.action_code = No_Action;

-	//Update global state
-	TL_SET( this_thread, dst );
+	// Update global state
+	kernelTLS.this_thread = dst;

 	// Context Switch to the thread
…
 }

+// KERNEL_ONLY
 void returnToKernel() {
-	coroutine_desc * proc_cor = get_coroutine(TL_GET( this_processor )->runner);
-	coroutine_desc * thrd_cor = TL_GET( this_thread )->curr_cor = TL_GET( this_coroutine );
+	coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
+	coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor = kernelTLS.this_coroutine;
 	ThreadCtxSwitch(thrd_cor, proc_cor);
 }

+// KERNEL_ONLY
 // Once a thread has finished running, some of
 // its final actions must be executed from the kernel
 void finishRunning(processor * this) with( this->finish ) {
 	if( action_code == Release ) {
-		verify( ! TL_GET( preemption_state ).enabled );
+		verify( ! kernelTLS.preemption_state.enabled );
 		unlock( *lock );
 	}
…
 	else if( action_code == Release_Schedule ) {
-		verify( ! TL_GET( preemption_state ).enabled );
+		verify( ! kernelTLS.preemption_state.enabled );
 		unlock( *lock );
 		ScheduleThread( thrd );
 	}
 	else if( action_code == Release_Multi ) {
-		verify( ! TL_GET( preemption_state ).enabled );
+		verify( ! kernelTLS.preemption_state.enabled );
 		for(int i = 0; i < lock_count; i++) {
 			unlock( *locks[i] );
…
 }

+// KERNEL_ONLY
 // Context invoker for processors
 // This is the entry point for processors (kernel threads)
…
 void * CtxInvokeProcessor(void * arg) {
 	processor * proc = (processor *) arg;
-	TL_SET( this_processor, proc );
-	TL_SET( this_coroutine, NULL );
-	TL_SET( this_thread, NULL );
-	TL_GET( preemption_state ).[enabled, disable_count] = [false, 1];
+	kernelTLS.this_processor = proc;
+	kernelTLS.this_coroutine = NULL;
+	kernelTLS.this_thread = NULL;
+	kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
 	// SKULLDUGGERY: We want to create a context for the processor coroutine
 	// which is needed for the 2-step context switch. However, there is no reason
…
 	//Set global state
-	TL_SET( this_coroutine, get_coroutine(proc->runner) );
-	TL_SET( this_thread, NULL );
+	kernelTLS.this_coroutine = get_coroutine(proc->runner);
+	kernelTLS.this_thread = NULL;

 	//We now have a proper context from which to schedule threads
…
 }

+// KERNEL_ONLY
 void kernel_first_resume(processor * this) {
-	coroutine_desc * src = TL_GET( this_coroutine );
+	coroutine_desc * src = kernelTLS.this_coroutine;
 	coroutine_desc * dst = get_coroutine(this->runner);

-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );

 	create_stack(&dst->stack, dst->stack.size);
 	CtxStart(&this->runner, CtxInvokeCoroutine);

-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );

 	dst->last = src;
…
 	// set new coroutine that task is executing
-	TL_SET( this_coroutine, dst );
+	kernelTLS.this_coroutine = dst;

 	// SKULLDUGGERY normally interrupts are enable before leaving a coroutine ctxswitch.
…
 	src->state = Active;

-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );
 }

 //-----------------------------------------------------------------------------
 // Scheduler routines
+
+// KERNEL ONLY
 void ScheduleThread( thread_desc * thrd ) {
-	// if( ! thrd ) return;
 	verify( thrd );
 	verify( thrd->self_cor.state != Halted );

-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );

 	verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );
…
 	}

-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );
 }

+// KERNEL ONLY
 thread_desc * nextThread(cluster * this) with( *this ) {
-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );
 	lock( ready_queue_lock __cfaabi_dbg_ctx2 );
 	thread_desc * head = pop_head( ready_queue );
 	unlock( ready_queue_lock );
-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );
 	return head;
 }

 void BlockInternal() {
 	disable_interrupts();
-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );
 	returnToKernel();
-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );
 	enable_interrupts( __cfaabi_dbg_ctx );
 }

 void BlockInternal( __spinlock_t * lock ) {
 	disable_interrupts();
-	with( *TL_GET( this_processor ) ) {
+	with( *kernelTLS.this_processor ) {
 		finish.action_code = Release;
 		finish.lock = lock;
 	}

-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );
 	returnToKernel();
-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );

 	enable_interrupts( __cfaabi_dbg_ctx );
 }

 void BlockInternal( thread_desc * thrd ) {
 	disable_interrupts();
-	with( *TL_GET( this_processor ) ) {
+	with( * kernelTLS.this_processor ) {
 		finish.action_code = Schedule;
 		finish.thrd = thrd;
 	}

-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );
 	returnToKernel();
-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );

 	enable_interrupts( __cfaabi_dbg_ctx );
 }

 void BlockInternal( __spinlock_t * lock, thread_desc * thrd ) {
 	assert(thrd);
 	disable_interrupts();
-	with( *TL_GET( this_processor ) ) {
+	with( * kernelTLS.this_processor ) {
 		finish.action_code = Release_Schedule;
 		finish.lock = lock;
…
 	}

-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );
 	returnToKernel();
-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );

 	enable_interrupts( __cfaabi_dbg_ctx );
 }

 void BlockInternal(__spinlock_t * locks [], unsigned short count) {
 	disable_interrupts();
-	with( *TL_GET( this_processor ) ) {
+	with( * kernelTLS.this_processor ) {
 		finish.action_code = Release_Multi;
 		finish.locks = locks;
…
 	}

-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );
 	returnToKernel();
-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );

 	enable_interrupts( __cfaabi_dbg_ctx );
 }

 void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
 	disable_interrupts();
-	with( *TL_GET( this_processor ) ) {
+	with( *kernelTLS.this_processor ) {
 		finish.action_code = Release_Multi_Schedule;
 		finish.locks = locks;
…
 	}

-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );
 	returnToKernel();
-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );

 	enable_interrupts( __cfaabi_dbg_ctx );
 }

+// KERNEL ONLY
 void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
-	verify( ! TL_GET( preemption_state ).enabled );
-	with( *TL_GET( this_processor ) ) {
+	verify( ! kernelTLS.preemption_state.enabled );
+	with( * kernelTLS.this_processor ) {
 		finish.action_code = thrd ? Release_Schedule : Release;
 		finish.lock = lock;
…
 // Kernel boot procedures
 void kernel_startup(void) {
-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );
 	__cfaabi_dbg_print_safe("Kernel : Starting\n");
+
+	global_threads. list{ __get };
+	global_threads. lock{};
+	global_clusters.list{ __get };
+	global_clusters.lock{};

 	// Initialize the main cluster
…
 	//initialize the global state variables
-	TL_SET( this_processor, mainProcessor );
-	TL_SET( this_thread, mainThread );
-	TL_SET( this_coroutine, &mainThread->self_cor );
+	kernelTLS.this_processor = mainProcessor;
+	kernelTLS.this_thread = mainThread;
+	kernelTLS.this_coroutine = &mainThread->self_cor;

 	// Enable preemption
…
 	// context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
 	// mainThread is on the ready queue when this call is made.
-	kernel_first_resume( TL_GET( this_processor ) );
+	kernel_first_resume( kernelTLS.this_processor );

…
 	__cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n");

-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );
 	enable_interrupts( __cfaabi_dbg_ctx );
-	verify( TL_GET( preemption_state ).enabled );
+	verify( TL_GET( preemption_state.enabled ) );
 }

…
 	__cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");

-	verify( TL_GET( preemption_state ).enabled );
+	verify( TL_GET( preemption_state.enabled ) );
 	disable_interrupts();
-	verify( ! TL_GET( preemption_state ).enabled );
+	verify( ! kernelTLS.preemption_state.enabled );

 	// SKULLDUGGERY: Notify the mainProcessor it needs to terminates.
…
 //=============================================================================================
+// Kernel Quiescing
+//=============================================================================================
+
+// void halt(processor * this) with( this ) {
+// 	pthread_mutex_lock( &idle.lock );
+
+
+
+// 	// SKULLDUGGERY: Even if spurious wake-up is a thing
+// 	// spuriously waking up a kernel thread is not a big deal
+// 	// if it is very rare.
+// 	pthread_cond_wait( &idle.cond, &idle.lock);
+// 	pthread_mutex_unlock( &idle.lock );
+// }
+
+// void wake(processor * this) with( this ) {
+// 	pthread_mutex_lock  (&idle.lock);
+// 	pthread_cond_signal (&idle.cond);
+// 	pthread_mutex_unlock(&idle.lock);
+// }
+
+//=============================================================================================
 // Unexpected Terminating logic
 //=============================================================================================
…
 static __spinlock_t kernel_abort_lock;
-static __spinlock_t kernel_debug_lock;
 static bool kernel_abort_called = false;

-void * kernel_abort	(void) __attribute__ ((__nothrow__)) {
+void * kernel_abort(void) __attribute__ ((__nothrow__)) {
 	// abort cannot be recursively entered by the same or different processors because all signal handlers return when
 	// the globalAbort flag is true.
…
 	// first task to abort ?
-	if ( ! kernel_abort_called ) {			// not first task to abort ?
+	if ( kernel_abort_called ) {			// not first task to abort ?
+		unlock( kernel_abort_lock );
+
+		sigset_t mask;
+		sigemptyset( &mask );
+		sigaddset( &mask, SIGALRM );		// block SIGALRM signals
+		sigsuspend( &mask );				// block the processor to prevent further damage during abort
+		_exit( EXIT_FAILURE );				// if processor unblocks before it is killed, terminate it
+	}
+	else {
 		kernel_abort_called = true;
 		unlock( kernel_abort_lock );
 	}
-	else {
-		unlock( kernel_abort_lock );
-
-		sigset_t mask;
-		sigemptyset( &mask );
-		sigaddset( &mask, SIGALRM );		// block SIGALRM signals
-		sigaddset( &mask, SIGUSR1 );		// block SIGUSR1 signals
-		sigsuspend( &mask );				// block the processor to prevent further damage during abort
-		_exit( EXIT_FAILURE );				// if processor unblocks before it is killed, terminate it
-	}
-
-	return TL_GET( this_thread );
+
+	return kernelTLS.this_thread;
 }

…
 	thread_desc * thrd = kernel_data;

-	int len = snprintf( abort_text, abort_text_size, "Error occurred while executing task %.256s (%p)", thrd->self_cor.name, thrd );
-	__cfaabi_dbg_bits_write( abort_text, len );
-
-	if ( get_coroutine(thrd) != TL_GET( this_coroutine ) ) {
-		len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", TL_GET( this_coroutine )->name, TL_GET( this_coroutine ) );
+	if(thrd) {
+		int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
 		__cfaabi_dbg_bits_write( abort_text, len );
+
+		if ( get_coroutine(thrd) != kernelTLS.this_coroutine ) {
+			len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", kernelTLS.this_coroutine->name, kernelTLS.this_coroutine );
+			__cfaabi_dbg_bits_write( abort_text, len );
+		}
+		else {
+			__cfaabi_dbg_bits_write( ".\n", 2 );
+		}
 	}
 	else {
-		__cfaabi_dbg_bits_write( ".\n", 2 );
+		int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
 	}
 }

 int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
-	return get_coroutine(TL_GET( this_thread )) == get_coroutine(mainThread) ? 4 : 2;
-}
+	return get_coroutine(kernelTLS.this_thread) == get_coroutine(mainThread) ? 4 : 2;
+}
+
+static __spinlock_t kernel_debug_lock;

 extern "C" {
…
 	if ( count < 0 ) {
 		// queue current task
-		append( waiting, (thread_desc *)TL_GET( this_thread ) );
+		append( waiting, kernelTLS.this_thread );

 		// atomically release spin lock and block
…
 //-----------------------------------------------------------------------------
+// Global Queues
+void doregister( thread_desc & thrd ) {
+	// lock	( global_thread.lock );
+	// push_front( global_thread.list, thrd );
+	// unlock ( global_thread.lock );
+}
+
+void unregister( thread_desc & thrd ) {
+	// lock  ( global_thread.lock );
+	// remove( global_thread.list, thrd );
+	// unlock( global_thread.lock );
+}
+
+void doregister( cluster & cltr ) {
+	// lock	( global_cluster.lock );
+	// push_front( global_cluster.list, cltr );
+	// unlock ( global_cluster.lock );
+}
+
+void unregister( cluster & cltr ) {
+	// lock  ( global_cluster.lock );
+	// remove( global_cluster.list, cltr );
+	// unlock( global_cluster.lock );
+}
+
+
+void doregister( cluster * cltr, processor * proc ) {
+	// lock	 (cltr->proc_list_lock __cfaabi_dbg_ctx2);
+	// push_front(cltr->procs, *proc);
+	// unlock    (cltr->proc_list_lock);
+}
+
+void unregister( cluster * cltr, processor * proc ) {
+	// lock  (cltr->proc_list_lock __cfaabi_dbg_ctx2);
+	// remove(cltr->procs, *proc );
+	// unlock(cltr->proc_list_lock);
+}
+
+//-----------------------------------------------------------------------------
 // Debug
 __cfaabi_dbg_debug_do(
-	struct {
-		thread_desc * tail;
-	} __cfaabi_dbg_thread_list = { NULL };
-
-	void __cfaabi_dbg_thread_register( thread_desc * thrd ) {
-		if( !__cfaabi_dbg_thread_list.tail ) {
-			__cfaabi_dbg_thread_list.tail = thrd;
-			return;
-		}
-		__cfaabi_dbg_thread_list.tail->dbg_next = thrd;
-		thrd->dbg_prev = __cfaabi_dbg_thread_list.tail;
-		__cfaabi_dbg_thread_list.tail = thrd;
-	}
-
-	void __cfaabi_dbg_thread_unregister( thread_desc * thrd ) {
-		thread_desc * prev = thrd->dbg_prev;
-		thread_desc * next = thrd->dbg_next;
-
-		if( next ) { next->dbg_prev = prev; }
-		else {
-			assert( __cfaabi_dbg_thread_list.tail == thrd );
-			__cfaabi_dbg_thread_list.tail = prev;
-		}
-
-		if( prev ) { prev->dbg_next = next; }
-
-		thrd->dbg_prev = NULL;
-		thrd->dbg_next = NULL;
-	}
-
 	void __cfaabi_dbg_record(__spinlock_t & this, const char * prev_name) {
 		this.prev_name = prev_name;
-		this.prev_thrd = TL_GET( this_thread );
+		this.prev_thrd = kernelTLS.this_thread;
 	}
 )
src/libcfa/concurrency/kernel_private.h (r7d0a3ba → r358cba0)

 #define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]

+
+void doregister( struct thread_desc & thrd );
+void unregister( struct thread_desc & thrd );
+
+void doregister( struct cluster & cltr );
+void unregister( struct cluster & cltr );
+
+void doregister( struct cluster * cltr, struct processor * proc );
+void unregister( struct cluster * cltr, struct processor * proc );
+
 // Local Variables: //
 // mode: c //
src/libcfa/concurrency/monitor.c (r7d0a3ba → r358cba0)

 	// Lock the monitor spinlock
 	lock( this->lock __cfaabi_dbg_ctx2 );
-	thread_desc * thrd = TL_GET( this_thread );
+	// Interrupts disable inside critical section
+	thread_desc * thrd = kernelTLS.this_thread;

 	__cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
…
 	// Lock the monitor spinlock
 	lock( this->lock __cfaabi_dbg_ctx2 );
-	thread_desc * thrd = TL_GET( this_thread );
+	// Interrupts disable inside critical section
+	thread_desc * thrd = kernelTLS.this_thread;

 	__cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
…
 		// Create the node specific to this wait operation
-		wait_ctx_primed( TL_GET( this_thread ), 0 )
+		wait_ctx_primed( thrd, 0 )

 		// Some one else has the monitor, wait for him to finish and then run
…
 		__cfaabi_dbg_print_safe( "Kernel : blocking \n" );

-		wait_ctx( TL_GET( this_thread ), 0 )
+		wait_ctx( thrd, 0 )
 		this->dtor_node = &waiter;
…
 	lock( this->lock __cfaabi_dbg_ctx2 );

-	__cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", TL_GET( this_thread ), this, this->owner);
-
-	verifyf( TL_GET( this_thread ) == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", TL_GET( this_thread ), this->owner, this->recursion, this );
+	__cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
+
+	verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );

 	// Leaving a recursion level, decrement the counter
…
 // Sorts monitors before entering
 void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) {
+	thread_desc * thrd = TL_GET( this_thread );
+
 	// Store current array
 	this.m = m;
…
 	// Save previous thread context
-	this.prev = TL_GET( this_thread )->monitors;
+	this.prev = thrd->monitors;

 	// Update thread context (needed for conditions)
-	(TL_GET( this_thread )->monitors){m, count, func};
+	(thrd->monitors){m, count, func};

 	// __cfaabi_dbg_print_safe( "MGUARD : enter %d\n", count);
…
 // Sorts monitors before entering
 void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) {
+	// optimization
+	thread_desc * thrd = TL_GET( this_thread );
+
 	// Store current array
 	this.m = *m;

 	// Save previous thread context
-	this.prev = TL_GET( this_thread )->monitors;
+	this.prev = thrd->monitors;

 	// Update thread context (needed for conditions)
-	(TL_GET( this_thread )->monitors){m, 1, func};
+	(thrd->monitors){m, 1, func};

 	__enter_monitor_dtor( this.m, func );
…
 	// Create the node specific to this wait operation
-	wait_ctx_primed( TL_GET( this_thread ), 0 )
+	wait_ctx_primed( kernelTLS.this_thread, 0 )

 	//save contexts
…
 	// Create the node specific to this wait operation
-	wait_ctx_primed( TL_GET( this_thread ), 0 );
+	wait_ctx_primed( kernelTLS.this_thread, 0 );

 	// Save monitor states
…
 	// Create the node specific to this wait operation
-	wait_ctx_primed( TL_GET( this_thread ), 0 );
+	wait_ctx_primed( kernelTLS.this_thread, 0 );

 	monitor_save;
…
 	for( __lock_size_t i = 0; i < count; i++) {
-		verify( monitors[i]->owner == TL_GET( this_thread ) );
+		verify( monitors[i]->owner == kernelTLS.this_thread );
 	}
src/libcfa/concurrency/preemption.c (r7d0a3ba → r358cba0)

 // Disable interrupts by incrementing the counter
 void disable_interrupts() {
-	with( TL_GET( preemption_state ) ) {
+	with( kernelTLS.preemption_state ) {
 		enabled = false;
 		__attribute__((unused)) unsigned short new_val = disable_count + 1;
…
 // If counter reaches 0, execute any pending CtxSwitch
 void enable_interrupts( __cfaabi_dbg_ctx_param ) {
-	processor * proc = TL_GET( this_processor ); // Cache the processor now since interrupts can start happening after the atomic add
-	thread_desc * thrd = TL_GET( this_thread ); // Cache the thread now since interrupts can start happening after the atomic add
-
-	with( TL_GET( preemption_state ) ){
+	processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic add
+	thread_desc * thrd = kernelTLS.this_thread;    // Cache the thread now since interrupts can start happening after the atomic add
+
+	with( kernelTLS.preemption_state ){
 		unsigned short prev = disable_count;
 		disable_count -= 1;
…
 // Don't execute any pending CtxSwitch even if counter reaches 0
 void enable_interrupts_noPoll() {
-	unsigned short prev = TL_GET( preemption_state ).disable_count;
-	TL_GET( preemption_state ).disable_count -= 1;
+	unsigned short prev = kernelTLS.preemption_state.disable_count;
+	kernelTLS.preemption_state.disable_count -= 1;
 	verifyf( prev != 0u, "Incremented from %u\n", prev ); // If this triggers someone is enabled already enabled interrupts
 	if( prev == 1 ) {
-		TL_GET( preemption_state ).enabled = true;
+		kernelTLS.preemption_state.enabled = true;
 	}
 }
…
+// KERNEL ONLY
 // Check if a CtxSwitch signal handler shoud defer
 // If true  : preemption is safe
 // If false : preemption is unsafe and marked as pending
 static inline bool preemption_ready() {
-	bool ready = TL_GET( preemption_state ).enabled && !TL_GET( preemption_state ).in_progress; // Check if preemption is safe
-	TL_GET( this_processor )->pending_preemption = !ready;  // Adjust the pending flag accordingly
+	// Check if preemption is safe
+	bool ready = kernelTLS.preemption_state.enabled && ! kernelTLS.preemption_state.in_progress;
+
+	// Adjust the pending flag accordingly
+	kernelTLS.this_processor->pending_preemption = !ready;
 	return ready;
 }
…
 	// Start with preemption disabled until ready
-	TL_GET( preemption_state ).enabled = false;
-	TL_GET( preemption_state ).disable_count = 1;
+	kernelTLS.preemption_state.enabled = false;
+	kernelTLS.preemption_state.disable_count = 1;

 	// Initialize the event kernel
…
 	// before the kernel thread has even started running. When that happens an iterrupt
 	// we a null 'this_processor' will be caught, just ignore it.
-	if(!TL_GET( this_processor )) return;
+	if(! kernelTLS.this_processor ) return;

 	choose(sfp->si_value.sival_int) {
 		case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
-		case PREEMPT_TERMINATE: verify(TL_GET( this_processor )->do_terminate);
+		case PREEMPT_TERMINATE: verify( kernelTLS.this_processor->do_terminate);
 		default:
 			abort( "internal error, signal value is %d", sfp->si_value.sival_int );
…
 	if( !preemption_ready() ) { return; }

-	__cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", TL_GET( this_processor ), TL_GET( this_thread ) );
-
-	TL_GET( preemption_state ).in_progress = true;  // Sync flag : prevent recursive calls to the signal handler
-	signal_unblock( SIGUSR1 );                      // We are about to CtxSwitch out of the signal handler, let other handlers in
-	TL_GET( preemption_state ).in_progress = false; // Clear the in progress flag
+	__cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", kernelTLS.this_processor, kernelTLS.this_thread );
+
+	// Sync flag : prevent recursive calls to the signal handler
+	kernelTLS.preemption_state.in_progress = true;
+
+	// We are about to CtxSwitch out of the signal handler, let other handlers in
+	signal_unblock( SIGUSR1 );
+
+	// TODO: this should go in finish action
+	// Clear the in progress flag
+	kernelTLS.preemption_state.in_progress = false;

 	// Preemption can occur here

-	BlockInternal( (thread_desc*)TL_GET( this_thread ) ); // Do the actual CtxSwitch
+	BlockInternal( kernelTLS.this_thread ); // Do the actual CtxSwitch
 }
…
 	// Block sigalrms to control when they arrive
 	sigset_t mask;
+	sigfillset(&mask);
+	if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
+		abort( "internal error, pthread_sigmask" );
+	}
+
 	sigemptyset( &mask );
 	sigaddset( &mask, SIGALRM );
-
-	if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
-		abort( "internal error, pthread_sigmask" );
-	}

 	// Main loop
…
 void __cfaabi_check_preemption() {
-	bool ready = TL_GET( preemption_state ).enabled;
+	bool ready = kernelTLS.preemption_state.enabled;
 	if(!ready) { abort("Preemption should be ready"); }
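The alarm-thread hunk above follows a standard POSIX pattern: a dedicated thread first blocks every signal, then waits synchronously for the ones it owns, so asynchronous SIGALRM delivery never lands on an arbitrary stack. A generic sketch of that pattern, independent of the CFA runtime:

#include <csignal>
#include <cstdio>
#include <pthread.h>

void * timer_thread( void * ) {
	sigset_t mask;
	sigfillset( &mask );                            // block everything in this thread...
	pthread_sigmask( SIG_BLOCK, &mask, nullptr );

	sigemptyset( &mask );
	sigaddset( &mask, SIGALRM );                    // ...then wait synchronously for SIGALRM
	for ( ;; ) {
		int sig;
		if ( sigwait( &mask, &sig ) == 0 )
			printf( "tick (signal %d)\n", sig );
	}
	return nullptr;
}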
src/libcfa/concurrency/thread.c
r7d0a3ba r358cba0 39 39 curr_cluster = &cl; 40 40 next = NULL; 41 __cfaabi_dbg_debug_do( 42 dbg_next = NULL; 43 dbg_prev = NULL; 44 __cfaabi_dbg_thread_register(&this); 45 ) 41 42 node.next = NULL; 43 node.prev = NULL; 44 doregister(this); 46 45 47 46 monitors{ &self_mon_p, 1, (fptr_t)0 }; … … 49 48 50 49 void ^?{}(thread_desc& this) with( this ) { 50 unregister(this); 51 51 ^self_cor{}; 52 52 } … … 81 81 disable_interrupts(); 82 82 create_stack(&thrd_c->stack, thrd_c->stack.size); 83 TL_SET( this_coroutine, thrd_c );83 kernelTLS.this_coroutine = thrd_c; 84 84 CtxStart(&this, CtxInvokeThread); 85 85 assert( thrd_c->last->stack.context ); … … 91 91 92 92 extern "C" { 93 // KERNEL ONLY 93 94 void __finish_creation(void) { 94 coroutine_desc* thrd_c = TL_GET( this_coroutine );95 coroutine_desc* thrd_c = kernelTLS.this_coroutine; 95 96 ThreadCtxSwitch( thrd_c, thrd_c->last ); 96 97 } … … 98 99 99 100 void yield( void ) { 100 verify( TL_GET( preemption_state ).enabled ); 101 // Safety note : This could cause some false positives due to preemption 102 verify( TL_GET( preemption_state.enabled ) ); 101 103 BlockInternal( TL_GET( this_thread ) ); 102 verify( TL_GET( preemption_state ).enabled ); 104 // Safety note : This could cause some false positives due to preemption 105 verify( TL_GET( preemption_state.enabled ) ); 103 106 } 104 107 … … 109 112 } 110 113 114 // KERNEL ONLY 111 115 void ThreadCtxSwitch(coroutine_desc* src, coroutine_desc* dst) { 112 116 // set state of current coroutine to inactive … … 116 120 // set new coroutine that the processor is executing 117 121 // and context switch to it 118 TL_SET( this_coroutine, dst );122 kernelTLS.this_coroutine = dst; 119 123 assert( src->stack.context ); 120 124 CtxSwitch( src->stack.context, dst->stack.context ); 121 TL_SET( this_coroutine, src );125 kernelTLS.this_coroutine = src; 122 126 123 127 // set state of new coroutine to active -
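The constructor and destructor above now pair doregister/unregister over an intrusive node (node.next, node.prev) embedded in the descriptor, replacing the debug-only dbg_next/dbg_prev links. A hedged sketch of the idea in plain C, with the node flattened into a simplified descriptor and with no locking, which the real global list presumably needs:

    #include <stdio.h>

    struct thread_desc { struct thread_desc * next, * prev; int id; };  // simplified stand-in

    static struct thread_desc * head = 0;

    static void doregister( struct thread_desc * t ) { // O(1) push-front, no allocation
        t->next = head;
        t->prev = 0;
        if ( head ) head->prev = t;
        head = t;
    }

    static void unregister( struct thread_desc * t ) { // O(1) unlink from anywhere
        if ( t->prev ) t->prev->next = t->next;
        else head = t->next;
        if ( t->next ) t->next->prev = t->prev;
    }

    int main( void ) {
        struct thread_desc a = { 0, 0, 1 }, b = { 0, 0, 2 };
        doregister( &a ); doregister( &b );
        unregister( &a );
        for ( struct thread_desc * p = head; p; p = p->next ) printf( "%d\n", p->id );
        return 0;
    }

Because the links live inside thread_desc, registration cannot fail and costs no allocation, which matters in a constructor that runs before the thread is even schedulable.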
src/libcfa/interpose.c
r7d0a3ba r358cba0 10 10 // Created On : Wed Mar 29 16:10:31 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue May 1 15:05:35 2018 13 // Update Count : 83 14 // 15 16 #include <stdarg.h> 17 #include <stddef.h> 18 19 extern "C" { 20 #include <stdio.h> 21 #include <string.h> 22 #include <dlfcn.h> 23 #include <unistd.h> 12 // Last Modified On : Sat May 5 11:37:35 2018 13 // Update Count : 111 14 // 15 16 #include <stdarg.h> // va_start, va_end 17 #include <string.h> // strlen 18 #include <unistd.h> // _exit, getpid 24 19 #define __USE_GNU 25 20 #include <signal.h> 26 21 #undef __USE_GNU 27 #include <execinfo.h> 22 extern "C" { 23 #include <dlfcn.h> // dlopen, dlsym 24 #include <execinfo.h> // backtrace, messages 28 25 } 29 26 30 27 #include "bits/debug.h" 31 28 #include "bits/defs.h" 32 #include "bits/signal.h" 33 #include "startup.h" 29 #include "bits/signal.h" // sigHandler_? 30 #include "startup.h" // STARTUP_PRIORITY_CORE 34 31 35 32 //============================================================================================= … … 37 34 //============================================================================================= 38 35 39 typedef void (* generic_fptr_t)(void);40 generic_fptr_t interpose_symbol( const char * symbol, const char *version ) {36 typedef void (* generic_fptr_t)(void); 37 generic_fptr_t interpose_symbol( const char * symbol, const char * version ) { 41 38 const char * error; 42 39 … … 55 52 } // if 56 53 57 union { generic_fptr_t fptr; void * ptr; } originalFunc;54 union { generic_fptr_t fptr; void * ptr; } originalFunc; 58 55 59 56 #if defined( _GNU_SOURCE ) … … 73 70 } 74 71 75 forall(dtype T) 76 static inline void ptr_from_symbol( T** symbol_ptr, const char * symbol_name, const char * version) { 77 union { 78 generic_fptr_t gp; 79 T* tp; 80 } u; 81 82 u.gp = interpose_symbol( symbol_name, version ); 83 84 *symbol_ptr = u.tp; 85 } 86 87 #define INTERPOSE_LIBC( x, ver ) ptr_from_symbol( (void**)&__cabi_libc.x, #x, ver) 88 89 //============================================================================================= 90 // Terminating Signals logic 72 #define INTERPOSE_LIBC( x, ver ) __cabi_libc.x = (typeof(__cabi_libc.x))interpose_symbol( #x, ver ) 73 74 //============================================================================================= 75 // Interposition Startup logic 91 76 //============================================================================================= 92 77 … … 98 83 99 84 struct { 100 void (* exit)( int ) __attribute__ 101 void (* abort)( void ) __attribute__ 85 void (* exit)( int ) __attribute__(( __noreturn__ )); 86 void (* abort)( void ) __attribute__(( __noreturn__ )); 102 87 } __cabi_libc; 103 88 … … 107 92 const char *version = NULL; 108 93 94 #pragma GCC diagnostic push 95 #pragma GCC diagnostic ignored "-Wdiscarded-qualifiers" 109 96 INTERPOSE_LIBC( abort, version ); 110 97 INTERPOSE_LIBC( exit , version ); 111 112 __cfaabi_sigaction( SIGSEGV, sigHandler_segv , SA_SIGINFO ); // Failure handler 113 __cfaabi_sigaction( SIGBUS , sigHandler_segv , SA_SIGINFO ); // Failure handler 114 __cfaabi_sigaction( SIGILL , sigHandler_ill , SA_SIGINFO ); // Failure handler 115 __cfaabi_sigaction( SIGFPE , sigHandler_fpe , SA_SIGINFO ); // Failure handler 116 __cfaabi_sigaction( SIGABRT, sigHandler_abort, SA_SIGINFO ); // Failure handler 117 __cfaabi_sigaction( SIGTERM, sigHandler_term , SA_SIGINFO ); // Failure handler 118 __cfaabi_sigaction( SIGINT , sigHandler_term , SA_SIGINFO ); // Failure handler 98 
#pragma GCC diagnostic pop 99 100 // Failure handler 101 __cfaabi_sigaction( SIGSEGV, sigHandler_segv , SA_SIGINFO ); 102 __cfaabi_sigaction( SIGBUS , sigHandler_segv , SA_SIGINFO ); 103 __cfaabi_sigaction( SIGILL , sigHandler_ill , SA_SIGINFO ); 104 __cfaabi_sigaction( SIGFPE , sigHandler_fpe , SA_SIGINFO ); 105 __cfaabi_sigaction( SIGABRT, sigHandler_abort, SA_SIGINFO | SA_RESETHAND); 106 __cfaabi_sigaction( SIGTERM, sigHandler_term , SA_SIGINFO ); 107 __cfaabi_sigaction( SIGINT , sigHandler_term , SA_SIGINFO ); 119 108 } 120 109 } … … 125 114 126 115 // Forward declare abort after the __typeof__ call to avoid ambiguities 127 void exit( int status, const char fmt[], ... ) __attribute__ 128 void abort( const char fmt[], ... ) __attribute__ 116 void exit( int status, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ )); 117 void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )); 129 118 130 119 extern "C" { 131 void abort( void ) __attribute__ 120 void abort( void ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) { 132 121 abort( NULL ); 133 122 } 134 123 135 void __cabi_abort( const char fmt[], ... ) __attribute__ 124 void __cabi_abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )) { 136 125 va_list argp; 137 126 va_start( argp, fmt ); … … 140 129 } 141 130 142 void exit( int status ) __attribute__ 131 void exit( int status ) __attribute__(( __nothrow__, __leaf__, __noreturn__ )) { 143 132 __cabi_libc.exit( status ); 144 133 } 145 134 } 146 135 147 void * kernel_abort ( void ) __attribute__ 148 void kernel_abort_msg( void * data, char * buffer, int size ) __attribute__ 149 int kernel_abort_lastframe( void ) __attribute__ 136 void * kernel_abort ( void ) __attribute__(( __nothrow__, __leaf__, __weak__ )) { return NULL; } 137 void kernel_abort_msg( void * data, char * buffer, int size ) __attribute__(( __nothrow__, __leaf__, __weak__ )) {} 138 int kernel_abort_lastframe( void ) __attribute__(( __nothrow__, __leaf__, __weak__ )) { return 4; } 150 139 151 140 enum { abort_text_size = 1024 }; … … 153 142 static int abort_lastframe; 154 143 155 void exit( int status, const char fmt[], ... ) __attribute__ 144 void exit( int status, const char fmt[], ... ) __attribute__(( format(printf, 2, 3), __nothrow__, __leaf__, __noreturn__ )) { 156 145 va_list args; 157 146 va_start( args, fmt ); … … 161 150 } 162 151 163 void abort( const char fmt[], ... ) __attribute__ 152 void abort( const char fmt[], ... ) __attribute__(( format(printf, 1, 2), __nothrow__, __leaf__, __noreturn__ )) { 164 153 void * kernel_data = kernel_abort(); // must be done here to lock down kernel 165 154 int len; -
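The INTERPOSE_LIBC rewrite above drops the polymorphic ptr_from_symbol helper in favour of a direct typeof cast on interpose_symbol's result; the discarded-qualifiers pragma exists only to quiet that cast. The underlying dlsym pattern, sketched stand-alone below (real_exit and init_interpose are illustrative names; the union mirrors the one interpose_symbol itself uses to legalize the object-to-function pointer conversion):

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef void (* exit_fptr_t)( int );
    static exit_fptr_t real_exit;

    static void init_interpose( void ) {
        union { void * ptr; exit_fptr_t fptr; } u;
        u.ptr = dlsym( RTLD_NEXT, "exit" );             // next definition after this object
        if ( u.ptr == NULL ) {
            fprintf( stderr, "interpose failed: %s\n", dlerror() );
            abort();
        }
        real_exit = u.fptr;
    }

    int main( void ) {
        init_interpose();
        real_exit( 0 );                                 // delegates to the real libc exit
    }

(Link with -ldl on older glibc.) With the real pointer saved this way, the library's own exit( int status ) can do its cleanup and then delegate through __cabi_libc.exit, as the definitions above do.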
src/libcfa/stdlib
r7d0a3ba r358cba0 10 10 // Created On : Thu Jan 28 17:12:35 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Jan 2 12:21:04201813 // Update Count : 29 212 // Last Modified On : Sun May 13 23:22:23 2018 13 // Update Count : 299 14 14 // 15 15 16 16 #pragma once 17 17 18 //#define _XOPEN_SOURCE 600 // posix_memalign, *rand48 18 #define __USE_ISOC11 // aligned_alloc 19 19 #include <stdlib.h> // strto*, *abs 20 20 … … 28 28 //--------------------------------------- 29 29 30 // allocation, non-array types 31 static inline forall( dtype T | sized(T) ) T * malloc( void ) { 32 // printf( "* malloc\n" ); 33 return (T *)(void *)malloc( (size_t)sizeof(T) ); // C malloc 34 } // malloc 35 36 // static inline forall( dtype T | sized(T) ) T & malloc( void ) { 37 // int & p = *(T *)(void *)malloc( (size_t)sizeof(T) ); // C malloc 38 // printf( "& malloc %p\n", &p ); 39 // return p; 40 // // return (T &)*(T *)(void *)malloc( (size_t)sizeof(T) ); // C malloc 41 // } // malloc 42 43 extern "C" { void * calloc( size_t dim, size_t size ); } // default C routine 44 static inline forall( dtype T | sized(T) ) T * calloc( size_t dim ) { 45 //printf( "X2\n" ); 46 return (T *)(void *)calloc( dim, sizeof(T) ); // C cmalloc 47 } 48 49 extern "C" { void * realloc( void * ptr, size_t size ); } // default C routine for void * 50 static inline forall( dtype T | sized(T) ) T * realloc( T * ptr, size_t size ) { 51 //printf( "X3\n" ); 52 return (T *)(void *)realloc( (void *)ptr, size ); 53 } 54 55 extern "C" { void * memalign( size_t align, size_t size ); } // use default C routine for void * 56 static inline forall( dtype T | sized(T) ) T * memalign( size_t align ) { 57 //printf( "X4\n" ); 58 return (T *)memalign( align, sizeof(T) ); 59 } // memalign 60 61 static inline forall( dtype T | sized(T) ) T * aligned_alloc( size_t align ) { 62 //printf( "X5\n" ); 63 return (T *)memalign( align, sizeof(T) ); 64 } // aligned_alloc 65 66 extern "C" { int posix_memalign( void ** ptr, size_t align, size_t size ); } // use default C routine for void * 67 static inline forall( dtype T | sized(T) ) int posix_memalign( T ** ptr, size_t align ) { 68 //printf( "X6\n" ); 69 return posix_memalign( (void **)ptr, align, sizeof(T) ); 70 } // posix_memalign 71 72 73 extern "C" { void * memset( void * dest, int c, size_t size ); } // use default C routine for void * 74 75 static inline forall( dtype T | sized(T) ) T * alloc( void ) { 76 //printf( "X7\n" ); 77 return (T *)(void *)malloc( (size_t)sizeof(T) ); // C malloc 78 } // alloc 79 static inline forall( dtype T | sized(T) ) T * alloc( char fill ) { 80 //printf( "X8\n" ); 81 T * ptr = (T *)(void *)malloc( (size_t)sizeof(T) ); // C malloc 82 return (T *)memset( ptr, (int)fill, sizeof(T) ); // initial with fill value 83 } // alloc 84 85 static inline forall( dtype T | sized(T) ) T * alloc( size_t dim ) { 86 //printf( "X9\n" ); 87 return (T *)(void *)malloc( dim * (size_t)sizeof(T) ); // C malloc 88 } // alloc 89 static inline forall( dtype T | sized(T) ) T * alloc( size_t dim, char fill ) { 90 //printf( "X10\n" ); 91 T * ptr = (T *)(void *)malloc( dim * (size_t)sizeof(T) ); // C malloc 92 return (T *)memset( ptr, (int)fill, dim * sizeof(T) ); 93 } // alloc 94 95 static inline forall( dtype T | sized(T) ) T * alloc( T ptr[], size_t dim ) { 96 //printf( "X11\n" ); 97 return (T *)(void *)realloc( (void *)ptr, dim * (size_t)sizeof(T) ); // C realloc 98 } // alloc 30 // C dynamic allocation 31 static inline forall( dtype T | sized(T) ) { 32 T * malloc( void ) { 33 // printf( 
"* malloc\n" ); 34 return (T *)(void *)malloc( (size_t)sizeof(T) ); // C malloc 35 } // malloc 36 37 // T & malloc( void ) { 38 // int & p = *(T *)(void *)malloc( (size_t)sizeof(T) ); // C malloc 39 // printf( "& malloc %p\n", &p ); 40 // return p; 41 // // return (T &)*(T *)(void *)malloc( (size_t)sizeof(T) ); // C malloc 42 // } // malloc 43 44 T * calloc( size_t dim ) { 45 //printf( "X2\n" ); 46 return (T *)(void *)calloc( dim, sizeof(T) ); // C calloc 47 } // calloc 48 49 T * realloc( T * ptr, size_t size ) { 50 //printf( "X3\n" ); 51 return (T *)(void *)realloc( (void *)ptr, size ); 52 } // realloc 53 54 extern "C" { void * memalign( size_t align, size_t size ); } // use default C routine for void * 55 T * memalign( size_t align ) { 56 //printf( "X4\n" ); 57 return (T *)memalign( align, sizeof(T) ); 58 } // memalign 59 60 extern "C" { void * aligned_alloc( size_t align, size_t size ); } // use default C routine for void * 61 T * aligned_alloc( size_t align ) { 62 //printf( "X5\n" ); 63 return (T *)aligned_alloc( align, sizeof(T) ); 64 } // aligned_alloc 65 66 int posix_memalign( T ** ptr, size_t align ) { 67 //printf( "X6\n" ); 68 return posix_memalign( (void **)ptr, align, sizeof(T) ); // C posix_memalign 69 } // posix_memalign 70 71 72 // Cforall dynamic allocation 73 extern "C" { void * memset( void * dest, int c, size_t size ); } // use default C routine for void * 74 75 T * alloc( void ) { 76 //printf( "X7\n" ); 77 return (T *)(void *)malloc( (size_t)sizeof(T) ); // C malloc 78 } // alloc 79 80 T * alloc( char fill ) { 81 //printf( "X8\n" ); 82 T * ptr = (T *)(void *)malloc( (size_t)sizeof(T) ); // C malloc 83 return (T *)memset( ptr, (int)fill, sizeof(T) ); // initial with fill value 84 } // alloc 85 86 T * alloc( size_t dim ) { 87 //printf( "X9\n" ); 88 return (T *)(void *)malloc( dim * (size_t)sizeof(T) ); // C malloc 89 } // alloc 90 91 T * alloc( size_t dim, char fill ) { 92 //printf( "X10\n" ); 93 T * ptr = (T *)(void *)malloc( dim * (size_t)sizeof(T) ); // C malloc 94 return (T *)memset( ptr, (int)fill, dim * sizeof(T) ); // initial with fill value 95 } // alloc 96 97 T * alloc( T ptr[], size_t dim ) { 98 //printf( "X11\n" ); 99 return (T *)(void *)realloc( (void *)ptr, dim * (size_t)sizeof(T) ); // C realloc 100 } // alloc 101 } // distribution 102 103 99 104 forall( dtype T | sized(T) ) T * alloc( T ptr[], size_t dim, char fill ); 100 105 -
src/libcfa/time.c
r7d0a3ba r358cba0 10 10 // Created On : Tue Mar 27 13:33:14 2018 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Apr 12 14:41:00 201813 // Update Count : 2212 // Last Modified On : Sun May 6 22:26:00 2018 13 // Update Count : 37 14 14 // 15 15 … … 17 17 #include "iostream" 18 18 #include <stdio.h> // snprintf 19 #include <assert.h> 19 20 20 21 static char * nanomsd( long int ns, char * buf ) { // most significant digits … … 87 88 time_t s = tv / TIMEGRAN; 88 89 tm tm; 89 gmtime_r( &s, &tm ); 90 gmtime_r( &s, &tm ); // tm_mon <= 11, tm_mday <= 31 91 #if defined(__GNUC__) && __GNUC__ >= 7 92 #pragma GCC diagnostic push 93 #pragma GCC diagnostic ignored "-Wformat-truncation" 94 #endif 90 95 snprintf( buf, 9, "%02d/%02d/%02d", tm.tm_year % 99, tm.tm_mon + 1, tm.tm_mday ); 96 #if defined(__GNUC__) && __GNUC__ >= 7 97 #pragma GCC diagnostic pop 98 #endif 91 99 return buf; 92 100 } // yy_mm_dd … … 95 103 time_t s = tv / TIMEGRAN; 96 104 tm tm; 97 gmtime_r( &s, &tm ); 105 gmtime_r( &s, &tm ); // tm_mon <= 11, tm_mday <= 31 106 #if defined(__GNUC__) && __GNUC__ >= 7 107 #pragma GCC diagnostic push 108 #pragma GCC diagnostic ignored "-Wformat-truncation" 109 #endif 98 110 snprintf( buf, 9, "%02d/%02d/%02d", tm.tm_mon + 1, tm.tm_mday, tm.tm_year % 99 ); 111 #if defined(__GNUC__) && __GNUC__ >= 7 112 #pragma GCC diagnostic pop 113 #endif 99 114 return buf; 100 115 } // mm_dd_yy … … 103 118 time_t s = tv / TIMEGRAN; 104 119 tm tm; 105 gmtime_r( &s, &tm ); 120 gmtime_r( &s, &tm ); // tm_mon <= 11, tm_mday <= 31 121 #if defined(__GNUC__) && __GNUC__ >= 7 122 #pragma GCC diagnostic push 123 #pragma GCC diagnostic ignored "-Wformat-truncation" 124 #endif 106 125 snprintf( buf, 9, "%02d/%02d/%02d", tm.tm_mday, tm.tm_mon + 1, tm.tm_year % 99 ); 126 #if defined(__GNUC__) && __GNUC__ >= 7 127 #pragma GCC diagnostic pop 128 #endif 107 129 return buf; 108 130 } // dd_mm_yy -
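The three pragma blocks above all silence the same spurious diagnostic: gcc 7's -Wformat-truncation cannot prove that tm_mon + 1 <= 12 and tm_mday <= 31 from gmtime_r make "%02d/%02d/%02d" fit the 9-byte buffer. The pattern generalizes to any provably-safe snprintf; a minimal sketch (demo is an illustrative name, taking tm-style fields):

    #include <stdio.h>

    void demo( char buf[9], int mon, int mday, int year ) {  // year counts from 1900, mon from 0
    #if defined(__GNUC__) && __GNUC__ >= 7
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wformat-truncation"
    #endif
        snprintf( buf, 9, "%02d/%02d/%02d", year % 99, mon + 1, mday );
    #if defined(__GNUC__) && __GNUC__ >= 7
    #pragma GCC diagnostic pop
    #endif
    }

    int main( void ) {
        char buf[9];
        demo( buf, 4, 15, 118 );   // May 15 2018: 118 % 99 == 19, prints "19/05/15"
        puts( buf );
        return 0;
    }

Guarding on __GNUC__ >= 7 matters because older compilers would otherwise complain about the unknown -Wformat-truncation option inside the pragma itself.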
src/main.cc
r7d0a3ba r358cba0 10 10 // Created On : Fri May 15 23:12:02 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed May 2 14:59:02 2018 13 // Update Count : 490 12 // Last Modified On : Mon May 7 14:35:57 2018 13 // Update Count : 492 14 14 // 15 15 … … 64 64 65 65 #define PASS(name, pass) \ 66 if ( errorp ) { cerr << #name << endl; } \ 67 HeapStats::newPass( #name); \ 66 if ( errorp ) { cerr << name << endl; } \ 67 HeapStats::newPass(name); \ 68 68 pass; 69 69 … … 371 371 } // if 372 372 return 1; 373 } // try 373 } catch(...) { 374 std::exception_ptr eptr = std::current_exception(); 375 try { 376 if (eptr) { 377 std::rethrow_exception(eptr); 378 } 379 else { 380 std::cerr << "Exception Uncaught and Unknown" << std::endl; 381 } 382 } catch(const std::exception& e) { 383 std::cerr << "Uncaught Exception \"" << e.what() << "\"\n"; 384 } 385 return 1; 386 } // try 374 387 375 388 deleteAll( translationUnit ); 376 HeapStats::printStats(); 389 if(!libcfap && !treep) HeapStats::printStats(); 377 390 return 0; 378 391 } // main … … 407 420 opterr = 0; // (global) prevent getopt from printing error messages 408 421 409 bool Werror = false; 422 bool Wsuppress = false, Werror = false; 410 423 int c; 411 424 while ( (c = getopt_long( argc, argv, "abBcCdefgGlLmnNpqrstTvwW:yzZD:F:", long_opts, &long_index )) != -1 ) { … … 495 508 break; 496 509 case 'w': 497 SemanticWarning_SuppressAll(); 510 Wsuppress = true; 498 511 break; 499 512 case 'W': … … 534 547 assertf( false, "Unknown option: %s\n", argv[optind - 1] ); 535 548 } // if 536 #if __GNUC__ < 7 537 #else 549 #if defined(__GNUC__) && __GNUC__ >= 7 538 550 __attribute__((fallthrough)); 539 551 #endif … … 545 557 if ( Werror ) { 546 558 SemanticWarning_WarningAsError(); 559 } // if 560 if ( Wsuppress ) { 561 SemanticWarning_SuppressAll(); 547 562 } // if 548 563 // for ( const auto w : WarningFormats ) {
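Two details above deserve a note. First, PASS now takes the pass name as an ordinary string argument (name rather than the stringized #name), so call sites must pass a quoted name, letting the same label feed both the trace output and HeapStats::newPass. Second, the option parser marks its deliberate case fall-through with the statement attribute only when the compiler is new enough to understand it. A stand-alone sketch of that guard (classify and its cases are illustrative, not the compiler driver's actual logic):

    #include <stdio.h>

    static int classify( int c ) {
        switch ( c ) {
          case 'w':
            printf( "suppress warnings\n" );        // 'w' implies warning control too
    #if defined(__GNUC__) && __GNUC__ >= 7
            __attribute__((fallthrough));           // documents the intentional drop-through
    #endif
          case 'W':
            printf( "warning control\n" );
            return 1;
          default:
            return 0;
        }
    }

    int main( void ) {
        return classify( 'w' ) ? 0 : 1;
    }

Without the guard, older gcc does not accept the attribute in statement position, while gcc 7's -Wimplicit-fallthrough complains when the annotation is absent.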
src/tests/coroutine/fmtLines.c
r7d0a3ba r358cba0 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // fmtLines.cc -- 7 // fmtLines.cc -- format characters into blocks of 4 and groups of 5 blocks per line 8 8 // 9 9 // Author : Peter A. Buhr 10 10 // Created On : Sun Sep 17 21:56:15 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Dec 5 21:56:35 2017 13 // Update Count : 38 12 // Last Modified On : Tue May 15 12:25:33 2018 13 // Update Count : 42 14 14 // 15 15 … … 21 21 int g, b; // global because used in destructor 22 22 }; 23 24 void ?{}( Format & fmt ) { 25 resume( fmt ); // prime (start) coroutine 26 } 27 28 void ^?{}( Format & fmt ) with( fmt ) { 29 if ( g != 0 || b != 0 ) sout | endl; 30 } 31 23 32 24 void main( Format & fmt ) with( fmt ) { … … 46 38 } // main 47 39 48 void prt( Format & fmt, char ch ) { 49 fmt.ch = ch; 40 void ?{}( Format & fmt ) { 41 resume( fmt ); // prime (start) coroutine 42 } 43 44 void ^?{}( Format & fmt ) with( fmt ) { 45 if ( g != 0 || b != 0 ) sout | endl; 46 } 47 48 void format( Format & fmt ) { 50 49 resume( fmt ); 51 } // format 52 51 53 52 int main() { 54 Format fmt; // format characters into blocks of 4 and groups of 5 blocks per line 55 char ch; 53 Format fmt; 56 54 57 Eof: for ( ;; ) { // read until end of file 58 sin | ch; // read one character 59 if ( eof( sin ) ) break Eof; // eof ? 60 prt( fmt, ch ); // push character for formatting 55 eof: for ( ;; ) { // read until end of file 56 sin | fmt.ch; // read one character 57 if ( eof( sin ) ) break eof; // eof ? 58 format( fmt ); // push character for formatting 61 59 } // for 62 60 } // main
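Moving ?{} and ^?{} next to format, and reading straight into fmt.ch, tightens the test without changing its output rule: blocks of 4 characters, 5 blocks per line, with the destructor flushing any partial line. For contrast, the same rule as a plain C loop has to carry the g and b state explicitly; the two-space block separator below is an assumption for illustration, not taken from the test:

    #include <stdio.h>

    int main( void ) {
        int g = 0, b = 0, ch;                     // state the coroutine keeps implicitly
        while ( (ch = getchar()) != EOF ) {
            putchar( ch );
            if ( ++b == 4 ) {                     // completed a block of 4 characters
                b = 0;
                printf( "  " );                   // assumed block separator
                if ( ++g == 5 ) {                 // completed 5 blocks on this line
                    g = 0;
                    printf( "\n" );
                }
            }
        }
        if ( g != 0 || b != 0 ) printf( "\n" );   // mirror the destructor's flush
    }

The coroutine version wins on structure: the formatting loop reads top-to-bottom while main stays a bare read-and-push loop, precisely because suspend/resume preserves g and b across calls.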