Changeset 6ff08d8
- Timestamp: Jul 12, 2021, 1:44:35 PM
- Branches: ADT, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 605673f, 9345684
- Parents: cf444b6 (diff), a953c2e3 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Files:
  - 1 added
  - 66 edited
  - 4 moved
benchmark/readyQ/rq_bench.hfa
rcf444b6 r6ff08d8 93 93 94 94 struct __attribute__((aligned(128))) bench_sem { 95 struct $thread* volatile ptr;95 struct thread$ * volatile ptr; 96 96 }; 97 97 … … 105 105 bool wait(bench_sem & this) { 106 106 for() { 107 struct $thread* expected = this.ptr;107 struct thread$ * expected = this.ptr; 108 108 if(expected == 1p) { 109 109 if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { … … 124 124 bool post(bench_sem & this) { 125 125 for() { 126 struct $thread* expected = this.ptr;126 struct thread$ * expected = this.ptr; 127 127 if(expected == 1p) return false; 128 128 if(expected == 0p) { -
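Beyond the $thread/thread$ rename, the bench_sem hunk above shows the benchmark's handshake: ptr holds 0p (empty), 1p (posted), or a pointer to the single blocked thread, and both operations retry a compare-and-swap until they win. The following is a minimal plain-C sketch of the same protocol, not the CFA runtime's implementation: the waiter type and the semaphore-based park/unpark are illustrative stand-ins for thread$ and the scheduler calls.

#include <stdatomic.h>
#include <stdbool.h>
#include <semaphore.h>

#define POSTED ((struct waiter *)1)        /* sentinel: a token is pending */

struct waiter { sem_t sem; };              /* stand-in for thread$ */

struct bench_sem { _Atomic(struct waiter *) ptr; };

static void waiter_init(struct waiter *w) { sem_init(&w->sem, 0, 0); }

/* Returns true if the caller actually blocked. */
static bool bench_wait(struct bench_sem *s, struct waiter *self) {
	for (;;) {
		struct waiter *expected = atomic_load(&s->ptr);
		if (expected == POSTED) {          /* token available: try to consume it */
			if (atomic_compare_exchange_strong(&s->ptr, &expected, NULL))
				return false;
		} else {                           /* publish ourselves, then park */
			if (atomic_compare_exchange_strong(&s->ptr, &expected, self)) {
				sem_wait(&self->sem);
				return true;
			}
		}
	}
}

/* Returns true if a blocked thread was woken. */
static bool bench_post(struct bench_sem *s) {
	for (;;) {
		struct waiter *expected = atomic_load(&s->ptr);
		if (expected == POSTED) return false;    /* token already pending */
		if (expected == NULL) {                  /* no waiter: leave a token */
			if (atomic_compare_exchange_strong(&s->ptr, &expected, POSTED))
				return false;
		} else {                                 /* wake the published waiter */
			if (atomic_compare_exchange_strong(&s->ptr, &expected, NULL)) {
				sem_post(&expected->sem);
				return true;
			}
		}
	}
}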
benchmark/readyQ/transfer.cfa
rcf444b6 r6ff08d8 14 14 bool exhaust = false; 15 15 16 $thread* the_main;16 thread$ * the_main; 17 17 18 18 thread __attribute__((aligned(128))) MyThread { -
benchmark/size/size.cfa
rcf444b6 r6ff08d8 2 2 3 3 int main(){ 4 printf("Coroutine : %zu bytes\n", sizeof( $coroutine));5 printf("Monitor : %zu bytes\n", sizeof( $monitor));6 printf("Thread : %zu bytes\n", sizeof( $thread));4 printf("Coroutine : %zu bytes\n", sizeof( coroutine$ )); 5 printf("Monitor : %zu bytes\n", sizeof( monitor$ )); 6 printf("Thread : %zu bytes\n", sizeof( thread$ )); 7 7 printf("Processor : %zu bytes\n", sizeof( processor )); 8 8 printf("Cluster : %zu bytes\n", sizeof( cluster )); -
doc/theses/andrew_beach_MMath/code/CondCatch.java
rcf444b6 r6ff08d8 3 3 class EmptyException extends Exception {} 4 4 5 public class Cond Match {5 public class CondCatch { 6 6 static boolean should_catch = false; 7 7 … … 20 20 } 21 21 22 public static void main(String[] args) { 23 int times = 1; 24 int total_frames = 1; 25 if (0 < args.length) { 26 times = Integer.parseInt(args[0]); 27 } 28 if (1 < args.length) { 29 total_frames = Integer.parseInt(args[1]); 30 } 31 22 private static long loop(int times) { 23 long startTime = System.nanoTime(); 32 24 for (int count = 0 ; count < times ; ++count) { 33 25 try { … … 37 29 } 38 30 } 31 long endTime = System.nanoTime(); 32 return endTime - startTime; 33 } 34 35 public static void main(String[] args) { 36 int times = 1; 37 if (0 < args.length) { 38 times = Integer.parseInt(args[0]); 39 } 40 if (1 < args.length) { 41 should_catch = 0 != Integer.parseInt(args[1]); 42 } 43 44 // Warm-Up: 45 loop(1000); 46 47 long time = loop(times); 48 System.out.println("Run-Time (ns): " + time); 39 49 } 40 50 } -
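The CondCatch.java hunk above, and every benchmark diff that follows, applies the same restructuring: hoist the timed loop into a loop() helper, run a discarded warm-up pass, then print the elapsed monotonic time as "Run-Time (ns)"; the C++ and CFA files get the analogous treatment with steady_clock and timeHiRes. A minimal C analogue of that harness, assuming a hypothetical workload() under test, with CLOCK_MONOTONIC playing the role of System.nanoTime():

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static void workload(void) { /* body under test */ }

/* Time `times` iterations of the workload on the monotonic clock. */
static long long loop(int times) {
	struct timespec start, end;
	clock_gettime(CLOCK_MONOTONIC, &start);
	for (int count = 0; count < times; ++count)
		workload();
	clock_gettime(CLOCK_MONOTONIC, &end);
	return (end.tv_sec - start.tv_sec) * 1000000000LL
	     + (end.tv_nsec - start.tv_nsec);
}

int main(int argc, char *argv[]) {
	int times = 1;
	if (1 < argc) times = strtol(argv[1], NULL, 10);

	loop(1000);                        /* warm-up pass, result discarded */

	printf("Run-Time (ns): %lld\n", loop(times));
}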
doc/theses/andrew_beach_MMath/code/CrossCatch.java
rcf444b6 r6ff08d8 4 4 5 5 public class CrossCatch { 6 public static void main(String[] args) { 7 int times = 1; 8 boolean shouldThrow = false; 9 if (0 < args.length) { 10 times = Integer.parseInt(args[0]); 11 } 6 private static boolean shouldThrow = false; 12 7 8 private static long loop(int times) { 9 long startTime = System.nanoTime(); 13 10 for (int count = 0 ; count < times ; ++count) { 14 11 try { … … 20 17 } 21 18 } 19 long endTime = System.nanoTime(); 20 return endTime - startTime; 21 } 22 23 public static void main(String[] args) { 24 int times = 1; 25 if (0 < args.length) { 26 times = Integer.parseInt(args[0]); 27 } 28 29 // Warm-Up: 30 loop(1000); 31 32 long time = loop(times); 33 System.out.println("Run-Time (ns): " + time); 22 34 } 23 35 } -
doc/theses/andrew_beach_MMath/code/CrossFinally.java
rcf444b6 r6ff08d8 2 2 3 3 public class CrossFinally { 4 public static void main(String[] args) { 5 int times = 1; 6 boolean shouldThrow = false; 7 if (0 < args.length) { 8 times = Integer.parseInt(args[0]); 9 } 4 private static boolean shouldThrow = false; 10 5 6 private static long loop(int times) { 7 long startTime = System.nanoTime(); 11 8 for (int count = 0 ; count < times ; ++count) { 12 9 try { … … 16 13 } 17 14 } 15 long endTime = System.nanoTime(); 16 return endTime - startTime; 17 } 18 19 public static void main(String[] args) { 20 int times = 1; 21 if (0 < args.length) { 22 times = Integer.parseInt(args[0]); 23 } 24 25 // Warm-Up: 26 loop(1000); 27 28 long time = loop(times); 29 System.out.println("Run-Time (ns): " + time); 18 30 } 19 31 } -
doc/theses/andrew_beach_MMath/code/ThrowEmpty.java
rcf444b6 r6ff08d8 12 12 } 13 13 14 private static long loop(int times, int total_frames) { 15 long startTime = System.nanoTime(); 16 for (int count = 0 ; count < times ; ++count) { 17 try { 18 unwind_empty(total_frames); 19 } catch (EmptyException e) { 20 // ... 21 } 22 } 23 long endTime = System.nanoTime(); 24 return endTime - startTime; 25 } 26 14 27 public static void main(String[] args) { 15 28 int times = 1; … … 22 35 } 23 36 24 for (int count = 0 ; count < times ; ++count) { 25 try { 26 unwind_empty(total_frames); 27 } catch (EmptyException e) { 28 // ... 29 } 30 } 37 // Warm-Up: 38 loop(1000, total_frames); 39 40 long time = loop(times, total_frames); 41 System.out.println("Run-Time (ns): " + time); 31 42 } 32 43 } -
doc/theses/andrew_beach_MMath/code/ThrowFinally.java
rcf444b6 r6ff08d8 13 13 } 14 14 15 private static long loop(int times, int total_frames) { 16 long startTime = System.nanoTime(); 17 for (int count = 0 ; count < times ; ++count) { 18 try { 19 unwind_finally(total_frames); 20 } catch (EmptyException e) { 21 // ... 22 } 23 } 24 long endTime = System.nanoTime(); 25 return endTime - startTime; 26 } 27 15 28 public static void main(String[] args) { 16 29 int times = 1; … … 23 36 } 24 37 25 for (int count = 0 ; count < times ; ++count) { 26 try { 27 unwind_finally(total_frames); 28 } catch (EmptyException e) { 29 // ... 30 } 31 } 38 // Warm-Up: 39 loop(1000, total_frames); 40 41 long time = loop(times, total_frames); 42 System.out.println("Run-Time (ns): " + time); 32 43 } 33 44 } -
doc/theses/andrew_beach_MMath/code/ThrowOther.java
rcf444b6 r6ff08d8 24 24 } 25 25 26 private static long loop(int times, int total_frames) { 27 long startTime = System.nanoTime(); 28 for (int count = 0 ; count < times ; ++count) { 29 try { 30 unwind_other(total_frames); 31 } catch (EmptyException e) { 32 // ... 33 } catch (NotRaisedException e) { 34 // ... 35 } 36 } 37 long endTime = System.nanoTime(); 38 return endTime - startTime; 39 } 40 26 41 public static void main(String[] args) { 27 42 int times = 1; … … 34 49 } 35 50 36 for (int count = 0 ; count < times ; ++count) { 37 try { 38 unwind_other(total_frames); 39 } catch (EmptyException e) { 40 // ... 41 } catch (NotRaisedException e) { 42 // ... 43 } 44 } 51 // Warm-Up: 52 loop(1000, total_frames); 53 54 long time = loop(times, total_frames); 55 System.out.println("Run-Time (ns): " + time); 45 56 } 46 57 } -
doc/theses/andrew_beach_MMath/code/cond-catch.cfa
rcf444b6 r6ff08d8 1 1 // Conditional Match (or Re-Raise) 2 #include <clock.hfa> 2 3 #include <exception.hfa> 4 #include <fstream.hfa> 3 5 #include <stdlib.h> 4 6 … … 23 25 int main(int argc, char * argv[]) { 24 26 unsigned int times = 1; 25 if ( 2< argc) {27 if (1 < argc) { 26 28 times = strtol(argv[1], 0p, 10); 27 29 } 30 if (2 < argc) { 31 should_catch = strtol(argv[2], 0p, 10); 32 } 28 33 34 Time start_time = timeHiRes(); 29 35 for (unsigned int count = 0 ; count < times ; ++count) { 30 36 try { … … 34 40 } 35 41 } 42 Time end_time = timeHiRes(); 43 sout | "Run-Time (ns): " | (end_time - start_time)`ns; 36 44 } -
doc/theses/andrew_beach_MMath/code/cond-catch.cpp
rcf444b6 r6ff08d8 1 1 // Conditional Match (or Re-Raise) 2 #include <chrono> 3 #include <cstdlib> 2 4 #include <exception> 3 #include <cstdlib> 5 #include <iostream> 6 7 using namespace std::chrono; 4 8 5 9 struct EmptyException : public std::exception {}; … … 23 27 int main(int argc, char * argv[]) { 24 28 unsigned int times = 1; 25 unsigned int total_frames = 1; 26 if (2 < argc) { 29 if (1 < argc) { 27 30 times = strtol(argv[1], nullptr, 10); 28 31 } 29 if ( 3< argc) {30 total_frames= strtol(argv[2], nullptr, 10);32 if (2 < argc) { 33 should_catch = strtol(argv[2], nullptr, 10); 31 34 } 32 35 36 time_point<steady_clock> start_time = steady_clock::now(); 33 37 for (unsigned int count = 0 ; count < times ; ++count) { 34 38 try { … … 38 42 } 39 43 } 44 time_point<steady_clock> end_time = steady_clock::now(); 45 nanoseconds duration = duration_cast<nanoseconds>(end_time - start_time); 46 std::cout << "Run-Time (ns): " << duration.count() << std::endl; 40 47 } -
doc/theses/andrew_beach_MMath/code/cond-fixup.cfa
rcf444b6 r6ff08d8 1 1 // Conditional Match (or Re-Raise) 2 #include <clock.hfa> 2 3 #include <exception.hfa> 4 #include <fstream.hfa> 3 5 #include <stdlib.hfa> 4 6 … … 23 25 int main(int argc, char * argv[]) { 24 26 unsigned int times = 1; 25 unsigned int total_frames = 1; 26 if (2 < argc) { 27 if (1 < argc) { 27 28 times = strtol(argv[1], 0p, 10); 28 29 } 29 if ( 3< argc) {30 total_frames= strtol(argv[2], 0p, 10);30 if (2 < argc) { 31 should_catch = strtol(argv[2], 0p, 10); 31 32 } 32 33 34 Time start_time = timeHiRes(); 33 35 for (unsigned int count = 0 ; count < times ; ++count) { 34 36 try { … … 38 40 } 39 41 } 42 Time end_time = timeHiRes(); 43 sout | "Run-Time (ns): " | (end_time - start_time)`ns; 40 44 } -
doc/theses/andrew_beach_MMath/code/cross-catch.cfa
rcf444b6 r6ff08d8 1 1 // Cross a Try Statement with a Termination Handler 2 #include <clock.hfa> 2 3 #include <exception.hfa> 4 #include <fstream.hfa> 3 5 #include <stdlib.hfa> 4 6 … … 8 10 unsigned int times = 1; 9 11 unsigned int total_frames = 1; 10 if ( 2< argc) {12 if (1 < argc) { 11 13 times = strtol(argv[1], 0p, 10); 12 14 } 13 if ( 3< argc) {15 if (2 < argc) { 14 16 total_frames = strtol(argv[2], 0p, 10); 15 17 } 16 18 19 Time start_time = timeHiRes(); 17 20 for (unsigned int count = 0 ; count < times ; ++count) { 18 21 try { … … 22 25 } 23 26 } 27 Time end_time = timeHiRes(); 28 sout | "Run-Time (ns): " | (end_time - start_time)`ns; 24 29 } -
doc/theses/andrew_beach_MMath/code/cross-catch.cpp
rcf444b6 r6ff08d8 1 1 // Cross a Try Statement with a Termination Handler 2 #include <chrono> 3 #include <cstdlib> 2 4 #include <exception> 3 #include <cstdlib> 5 #include <iostream> 6 7 using namespace std::chrono; 4 8 5 9 struct NotRaisedException : public std::exception {}; … … 7 11 int main(int argc, char * argv[]) { 8 12 unsigned int times = 1; 9 if ( 2< argc) {13 if (1 < argc) { 10 14 times = strtol(argv[1], nullptr, 10); 11 15 } 12 16 17 time_point<steady_clock> start_time = steady_clock::now(); 13 18 for (unsigned int count = 0 ; count < times ; ++count) { 14 19 try { … … 18 23 } 19 24 } 25 time_point<steady_clock> end_time = steady_clock::now(); 26 nanoseconds duration = duration_cast<nanoseconds>(end_time - start_time); 27 std::cout << "Run-Time (ns): " << duration.count() << std::endl; 20 28 } -
doc/theses/andrew_beach_MMath/code/cross-finally.cfa
rcf444b6 r6ff08d8 1 1 // Cross a Try Statement With Finally Clause 2 #include <clock.hfa> 2 3 #include <exception.hfa> 4 #include <fstream.hfa> 3 5 #include <stdlib.hfa> 4 6 … … 6 8 unsigned int times = 1; 7 9 unsigned int total_frames = 1; 8 if ( 2< argc) {10 if (1 < argc) { 9 11 times = strtol(argv[1], 0p, 10); 10 12 } 11 if ( 3< argc) {13 if (2 < argc) { 12 14 total_frames = strtol(argv[2], 0p, 10); 13 15 } 14 16 17 Time start_time = timeHiRes(); 15 18 for (unsigned int count = 0 ; count < times ; ++count) { 16 19 try { … … 20 23 } 21 24 } 25 Time end_time = timeHiRes(); 26 sout | "Run-Time (ns): " | (end_time - start_time)`ns; 22 27 } -
doc/theses/andrew_beach_MMath/code/cross-resume.cfa
rcf444b6 r6ff08d8 1 1 // Cross a Try Statement With Finally Clause 2 #include <clock.hfa> 2 3 #include <exception.hfa> 4 #include <fstream.hfa> 3 5 #include <stdlib.hfa> 4 6 … … 8 10 unsigned int times = 1; 9 11 unsigned int total_frames = 1; 10 if ( 2< argc) {12 if (1 < argc) { 11 13 times = strtol(argv[1], 0p, 10); 12 14 } 13 if ( 3< argc) {15 if (2 < argc) { 14 16 total_frames = strtol(argv[2], 0p, 10); 15 17 } 16 18 19 Time start_time = timeHiRes(); 17 20 for (unsigned int count = 0 ; count < times ; ++count) { 18 21 try { … … 22 25 } 23 26 } 27 Time end_time = timeHiRes(); 28 sout | "Run-Time (ns): " | (end_time - start_time)`ns; 24 29 } -
doc/theses/andrew_beach_MMath/code/resume-detor.cfa
rcf444b6 r6ff08d8 1 1 // Throw Across Destructor 2 #include <clock.hfa> 2 3 #include <exception.hfa> 4 #include <fstream.hfa> 3 5 #include <stdlib.hfa> 4 6 … … 26 28 unsigned int times = 1; 27 29 unsigned int total_frames = 1; 28 if ( 2< argc) {30 if (1 < argc) { 29 31 times = strtol(argv[1], 0p, 10); 30 32 } 31 if ( 3< argc) {33 if (2 < argc) { 32 34 total_frames = strtol(argv[2], 0p, 10); 33 35 } 34 36 37 Time start_time = timeHiRes(); 35 38 for (int count = 0 ; count < times ; ++count) { 36 39 try { … … 40 43 } 41 44 } 45 Time end_time = timeHiRes(); 46 sout | "Run-Time (ns): " | (end_time - start_time)`ns; 42 47 } -
doc/theses/andrew_beach_MMath/code/resume-empty.cfa
rcf444b6 r6ff08d8 1 1 // Resume Across Empty Function 2 #include <clock.hfa> 2 3 #include <exception.hfa> 4 #include <fstream.hfa> 3 5 #include <stdlib.hfa> 4 6 … … 9 11 void unwind_empty(unsigned int frames) { 10 12 if (frames) { 11 12 13 unwind_empty(frames - 1); 13 14 } else { … … 19 20 unsigned int times = 1; 20 21 unsigned int total_frames = 1; 21 if ( 2< argc) {22 if (1 < argc) { 22 23 times = strtol(argv[1], 0p, 10); 23 24 } 24 if ( 3< argc) {25 if (2 < argc) { 25 26 total_frames = strtol(argv[2], 0p, 10); 26 27 } 27 28 29 Time start_time = timeHiRes(); 28 30 for (int count = 0 ; count < times ; ++count) { 29 31 try { … … 33 35 } 34 36 } 37 Time end_time = timeHiRes(); 38 sout | "Run-Time (ns): " | (end_time - start_time)`ns; 35 39 } -
doc/theses/andrew_beach_MMath/code/resume-finally.cfa
rcf444b6 r6ff08d8 1 1 // Throw Across Finally 2 #include <clock.hfa> 2 3 #include <exception.hfa> 4 #include <fstream.hfa> 3 5 #include <stdlib.hfa> 4 6 … … 22 24 unsigned int times = 1; 23 25 unsigned int total_frames = 1; 24 if ( 2< argc) {26 if (1 < argc) { 25 27 times = strtol(argv[1], 0p, 10); 26 28 } 27 if ( 3< argc) {29 if (2 < argc) { 28 30 total_frames = strtol(argv[2], 0p, 10); 29 31 } 30 32 33 Time start_time = timeHiRes(); 31 34 for (int count = 0 ; count < times ; ++count) { 32 35 try { … … 36 39 } 37 40 } 41 Time end_time = timeHiRes(); 42 sout | "Run-Time (ns): " | (end_time - start_time)`ns; 38 43 } -
doc/theses/andrew_beach_MMath/code/resume-other.cfa
rcf444b6 r6ff08d8 1 1 // Resume Across Other Handler 2 #include <clock.hfa> 2 3 #include <exception.hfa> 4 #include <fstream.hfa> 3 5 #include <stdlib.hfa> 4 6 … … 24 26 unsigned int times = 1; 25 27 unsigned int total_frames = 1; 26 if ( 2< argc) {28 if (1 < argc) { 27 29 times = strtol(argv[1], 0p, 10); 28 30 } 29 if ( 3< argc) {31 if (2 < argc) { 30 32 total_frames = strtol(argv[2], 0p, 10); 31 33 } 32 34 35 Time start_time = timeHiRes(); 33 36 for (int count = 0 ; count < times ; ++count) { 34 37 try { … … 38 41 } 39 42 } 43 Time end_time = timeHiRes(); 44 sout | "Run-Time (ns): " | (end_time - start_time)`ns; 40 45 } -
doc/theses/andrew_beach_MMath/code/throw-detor.cfa
rcf444b6 r6ff08d8 1 1 // Throw Across Destructor 2 #include <clock.hfa> 2 3 #include <exception.hfa> 4 #include <fstream.hfa> 3 5 #include <stdlib.hfa> 4 6 … … 25 27 unsigned int times = 1; 26 28 unsigned int total_frames = 1; 27 if ( 2< argc) {29 if (1 < argc) { 28 30 times = strtol(argv[1], 0p, 10); 29 31 } 30 if ( 3< argc) {32 if (2 < argc) { 31 33 total_frames = strtol(argv[2], 0p, 10); 32 34 } 33 35 36 Time start_time = timeHiRes(); 34 37 for (int count = 0 ; count < times ; ++count) { 35 38 try { … … 39 42 } 40 43 } 44 Time end_time = timeHiRes(); 45 sout | "Run-Time (ns): " | (end_time - start_time)`ns; 41 46 } -
doc/theses/andrew_beach_MMath/code/throw-detor.cpp
rcf444b6 r6ff08d8 1 1 // Throw Across Destructor 2 #include <chrono> 3 #include <cstdlib> 2 4 #include <exception> 3 #include <cstdlib> 5 #include <iostream> 6 7 using namespace std::chrono; 4 8 5 9 struct EmptyException : public std::exception {}; … … 21 25 unsigned int times = 1; 22 26 unsigned int total_frames = 1; 23 if ( 2< argc) {27 if (1 < argc) { 24 28 times = strtol(argv[1], nullptr, 10); 25 29 } 26 if ( 3< argc) {30 if (2 < argc) { 27 31 total_frames = strtol(argv[2], nullptr, 10); 28 32 } 29 33 34 time_point<steady_clock> start_time = steady_clock::now(); 30 35 for (int count = 0 ; count < times ; ++count) { 31 36 try { … … 35 40 } 36 41 } 42 time_point<steady_clock> end_time = steady_clock::now(); 43 nanoseconds duration = duration_cast<nanoseconds>(end_time - start_time); 44 std::cout << "Run-Time (ns): " << duration.count() << std::endl; 37 45 } -
doc/theses/andrew_beach_MMath/code/throw-empty.cfa
rcf444b6 r6ff08d8 1 1 // Throw Across Empty Function 2 #include <clock.hfa> 2 3 #include <exception.hfa> 4 #include <fstream.hfa> 3 5 #include <stdlib.hfa> 4 6 … … 18 20 unsigned int times = 1; 19 21 unsigned int total_frames = 1; 20 if ( 2< argc) {22 if (1 < argc) { 21 23 times = strtol(argv[1], 0p, 10); 22 24 } 23 if ( 3< argc) {25 if (2 < argc) { 24 26 total_frames = strtol(argv[2], 0p, 10); 25 27 } 26 28 29 Time start_time = timeHiRes(); 27 30 for (unsigned int count = 0 ; count < times ; ++count) { 28 31 try { … … 32 35 } 33 36 } 37 Time end_time = timeHiRes(); 38 sout | "Run-Time (ns): " | (end_time - start_time)`ns; 34 39 } -
doc/theses/andrew_beach_MMath/code/throw-empty.cpp
rcf444b6 r6ff08d8 1 1 // Throw Across Empty Function 2 #include <chrono> 3 #include <cstdlib> 2 4 #include <exception> 3 #include <cstdlib> 5 #include <iostream> 6 7 using namespace std::chrono; 4 8 5 9 struct EmptyException : public std::exception {}; … … 16 20 unsigned int times = 1; 17 21 unsigned int total_frames = 1; 18 if ( 2< argc) {22 if (1 < argc) { 19 23 times = strtol(argv[1], nullptr, 10); 20 24 } 21 if ( 3< argc) {25 if (2 < argc) { 22 26 total_frames = strtol(argv[2], nullptr, 10); 23 27 } 24 28 29 time_point<steady_clock> start_time = steady_clock::now(); 25 30 for (unsigned int count = 0 ; count < times ; ++count) { 26 31 try { … … 30 35 } 31 36 } 37 time_point<steady_clock> end_time = steady_clock::now(); 38 nanoseconds duration = duration_cast<nanoseconds>(end_time - start_time); 39 std::cout << "Run-Time (ns): " << duration.count() << std::endl; 32 40 } -
doc/theses/andrew_beach_MMath/code/throw-finally.cfa
rcf444b6 r6ff08d8 1 1 // Throw Across Finally 2 #include <clock.hfa> 2 3 #include <exception.hfa> 4 #include <fstream.hfa> 3 5 #include <stdlib.hfa> 4 6 … … 22 24 unsigned int times = 1; 23 25 unsigned int total_frames = 1; 24 if ( 2< argc) {26 if (1 < argc) { 25 27 times = strtol(argv[1], 0p, 10); 26 28 } 27 if ( 3< argc) {29 if (2 < argc) { 28 30 total_frames = strtol(argv[2], 0p, 10); 29 31 } 30 32 33 Time start_time = timeHiRes(); 31 34 for (int count = 0 ; count < times ; ++count) { 32 35 try { … … 36 39 } 37 40 } 41 Time end_time = timeHiRes(); 42 sout | "Run-Time (ns): " | (end_time - start_time)`ns; 38 43 } -
doc/theses/andrew_beach_MMath/code/throw-other.cfa
rcf444b6 r6ff08d8 1 1 // Throw Across Other Handler 2 #include <clock.hfa> 2 3 #include <exception.hfa> 4 #include <fstream.hfa> 3 5 #include <stdlib.hfa> 4 6 … … 24 26 unsigned int times = 1; 25 27 unsigned int total_frames = 1; 26 if ( 2< argc) {28 if (1 < argc) { 27 29 times = strtol(argv[1], 0p, 10); 28 30 } 29 if ( 3< argc) {31 if (2 < argc) { 30 32 total_frames = strtol(argv[2], 0p, 10); 31 33 } 32 34 35 Time start_time = timeHiRes(); 33 36 for (int count = 0 ; count < times ; ++count) { 34 37 try { … … 38 41 } 39 42 } 43 Time end_time = timeHiRes(); 44 sout | "Run-Time (ns): " | (end_time - start_time)`ns; 40 45 } -
doc/theses/andrew_beach_MMath/code/throw-other.cpp
rcf444b6 r6ff08d8 1 1 // Throw Across Other Handler 2 #include <chrono> 3 #include <cstdlib> 2 4 #include <exception> 3 #include <cstdlib> 5 #include <iostream> 6 7 using namespace std::chrono; 4 8 5 9 struct EmptyException : public std::exception {}; … … 22 26 unsigned int times = 1; 23 27 unsigned int total_frames = 1; 24 if ( 2< argc) {28 if (1 < argc) { 25 29 times = strtol(argv[1], nullptr, 10); 26 30 } 27 if ( 3< argc) {31 if (2 < argc) { 28 32 total_frames = strtol(argv[2], nullptr, 10); 29 33 } 30 34 35 time_point<steady_clock> start_time = steady_clock::now(); 31 36 for (int count = 0 ; count < times ; ++count) { 32 37 try { … … 36 41 } 37 42 } 43 time_point<steady_clock> end_time = steady_clock::now(); 44 nanoseconds duration = duration_cast<nanoseconds>(end_time - start_time); 45 std::cout << "Run-Time (ns): " << duration.count() << std::endl; 38 46 } -
doc/theses/mubeen_zulfiqar_MMath/allocator.tex
rcf444b6 r6ff08d8 44 44 45 45 \subsection{Design philosophy} 46 46 The objective of uHeapLmmm's new design was to fulfill the following requirements: 47 \begin{itemize} 48 \item It should be concurrent, so it can be used in multi-threaded programs. 49 \item It should avoid global locks on resources shared across all threads as much as possible. 50 \item Its performance (FIX ME: cite performance benchmarks) should be comparable to the commonly used allocators (FIX ME: cite common allocators). 51 \item It should be a lightweight memory allocator. 52 \end{itemize} 47 53 48 54 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 49 55 50 56 \section{Background and previous design of uHeapLmmm} 51 57 uHeapLmmm was originally designed by X in X (FIX ME: add original author after confirming with Peter). 58 (FIX ME: make and add figure of previous design with description) 52 59 53 60 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 54 61 55 62 \section{Distributed design of uHeapLmmm} 63 uHeapLmmm's design was reviewed and changed to fulfill the new requirements (FIX ME: cite allocator philosophy). For this purpose, the following two designs of uHeapLmmm were proposed: 56 64 65 \paragraph{Design 1: Decentralized} 66 Fixed number of heaps: shard the heap into N heaps, each with a bump-area allocated from the @sbrk@ area. 67 Kernel threads (KT) are assigned to the N heaps. 68 When KTs $\le$ N, the heaps are uncontended. 69 When KTs $>$ N, the heaps are contended. 70 By adjusting N, this approach reduces storage at the cost of speed due to contention. 71 In all cases, a thread acquires/releases a lock, contended or uncontended. 72 \begin{cquote} 73 \centering 74 \input{AllocDS1} 75 \end{cquote} 76 Problems: need to know when a KT is created and destroyed to know when to assign/un-assign a heap to the KT. 77 78 \paragraph{Design 2: Centralized} 79 One heap, but lower bucket sizes are N-shared across KTs. 80 This design leverages the fact that 95\% of allocation requests are less than 512 bytes and there are only 3--5 different request sizes. 81 When KTs $\le$ N, the important bucket sizes are uncontended. 82 When KTs $>$ N, the free buckets are contended. 83 Therefore, threads are only contending for a small number of buckets, which are distributed among them to reduce contention. 84 \begin{cquote} 85 \centering 86 \input{AllocDS2} 87 \end{cquote} 88 Problems: need to know when a kernel thread (KT) is created and destroyed to know when to assign a shared bucket-number. 89 When no thread is assigned a bucket number, its free storage is unavailable. All KTs contend for one lock on sbrk for their initial allocations (before the free lists get populated). 90 91 Of the two designs, Design 1 was chosen because its concurrency is better across all bucket sizes: Design 2 shards only a few buckets of selected sizes, whereas Design 1 shards all the buckets, i.e., the whole heap, and additionally shards the sbrk area. 57 92 58 93 \subsection{Advantages of distributed design} 94 The distributed design of uHeapLmmm is concurrent, allowing it to work in multi-threaded applications. 59 95 96 Some key benefits of the distributed design of uHeapLmmm are as follows: 97 98 \begin{itemize} 99 \item 100 Bump allocation is concurrent, as memory taken from sbrk is sharded across all heaps as a bump-allocation reserve. The lock on bump allocation (on memory taken from sbrk) is only contended if KTs > N. Contention on the sbrk area is unlikely, as it only arises when the heaps assigned to two KTs run short of bump-allocation reserve simultaneously. 101 \item 102 N heaps are created at the start of the program and destroyed at its end. When a KT is created, it is merely assigned to one of the heaps; when a KT is destroyed, it is merely dissociated from its assigned heap, and the heap itself is not destroyed. That heap returns to the pool-of-heaps, ready to be used by some new KT. And if that heap was shared among multiple KTs (the case of KTs > N), then, on deletion of one KT, the heap remains in use by the other KTs. This prevents creation and deletion of heaps at run-time, since heaps are reusable, which helps keep the memory footprint low. 103 \item 104 It is possible to use sharing and stealing techniques to share/find unused storage when a free list is unused or empty. 105 \item 106 The distributed design avoids unnecessary locks on resources shared across all KTs. 107 \end{itemize} 108 109 FIX ME: Cite performance comparison of the two heap designs if required 60 110 61 111 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% … … 72 122 Why did we need it? 73 123 The added benefits. 74 75 76 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 77 % Following is added by Peter 78 79 \noindent 80 ==================== 81 82 \newpage 83 \paragraph{Design 1: Decentralized} 84 Fixed number of heaps: shard the heap into N heaps each with a bump-area allocated from the @sbrk@ area. 85 Kernel threads (KT) are assigned to the N heaps. 86 When KTs $\le$ N, the heaps are uncontented. 87 When KTs $>$ N, the heaps are contented. 88 By adjusting N, this approach reduces storage at the cost of speed due to contention. 89 In all cases, a thread acquires/releases a lock, contented or uncontented. 90 \begin{cquote} 91 \centering 92 \input{AllocDS1} 93 \end{cquote} 94 Problems: need to know when a KT is created and destroyed to know when to create/delete the KT's heap. 95 On KT deletion, its heap freed-storage needs to be distributed somewhere. 96 97 \paragraph{Design 2: Centralized} 98 99 One heap, but lower bucket sizes are N-shared across KTs. 100 This design leverages the fact that 95\% of allocation requests are less than 512 bytes and there are only 3--5 different request sizes. 101 When KTs $\le$ N, the important bucket sizes are uncontented. 102 When KTs $>$ N, the free buckets are contented. 103 Therefore, threads are only contending for a small number of buckets, which are distributed among them to reduce contention. 104 \begin{cquote} 105 \centering 106 \input{AllocDS2} 107 \end{cquote} 108 Problems: need to know when a kernel thread (KT) is created and destroyed to know when to assign a shared bucket-number. 109 When no thread is assigned a bucket number, its free storage is unavailable. 110 It is possible to use sharing and stealing techniques to share/find unused storage, when a free list is unused or empty. -
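The chosen Design 1 assigns each kernel thread to one of N long-lived heaps and recycles heaps instead of destroying them. A rough C sketch of that assignment policy under the assumptions stated in the hunk above; the names (heap_attach, heap_detach, NHEAPS) are illustrative, not uHeapLmmm's actual interface:

#include <stdatomic.h>
#include <stddef.h>

#define NHEAPS 16

struct heap {
	/* per-heap lock, free-list buckets, and sbrk bump-reserve would live here */
	_Atomic int users;                     /* how many KTs currently share this heap */
};

static struct heap heaps[NHEAPS];          /* created once at program start */
static _Atomic unsigned next_heap;         /* round-robin assignment counter */
static _Thread_local struct heap *my_heap; /* this KT's assigned heap */

/* Called once per kernel thread: with KTs <= NHEAPS each heap is private
   (uncontended); with KTs > NHEAPS heaps are shared and their locks contend. */
static struct heap *heap_attach(void) {
	unsigned i = atomic_fetch_add(&next_heap, 1) % NHEAPS;
	atomic_fetch_add(&heaps[i].users, 1);
	return my_heap = &heaps[i];
}

/* On KT exit the heap goes back to the pool rather than being destroyed,
   so its free storage stays usable by any KTs still attached to it. */
static void heap_detach(void) {
	atomic_fetch_sub(&my_heap->users, 1);
	my_heap = NULL;
}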
libcfa/prelude/builtins.c
rcf444b6 r6ff08d8 57 57 58 58 // generator support 59 struct $generator{59 struct generator$ { 60 60 inline int; 61 61 }; 62 62 63 static inline void ?{}( $generator& this) { ((int&)this) = 0; }64 static inline void ^?{}( $generator&) {}63 static inline void ?{}(generator$ & this) { ((int&)this) = 0; } 64 static inline void ^?{}(generator$ &) {} 65 65 66 66 trait is_generator(T &) { 67 67 void main(T & this); 68 $generator* get_generator(T & this);68 generator$ * get_generator(T & this); 69 69 }; 70 70 -
libcfa/src/bits/weakso_locks.cfa
rcf444b6 r6ff08d8 24 24 bool try_lock( blocking_lock & ) { return false; } 25 25 void unlock( blocking_lock & ) {} 26 void on_notify( blocking_lock &, struct $thread* ) {}26 void on_notify( blocking_lock &, struct thread$ * ) {} 27 27 size_t on_wait( blocking_lock & ) { return 0; } 28 28 void on_wakeup( blocking_lock &, size_t ) {} -
libcfa/src/bits/weakso_locks.hfa
rcf444b6 r6ff08d8 23 23 #include "containers/list.hfa" 24 24 25 struct $thread;25 struct thread$; 26 26 27 27 //----------------------------------------------------------------------------- … … 32 32 33 33 // List of blocked threads 34 dlist( $thread) blocked_threads;34 dlist( thread$ ) blocked_threads; 35 35 36 36 // Count of current blocked threads … … 44 44 45 45 // Current thread owning the lock 46 struct $thread* owner;46 struct thread$ * owner; 47 47 48 48 // Number of recursion level … … 56 56 bool try_lock( blocking_lock & this ) OPTIONAL_THREAD; 57 57 void unlock( blocking_lock & this ) OPTIONAL_THREAD; 58 void on_notify( blocking_lock & this, struct $thread* t ) OPTIONAL_THREAD;58 void on_notify( blocking_lock & this, struct thread$ * t ) OPTIONAL_THREAD; 59 59 size_t on_wait( blocking_lock & this ) OPTIONAL_THREAD; 60 60 void on_wakeup( blocking_lock & this, size_t ) OPTIONAL_THREAD; … … 74 74 static inline size_t on_wait ( multiple_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); } 75 75 static inline void on_wakeup( multiple_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); } 76 static inline void on_notify( multiple_acquisition_lock & this, struct $thread* t ){ on_notify( (blocking_lock &)this, t ); }76 static inline void on_notify( multiple_acquisition_lock & this, struct thread$ * t ){ on_notify( (blocking_lock &)this, t ); } -
libcfa/src/concurrency/alarm.cfa
rcf444b6 r6ff08d8 51 51 //============================================================================================= 52 52 53 void ?{}( alarm_node_t & this, $thread* thrd, Duration alarm, Duration period) with( this ) {53 void ?{}( alarm_node_t & this, thread$ * thrd, Duration alarm, Duration period) with( this ) { 54 54 this.initial = alarm; 55 55 this.period = period; -
libcfa/src/concurrency/alarm.hfa
rcf444b6 r6ff08d8 25 25 #include "containers/list.hfa" 26 26 27 struct $thread;27 struct thread$; 28 28 struct processor; 29 29 … … 52 52 53 53 union { 54 $thread* thrd; // thrd who created event54 thread$ * thrd; // thrd who created event 55 55 processor * proc; // proc who created event 56 56 Alarm_Callback callback; // callback to handle event … … 63 63 P9_EMBEDDED( alarm_node_t, dlink(alarm_node_t) ) 64 64 65 void ?{}( alarm_node_t & this, $thread* thrd, Duration alarm, Duration period );65 void ?{}( alarm_node_t & this, thread$ * thrd, Duration alarm, Duration period ); 66 66 void ?{}( alarm_node_t & this, processor * proc, Duration alarm, Duration period ); 67 67 void ?{}( alarm_node_t & this, Alarm_Callback callback, Duration alarm, Duration period ); -
libcfa/src/concurrency/clib/cfathread.cfa
rcf444b6 r6ff08d8 23 23 #include "cfathread.h" 24 24 25 extern void ?{}(processor &, const char[], cluster &, $thread*);25 extern void ?{}(processor &, const char[], cluster &, thread$ *); 26 26 extern "C" { 27 27 extern void __cfactx_invoke_thread(void (*main)(void *), void * this); … … 34 34 35 35 struct cfathread_object { 36 $threadself;36 thread$ self; 37 37 void * (*themain)( void * ); 38 38 void * arg; … … 42 42 void ^?{}(cfathread_object & mutex this); 43 43 44 static inline $thread* get_thread( cfathread_object & this ) { return &this.self; }44 static inline thread$ * get_thread( cfathread_object & this ) { return &this.self; } 45 45 46 46 typedef ThreadCancelled(cfathread_object) cfathread_exception; … … 81 81 // Special Init Thread responsible for the initialization or processors 82 82 struct __cfainit { 83 $threadself;83 thread$ self; 84 84 void (*init)( void * ); 85 85 void * arg; … … 88 88 void ^?{}(__cfainit & mutex this); 89 89 90 static inline $thread* get_thread( __cfainit & this ) { return &this.self; }90 static inline thread$ * get_thread( __cfainit & this ) { return &this.self; } 91 91 92 92 typedef ThreadCancelled(__cfainit) __cfainit_exception; … … 109 109 110 110 // Don't use __thrd_start! just prep the context manually 111 $thread* this_thrd = get_thread(this);111 thread$ * this_thrd = get_thread(this); 112 112 void (*main_p)(__cfainit &) = main; 113 113 -
libcfa/src/concurrency/coroutine.cfa
rcf444b6 r6ff08d8 37 37 38 38 extern "C" { 39 void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct $coroutine*) __attribute__ ((__noreturn__));39 void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine$ *) __attribute__ ((__noreturn__)); 40 40 static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__)); 41 41 static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) { … … 62 62 forall(T & | is_coroutine(T)) 63 63 void __cfaehm_cancelled_coroutine( 64 T & cor, $coroutine* desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) {64 T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) { 65 65 verify( desc->cancellation ); 66 66 desc->state = Cancelled; … … 114 114 } 115 115 116 void ?{}( $coroutine& this, const char name[], void * storage, size_t storageSize ) with( this ) {116 void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) with( this ) { 117 117 (this.context){0p, 0p}; 118 118 (this.stack){storage, storageSize}; … … 124 124 } 125 125 126 void ^?{}( $coroutine& this) {126 void ^?{}(coroutine$& this) { 127 127 if(this.state != Halted && this.state != Start && this.state != Primed) { 128 $coroutine* src = active_coroutine();129 $coroutine* dst = &this;128 coroutine$ * src = active_coroutine(); 129 coroutine$ * dst = &this; 130 130 131 131 struct _Unwind_Exception storage; … … 148 148 forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); }) 149 149 void prime(T& cor) { 150 $coroutine* this = get_coroutine(cor);150 coroutine$* this = get_coroutine(cor); 151 151 assert(this->state == Start); 152 152 … … 248 248 // is not inline (We can't inline Cforall in C) 249 249 extern "C" { 250 void __cfactx_cor_leave( struct $coroutine* src ) {251 $coroutine* starter = src->cancellation != 0 ? src->last : src->starter;250 void __cfactx_cor_leave( struct coroutine$ * src ) { 251 coroutine$ * starter = src->cancellation != 0 ? src->last : src->starter; 252 252 253 253 src->state = Halted; … … 265 265 } 266 266 267 struct $coroutine* __cfactx_cor_finish(void) {268 struct $coroutine* cor = active_coroutine();267 struct coroutine$ * __cfactx_cor_finish(void) { 268 struct coroutine$ * cor = active_coroutine(); 269 269 270 270 // get the active thread once 271 $thread* athrd = active_thread();271 thread$ * athrd = active_thread(); 272 272 273 273 /* paranoid */ verify( athrd->corctx_flag ); -
libcfa/src/concurrency/coroutine.hfa
rcf444b6 r6ff08d8 39 39 trait is_coroutine(T & | IS_RESUMPTION_EXCEPTION(CoroutineCancelled, (T))) { 40 40 void main(T & this); 41 $coroutine* get_coroutine(T & this);41 coroutine$ * get_coroutine(T & this); 42 42 }; 43 43 44 #define DECL_COROUTINE(X) static inline $coroutine* get_coroutine(X& this) { return &this.__cor; } void main(X& this)44 #define DECL_COROUTINE(X) static inline coroutine$* get_coroutine(X& this) { return &this.__cor; } void main(X& this) 45 45 46 46 //----------------------------------------------------------------------------- … … 49 49 // void ^?{}( coStack_t & this ); 50 50 51 void ?{}( $coroutine& this, const char name[], void * storage, size_t storageSize );52 void ^?{}( $coroutine& this );53 54 static inline void ?{}( $coroutine& this) { this{ "Anonymous Coroutine", 0p, 0 }; }55 static inline void ?{}( $coroutine& this, size_t stackSize) { this{ "Anonymous Coroutine", 0p, stackSize }; }56 static inline void ?{}( $coroutine& this, void * storage, size_t storageSize ) { this{ "Anonymous Coroutine", storage, storageSize }; }57 static inline void ?{}( $coroutine& this, const char name[]) { this{ name, 0p, 0 }; }58 static inline void ?{}( $coroutine& this, const char name[], size_t stackSize ) { this{ name, 0p, stackSize }; }51 void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ); 52 void ^?{}( coroutine$ & this ); 53 54 static inline void ?{}( coroutine$ & this) { this{ "Anonymous Coroutine", 0p, 0 }; } 55 static inline void ?{}( coroutine$ & this, size_t stackSize) { this{ "Anonymous Coroutine", 0p, stackSize }; } 56 static inline void ?{}( coroutine$ & this, void * storage, size_t storageSize ) { this{ "Anonymous Coroutine", storage, storageSize }; } 57 static inline void ?{}( coroutine$ & this, const char name[]) { this{ name, 0p, 0 }; } 58 static inline void ?{}( coroutine$ & this, const char name[], size_t stackSize ) { this{ name, 0p, stackSize }; } 59 59 60 60 //----------------------------------------------------------------------------- … … 63 63 void prime(T & cor); 64 64 65 static inline struct $coroutine* active_coroutine() { return active_thread()->curr_cor; }65 static inline struct coroutine$ * active_coroutine() { return active_thread()->curr_cor; } 66 66 67 67 //----------------------------------------------------------------------------- … … 73 73 74 74 forall(T &) 75 void __cfactx_start(void (*main)(T &), struct $coroutine* cor, T & this, void (*invoke)(void (*main)(void *), void *));76 77 extern void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine*) __attribute__ ((__noreturn__));75 void __cfactx_start(void (*main)(T &), struct coroutine$ * cor, T & this, void (*invoke)(void (*main)(void *), void *)); 76 77 extern void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ *) __attribute__ ((__noreturn__)); 78 78 79 79 extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch"); … … 82 82 // Private wrappers for context switch and stack creation 83 83 // Wrapper for co 84 static inline void $ctx_switch( $coroutine * src, $coroutine* dst ) __attribute__((nonnull (1, 2))) {84 static inline void $ctx_switch( coroutine$ * src, coroutine$ * dst ) __attribute__((nonnull (1, 2))) { 85 85 // set state of current coroutine to inactive 86 86 src->state = src->state == Halted ? 
Halted : Blocked; 87 87 88 88 // get the active thread once 89 $thread* athrd = active_thread();89 thread$ * athrd = active_thread(); 90 90 91 91 // Mark the coroutine … … 124 124 // will also migrate which means this value will 125 125 // stay in syn with the TLS 126 $coroutine* src = active_coroutine();126 coroutine$ * src = active_coroutine(); 127 127 128 128 assertf( src->last != 0, … … 141 141 forall(T & | is_coroutine(T)) 142 142 void __cfaehm_cancelled_coroutine( 143 T & cor, $coroutine* desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) );143 T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ); 144 144 145 145 // Resume implementation inlined for performance … … 151 151 // will also migrate which means this value will 152 152 // stay in syn with the TLS 153 $coroutine* src = active_coroutine();154 $coroutine* dst = get_coroutine(cor);153 coroutine$ * src = active_coroutine(); 154 coroutine$ * dst = get_coroutine(cor); 155 155 156 156 if( unlikely(dst->context.SP == 0p) ) { … … 180 180 } 181 181 182 static inline void resume( $coroutine* dst ) __attribute__((nonnull (1))) {182 static inline void resume( coroutine$ * dst ) __attribute__((nonnull (1))) { 183 183 // optimization : read TLS once and reuse it 184 184 // Safety note: this is preemption safe since if … … 186 186 // will also migrate which means this value will 187 187 // stay in syn with the TLS 188 $coroutine* src = active_coroutine();188 coroutine$ * src = active_coroutine(); 189 189 190 190 // not resuming self ? -
libcfa/src/concurrency/exception.cfa
rcf444b6 r6ff08d8 20 20 #include "coroutine.hfa" 21 21 22 extern struct $thread* mainThread;22 extern struct thread$ * mainThread; 23 23 extern "C" { 24 24 extern void __cfactx_thrd_leave(); … … 55 55 56 56 STOP_AT_END_FUNCTION(coroutine_cancelstop, 57 struct $coroutine * src = ($coroutine*)stop_param;58 struct $coroutine* dst = src->last;57 struct coroutine$ * src = (coroutine$ *)stop_param; 58 struct coroutine$ * dst = src->last; 59 59 60 60 $ctx_switch( src, dst ); … … 72 72 void * stop_param; 73 73 74 struct $thread* this_thread = active_thread();74 struct thread$ * this_thread = active_thread(); 75 75 if ( &this_thread->self_cor != this_thread->curr_cor ) { 76 struct $coroutine* cor = this_thread->curr_cor;76 struct coroutine$ * cor = this_thread->curr_cor; 77 77 cor->cancellation = unwind_exception; 78 78 -
libcfa/src/concurrency/future.hfa
rcf444b6 r6ff08d8 37 37 38 38 // Fulfil the future, returns whether or not someone was unblocked 39 $thread* fulfil( future(T) & this, T result ) {39 thread$ * fulfil( future(T) & this, T result ) { 40 40 this.result = result; 41 41 return fulfil( (future_t&)this ); -
libcfa/src/concurrency/invoke.c
rcf444b6 r6ff08d8 29 29 // Called from the kernel when starting a coroutine or task so must switch back to user mode. 30 30 31 extern struct $coroutine* __cfactx_cor_finish(void);32 extern void __cfactx_cor_leave ( struct $coroutine* );31 extern struct coroutine$ * __cfactx_cor_finish(void); 32 extern void __cfactx_cor_leave ( struct coroutine$ * ); 33 33 extern void __cfactx_thrd_leave(); 34 34 … … 41 41 ) { 42 42 // Finish setting up the coroutine by setting its state 43 struct $coroutine* cor = __cfactx_cor_finish();43 struct coroutine$ * cor = __cfactx_cor_finish(); 44 44 45 45 // Call the main of the coroutine … … 70 70 } 71 71 72 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine* cor) __attribute__ ((__noreturn__));73 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine* cor) {72 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__)); 73 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) { 74 74 _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor ); 75 75 printf("UNWIND ERROR %d after force unwind\n", ret); … … 100 100 void __cfactx_start( 101 101 void (*main)(void *), 102 struct $coroutine* cor,102 struct coroutine$ * cor, 103 103 void *this, 104 104 void (*invoke)(void *) -
libcfa/src/concurrency/invoke.h
rcf444b6 r6ff08d8 71 71 enum __Coroutine_State { Halted, Start, Primed, Blocked, Ready, Active, Cancelled, Halting }; 72 72 73 struct $coroutine{73 struct coroutine$ { 74 74 // context that is switch during a __cfactx_switch 75 75 struct __stack_context_t context; … … 85 85 86 86 // first coroutine to resume this one 87 struct $coroutine* starter;87 struct coroutine$ * starter; 88 88 89 89 // last coroutine to resume this one 90 struct $coroutine* last;90 struct coroutine$ * last; 91 91 92 92 // If non-null stack must be unwound with this exception … … 95 95 }; 96 96 // Wrapper for gdb 97 struct cfathread_coroutine_t { struct $coroutinedebug; };98 99 static inline struct __stack_t * __get_stack( struct $coroutine* cor ) {97 struct cfathread_coroutine_t { struct coroutine$ debug; }; 98 99 static inline struct __stack_t * __get_stack( struct coroutine$ * cor ) { 100 100 return (struct __stack_t*)(((uintptr_t)cor->stack.storage) & ((uintptr_t)-2)); 101 101 } … … 110 110 }; 111 111 112 struct $monitor{112 struct monitor$ { 113 113 // spinlock to protect internal data 114 114 struct __spinlock_t lock; 115 115 116 116 // current owner of the monitor 117 struct $thread* owner;117 struct thread$ * owner; 118 118 119 119 // queue of threads that are blocked waiting for the monitor 120 __queue_t(struct $thread) entry_queue;120 __queue_t(struct thread$) entry_queue; 121 121 122 122 // stack of conditions to run next once we exit the monitor … … 133 133 }; 134 134 // Wrapper for gdb 135 struct cfathread_monitor_t { struct $monitordebug; };135 struct cfathread_monitor_t { struct monitor$ debug; }; 136 136 137 137 struct __monitor_group_t { 138 138 // currently held monitors 139 __cfa_anonymous_object( __small_array_t( $monitor*) );139 __cfa_anonymous_object( __small_array_t(monitor$*) ); 140 140 141 141 // last function that acquired monitors … … 146 146 // instrusive link field for threads 147 147 struct __thread_desc_link { 148 struct $thread* next;148 struct thread$ * next; 149 149 volatile unsigned long long ts; 150 150 }; 151 151 152 struct $thread{152 struct thread$ { 153 153 // Core threading fields 154 154 // context that is switch during a __cfactx_switch … … 179 179 180 180 // coroutine body used to store context 181 struct $coroutineself_cor;181 struct coroutine$ self_cor; 182 182 183 183 // current active context 184 struct $coroutine* curr_cor;184 struct coroutine$ * curr_cor; 185 185 186 186 // monitor body used for mutual exclusion 187 struct $monitorself_mon;187 struct monitor$ self_mon; 188 188 189 189 // pointer to monitor with sufficient lifetime for current monitors 190 struct $monitor* self_mon_p;190 struct monitor$ * self_mon_p; 191 191 192 192 // monitors currently held by this thread … … 195 195 // used to put threads on user data structures 196 196 struct { 197 struct $thread* next;198 struct $thread* back;197 struct thread$ * next; 198 struct thread$ * back; 199 199 } seqable; 200 200 201 201 // used to put threads on dlist data structure 202 __cfa_dlink( $thread);202 __cfa_dlink(thread$); 203 203 204 204 struct { 205 struct $thread* next;206 struct $thread* prev;205 struct thread$ * next; 206 struct thread$ * prev; 207 207 } node; 208 208 … … 214 214 }; 215 215 #ifdef __cforall 216 P9_EMBEDDED( $thread, dlink($thread) )216 P9_EMBEDDED( thread$, dlink(thread$) ) 217 217 #endif 218 218 // Wrapper for gdb 219 struct cfathread_thread_t { struct $threaddebug; };219 struct cfathread_thread_t { struct thread$ debug; }; 220 220 221 221 #ifdef __CFA_DEBUG__ 222 void 
__cfaabi_dbg_record_thrd( $thread& this, bool park, const char prev_name[]);222 void __cfaabi_dbg_record_thrd(thread$ & this, bool park, const char prev_name[]); 223 223 #else 224 224 #define __cfaabi_dbg_record_thrd(x, y, z) … … 228 228 extern "Cforall" { 229 229 230 static inline $thread *& get_next( $thread& this ) __attribute__((const)) {230 static inline thread$ *& get_next( thread$ & this ) __attribute__((const)) { 231 231 return this.link.next; 232 232 } 233 233 234 static inline [ $thread *&, $thread *& ] __get( $thread& this ) __attribute__((const)) {234 static inline [thread$ *&, thread$ *& ] __get( thread$ & this ) __attribute__((const)) { 235 235 return this.node.[next, prev]; 236 236 } 237 237 238 static inline $thread * volatile & ?`next ( $thread* this ) __attribute__((const)) {238 static inline thread$ * volatile & ?`next ( thread$ * this ) __attribute__((const)) { 239 239 return this->seqable.next; 240 240 } 241 241 242 static inline $thread *& Back( $thread* this ) __attribute__((const)) {242 static inline thread$ *& Back( thread$ * this ) __attribute__((const)) { 243 243 return this->seqable.back; 244 244 } 245 245 246 static inline $thread *& Next( $thread* this ) __attribute__((const)) {246 static inline thread$ *& Next( thread$ * this ) __attribute__((const)) { 247 247 return this->seqable.next; 248 248 } 249 249 250 static inline bool listed( $thread* this ) {250 static inline bool listed( thread$ * this ) { 251 251 return this->seqable.next != 0p; 252 252 } … … 258 258 } 259 259 260 static inline void ?{}(__monitor_group_t & this, struct $monitor** data, __lock_size_t size, fptr_t func) {260 static inline void ?{}(__monitor_group_t & this, struct monitor$ ** data, __lock_size_t size, fptr_t func) { 261 261 (this.data){data}; 262 262 (this.size){size}; -
libcfa/src/concurrency/io.cfa
rcf444b6 r6ff08d8 90 90 static inline unsigned __flush( struct $io_context & ); 91 91 static inline __u32 __release_sqes( struct $io_context & ); 92 extern void __kernel_unpark( $thread* thrd );92 extern void __kernel_unpark( thread$ * thrd ); 93 93 94 94 bool __cfa_io_drain( processor * proc ) { -
libcfa/src/concurrency/io/types.hfa
rcf444b6 r6ff08d8 179 179 180 180 static inline { 181 $thread* fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) {181 thread$ * fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) { 182 182 this.result = result; 183 183 return fulfil(this.self, do_unpark); -
libcfa/src/concurrency/kernel.cfa
rcf444b6 r6ff08d8 110 110 #endif 111 111 112 extern $thread* mainThread;112 extern thread$ * mainThread; 113 113 extern processor * mainProcessor; 114 114 115 115 //----------------------------------------------------------------------------- 116 116 // Kernel Scheduling logic 117 static $thread* __next_thread(cluster * this);118 static $thread* __next_thread_slow(cluster * this);119 static inline bool __must_unpark( $thread* thrd ) __attribute((nonnull(1)));120 static void __run_thread(processor * this, $thread* dst);117 static thread$ * __next_thread(cluster * this); 118 static thread$ * __next_thread_slow(cluster * this); 119 static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1))); 120 static void __run_thread(processor * this, thread$ * dst); 121 121 static void __wake_one(cluster * cltr); 122 122 … … 181 181 __cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this); 182 182 183 $thread* readyThread = 0p;183 thread$ * readyThread = 0p; 184 184 MAIN_LOOP: 185 185 for() { … … 388 388 // runThread runs a thread by context switching 389 389 // from the processor coroutine to the target thread 390 static void __run_thread(processor * this, $thread* thrd_dst) {390 static void __run_thread(processor * this, thread$ * thrd_dst) { 391 391 /* paranoid */ verify( ! __preemption_enabled() ); 392 392 /* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted); … … 396 396 __cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name); 397 397 398 $coroutine* proc_cor = get_coroutine(this->runner);398 coroutine$ * proc_cor = get_coroutine(this->runner); 399 399 400 400 // set state of processor coroutine to inactive … … 415 415 /* paranoid */ verify( thrd_dst->context.SP ); 416 416 /* paranoid */ verify( thrd_dst->state != Halted ); 417 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination $thread%p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor418 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination $thread%p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor417 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor 418 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor 419 419 /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary ); 420 420 … … 428 428 429 429 /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary ); 430 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || 
thrd_dst->corctx_flag, "ERROR : Destination $thread%p has been corrupted.\n StackPointer too large.\n", thrd_dst );431 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination $thread%p has been corrupted.\n StackPointer too small.\n", thrd_dst );430 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); 431 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); 432 432 /* paranoid */ verify( thrd_dst->context.SP ); 433 433 /* paranoid */ verify( thrd_dst->curr_cluster == this->cltr ); … … 497 497 void returnToKernel() { 498 498 /* paranoid */ verify( ! __preemption_enabled() ); 499 $coroutine* proc_cor = get_coroutine(kernelTLS().this_processor->runner);500 $thread* thrd_src = kernelTLS().this_thread;499 coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner); 500 thread$ * thrd_src = kernelTLS().this_thread; 501 501 502 502 __STATS( thrd_src->last_proc = kernelTLS().this_processor; ) … … 526 526 527 527 /* paranoid */ verify( ! __preemption_enabled() ); 528 /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning $thread%p has been corrupted.\n StackPointer too small.\n", thrd_src );529 /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning $thread%p has been corrupted.\n StackPointer too large.\n", thrd_src );528 /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_src ); 529 /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_src ); 530 530 } 531 531 … … 533 533 // Scheduler routines 534 534 // KERNEL ONLY 535 static void __schedule_thread( $thread* thrd ) {535 static void __schedule_thread( thread$ * thrd ) { 536 536 /* paranoid */ verify( ! __preemption_enabled() ); 537 537 /* paranoid */ verify( ready_schedule_islocked()); … … 583 583 } 584 584 585 void schedule_thread$( $thread* thrd ) {585 void schedule_thread$( thread$ * thrd ) { 586 586 ready_schedule_lock(); 587 587 __schedule_thread( thrd ); … … 590 590 591 591 // KERNEL ONLY 592 static inline $thread* __next_thread(cluster * this) with( *this ) {592 static inline thread$ * __next_thread(cluster * this) with( *this ) { 593 593 /* paranoid */ verify( ! __preemption_enabled() ); 594 594 595 595 ready_schedule_lock(); 596 $thread* thrd = pop_fast( this );596 thread$ * thrd = pop_fast( this ); 597 597 ready_schedule_unlock(); 598 598 … … 602 602 603 603 // KERNEL ONLY 604 static inline $thread* __next_thread_slow(cluster * this) with( *this ) {604 static inline thread$ * __next_thread_slow(cluster * this) with( *this ) { 605 605 /* paranoid */ verify( ! 
__preemption_enabled() ); 606 606 607 607 ready_schedule_lock(); 608 $thread* thrd;608 thread$ * thrd; 609 609 for(25) { 610 610 thrd = pop_slow( this ); … … 620 620 } 621 621 622 static inline bool __must_unpark( $thread* thrd ) {622 static inline bool __must_unpark( thread$ * thrd ) { 623 623 int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST); 624 624 switch(old_ticket) { … … 636 636 } 637 637 638 void __kernel_unpark( $thread* thrd ) {638 void __kernel_unpark( thread$ * thrd ) { 639 639 /* paranoid */ verify( ! __preemption_enabled() ); 640 640 /* paranoid */ verify( ready_schedule_islocked()); … … 651 651 } 652 652 653 void unpark( $thread* thrd ) {653 void unpark( thread$ * thrd ) { 654 654 if( !thrd ) return; 655 655 … … 675 675 // Should never return 676 676 void __cfactx_thrd_leave() { 677 $thread* thrd = active_thread();678 $monitor* this = &thrd->self_mon;677 thread$ * thrd = active_thread(); 678 monitor$ * this = &thrd->self_mon; 679 679 680 680 // Lock the monitor now … … 688 688 /* paranoid */ verify( kernelTLS().this_thread == thrd ); 689 689 /* paranoid */ verify( thrd->context.SP ); 690 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : $thread%p has been corrupted.\n StackPointer too large.\n", thrd );691 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : $thread%p has been corrupted.\n StackPointer too small.\n", thrd );690 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd ); 691 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd ); 692 692 693 693 thrd->state = Halting; … … 707 707 bool force_yield( __Preemption_Reason reason ) { 708 708 __disable_interrupts_checked(); 709 $thread* thrd = kernelTLS().this_thread;709 thread$ * thrd = kernelTLS().this_thread; 710 710 /* paranoid */ verify(thrd->state == Active); 711 711 … … 819 819 //============================================================================================= 820 820 void __kernel_abort_msg( char * abort_text, int abort_text_size ) { 821 $thread* thrd = __cfaabi_tls.this_thread;821 thread$ * thrd = __cfaabi_tls.this_thread; 822 822 823 823 if(thrd) { -
libcfa/src/concurrency/kernel.hfa
rcf444b6 r6ff08d8 115 115 // it is not a particularly safe scheme as it can make processors less homogeneous 116 116 struct { 117 $thread* thrd;117 thread$ * thrd; 118 118 } init; 119 119 … … 215 215 // List of threads 216 216 __spinlock_t thread_list_lock; 217 __dllist_t(struct $thread) threads;217 __dllist_t(struct thread$) threads; 218 218 unsigned int nthreads; 219 219 -
libcfa/src/concurrency/kernel/fwd.hfa
rcf444b6 r6ff08d8 24 24 #endif 25 25 26 struct $thread;26 struct thread$; 27 27 struct processor; 28 28 struct cluster; … … 36 36 extern "Cforall" { 37 37 extern __attribute__((aligned(128))) thread_local struct KernelThreadData { 38 struct $thread* volatile this_thread;38 struct thread$ * volatile this_thread; 39 39 struct processor * volatile this_processor; 40 40 volatile bool sched_lock; … … 120 120 extern "Cforall" { 121 121 extern void park( void ); 122 extern void unpark( struct $thread* this );123 static inline struct $thread* active_thread () {124 struct $thread* t = publicTLS_get( this_thread );122 extern void unpark( struct thread$ * this ); 123 static inline struct thread$ * active_thread () { 124 struct thread$ * t = publicTLS_get( this_thread ); 125 125 /* paranoid */ verify( t ); 126 126 return t; … … 144 144 // Semaphore which only supports a single thread 145 145 struct single_sem { 146 struct $thread* volatile ptr;146 struct thread$ * volatile ptr; 147 147 }; 148 148 … … 156 156 bool wait(single_sem & this) { 157 157 for() { 158 struct $thread* expected = this.ptr;158 struct thread$ * expected = this.ptr; 159 159 if(expected == 1p) { 160 160 if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { … … 175 175 bool post(single_sem & this) { 176 176 for() { 177 struct $thread* expected = this.ptr;177 struct thread$ * expected = this.ptr; 178 178 if(expected == 1p) return false; 179 179 if(expected == 0p) { … … 200 200 // 1p : fulfilled (wait won't block) 201 201 // any thread : a thread is currently waiting 202 struct $thread* volatile ptr;202 struct thread$ * volatile ptr; 203 203 }; 204 204 … … 214 214 bool wait(oneshot & this) { 215 215 for() { 216 struct $thread* expected = this.ptr;216 struct thread$ * expected = this.ptr; 217 217 if(expected == 1p) return false; 218 218 /* paranoid */ verify( expected == 0p ); … … 227 227 // Mark as fulfilled, wake thread if needed 228 228 // return true if a thread was unparked 229 $thread* post(oneshot & this, bool do_unpark = true) {230 struct $thread* got = __atomic_exchange_n( &this.ptr, 1p, __ATOMIC_SEQ_CST);229 thread$ * post(oneshot & this, bool do_unpark = true) { 230 struct thread$ * got = __atomic_exchange_n( &this.ptr, 1p, __ATOMIC_SEQ_CST); 231 231 if( got == 0p ) return 0p; 232 232 if(do_unpark) unpark( got ); … … 343 343 // from the server side, mark the future as fulfilled 344 344 // delete it if needed 345 $thread* fulfil( future_t & this, bool do_unpark = true ) {345 thread$ * fulfil( future_t & this, bool do_unpark = true ) { 346 346 for() { 347 347 struct oneshot * expected = this.ptr; … … 364 364 if(__atomic_compare_exchange_n(&this.ptr, &expected, want, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { 365 365 if( expected == 0p ) { /* paranoid */ verify( this.ptr == 1p); return 0p; } 366 $thread* ret = post( *expected, do_unpark );366 thread$ * ret = post( *expected, do_unpark ); 367 367 __atomic_store_n( &this.ptr, 1p, __ATOMIC_SEQ_CST); 368 368 return ret; -
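Aside: single_sem, oneshot, and future_t in this header all pack a three-state handshake into a single pointer word: 0p (idle/unfulfilled), 1p (posted/fulfilled), or a pointer to the one waiting thread. A hedged C sketch of the oneshot half of that protocol, with park/unpark and active_thread reduced to stubs (every name here is illustrative, not the libcfa API):

    #include <stddef.h>

    typedef struct thread thread;            /* opaque stand-in for thread$ */
    #define FULFILLED ((thread *)1)          /* the 1p sentinel from the hunk */

    static void park(void) { /* stub: block the calling thread */ }
    static void unpark(thread *t) { (void)t; /* stub: make t runnable */ }
    static thread *active_thread(void) { static char self; return (thread *)&self; }

    struct oneshot { thread *volatile ptr; };    /* 0, FULFILLED, or a waiter */

    /* Returns 0 if already fulfilled; otherwise registers self and blocks. */
    static int oneshot_wait(struct oneshot *o) {
        for (;;) {
            thread *expected = o->ptr;
            if (expected == FULFILLED) return 0;      /* wait will not block */
            if (__atomic_compare_exchange_n(&o->ptr, &expected, active_thread(),
                                            0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
                park();                               /* resumed by oneshot_post */
                return 1;
            }
        }
    }

    /* Marks the oneshot fulfilled; wakes and returns the waiter, if any. */
    static thread *oneshot_post(struct oneshot *o) {
        thread *got = __atomic_exchange_n(&o->ptr, FULFILLED, __ATOMIC_SEQ_CST);
        if (got == NULL) return NULL;                 /* fulfilled before any wait */
        unpark(got);
        return got;
    }

Because there is at most one waiter, a single unconditional exchange suffices on the post side; no retry loop is needed there.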
libcfa/src/concurrency/kernel/startup.cfa
rcf444b6 r6ff08d8 77 77 static void __kernel_first_resume( processor * this ); 78 78 static void __kernel_last_resume ( processor * this ); 79 static void init(processor & this, const char name[], cluster & _cltr, $thread* initT);79 static void init(processor & this, const char name[], cluster & _cltr, thread$ * initT); 80 80 static void deinit(processor & this); 81 81 static void doregister( struct cluster & cltr ); … … 83 83 static void register_tls( processor * this ); 84 84 static void unregister_tls( processor * this ); 85 static void ?{}( $coroutine& this, current_stack_info_t * info);86 static void ?{}( $thread& this, current_stack_info_t * info);85 static void ?{}( coroutine$ & this, current_stack_info_t * info); 86 static void ?{}( thread$ & this, current_stack_info_t * info); 87 87 static void ?{}(processorCtx_t & this) {} 88 88 static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info); … … 105 105 KERNEL_STORAGE(cluster, mainCluster); 106 106 KERNEL_STORAGE(processor, mainProcessor); 107 KERNEL_STORAGE( $thread, mainThread);107 KERNEL_STORAGE(thread$, mainThread); 108 108 KERNEL_STORAGE(__stack_t, mainThreadCtx); 109 109 KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock); … … 114 114 cluster * mainCluster; 115 115 processor * mainProcessor; 116 $thread* mainThread;116 thread$ * mainThread; 117 117 __scheduler_RWLock_t * __scheduler_lock; 118 118 … … 203 203 // SKULLDUGGERY: the mainThread steals the process main thread 204 204 // which will then be scheduled by the mainProcessor normally 205 mainThread = ( $thread*)&storage_mainThread;205 mainThread = (thread$ *)&storage_mainThread; 206 206 current_stack_info_t info; 207 207 info.storage = (__stack_t*)&storage_mainThreadCtx; … … 397 397 398 398 static void __kernel_first_resume( processor * this ) { 399 $thread* src = mainThread;400 $coroutine* dst = get_coroutine(this->runner);399 thread$ * src = mainThread; 400 coroutine$ * dst = get_coroutine(this->runner); 401 401 402 402 /* paranoid */ verify( ! __preemption_enabled() ); … … 430 430 // KERNEL_ONLY 431 431 static void __kernel_last_resume( processor * this ) { 432 $coroutine* src = &mainThread->self_cor;433 $coroutine* dst = get_coroutine(this->runner);432 coroutine$ * src = &mainThread->self_cor; 433 coroutine$ * dst = get_coroutine(this->runner); 434 434 435 435 /* paranoid */ verify( ! 
__preemption_enabled() ); … … 459 459 //----------------------------------------------------------------------------- 460 460 // Main thread construction 461 static void ?{}( $coroutine& this, current_stack_info_t * info) with( this ) {461 static void ?{}( coroutine$ & this, current_stack_info_t * info) with( this ) { 462 462 stack.storage = info->storage; 463 463 with(*stack.storage) { … … 474 474 } 475 475 476 static void ?{}( $thread& this, current_stack_info_t * info) with( this ) {476 static void ?{}( thread$ & this, current_stack_info_t * info) with( this ) { 477 477 ticket = TICKET_RUNNING; 478 478 state = Start; … … 506 506 } 507 507 508 static void init(processor & this, const char name[], cluster & _cltr, $thread* initT) with( this ) {508 static void init(processor & this, const char name[], cluster & _cltr, thread$ * initT) with( this ) { 509 509 this.name = name; 510 510 this.cltr = &_cltr; … … 545 545 } 546 546 547 void ?{}(processor & this, const char name[], cluster & _cltr, $thread* initT) {547 void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) { 548 548 ( this.terminated ){}; 549 549 ( this.runner ){}; … … 663 663 } 664 664 665 void doregister( cluster * cltr, $thread& thrd ) {665 void doregister( cluster * cltr, thread$ & thrd ) { 666 666 lock (cltr->thread_list_lock __cfaabi_dbg_ctx2); 667 667 cltr->nthreads += 1; … … 670 670 } 671 671 672 void unregister( cluster * cltr, $thread& thrd ) {672 void unregister( cluster * cltr, thread$ & thrd ) { 673 673 lock (cltr->thread_list_lock __cfaabi_dbg_ctx2); 674 674 remove(cltr->threads, thrd ); -
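A note on KERNEL_STORAGE and the SKULLDUGGERY comment above: the main thread's descriptor must exist before any heap or runtime machinery is up, so static storage is reserved at compile time and the descriptor is then constructed in place over it. A C sketch of the pattern (the macro and the descriptor type are illustrative):

    #include <stdalign.h>

    typedef struct { int state; } thread_desc;       /* stand-in for thread$ */

    /* Reserve raw static storage, correctly sized and aligned; no ctor runs. */
    #define KERNEL_STORAGE(T, X) static alignas(T) char storage_##X[sizeof(T)]

    KERNEL_STORAGE(thread_desc, mainThread);
    static thread_desc *mainThread;

    void kernel_startup(void) {
        /* SKULLDUGGERY: treat the raw storage as the descriptor itself. */
        mainThread = (thread_desc *)&storage_mainThread;
        mainThread->state = 0;                       /* "construct" in place */
    }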
libcfa/src/concurrency/kernel_private.hfa
rcf444b6 r6ff08d8 46 46 } 47 47 48 void schedule_thread$( $thread* ) __attribute__((nonnull (1)));48 void schedule_thread$( thread$ * ) __attribute__((nonnull (1))); 49 49 50 50 extern bool __preemption_enabled(); 51 51 52 52 //release/wake-up the following resources 53 void __thread_finish( $thread* thrd );53 void __thread_finish( thread$ * thrd ); 54 54 55 55 //----------------------------------------------------------------------------- … … 95 95 96 96 __cfaabi_dbg_debug_do( 97 extern void __cfaabi_dbg_thread_register ( $thread* thrd );98 extern void __cfaabi_dbg_thread_unregister( $thread* thrd );97 extern void __cfaabi_dbg_thread_register ( thread$ * thrd ); 98 extern void __cfaabi_dbg_thread_unregister( thread$ * thrd ); 99 99 ) 100 100 … … 105 105 //----------------------------------------------------------------------------- 106 106 // Utils 107 void doregister( struct cluster * cltr, struct $thread& thrd );108 void unregister( struct cluster * cltr, struct $thread& thrd );107 void doregister( struct cluster * cltr, struct thread$ & thrd ); 108 void unregister( struct cluster * cltr, struct thread$ & thrd ); 109 109 110 110 //----------------------------------------------------------------------------- … … 300 300 // push thread onto a ready queue for a cluster 301 301 // returns true if the list was previously empty, false otherwise 302 __attribute__((hot)) void push(struct cluster * cltr, struct $thread* thrd, bool local);302 __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, bool local); 303 303 304 304 //----------------------------------------------------------------------- … … 306 306 // returns 0p if empty 307 307 // May return 0p spuriously 308 __attribute__((hot)) struct $thread* pop_fast(struct cluster * cltr);308 __attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr); 309 309 310 310 //----------------------------------------------------------------------- … … 312 312 // returns 0p if empty 313 313 // May return 0p spuriously 314 __attribute__((hot)) struct $thread* pop_slow(struct cluster * cltr);314 __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr); 315 315 316 316 //----------------------------------------------------------------------- … … 318 318 // returns 0p if empty 319 319 // guaranteed to find any threads added before this call 320 __attribute__((hot)) struct $thread* pop_search(struct cluster * cltr);320 __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr); 321 321 322 322 //----------------------------------------------------------------------- -
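Note the contract gradient across the three pop declarations above: pop_fast and pop_slow may return 0p spuriously, while pop_search guarantees it finds any thread pushed before the call. A C sketch of the escalation a scheduler loop could build on that contract (the pop stubs stand in for the real queue operations, and this ordering is an inference from the declarations, not libcfa's exact loop):

    #include <stddef.h>

    typedef struct thread thread;
    typedef struct cluster cluster;

    /* Stubs standing in for the ready-queue operations declared in the hunk. */
    static thread *pop_fast(cluster *c)   { (void)c; return NULL; } /* may fail spuriously */
    static thread *pop_slow(cluster *c)   { (void)c; return NULL; } /* may fail spuriously */
    static thread *pop_search(cluster *c) { (void)c; return NULL; } /* exhaustive */

    /* Escalate from cheap to exhaustive before concluding the cluster is idle. */
    static thread *next_ready(cluster *cltr) {
        thread *t = pop_fast(cltr);
        if (t == NULL) t = pop_slow(cltr);
        if (t == NULL) t = pop_search(cltr);
        return t;    /* NULL here genuinely means nothing was runnable */
    }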
libcfa/src/concurrency/locks.cfa
rcf444b6 r6ff08d8 32 32 33 33 // waiting thread 34 struct $thread* t;34 struct thread$ * t; 35 35 36 36 // shadow field … … 45 45 P9_EMBEDDED( info_thread(L), dlink(info_thread(L)) ) 46 46 47 void ?{}( info_thread(L) & this, $thread* t, uintptr_t info, L * l ) {47 void ?{}( info_thread(L) & this, thread$ * t, uintptr_t info, L * l ) { 48 48 this.t = t; 49 49 this.info = info; … … 71 71 void lock( blocking_lock & this ) with( this ) { 72 72 lock( lock __cfaabi_dbg_ctx2 ); 73 $thread* thrd = active_thread();73 thread$ * thrd = active_thread(); 74 74 75 75 // single acquisition lock is held by current thread … … 117 117 118 118 void pop_and_set_new_owner( blocking_lock & this ) with( this ) { 119 $thread* t = &try_pop_front( blocked_threads );119 thread$ * t = &try_pop_front( blocked_threads ); 120 120 owner = t; 121 121 recursion_count = ( t ? 1 : 0 ); … … 142 142 } 143 143 144 void on_notify( blocking_lock & this, $thread* t ) with( this ) {144 void on_notify( blocking_lock & this, thread$ * t ) with( this ) { 145 145 lock( lock __cfaabi_dbg_ctx2 ); 146 146 // lock held … … 366 366 } 367 367 368 $thread* V (semaphore & this, const bool doUnpark ) with( this ) {369 $thread* thrd = 0p;368 thread$ * V (semaphore & this, const bool doUnpark ) with( this ) { 369 thread$ * thrd = 0p; 370 370 lock( lock __cfaabi_dbg_ctx2 ); 371 371 count += 1; … … 384 384 385 385 bool V(semaphore & this) with( this ) { 386 $thread* thrd = V(this, true);386 thread$ * thrd = V(this, true); 387 387 return thrd != 0p; 388 388 } 389 389 390 390 bool V(semaphore & this, unsigned diff) with( this ) { 391 $thread* thrd = 0p;391 thread$ * thrd = 0p; 392 392 lock( lock __cfaabi_dbg_ctx2 ); 393 393 int release = max(-count, (int)diff); -
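Aside: pop_and_set_new_owner above is a direct-handoff unlock; ownership transfers to the first blocked thread rather than being dropped and re-raced. A C sketch of that step (the queue type is a stub, and the wake-up is elided in the hunk, so it is only noted in a comment):

    #include <stddef.h>

    typedef struct thread thread;
    struct wait_queue { thread *head; };

    static thread *try_pop_front(struct wait_queue *q) { (void)q; return NULL; } /* stub */

    struct blocking_lock {
        struct wait_queue blocked_threads;
        thread *owner;
        size_t  recursion_count;
    };

    /* Mirrors the hunk: the lock is handed to the woken thread, never left
       free while waiters exist; owner is NULL only when no one was blocked. */
    static thread *pop_and_set_new_owner(struct blocking_lock *l) {
        thread *t = try_pop_front(&l->blocked_threads);
        l->owner = t;
        l->recursion_count = t ? 1 : 0;   /* the new owner holds it exactly once */
        return t;    /* presumably unparked by the caller (elided in the hunk) */
    }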
libcfa/src/concurrency/locks.hfa
rcf444b6 r6ff08d8 39 39 struct Semaphore0nary { 40 40 __spinlock_t lock; // needed to protect 41 mpsc_queue( $thread) queue;42 }; 43 44 static inline bool P(Semaphore0nary & this, $thread* thrd) {41 mpsc_queue(thread$) queue; 42 }; 43 44 static inline bool P(Semaphore0nary & this, thread$ * thrd) { 45 45 /* paranoid */ verify(!thrd`next); 46 46 /* paranoid */ verify(!(&(*thrd)`next)); … … 51 51 52 52 static inline bool P(Semaphore0nary & this) { 53 $thread* thrd = active_thread();53 thread$ * thrd = active_thread(); 54 54 P(this, thrd); 55 55 park(); … … 57 57 } 58 58 59 static inline $thread* V(Semaphore0nary & this, bool doUnpark = true) {60 $thread* next;59 static inline thread$ * V(Semaphore0nary & this, bool doUnpark = true) { 60 thread$ * next; 61 61 lock(this.lock __cfaabi_dbg_ctx2); 62 62 for (;;) { … … 124 124 static inline bool P(ThreadBenaphore & this, bool wait) { return wait ? P(this) : tryP(this); } 125 125 126 static inline $thread* V(ThreadBenaphore & this, bool doUnpark = true) {126 static inline thread$ * V(ThreadBenaphore & this, bool doUnpark = true) { 127 127 if (V(this.ben)) return 0p; 128 128 return V(this.sem, doUnpark); … … 134 134 __spinlock_t lock; 135 135 int count; 136 __queue_t( $thread) waiting;136 __queue_t(thread$) waiting; 137 137 }; 138 138 … … 142 142 bool V (semaphore & this); 143 143 bool V (semaphore & this, unsigned count); 144 $thread* V (semaphore & this, bool );144 thread$ * V (semaphore & this, bool ); 145 145 146 146 //---------- … … 156 156 static inline size_t on_wait ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); } 157 157 static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); } 158 static inline void on_notify( single_acquisition_lock & this, struct $thread* t ) { on_notify( (blocking_lock &)this, t ); }158 static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); } 159 159 160 160 //---------- … … 170 170 static inline size_t on_wait ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); } 171 171 static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); } 172 static inline void on_notify( owner_lock & this, struct $thread* t ) { on_notify( (blocking_lock &)this, t ); }172 static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); } 173 173 174 174 struct fast_lock { 175 $thread* volatile owner;175 thread$ * volatile owner; 176 176 ThreadBenaphore sem; 177 177 }; … … 179 179 static inline void ?{}(fast_lock & this) { this.owner = 0p; } 180 180 181 static inline bool $try_lock(fast_lock & this, $thread* thrd) {182 $thread* exp = 0p;181 static inline bool $try_lock(fast_lock & this, thread$ * thrd) { 182 thread$ * exp = 0p; 183 183 return __atomic_compare_exchange_n(&this.owner, &exp, thrd, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED); 184 184 } … … 186 186 static inline void lock( fast_lock & this ) __attribute__((artificial)); 187 187 static inline void lock( fast_lock & this ) { 188 $thread* thrd = active_thread();188 thread$ * thrd = active_thread(); 189 189 /* paranoid */verify(thrd != this.owner); 190 190 … … 197 197 static inline bool try_lock( fast_lock & this ) __attribute__((artificial)); 198 198 static inline bool try_lock ( fast_lock & this ) { 199 $thread* thrd = active_thread();199 thread$ * thrd = active_thread(); 200 200 /* paranoid */ verify(thrd != this.owner); 201 
201 return $try_lock(this, thrd); 202 202 } 203 203 204 static inline $thread* unlock( fast_lock & this ) __attribute__((artificial));205 static inline $thread* unlock( fast_lock & this ) {204 static inline thread$ * unlock( fast_lock & this ) __attribute__((artificial)); 205 static inline thread$ * unlock( fast_lock & this ) { 206 206 /* paranoid */ verify(active_thread() == this.owner); 207 207 … … 216 216 static inline size_t on_wait( fast_lock & this ) { unlock(this); return 0; } 217 217 static inline void on_wakeup( fast_lock & this, size_t ) { lock(this); } 218 static inline void on_notify( fast_lock &, struct $thread* t ) { unpark(t); }218 static inline void on_notify( fast_lock &, struct thread$ * t ) { unpark(t); } 219 219 220 220 struct mcs_node { … … 248 248 249 249 // Current thread owning the lock 250 struct $thread* owner;250 struct thread$ * owner; 251 251 252 252 // List of blocked threads 253 dlist( $thread) blocked_threads;253 dlist( thread$ ) blocked_threads; 254 254 255 255 // Used for comparing and exchanging … … 343 343 // block until signalled 344 344 while (block(this)) if(try_lock_contention(this)) return true; 345 345 346 346 // this should never be reached as block(this) always returns true 347 347 return false; … … 385 385 // block until signalled 386 386 while (block(this)) if(try_lock_contention(this)) return true; 387 387 388 388 // this should never be reached as block(this) always returns true 389 389 return false; … … 395 395 if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return; 396 396 lock( spinlock __cfaabi_dbg_ctx2 ); 397 $thread* t = &try_pop_front( blocked_threads );397 thread$ * t = &try_pop_front( blocked_threads ); 398 398 unlock( spinlock ); 399 399 unpark( t ); 400 400 } 401 401 402 static inline void on_notify(linear_backoff_then_block_lock & this, struct $thread* t ) { unpark(t); }402 static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); } 403 403 static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; } 404 404 static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock_improved(this); } … … 408 408 trait is_blocking_lock(L & | sized(L)) { 409 409 // For synchronization locks to use when acquiring 410 void on_notify( L &, struct $thread* );410 void on_notify( L &, struct thread$ * ); 411 411 412 412 // For synchronization locks to use when releasing … … 442 442 int count; 443 443 }; 444 444 445 445 446 446 void ?{}( condition_variable(L) & this ); -
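Aside: fast_lock's uncontended path is a single CAS installing the acquiring thread as owner ($try_lock above); only on failure does lock() fall back to the benaphore. A runnable C sketch of just that fast path, with thread identity reduced to an opaque integer:

    #include <stdint.h>
    #include <stdio.h>

    struct fast_lock { uintptr_t volatile owner; };   /* 0 means unowned */

    /* Mirrors $try_lock: succeeds only if no thread currently owns the lock. */
    static int fast_try_lock(struct fast_lock *l, uintptr_t self) {
        uintptr_t expected = 0;
        return __atomic_compare_exchange_n(&l->owner, &expected, self,
                                           0, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
    }

    int main(void) {
        struct fast_lock l = { 0 };
        printf("first:  %d\n", fast_try_lock(&l, 0xA));   /* 1: acquired */
        printf("second: %d\n", fast_try_lock(&l, 0xB));   /* 0: already owned */
        return 0;
    }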
libcfa/src/concurrency/monitor.cfa
rcf444b6 r6ff08d8 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // $monitor.c--7 // monitor.cfa -- 8 8 // 9 9 // Author : Thierry Delisle … … 28 28 //----------------------------------------------------------------------------- 29 29 // Forward declarations 30 static inline void __set_owner ( $monitor * this, $thread* owner );31 static inline void __set_owner ( $monitor * storage [], __lock_size_t count, $thread* owner );32 static inline void set_mask ( $monitor* storage [], __lock_size_t count, const __waitfor_mask_t & mask );33 static inline void reset_mask( $monitor* this );34 35 static inline $thread * next_thread( $monitor* this );36 static inline bool is_accepted( $monitor* this, const __monitor_group_t & monitors );30 static inline void __set_owner ( monitor$ * this, thread$ * owner ); 31 static inline void __set_owner ( monitor$ * storage [], __lock_size_t count, thread$ * owner ); 32 static inline void set_mask ( monitor$ * storage [], __lock_size_t count, const __waitfor_mask_t & mask ); 33 static inline void reset_mask( monitor$ * this ); 34 35 static inline thread$ * next_thread( monitor$ * this ); 36 static inline bool is_accepted( monitor$ * this, const __monitor_group_t & monitors ); 37 37 38 38 static inline void lock_all ( __spinlock_t * locks [], __lock_size_t count ); 39 static inline void lock_all ( $monitor* source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );39 static inline void lock_all ( monitor$ * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ); 40 40 static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count ); 41 static inline void unlock_all( $monitor* locks [], __lock_size_t count );42 43 static inline void save ( $monitor* ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );44 static inline void restore( $monitor* ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );45 46 static inline void init ( __lock_size_t count, $monitor* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );47 static inline void init_push( __lock_size_t count, $monitor* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );48 49 static inline $thread* check_condition ( __condition_criterion_t * );41 static inline void unlock_all( monitor$ * locks [], __lock_size_t count ); 42 43 static inline void save ( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] ); 44 static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] ); 45 46 static inline void init ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ); 47 static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ); 48 49 static inline thread$ * check_condition ( __condition_criterion_t * ); 50 50 static inline void brand_condition ( condition & ); 51 static inline [ $thread *, int] search_entry_queue( const __waitfor_mask_t &, $monitor* monitors [], __lock_size_t count );51 static inline [thread$ *, int] search_entry_queue( const __waitfor_mask_t &, monitor$ * monitors [], __lock_size_t count ); 52 52 53 53 forall(T & | sized( T )) 54 54 static inline 
__lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val ); 55 55 static inline __lock_size_t count_max ( const __waitfor_mask_t & mask ); 56 static inline __lock_size_t aggregate ( $monitor* storage [], const __waitfor_mask_t & mask );56 static inline __lock_size_t aggregate ( monitor$ * storage [], const __waitfor_mask_t & mask ); 57 57 58 58 //----------------------------------------------------------------------------- … … 69 69 70 70 #define monitor_ctx( mons, cnt ) /* Define that create the necessary struct for internal/external scheduling operations */ \ 71 $monitor** monitors = mons; /* Save the targeted monitors */ \71 monitor$ ** monitors = mons; /* Save the targeted monitors */ \ 72 72 __lock_size_t count = cnt; /* Save the count to a local variable */ \ 73 73 unsigned int recursions[ count ]; /* Save the current recursion levels to restore them later */ \ … … 82 82 // Enter/Leave routines 83 83 // Enter single monitor 84 static void __enter( $monitor* this, const __monitor_group_t & group ) {85 $thread* thrd = active_thread();84 static void __enter( monitor$ * this, const __monitor_group_t & group ) { 85 thread$ * thrd = active_thread(); 86 86 87 87 // Lock the monitor spinlock … … 141 141 } 142 142 143 static void __dtor_enter( $monitor* this, fptr_t func, bool join ) {144 $thread* thrd = active_thread();143 static void __dtor_enter( monitor$ * this, fptr_t func, bool join ) { 144 thread$ * thrd = active_thread(); 145 145 #if defined( __CFA_WITH_VERIFY__ ) 146 146 bool is_thrd = this == &thrd->self_mon; … … 173 173 // because join will not release the monitor after it executed. 174 174 // to avoid that it sets the owner to the special value thrd | 1p before exiting 175 else if( this->owner == ( $thread*)(1 | (uintptr_t)thrd) ) {175 else if( this->owner == (thread$*)(1 | (uintptr_t)thrd) ) { 176 176 // restore the owner and just return 177 177 __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this); … … 191 191 192 192 __lock_size_t count = 1; 193 $monitor** monitors = &this;193 monitor$ ** monitors = &this; 194 194 __monitor_group_t group = { &this, 1, func }; 195 195 if( is_accepted( this, group) ) { … … 243 243 244 244 // Leave single monitor 245 void __leave( $monitor* this ) {245 void __leave( monitor$ * this ) { 246 246 // Lock the monitor spinlock 247 247 lock( this->lock __cfaabi_dbg_ctx2 ); … … 263 263 264 264 // Get the next thread, will be null on low contention monitor 265 $thread* new_owner = next_thread( this );265 thread$ * new_owner = next_thread( this ); 266 266 267 267 // Check the new owner is consistent with who we wake-up … … 278 278 279 279 // Leave single monitor for the last time 280 void __dtor_leave( $monitor* this, bool join ) {280 void __dtor_leave( monitor$ * this, bool join ) { 281 281 __cfaabi_dbg_debug_do( 282 282 if( active_thread() != this->owner ) { … … 288 288 ) 289 289 290 this->owner = ( $thread*)(1 | (uintptr_t)this->owner);291 } 292 293 void __thread_finish( $thread* thrd ) {294 $monitor* this = &thrd->self_mon;290 this->owner = (thread$*)(1 | (uintptr_t)this->owner); 291 } 292 293 void __thread_finish( thread$ * thrd ) { 294 monitor$ * this = &thrd->self_mon; 295 295 296 296 // Lock the monitor now … … 298 298 /* paranoid */ verify( this->lock.lock ); 299 299 /* paranoid */ verify( thrd->context.SP ); 300 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : $thread%p has been corrupted.\n StackPointer too large.\n", thrd );301 /* paranoid */ verifyf( 
((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : $thread%p has been corrupted.\n StackPointer too small.\n", thrd );300 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd ); 301 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd ); 302 302 /* paranoid */ verify( ! __preemption_enabled() ); 303 303 … … 311 311 312 312 // Fetch the next thread, can be null 313 $thread* new_owner = next_thread( this );313 thread$ * new_owner = next_thread( this ); 314 314 315 315 // Mark the state as fully halted … … 336 336 // Leave multiple monitor 337 337 // relies on the monitor array being sorted 338 static inline void leave( $monitor* monitors [], __lock_size_t count) {338 static inline void leave(monitor$ * monitors [], __lock_size_t count) { 339 339 for( __lock_size_t i = count - 1; i >= 0; i--) { 340 340 __leave( monitors[i] ); … … 344 344 // Ctor for monitor guard 345 345 // Sorts monitors before entering 346 void ?{}( monitor_guard_t & this, $monitor* m [], __lock_size_t count, fptr_t func ) {347 $thread* thrd = active_thread();346 void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) { 347 thread$ * thrd = active_thread(); 348 348 349 349 // Store current array … … 385 385 // Ctor for monitor guard 386 386 // Sorts monitors before entering 387 void ?{}( monitor_dtor_guard_t & this, $monitor* m [], fptr_t func, bool join ) {387 void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) { 388 388 // optimization 389 $thread* thrd = active_thread();389 thread$ * thrd = active_thread(); 390 390 391 391 // Store current array … … 415 415 //----------------------------------------------------------------------------- 416 416 // Internal scheduling types 417 void ?{}(__condition_node_t & this, $thread* waiting_thread, __lock_size_t count, uintptr_t user_info ) {417 void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) { 418 418 this.waiting_thread = waiting_thread; 419 419 this.count = count; … … 429 429 } 430 430 431 void ?{}(__condition_criterion_t & this, $monitor* target, __condition_node_t & owner ) {431 void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) { 432 432 this.ready = false; 433 433 this.target = target; … … 463 463 // Find the next thread(s) to run 464 464 __lock_size_t thread_count = 0; 465 $thread* threads[ count ];465 thread$ * threads[ count ]; 466 466 __builtin_memset( threads, 0, sizeof( threads ) ); 467 467 … … 471 471 // Remove any duplicate threads 472 472 for( __lock_size_t i = 0; i < count; i++) { 473 $thread* new_owner = next_thread( monitors[i] );473 thread$ * new_owner = next_thread( monitors[i] ); 474 474 insert_unique( threads, thread_count, new_owner ); 475 475 } … … 501 501 //Some more checking in debug 502 502 __cfaabi_dbg_debug_do( 503 $thread* this_thrd = active_thread();503 thread$ * this_thrd = active_thread(); 504 504 if ( this.monitor_count != this_thrd->monitors.size ) { 505 505 abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size ); … … 555 555 556 556 //Find the thread to run 557 $thread* signallee = pop_head( this.blocked 
)->waiting_thread;557 thread$ * signallee = pop_head( this.blocked )->waiting_thread; 558 558 __set_owner( monitors, count, signallee ); 559 559 … … 608 608 // Create one! 609 609 __lock_size_t max = count_max( mask ); 610 $monitor* mon_storage[max];610 monitor$ * mon_storage[max]; 611 611 __builtin_memset( mon_storage, 0, sizeof( mon_storage ) ); 612 612 __lock_size_t actual_count = aggregate( mon_storage, mask ); … … 626 626 { 627 627 // Check if the entry queue 628 $thread* next; int index;628 thread$ * next; int index; 629 629 [next, index] = search_entry_queue( mask, monitors, count ); 630 630 … … 636 636 verifyf( accepted.size == 1, "ERROR: Accepted dtor has more than 1 mutex parameter." ); 637 637 638 $monitor* mon2dtor = accepted[0];638 monitor$ * mon2dtor = accepted[0]; 639 639 verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." ); 640 640 … … 730 730 // Utilities 731 731 732 static inline void __set_owner( $monitor * this, $thread* owner ) {732 static inline void __set_owner( monitor$ * this, thread$ * owner ) { 733 733 /* paranoid */ verify( this->lock.lock ); 734 734 … … 740 740 } 741 741 742 static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread* owner ) {742 static inline void __set_owner( monitor$ * monitors [], __lock_size_t count, thread$ * owner ) { 743 743 /* paranoid */ verify ( monitors[0]->lock.lock ); 744 744 /* paranoid */ verifyf( monitors[0]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[0]->owner, monitors[0]->recursion, monitors[0] ); … … 753 753 } 754 754 755 static inline void set_mask( $monitor* storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {755 static inline void set_mask( monitor$ * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) { 756 756 for( __lock_size_t i = 0; i < count; i++) { 757 757 storage[i]->mask = mask; … … 759 759 } 760 760 761 static inline void reset_mask( $monitor* this ) {761 static inline void reset_mask( monitor$ * this ) { 762 762 this->mask.accepted = 0p; 763 763 this->mask.data = 0p; … … 765 765 } 766 766 767 static inline $thread * next_thread( $monitor* this ) {767 static inline thread$ * next_thread( monitor$ * this ) { 768 768 //Check the signaller stack 769 769 __cfaabi_dbg_print_safe( "Kernel : mon %p AS-stack top %p\n", this, this->signal_stack.top); … … 781 781 // No signaller thread 782 782 // Get the next thread in the entry_queue 783 $thread* new_owner = pop_head( this->entry_queue );783 thread$ * new_owner = pop_head( this->entry_queue ); 784 784 /* paranoid */ verifyf( !this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 785 785 /* paranoid */ verify( !new_owner || new_owner->link.next == 0p ); … … 789 789 } 790 790 791 static inline bool is_accepted( $monitor* this, const __monitor_group_t & group ) {791 static inline bool is_accepted( monitor$ * this, const __monitor_group_t & group ) { 792 792 __acceptable_t * it = this->mask.data; // Optim 793 793 __lock_size_t count = this->mask.size; … … 811 811 } 812 812 813 static inline void init( __lock_size_t count, $monitor* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {813 static inline void init( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) { 814 814 for( __lock_size_t i = 0; i < count; i++) { 815 815 (criteria[i]){ monitors[i], waiter 
}; … … 819 819 } 820 820 821 static inline void init_push( __lock_size_t count, $monitor* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {821 static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) { 822 822 for( __lock_size_t i = 0; i < count; i++) { 823 823 (criteria[i]){ monitors[i], waiter }; … … 835 835 } 836 836 837 static inline void lock_all( $monitor* source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {837 static inline void lock_all( monitor$ * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) { 838 838 for( __lock_size_t i = 0; i < count; i++ ) { 839 839 __spinlock_t * l = &source[i]->lock; … … 849 849 } 850 850 851 static inline void unlock_all( $monitor* locks [], __lock_size_t count ) {851 static inline void unlock_all( monitor$ * locks [], __lock_size_t count ) { 852 852 for( __lock_size_t i = 0; i < count; i++ ) { 853 853 unlock( locks[i]->lock ); … … 856 856 857 857 static inline void save( 858 $monitor* ctx [],858 monitor$ * ctx [], 859 859 __lock_size_t count, 860 860 __attribute((unused)) __spinlock_t * locks [], … … 869 869 870 870 static inline void restore( 871 $monitor* ctx [],871 monitor$ * ctx [], 872 872 __lock_size_t count, 873 873 __spinlock_t * locks [], … … 887 887 // 2 - Checks if all the monitors are ready to run 888 888 // if so return the thread to run 889 static inline $thread* check_condition( __condition_criterion_t * target ) {889 static inline thread$ * check_condition( __condition_criterion_t * target ) { 890 890 __condition_node_t * node = target->owner; 891 891 unsigned short count = node->count; … … 910 910 911 911 static inline void brand_condition( condition & this ) { 912 $thread* thrd = active_thread();912 thread$ * thrd = active_thread(); 913 913 if( !this.monitors ) { 914 914 // __cfaabi_dbg_print_safe( "Branding\n" ); … … 916 916 this.monitor_count = thrd->monitors.size; 917 917 918 this.monitors = ( $monitor**)malloc( this.monitor_count * sizeof( *this.monitors ) );918 this.monitors = (monitor$ **)malloc( this.monitor_count * sizeof( *this.monitors ) ); 919 919 for( int i = 0; i < this.monitor_count; i++ ) { 920 920 this.monitors[i] = thrd->monitors[i]; … … 923 923 } 924 924 925 static inline [ $thread *, int] search_entry_queue( const __waitfor_mask_t & mask, $monitor* monitors [], __lock_size_t count ) {926 927 __queue_t( $thread) & entry_queue = monitors[0]->entry_queue;925 static inline [thread$ *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor$ * monitors [], __lock_size_t count ) { 926 927 __queue_t(thread$) & entry_queue = monitors[0]->entry_queue; 928 928 929 929 // For each thread in the entry-queue 930 for( $thread** thrd_it = &entry_queue.head;930 for( thread$ ** thrd_it = &entry_queue.head; 931 931 (*thrd_it) != 1p; 932 932 thrd_it = &(*thrd_it)->link.next … … 972 972 } 973 973 974 static inline __lock_size_t aggregate( $monitor* storage [], const __waitfor_mask_t & mask ) {974 static inline __lock_size_t aggregate( monitor$ * storage [], const __waitfor_mask_t & mask ) { 975 975 __lock_size_t size = 0; 976 976 for( __lock_size_t i = 0; i < mask.size; i++ ) { -
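A note on next_thread above: it encodes the monitor's wake-up priority, draining the signaller stack (threads whose wait criteria were signalled) before the entry queue admits a new thread. A simplified C sketch of that ordering, with the per-criterion bookkeeping of check_condition collapsed away (names are illustrative):

    #include <stddef.h>

    typedef struct thread thread;
    struct tstack { thread *top;  };
    struct tqueue { thread *head; };

    static thread *pop_stack(struct tstack *s) { (void)s; return NULL; } /* stub */
    static thread *pop_queue(struct tqueue *q) { (void)q; return NULL; } /* stub */

    struct monitor_desc {
        struct tstack signal_stack;   /* waiters made ready by signal() */
        struct tqueue entry_queue;    /* threads blocked entering the monitor */
    };

    /* Signalled waiters outrank new entrants; NULL means the monitor is free. */
    static thread *next_thread(struct monitor_desc *m) {
        thread *t = pop_stack(&m->signal_stack);
        if (t == NULL) t = pop_queue(&m->entry_queue);
        return t;
    }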
libcfa/src/concurrency/monitor.hfa
rcf444b6 r6ff08d8 23 23 24 24 trait is_monitor(T &) { 25 $monitor* get_monitor( T & );25 monitor$ * get_monitor( T & ); 26 26 void ^?{}( T & mutex ); 27 27 }; 28 28 29 static inline void ?{}( $monitor& this) with( this ) {29 static inline void ?{}(monitor$ & this) with( this ) { 30 30 lock{}; 31 31 entry_queue{}; … … 39 39 } 40 40 41 static inline void ^?{}( $monitor& ) {}41 static inline void ^?{}(monitor$ & ) {} 42 42 43 43 struct monitor_guard_t { 44 $monitor** m;44 monitor$ ** m; 45 45 __lock_size_t count; 46 46 __monitor_group_t prev; 47 47 }; 48 48 49 void ?{}( monitor_guard_t & this, $monitor** m, __lock_size_t count, void (*func)() );49 void ?{}( monitor_guard_t & this, monitor$ ** m, __lock_size_t count, void (*func)() ); 50 50 void ^?{}( monitor_guard_t & this ); 51 51 52 52 struct monitor_dtor_guard_t { 53 $monitor* m;53 monitor$ * m; 54 54 __monitor_group_t prev; 55 55 bool join; 56 56 }; 57 57 58 void ?{}( monitor_dtor_guard_t & this, $monitor** m, void (*func)(), bool join );58 void ?{}( monitor_dtor_guard_t & this, monitor$ ** m, void (*func)(), bool join ); 59 59 void ^?{}( monitor_dtor_guard_t & this ); 60 60 … … 73 73 74 74 // The monitor this criterion concerns 75 $monitor* target;75 monitor$ * target; 76 76 77 77 // The parent node to which this criterion belongs … … 88 88 struct __condition_node_t { 89 89 // Thread that needs to be woken when all criteria are met 90 $thread* waiting_thread;90 thread$ * waiting_thread; 91 91 92 92 // Array of criteria (Criterions are contiguous in memory) … … 107 107 } 108 108 109 void ?{}(__condition_node_t & this, $thread* waiting_thread, __lock_size_t count, uintptr_t user_info );109 void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ); 110 110 void ?{}(__condition_criterion_t & this ); 111 void ?{}(__condition_criterion_t & this, $monitor* target, __condition_node_t * owner );111 void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner ); 112 112 113 113 struct condition { … … 116 116 117 117 // Array of monitor pointers (Monitors are NOT contiguous in memory) 118 $monitor** monitors;118 monitor$ ** monitors; 119 119 120 120 // Number of monitors in the array -
libcfa/src/concurrency/mutex.cfa
rcf444b6 r6ff08d8 122 122 recursion_count--; 123 123 if( recursion_count == 0 ) { 124 $thread* thrd = pop_head( blocked_threads );124 thread$ * thrd = pop_head( blocked_threads ); 125 125 owner = thrd; 126 126 recursion_count = (thrd ? 1 : 0); -
libcfa/src/concurrency/mutex.hfa
rcf444b6 r6ff08d8 36 36 37 37 // List of blocked threads 38 __queue_t(struct $thread) blocked_threads;38 __queue_t(struct thread$) blocked_threads; 39 39 40 40 // Locked flag … … 55 55 56 56 // List of blocked threads 57 __queue_t(struct $thread) blocked_threads;57 __queue_t(struct thread$) blocked_threads; 58 58 59 59 // Current thread owning the lock 60 struct $thread* owner;60 struct thread$ * owner; 61 61 62 62 // Number of recursion level … … 83 83 84 84 // List of blocked threads 85 __queue_t(struct $thread) blocked_threads;85 __queue_t(struct thread$) blocked_threads; 86 86 }; 87 87 -
libcfa/src/concurrency/preemption.cfa
rcf444b6 r6ff08d8 61 61 // FwdDeclarations : timeout handlers 62 62 static void preempt( processor * this ); 63 static void timeout( $thread* this );63 static void timeout( thread$ * this ); 64 64 65 65 // FwdDeclarations : Signal handlers … … 420 420 421 421 // reserved for future use 422 static void timeout( $thread* this ) {422 static void timeout( thread$ * this ) { 423 423 unpark( this ); 424 424 } -
libcfa/src/concurrency/ready_queue.cfa
rcf444b6 r6ff08d8 67 67 #endif 68 68 69 static inline struct $thread* try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));70 static inline struct $thread* try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));71 static inline struct $thread* search(struct cluster * cltr);69 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)); 70 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)); 71 static inline struct thread$ * search(struct cluster * cltr); 72 72 static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred); 73 73 … … 274 274 //----------------------------------------------------------------------- 275 275 #if defined(USE_CPU_WORK_STEALING) 276 __attribute__((hot)) void push(struct cluster * cltr, struct $thread* thrd, bool push_local) with (cltr->ready_queue) {276 __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, bool push_local) with (cltr->ready_queue) { 277 277 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr); 278 278 … … 316 316 317 317 // Pop from the ready queue from a given cluster 318 __attribute__((hot)) $thread* pop_fast(struct cluster * cltr) with (cltr->ready_queue) {318 __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) { 319 319 /* paranoid */ verify( lanes.count > 0 ); 320 320 /* paranoid */ verify( kernelTLS().this_processor ); … … 371 371 proc->rdq.target = -1u; 372 372 if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) { 373 $thread* t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));373 thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help)); 374 374 proc->rdq.last = target; 375 375 if(t) return t; … … 379 379 unsigned last = proc->rdq.last; 380 380 if(last != -1u && lanes.tscs[last].tv < cutoff && ts(lanes.data[last]) < cutoff) { 381 $thread* t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help));381 thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help)); 382 382 if(t) return t; 383 383 } … … 389 389 for(READYQ_SHARD_FACTOR) { 390 390 unsigned i = start + (proc->rdq.itr++ % READYQ_SHARD_FACTOR); 391 if( $thread* t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;391 if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t; 392 392 } 393 393 … … 396 396 } 397 397 398 __attribute__((hot)) struct $thread* pop_slow(struct cluster * cltr) with (cltr->ready_queue) {398 __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) { 399 399 processor * const proc = kernelTLS().this_processor; 400 400 unsigned last = proc->rdq.last; 401 401 if(last != -1u) { 402 struct $thread* t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.steal));402 struct thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.steal)); 403 403 if(t) return t; 404 404 proc->rdq.last = -1u; … … 408 408 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal)); 409 409 } 410 __attribute__((hot)) struct $thread* pop_search(struct cluster * cltr) {410 __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) { 411 411 return search(cltr); 412 412 } … … 435 435 } 436 436 437 __attribute__((hot)) void push(struct cluster * cltr, struct $thread* thrd, bool push_local) with 
(cltr->ready_queue) {437 __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, bool push_local) with (cltr->ready_queue) { 438 438 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr); 439 439 … … 482 482 483 483 // Pop from the ready queue from a given cluster 484 __attribute__((hot)) $thread* pop_fast(struct cluster * cltr) with (cltr->ready_queue) {484 __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) { 485 485 /* paranoid */ verify( lanes.count > 0 ); 486 486 /* paranoid */ verify( kernelTLS().this_processor ); … … 506 506 507 507 // try popping from the 2 picked lists 508 struct $thread* thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help)));508 struct thread$ * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help))); 509 509 if(thrd) { 510 510 return thrd; … … 516 516 } 517 517 518 __attribute__((hot)) struct $thread* pop_slow(struct cluster * cltr) { return pop_fast(cltr); }519 __attribute__((hot)) struct $thread* pop_search(struct cluster * cltr) {518 __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) { return pop_fast(cltr); } 519 __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) { 520 520 return search(cltr); 521 521 } 522 522 #endif 523 523 #if defined(USE_WORK_STEALING) 524 __attribute__((hot)) void push(struct cluster * cltr, struct $thread* thrd, bool push_local) with (cltr->ready_queue) {524 __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, bool push_local) with (cltr->ready_queue) { 525 525 __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr); 526 526 … … 576 576 577 577 // Pop from the ready queue from a given cluster 578 __attribute__((hot)) $thread* pop_fast(struct cluster * cltr) with (cltr->ready_queue) {578 __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) { 579 579 /* paranoid */ verify( lanes.count > 0 ); 580 580 /* paranoid */ verify( kernelTLS().this_processor ); … … 598 598 const unsigned long long cutoff = proc->rdq.cutoff > bias ? 
proc->rdq.cutoff - bias : proc->rdq.cutoff; 599 599 if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) { 600 $thread* t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));600 thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help)); 601 601 if(t) return t; 602 602 } … … 605 605 for(READYQ_SHARD_FACTOR) { 606 606 unsigned i = proc->rdq.id + (proc->rdq.itr++ % READYQ_SHARD_FACTOR); 607 if( $thread* t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;607 if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t; 608 608 } 609 609 return 0p; 610 610 } 611 611 612 __attribute__((hot)) struct $thread* pop_slow(struct cluster * cltr) with (cltr->ready_queue) {612 __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) { 613 613 unsigned i = __tls_rand() % lanes.count; 614 614 return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal)); 615 615 } 616 616 617 __attribute__((hot)) struct $thread* pop_search(struct cluster * cltr) with (cltr->ready_queue) {617 __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) with (cltr->ready_queue) { 618 618 return search(cltr); 619 619 } … … 628 628 //----------------------------------------------------------------------- 629 629 // try to pop from a lane given by index w 630 static inline struct $thread* try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {630 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) { 631 631 __STATS( stats.attempt++; ) 632 632 … … 651 651 652 652 // Actually pop the list 653 struct $thread* thrd;653 struct thread$ * thrd; 654 654 unsigned long long tsv; 655 655 [thrd, tsv] = pop(lane); … … 678 678 // try to pop from any lanes making sure you don't miss any threads push 679 679 // before the start of the function 680 static inline struct $thread* search(struct cluster * cltr) with (cltr->ready_queue) {680 static inline struct thread$ * search(struct cluster * cltr) with (cltr->ready_queue) { 681 681 /* paranoid */ verify( lanes.count > 0 ); 682 682 unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED ); … … 684 684 for(i; count) { 685 685 unsigned idx = (offset + i) % count; 686 struct $thread* thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));686 struct thread$ * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search)); 687 687 if(thrd) { 688 688 return thrd; … … 719 719 //----------------------------------------------------------------------- 720 720 // Given 2 indexes, pick the list with the oldest push an try to pop from it 721 static inline struct $thread* try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {721 static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) { 722 722 // Pick the bet list 723 723 int w = i; … … 854 854 // As long as we can pop from this lane to push the threads somewhere else in the queue 855 855 while(!is_empty(lanes.data[idx])) { 856 struct $thread* thrd;856 struct thread$ * thrd; 857 857 unsigned long long _; 858 858 [thrd, _] = pop(lanes.data[idx]); -
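Aside: the two-index try_pop above ("given 2 indexes, pick the list with the oldest push") is the power-of-two-choices heuristic: sample two lanes and steal from the one whose head carries the older timestamp, which balances lanes without global coordination. A runnable C sketch of only the lane-picking step (timestamps abstracted into an array):

    #include <stdio.h>
    #include <stdlib.h>

    /* ts[k] = timestamp at the head of lane k; 0 means the lane looks empty. */
    static unsigned pick_lane(const unsigned long long *ts, unsigned count) {
        unsigned i = (unsigned)rand() % count;
        unsigned j = (unsigned)rand() % count;
        /* Prefer the non-empty lane whose head thread has waited longest;
           a miss is acceptable because pop is allowed to fail spuriously. */
        if (ts[i] == 0) return j;
        if (ts[j] == 0) return i;
        return ts[j] < ts[i] ? j : i;
    }

    int main(void) {
        unsigned long long ts[4] = { 0, 42, 7, 99 };
        printf("pop from lane %u\n", pick_lane(ts, 4));
        return 0;
    }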
libcfa/src/concurrency/ready_subqueue.hfa
rcf444b6 r6ff08d8 7 7 // Intrusives lanes which are used by the relaxed ready queue 8 8 struct __attribute__((aligned(128))) __intrusive_lane_t { 9 struct $thread* prev;9 struct thread$ * prev; 10 10 11 11 // spin lock protecting the queue … … 20 20 21 21 // Get the head pointer (one before the first element) from the anchor 22 static inline $thread* mock_head(const __intrusive_lane_t & this) {23 $thread * rhead = ($thread*)(24 (uintptr_t)( &this.anchor ) - __builtin_offsetof( $thread, link )22 static inline thread$ * mock_head(const __intrusive_lane_t & this) { 23 thread$ * rhead = (thread$ *)( 24 (uintptr_t)( &this.anchor ) - __builtin_offsetof( thread$, link ) 25 25 ); 26 26 return rhead; … … 38 38 39 39 // We add a boat-load of assertions here because the anchor code is very fragile 40 /* paranoid */ _Static_assert( offsetof( $thread, link ) == offsetof(__intrusive_lane_t, anchor) );41 /* paranoid */ verify( offsetof( $thread, link ) == offsetof(__intrusive_lane_t, anchor) );42 /* paranoid */ verify( ((uintptr_t)( mock_head(this) ) + offsetof( $thread, link )) == (uintptr_t)(&this.anchor) );40 /* paranoid */ _Static_assert( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) ); 41 /* paranoid */ verify( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) ); 42 /* paranoid */ verify( ((uintptr_t)( mock_head(this) ) + offsetof( thread$, link )) == (uintptr_t)(&this.anchor) ); 43 43 /* paranoid */ verify( &mock_head(this)->link.next == &this.anchor.next ); 44 44 /* paranoid */ verify( &mock_head(this)->link.ts == &this.anchor.ts ); … … 61 61 // Push a thread onto this lane 62 62 // returns true of lane was empty before push, false otherwise 63 static inline void push( __intrusive_lane_t & this, $thread* node ) {63 static inline void push( __intrusive_lane_t & this, thread$ * node ) { 64 64 /* paranoid */ verify( this.lock ); 65 65 /* paranoid */ verify( node->link.next == 0p ); … … 91 91 // returns popped 92 92 // returns true of lane was empty before push, false otherwise 93 static inline [* $thread, unsigned long long] pop( __intrusive_lane_t & this ) {93 static inline [* thread$, unsigned long long] pop( __intrusive_lane_t & this ) { 94 94 /* paranoid */ verify( this.lock ); 95 95 /* paranoid */ verify( this.anchor.next != 0p ); … … 99 99 // Get the relevant nodes locally 100 100 unsigned long long ts = this.anchor.ts; 101 $thread* node = this.anchor.next;101 thread$ * node = this.anchor.next; 102 102 this.anchor.next = node->link.next; 103 103 this.anchor.ts = node->link.ts; -
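Aside: mock_head above is the inverse of the usual container_of trick; it fabricates a pointer to a phantom node positioned so that its link field is exactly the lane's anchor, letting push and pop treat an empty lane uniformly instead of branching on it. A runnable C sketch of the same offsetof arithmetic:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct node;
    struct link { struct node *next; unsigned long long ts; };
    struct node { int payload; struct link link; };

    struct lane { struct link anchor; };   /* a bare link, not a whole node */

    /* Step back from the anchor to where the enclosing node would start. */
    static struct node *mock_head(struct lane *l) {
        return (struct node *)((uintptr_t)&l->anchor - offsetof(struct node, link));
    }

    int main(void) {
        struct lane l = { { 0, 0 } };
        /* The phantom node's link field aliases the anchor exactly: */
        assert(&mock_head(&l)->link == &l.anchor);
        return 0;
    }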
libcfa/src/concurrency/thread.cfa
rcf444b6 r6ff08d8 27 27 //----------------------------------------------------------------------------- 28 28 // Thread ctors and dtors 29 void ?{}( $thread& this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {29 void ?{}(thread$ & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) { 30 30 context{ 0p, 0p }; 31 31 self_cor{ name, storage, storageSize }; … … 57 57 } 58 58 59 void ^?{}( $thread& this) with( this ) {59 void ^?{}(thread$& this) with( this ) { 60 60 #if defined( __CFA_WITH_VERIFY__ ) 61 61 canary = 0xDEADDEADDEADDEADp; … … 87 87 void ?{}( thread_dtor_guard_t & this, 88 88 T & thrd, void(*cancelHandler)(ThreadCancelled(T) &)) { 89 $monitor* m = get_monitor(thrd);90 $thread* desc = get_thread(thrd);89 monitor$ * m = get_monitor(thrd); 90 thread$ * desc = get_thread(thrd); 91 91 92 92 // Setup the monitor guard … … 130 130 forall( T & | is_thread(T) ) 131 131 void __thrd_start( T & this, void (*main_p)(T &) ) { 132 $thread* this_thrd = get_thread(this);132 thread$ * this_thrd = get_thread(this); 133 133 134 134 disable_interrupts(); -
libcfa/src/concurrency/thread.hfa
rcf444b6 r6ff08d8 29 29 void ^?{}(T& mutex this); 30 30 void main(T& this); 31 $thread* get_thread(T& this);31 thread$ * get_thread(T& this); 32 32 }; 33 33 … … 45 45 // Inline getters for threads/coroutines/monitors 46 46 forall( T & | is_thread(T) ) 47 static inline $coroutine* get_coroutine(T & this) __attribute__((const)) { return &get_thread(this)->self_cor; }47 static inline coroutine$ * get_coroutine(T & this) __attribute__((const)) { return &get_thread(this)->self_cor; } 48 48 49 49 forall( T & | is_thread(T) ) 50 static inline $monitor* get_monitor (T & this) __attribute__((const)) { return &get_thread(this)->self_mon; }50 static inline monitor$ * get_monitor (T & this) __attribute__((const)) { return &get_thread(this)->self_mon; } 51 51 52 static inline $coroutine* get_coroutine($thread* this) __attribute__((const)) { return &this->self_cor; }53 static inline $monitor * get_monitor ($thread* this) __attribute__((const)) { return &this->self_mon; }52 static inline coroutine$ * get_coroutine(thread$ * this) __attribute__((const)) { return &this->self_cor; } 53 static inline monitor$ * get_monitor (thread$ * this) __attribute__((const)) { return &this->self_mon; } 54 54 55 55 //----------------------------------------------------------------------------- … … 62 62 //----------------------------------------------------------------------------- 63 63 // Ctors and dtors 64 void ?{}( $thread& this, const char * const name, struct cluster & cl, void * storage, size_t storageSize );65 void ^?{}( $thread& this);64 void ?{}(thread$ & this, const char * const name, struct cluster & cl, void * storage, size_t storageSize ); 65 void ^?{}(thread$ & this); 66 66 67 static inline void ?{}( $thread& this) { this{ "Anonymous Thread", *mainCluster, 0p, 65000 }; }68 static inline void ?{}( $thread& this, size_t stackSize ) { this{ "Anonymous Thread", *mainCluster, 0p, stackSize }; }69 static inline void ?{}( $thread& this, void * storage, size_t storageSize ) { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }70 static inline void ?{}( $thread& this, struct cluster & cl ) { this{ "Anonymous Thread", cl, 0p, 65000 }; }71 static inline void ?{}( $thread& this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, 0p, stackSize }; }72 static inline void ?{}( $thread& this, struct cluster & cl, void * storage, size_t storageSize ) { this{ "Anonymous Thread", cl, storage, storageSize }; }73 static inline void ?{}( $thread& this, const char * const name) { this{ name, *mainCluster, 0p, 65000 }; }74 static inline void ?{}( $thread& this, const char * const name, struct cluster & cl ) { this{ name, cl, 0p, 65000 }; }75 static inline void ?{}( $thread& this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; }67 static inline void ?{}(thread$ & this) { this{ "Anonymous Thread", *mainCluster, 0p, 65000 }; } 68 static inline void ?{}(thread$ & this, size_t stackSize ) { this{ "Anonymous Thread", *mainCluster, 0p, stackSize }; } 69 static inline void ?{}(thread$ & this, void * storage, size_t storageSize ) { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; } 70 static inline void ?{}(thread$ & this, struct cluster & cl ) { this{ "Anonymous Thread", cl, 0p, 65000 }; } 71 static inline void ?{}(thread$ & this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, 0p, stackSize }; } 72 static inline void ?{}(thread$ & this, struct cluster & cl, void * storage, size_t storageSize ) { this{ "Anonymous 
Thread", cl, storage, storageSize }; } 73 static inline void ?{}(thread$ & this, const char * const name) { this{ name, *mainCluster, 0p, 65000 }; } 74 static inline void ?{}(thread$ & this, const char * const name, struct cluster & cl ) { this{ name, cl, 0p, 65000 }; } 75 static inline void ?{}(thread$ & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; } 76 76 77 77 struct thread_dtor_guard_t { … … 111 111 // Unpark a thread, if the thread is already blocked, schedule it 112 112 // if the thread is not yet block, signal that it should rerun immediately 113 void unpark( $thread* this );113 void unpark( thread$ * this ); 114 114 115 115 forall( T & | is_thread(T) ) -
src/Concurrency/Keywords.cc
rcf444b6 r6ff08d8
 122 122 //    int data;                        int data;
 123 123 //    a_struct_t more_data;            a_struct_t more_data;
 124 //                            =>        $thread __thrd_d;
 124 //                            =>        thread$ __thrd_d;
 125 125 // };                               };
 126 // static inline $thread* get_thread( MyThread * this ) { return &this->__thrd_d; }
 126 // static inline thread$ * get_thread( MyThread * this ) { return &this->__thrd_d; }
 127 127 //
 128 128 class ThreadKeyword final : public ConcurrentSueKeyword {
 … …
 130 130 
 131 131 	ThreadKeyword() : ConcurrentSueKeyword(
 132 		"$thread",
 132 		"thread$",
 133 133 		"__thrd",
 134 134 		"get_thread",
 … …
 155 155 //    int data;                        int data;
 156 156 //    a_struct_t more_data;            a_struct_t more_data;
 157 //                            =>        $coroutine __cor_d;
 157 //                            =>        coroutine$ __cor_d;
 158 158 // };                               };
 159 // static inline $coroutine* get_coroutine( MyCoroutine * this ) { return &this->__cor_d; }
 159 // static inline coroutine$ * get_coroutine( MyCoroutine * this ) { return &this->__cor_d; }
 160 160 //
 161 161 class CoroutineKeyword final : public ConcurrentSueKeyword {
 … …
 163 163 
 164 164 	CoroutineKeyword() : ConcurrentSueKeyword(
 165 		"$coroutine",
 165 		"coroutine$",
 166 166 		"__cor",
 167 167 		"get_coroutine",
 … …
 190 190 //    int data;                        int data;
 191 191 //    a_struct_t more_data;            a_struct_t more_data;
 192 //                            =>        $monitor __mon_d;
 192 //                            =>        monitor$ __mon_d;
 193 193 // };                               };
 194 // static inline $monitor* get_monitor( MyMonitor * this ) { return &this->__mon_d; }
 194 // static inline monitor$ * get_monitor( MyMonitor * this ) { return &this->__mon_d; }
 195 195 //
 196 196 class MonitorKeyword final : public ConcurrentSueKeyword {
 … …
 198 198 
 199 199 	MonitorKeyword() : ConcurrentSueKeyword(
 200 		"$monitor",
 200 		"monitor$",
 201 201 		"__mon",
 202 202 		"get_monitor",
 … …
 230 230 
 231 231 	GeneratorKeyword() : ConcurrentSueKeyword(
 232 		"$generator",
 232 		"generator$",
 233 233 		"__generator_state",
 234 234 		"get_generator",
 235 		"Unable to find builtin type $generator\n",
 235 		"Unable to find builtin type generator$\n",
 236 236 		"",
 237 237 		true,
 … …
 292 292 // Handles mutex routine definitions:
 293 293 //    void foo( A * mutex a, B * mutex b, int i ) {      void foo( A * a, B * b, int i ) {
 294 //        $monitor* __monitors[] = { get_monitor(a), get_monitor(b) };
 294 //        monitor$ * __monitors[] = { get_monitor(a), get_monitor(b) };
 295 295 //        monitor_guard_t __guard = { __monitors, 2 };
 296 296 //    /*Some code*/                                =>    /*Some code*/
 … …
 333 333 // Handles mutex routine definitions:
 334 334 //    void foo( A * mutex a, B * mutex b, int i ) {      void foo( A * a, B * b, int i ) {
 335 //        $monitor* __monitors[] = { get_monitor(a), get_monitor(b) };
 335 //        monitor$ * __monitors[] = { get_monitor(a), get_monitor(b) };
 336 336 //        monitor_guard_t __guard = { __monitors, 2 };
 337 337 //    /*Some code*/                                =>    /*Some code*/
 … …
 449 449 Expression * ConcurrentSueKeyword::postmutate( KeywordCastExpr * cast ) {
 450 450 	if ( cast_target == cast->target ) {
 451 		// convert (thread &)t to ($thread &)*get_thread(t), etc.
 451 		// convert (thread &)t to (thread$ &)*get_thread(t), etc.
 452 452 		if( !type_decl ) SemanticError( cast, context_error );
 453 453 		if( !dtor_decl ) SemanticError( cast, context_error );
 … …
 919 919 void MutexKeyword::postvisit(StructDecl* decl) {
 920 920 
 921 	if( decl->name == "$monitor" && decl->body ) {
 921 	if( decl->name == "monitor$" && decl->body ) {
 922 922 		assert( !monitor_decl );
 923 923 		monitor_decl = decl;
 … …
 1020 1020 	);
 1021 1021 
 1022 	// $monitor* __monitors[] = { get_monitor(a), get_monitor(b) };
 1022 	// monitor$ * __monitors[] = { get_monitor(a), get_monitor(b) };
 1023 1023 	body->push_front( new DeclStmt( monitors ) );
 1024 1024 }
 … …
 1117 1117 	);
 1118 1118 
 1119 	// $monitor* __monitors[] = { get_monitor(a), get_monitor(b) };
 1119 	// monitor$ * __monitors[] = { get_monitor(a), get_monitor(b) };
 1120 1120 	body->push_front( new DeclStmt( monitors ) );
 1121 1121 }
 … …
 1125 1125 //=============================================================================================
 1126 1126 void ThreadStarter::previsit( StructDecl * decl ) {
 1127 	if( decl->name == "$thread" && decl->body ) {
 1127 	if( decl->name == "thread$" && decl->body ) {
 1128 1128 		assert( !thread_decl );
 1129 1129 		thread_decl = decl;
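The comments in this pass spell out the whole expansion. As a rough, hand-written illustration of what it does to a user declaration (MyThread and its field are invented for the example), the keyword form

    thread MyThread {
        int data;
    };

conceptually becomes

    struct MyThread {
        int data;
        thread$ __thrd_d;     // runtime descriptor appended by the keyword pass
    };
    static inline thread$ * get_thread( MyThread * this ) { return &this->__thrd_d; }

so generic code can reach the descriptor of any thread type through get_thread.
-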
src/Concurrency/Waitfor.cc
rcf444b6 r6ff08d8
 244 244 		decl_mask = decl;
 245 245 	}
 246 	else if( decl->name == "$monitor" ) {
 246 	else if( decl->name == "monitor$" ) {
 247 247 		assert( !decl_monitor );
 248 248 		decl_monitor = decl;
-
src/ResolvExpr/AlternativeFinder.cc
rcf444b6 r6ff08d8
 1302 1302 
 1303 1303 	try {
 1304 		// Attempt 1 : turn (thread&)X into ($thread&)X.__thrd
 1304 		// Attempt 1 : turn (thread&)X into (thread$&)X.__thrd
 1305 1305 		// Clone is purely for memory management
 1306 1306 		std::unique_ptr<Expression> tech1 { new UntypedMemberExpr(new NameExpr(castExpr->concrete_target.field), castExpr->arg->clone()) };
 … …
 1313 1313 	} catch(SemanticErrorException & ) {}
 1314 1314 
 1315 	// Fallback : turn (thread&)X into ($thread&)*get_thread(X)
 1315 	// Fallback : turn (thread&)X into (thread$&)*get_thread(X)
 1316 1316 	std::unique_ptr<Expression> fallback { UntypedExpr::createDeref( new UntypedExpr(new NameExpr(castExpr->concrete_target.getter), { castExpr->arg->clone() })) };
 1317 1317 	// don't prune here, since it's guaranteed all alternatives will have the same type
-
src/ResolvExpr/CandidateFinder.cpp
rcf444b6 r6ff08d8
 1180 1180 
 1181 1181 	try {
 1182 		// Attempt 1 : turn (thread&)X into ($thread&)X.__thrd
 1182 		// Attempt 1 : turn (thread&)X into (thread$&)X.__thrd
 1183 1183 		// Clone is purely for memory management
 1184 1184 		std::unique_ptr<const ast::Expr> tech1 { new ast::UntypedMemberExpr(loc, new ast::NameExpr(loc, castExpr->concrete_target.field), castExpr->arg) };
 … …
 1191 1191 	} catch(SemanticErrorException & ) {}
 1192 1192 
 1193 	// Fallback : turn (thread&)X into ($thread&)*get_thread(X)
 1193 	// Fallback : turn (thread&)X into (thread$&)*get_thread(X)
 1194 1194 	std::unique_ptr<const ast::Expr> fallback { ast::UntypedExpr::createDeref(loc, new ast::UntypedExpr(loc, new ast::NameExpr(loc, castExpr->concrete_target.getter), { castExpr->arg })) };
 1195 1195 	// don't prune here, since it's guaranteed all alternatives will have the same type
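Both resolver passes implement the same user-visible conversion. A minimal sketch of what it permits in CFA source (MyThread is invented for the example; the spinaphore test below uses the same cast):

    thread MyThread {};
    void main( MyThread & ) {}

    int main() {
        MyThread t;
        // Resolved first as (thread$ &)t.__thrd, falling back to
        // (thread$ &)*get_thread(t) when the field is not accessible:
        thread$ & handle = (thread &)t;
    }
-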
tests/concurrent/semaphore.cfa
rcf444b6 r6ff08d8
 22 22 
 23 23 void main(Blocker & this) {
 24 	$thread * me = active_thread();
 24 	thread$ * me = active_thread();
 25 25 	this.sum = 0;
 26 26 	for(num_blocks) {
 … …
 45 45 	or else {}
 46 46 
 47 	$thread * t = V(ben, false);
 47 	thread$ * t = V(ben, false);
 48 48 	if(t) {
 49 49 		this.sum += (unsigned)t;
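The second argument to V selects whether the semaphore wakes the waiter itself; passing false hands the blocked thread back to the caller. A sketch of the resulting split wakeup, assuming the V(sem, bool) overload this test exercises:

    thread$ * t = V(ben, false);   // dequeue a waiter without unparking it
    if( t ) {
        // account for the thread first (the test sums the pointer) ...
        unpark( t );               // ... then release it explicitly
    }
-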
tests/concurrent/signal/block.cfa
rcf444b6 r6ff08d8
 33 33 
 34 34 monitor global_data_t {
 35 	$thread * last_thread;
 36 	$thread * last_signaller;
 35 	thread$ * last_thread;
 36 	thread$ * last_signaller;
 37 37 };
 38 38 
 … …
 82 82 	if( !is_empty( cond ) ) {
 83 83 
 84 		$thread * next = ( $thread * ) front( cond );
 84 		thread$ * next = ( thread$ * ) front( cond );
 85 85 
 86 86 		if( ! signal_block( cond ) ) {
-
tests/concurrent/spinaphore.cfa
rcf444b6 r6ff08d8
 21 21 void main(Blocker & this);
 22 22 
 23 Blocker * from_thread( $thread * t ) {
 23 Blocker * from_thread( thread$ * t ) {
 24 24 	Blocker & nullb = *(Blocker*)0p;
 25 	$thread & nullt = (thread&)nullb;
 25 	thread$ & nullt = (thread&)nullb;
 26 26 	uintptr_t offset = (uintptr_t)&nullt;
 27 27 	uintptr_t address = ((uintptr_t)t) - offset;
 … …
 30 30 
 31 31 void main(Blocker & this) {
 32 	$thread * me = active_thread();
 32 	thread$ * me = active_thread();
 33 33 	Blocker * me1 = &this;
 34 34 	Blocker * me2 = from_thread(me);
 … …
 51 51 	unsigned me = (unsigned)(uintptr_t)&this;
 52 52 	for(num_unblocks) {
 53 		$thread * t = V(sem, false);
 53 		thread$ * t = V(sem, false);
 54 54 		Blocker * b = from_thread(t);
 55 55 		b->sum += me;
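from_thread is the container_of idiom: it measures where the embedded thread descriptor sits inside the enclosing object (by casting a fictitious object at address zero) and steps back by that offset. The same pattern for an invented wrapper type, as a sketch:

    struct Wrapper {
        int tag;
        thread$ inner;                               // embedded field handed out as a pointer
    };

    Wrapper * from_inner( thread$ * p ) {
        Wrapper & nullw = *(Wrapper*)0p;             // fictitious object at address 0
        uintptr_t offset = (uintptr_t)&nullw.inner;  // offset of inner within Wrapper
        return (Wrapper*)((uintptr_t)p - offset);    // back up to the enclosing object
    }
-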
tests/unified_locking/mcs.cfa
rcf444b6 r6ff08d8
 7 7 struct MutexObj {
 8 8 	mcs_lock l;
 9 	$thread * id;
 9 	thread$ * id;
 10 10 	size_t sum;
 11 11 };
 … …
 21 21 
 22 22 unsigned cs() {
 23 	$thread * me = active_thread();
 23 	thread$ * me = active_thread();
 24 24 	unsigned value = (unsigned)me;
 25 25 	mcs_node n;
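The mcs_node on the stack is the telltale of the MCS discipline: every acquirer supplies its own queue node and spins only on it. A rough usage sketch, assuming the lock/unlock pair in libcfa's locks.hfa takes the node explicitly:

    mcs_lock l;

    void critical() {
        mcs_node n;      // per-acquisition queue node, lives on this thread's stack
        lock( l, n );    // append n to the queue, spin locally until granted
        // ... critical section ...
        unlock( l, n );  // pass ownership to n's successor, if any
    }
-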
tests/unified_locking/thread_test.cfa
rcf444b6 r6ff08d8
 69 69 			break;
 70 70 		default:
 71 			break;
 71 			break;
 72 72 	}
 73 73 	processor p[threadCount];
-
tools/gdb/utils-gdb.py
rcf444b6 r6ff08d8
 53 53 	return CfaTypes(cluster_ptr = gdb.lookup_type('struct cluster').pointer(),
 54 54 		processor_ptr = gdb.lookup_type('struct processor').pointer(),
 55 		thread_ptr = gdb.lookup_type('struct $thread').pointer(),
 55 		thread_ptr = gdb.lookup_type('struct thread$').pointer(),
 56 56 		int_ptr = gdb.lookup_type('int').pointer(),
 57 57 		thread_state = gdb.lookup_type('enum __Coroutine_State'),
 … …
 163 163 
 164 164 def thread_for_pthread(pthrd):
 165 	return tls_for_pthread(pthrd)['_X11this_threadVPS7$thread_1']
 165 	return tls_for_pthread(pthrd)['_X11this_threadVPS7thread$_1']
 166 166 
 167 167 def thread_for_proc(proc):
 168 	return tls_for_proc(proc)['_X11this_threadVPS7$thread_1']
 168 	return tls_for_proc(proc)['_X11this_threadVPS7thread$_1']
 169 169 
 170 170 
 … …
 216 216 
 217 217 	cfa_t = get_cfa_types()
 218 	root = cluster['_X7threadsS8__dllist_S7$thread__1']['_X4headPY15__TYPE_generic__1'].cast(cfa_t.thread_ptr)
 218 	root = cluster['_X7threadsS8__dllist_S7thread$__1']['_X4headPY15__TYPE_generic__1'].cast(cfa_t.thread_ptr)
 219 219 
 220 220 	if root == 0x0 or root.address == 0x0:
 … …
 313 313 	))
 314 314 	tls = tls_for_proc( processor )
 315 	thrd = tls['_X11this_threadVPS7$thread_1']
 315 	thrd = tls['_X11this_threadVPS7thread$_1']
 316 316 	if thrd != 0x0:
 317 317 		tname = '{} {}'.format(thrd['self_cor']['name'].string(), str(thrd))
-
tools/perf/process_stat_array.py
rcf444b6 r6ff08d8
 31 31         with open(os.path.join(root, filename), 'r') as file:
 32 32             for line in file:
 33                 data = [int(x.strip()) for x in line.split(',')]
 33                 # data = [int(x.strip()) for x in line.split(',')]
 34                 data = [int(line.strip())]
 34 35                 data = [me, *data]
 35 36                 merged.append(data)
 … …
 53 54 
 54 56 # merge the data
 55 for (me, time, value) in merged:
 56 # for (me, time, value) in merged:
 57 for (me, value) in merged:
 56 58     # check how much this changes
 57 59     old = counters[me]
 … …
 61 63     # add change to the current
 62 64     curr = curr + change
 63     single.append( (time, curr) )
 65     single.append( value )
 64 66 
 65 67 pass
 66 68 
 69 print(single)
 70 
 71 # single = sorted(single)[:len(single)-100]
 72 # ms = max(single)
 73 # single = [float(x) / 2500.0 for x in single]
 74 
 67 75 #print
 68 for t, v in single:
 69     print([t, v])
 76 # for t, v in single:
 77 #     print([t, v])
 78 # print(len(single))
 79 # print(max(single))
 80 # print(min(single))
 81 
 82 # bins = [0, 5.37751600e+04, 1.06903320e+05, 1.60031480e+05, 2.13159640e+05, 2.66287800e+05, 3.19415960e+05, 3.72544120e+05, 4.25672280e+05, 4.78800440e+05, 5.31928600e+05, 5.85056760e+05, 6.38184920e+05, 6.91313080e+05, 7.44441240e+05, 7.97569400e+05, 8.50697560e+05, 9.03825720e+05, 9.56953880e+05, 1.01008204e+06, 1.06321020e+06, 1.11633836e+06, 1.16946652e+06, 1.22259468e+06, 1.27572284e+06, 1.32885100e+06, 1.38197916e+06, 1.43510732e+06, 1.48823548e+06, 1.54136364e+06, 1.59449180e+06, 1.64761996e+06, 1.70074812e+06, 1.75387628e+06, 1.80700444e+06, 1.86013260e+06, 1.91326076e+06, 1.96638892e+06, 2.01951708e+06, 2.07264524e+06, 2.12577340e+06, 2.17890156e+06, 2.23202972e+06, 2.28515788e+06, 2.33828604e+06, 2.39141420e+06, 2.44454236e+06, 2.49767052e+06, 2.55079868e+06, 2.60392684e+06, 3.0e+06]
 83 # # bins = [float(x) / 2500.0 for x in bins]
 84 # # print([round(b, 2) for b in bins])
 85 
 86 # import numpy
 87 # # hist1, _ = numpy.histogram(single, density=True, bins=50)
 88 # hist2, _ = numpy.histogram(single, density=True, bins=bins)
 89 # # print(hist1)
 90 # print([1000.0 * h for h in hist2])
 91 # # for v in single:
 92 # #     print([v])
-
tools/vscode/uwaterloo.cforall-0.1.0/syntaxes/cfa.tmLanguage.json
rcf444b6 r6ff08d8
 206 206 	"patterns": [
 207 207 		{ "match": "(\\b|^|\\s)(void|bool|char|short|int|long|signed|unsigned|float|double)(\\b|$|\\s)", "name": "storage.type.built-in.primitive.cfa"},
 208 		{ "match": "(\\b|^|\\s)(zero_t|one_t|size_t|ssize_t|intptr_t|uintptr_t|\\$thread|\\$coroutine|\\$generator|\\$monitor)(\\b|$|\\s)", "name": "storage.type.built-in.cfa"},
 208 		{ "match": "(\\b|^|\\s)(zero_t|one_t|size_t|ssize_t|intptr_t|uintptr_t|thread\\$|coroutine\\$|generator\\$|monitor\\$)(\\b|$|\\s)", "name": "storage.type.built-in.cfa"},
 209 209 		{ "match": "(\\b|^|\\s)(extern|static|inline|volatile|const|thread_local)(\\b|$|\\s)", "name": "storage.modifier.cfa"}
 210 210 	]
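With the updated pattern the editor now scopes the trailing-dollar spellings as built-in types; for instance (invented declarations, shown only for the highlighter):

    thread$    * t;   // matched by the new storage.type.built-in.cfa rule
    coroutine$ * c;
    generator$ * g;
    monitor$   * m;
    size_t       n;   // still matched, as before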
Note: See TracChangeset for help on using the changeset viewer.