source: benchmark/readyQ/locality.go@ 3d19ae6

ADT arm-eh ast-experimental enum forall-pointer-decay jacob/cs343-translation new-ast-unique-expr pthread-emulation qualifiedEnum
Last change on this file since 3d19ae6 was 3d19ae6, checked in by Thierry Delisle <tdelisle@…>, 5 years ago

Added padding to existing locality benchmarks

  • Property mode set to 100644
File size: 8.0 KB
Line 
1package main
2
3import (
4 "context"
5 "flag"
6 "fmt"
7 "math/rand"
8 "os"
9 "syscall"
10 "sync/atomic"
11 "time"
12 "unsafe"
13 "golang.org/x/sync/semaphore"
14 "golang.org/x/text/language"
15 "golang.org/x/text/message"
16)
17
// ==================================================
// MyData is the per-goroutine working set: an array of words the owner
// repeatedly writes to, plus bookkeeping to detect when the data is
// touched from a different kernel thread (a "data migration").
type MyData struct {
	_p1 [16]uint64 // padding — keeps neighbouring structs off these cache lines
	ttid int       // kernel thread id that last worked on this data
	id int         // logical owner id (goroutine index), kept for debugging
	data [] uint64 // the array of 64-bit words being touched
	_p2 [16]uint64 // padding
}
26
27func NewData(id int, size uint64) (*MyData) {
28 var data [] uint64
29 data = make([]uint64, size)
30 for i := uint64(0); i < size; i++ {
31 data[i] = 0
32 }
33 return &MyData{[16]uint64{0}, syscall.Gettid(), id, data,[16]uint64{0}}
34}
35
36func (this * MyData) moved( ttid int ) (uint64) {
37 if this.ttid == ttid {
38 return 0
39 }
40 this.ttid = ttid
41 return 1
42}
43
44func (this * MyData) access( idx uint64 ) {
45 this.data[idx % uint64(len(this.data))] += 1
46}
47
// ==================================================
// MyCtx is the goroutine-local context: the semaphore the goroutine
// parks on inside put(), a slot through which peers hand over data,
// and migration bookkeeping.
type MyCtx struct {
	_p1 [16]uint64 // padding — avoids false sharing with neighbours
	s * semaphore.Weighted // binary semaphore the owner blocks on (permit drained in NewCtx)
	d unsafe.Pointer // *MyData slot; peers overwrite it via atomic.StorePointer in put()
	c context.Context // context passed to Acquire (always context.Background here)
	ttid int // kernel thread id the goroutine last observed itself on
	id int // goroutine index, kept for debugging
	_p2 [16]uint64 // padding
}
58
59func NewCtx( data * MyData, id int ) (MyCtx) {
60 r := MyCtx{[16]uint64{0},semaphore.NewWeighted(1), unsafe.Pointer(data), context.Background(), syscall.Gettid(), id,[16]uint64{0}}
61 r.s.Acquire(context.Background(), 1)
62 return r
63}
64
65func (this * MyCtx) moved( ttid int ) (uint64) {
66 if this.ttid == ttid {
67 return 0
68 }
69 this.ttid = ttid
70 return 1
71}
72
// ==================================================
// Atomic object where a single thread can wait
// May exchange data in the process (see put)
type Spot struct {
	ptr uintptr // atomic seat state: 0 = empty, 1 = closed, otherwise the *MyCtx of the parked goroutine (original comment said "use fo MES" — presumably a typo for MCS-style handshake; confirm)
	id int // id for debugging
	_p [16]uint64 // padding — keeps each Spot on its own cache lines
}
81
82// Main handshake of the code
83// Single seat, first thread arriving waits
84// Next threads unblocks current one and blocks in its place
85// if share == true, exchange data in the process
86func (this * Spot) put( ctx * MyCtx, data * MyData, share bool) (* MyData, bool) {
87 new := uintptr(unsafe.Pointer(ctx))
88 // old_d := ctx.d
89
90 // Attempt to CAS our context into the seat
91 var raw uintptr
92 for true {
93 raw = this.ptr
94 if raw == uintptr(1) { // Seat is closed, return
95 return nil, true
96 }
97 if atomic.CompareAndSwapUintptr(&this.ptr, raw, new) {
98 break // We got the seat
99 }
100 }
101
102 // If we aren't the fist in, wake someone
103 if raw != uintptr(0) {
104 var val *MyCtx
105 val = (*MyCtx)(unsafe.Pointer(raw))
106
107 // If we are sharing, give them our data
108 if share {
109 // fmt.Printf("[%d] - %d update %d: %p -> %p\n", this.id, ctx.id, val.id, val.d, data)
110 atomic.StorePointer(&val.d, unsafe.Pointer(data))
111 }
112
113 // Wake them up
114 // fmt.Printf("[%d] - %d release %d\n", this.id, ctx.id, val.id)
115 val.s.Release(1)
116 }
117
118 // fmt.Printf("[%d] - %d enter\n", this.id, ctx.id)
119
120 // Block once on the seat
121 ctx.s.Acquire(ctx.c, 1)
122
123 // Someone woke us up, get the new data
124 ret := (* MyData)(atomic.LoadPointer(&ctx.d))
125 // fmt.Printf("[%d] - %d leave: %p -> %p\n", this.id, ctx.id, ret, old_d)
126
127 return ret, false
128}
129
130// Shutdown the spot
131// Wake current thread and mark seat as closed
132func (this * Spot) release() {
133 val := (*MyCtx)(unsafe.Pointer(atomic.SwapUintptr(&this.ptr, uintptr(1))))
134 if val == nil {
135 return
136 }
137
138 // Someone was there, release them
139 val.s.Release(1)
140}
141
// ==================================================
// Struct for result, Go doesn't support passing tuple in channels
type Result struct {
	count uint64 // number of main-loop iterations completed
	gmigs uint64 // goroutine migrations observed (ctx.moved)
	dmigs uint64 // data migrations observed (data.moved)
}
149
150func NewResult() (Result) {
151 return Result{0, 0, 0}
152}
153
// ==================================================
// Random number generator, Go's native one is too slow and global.
// Classic xorshift64 (shifts 13, 7, 17): advances the state in place
// and returns the new value. A zero state stays zero forever.
func __xorshift64( state * uint64 ) (uint64) {
	s := *state
	s ^= s << 13
	s ^= s >> 7
	s ^= s << 17
	*state = s
	return s
}
164
165// ==================================================
166// Do some work by accessing 'cnt' cells in the array
167func work(data * MyData, cnt uint64, state * uint64) {
168 for i := uint64(0); i < cnt; i++ {
169 data.access(__xorshift64(state))
170 }
171}
172
// Main body of the threads
//
// Each goroutine: works on its current data block, then parks on a
// random Spot, possibly handing its data to the goroutine that takes
// its place. Results are reported on the 'result' channel at the end.
// NOTE(review): clock_mode, stop, stop_count and threads_left are
// package globals from the shared bench harness, not visible in this
// file chunk.
func local(result chan Result, start chan struct{}, size uint64, cnt uint64, channels [] Spot, share bool, id int) {
	// Initialize some data
	state := rand.Uint64() // RNG state
	data := NewData(id, size) // Starting piece of data
	ctx := NewCtx(data, id) // Goroutine local context

	// Prepare results
	r := NewResult()

	// Wait for start (barrier: main close()s the channel to release everyone)
	<- start

	// Main loop
	for true {
		// Touch our current data, write to invalidate remote cache lines
		work(data, cnt, &state)

		// Wait on a random spot; we may come back with a different data block
		i := __xorshift64(&state) % uint64(len(channels))
		var closed bool
		data, closed = channels[i].put(&ctx, data, share)

		// Check if the experiment is over
		if closed { break } // yes, spot was closed
		if clock_mode && atomic.LoadInt32(&stop) == 1 { break } // yes, time's up
		if !clock_mode && r.count >= stop_count { break } // yes, iterations reached

		// Check everything is consistent
		if uint64(len(data.data)) != size { panic("Data has weird size") }

		// write down progress and check migrations
		ttid := syscall.Gettid()
		r.count += 1
		r.gmigs += ctx .moved(ttid)
		r.dmigs += data.moved(ttid)
	}

	// Mark goroutine as done
	atomic.AddInt64(&threads_left, -1);

	// return result
	result <- r
}
217
218// ==================================================
219// Program main
220func main() {
221 // Benchmark specific command line arguments
222 work_sizeOpt := flag.Uint64("w", 2 , "Size of the array for each threads, in words (64bit)")
223 countOpt := flag.Uint64("c", 2 , "Number of words to touch when working (random pick, cells can be picked more than once)")
224 shareOpt := flag.Bool ("s", false, "Pass the work data to the next thread when blocking")
225
226 // General benchmark initialization and deinitialization
227 defer bench_init()()
228
229 // Eval command line arguments
230 size := *work_sizeOpt
231 cnt := *countOpt
232 share := *shareOpt
233
234 // Check params
235 if ! (nthreads > nprocs) {
236 fmt.Fprintf(os.Stderr, "Must have more threads than procs\n")
237 os.Exit(1)
238 }
239
240 // Make global data
241 barrierStart := make(chan struct{}) // Barrier used at the start
242 threads_left = int64(nprocs) // Counter for active threads (not 'nthreads' because at all times 'nthreads - nprocs' are blocked)
243 result := make(chan Result) // Channel for results
244 channels := make([]Spot, nthreads - nprocs) // Number of spots
245 for i := range channels {
246 channels[i] = Spot{uintptr(0), i,[16]uint64{0}} // init spots
247 }
248
249 // start the goroutines
250 for i := 0; i < nthreads; i++ {
251 go local(result, barrierStart, size, cnt, channels, share, i)
252 }
253 fmt.Printf("Starting\n");
254
255 atomic.StoreInt32(&stop, 0)
256 start := time.Now()
257 close(barrierStart) // release barrier
258
259 wait(start, true); // general benchmark wait
260
261 atomic.StoreInt32(&stop, 1)
262 end := time.Now()
263 delta := end.Sub(start)
264
265 fmt.Printf("\nDone\n")
266
267 // release all the blocked threads
268 for i := range channels {
269 channels[i].release()
270 }
271
272 // Join and accumulate results
273 results := NewResult()
274 for i := 0; i < nthreads; i++ {
275 r := <- result
276 results.count += r.count
277 results.gmigs += r.gmigs
278 results.dmigs += r.dmigs
279 }
280
281 // Print with nice 's, i.e. 1'000'000 instead of 1000000
282 p := message.NewPrinter(language.English)
283 p.Printf("Duration (ms) : %f\n", delta.Seconds());
284 p.Printf("Number of processors : %d\n", nprocs);
285 p.Printf("Number of threads : %d\n", nthreads);
286 p.Printf("Work size (64bit words): %d\n", size);
287 p.Printf("Total Operations(ops) : %15d\n", results.count)
288 p.Printf("Total G Migrations : %15d\n", results.gmigs)
289 p.Printf("Total D Migrations : %15d\n", results.dmigs)
290 p.Printf("Ops per second : %18.2f\n", float64(results.count) / delta.Seconds())
291 p.Printf("ns per ops : %18.2f\n", float64(delta.Nanoseconds()) / float64(results.count))
292 p.Printf("Ops per threads : %15d\n", results.count / uint64(nthreads))
293 p.Printf("Ops per procs : %15d\n", results.count / uint64(nprocs))
294 p.Printf("Ops/sec/procs : %18.2f\n", (float64(results.count) / float64(nprocs)) / delta.Seconds())
295 p.Printf("ns per ops/procs : %18.2f\n", float64(delta.Nanoseconds()) / (float64(results.count) / float64(nprocs)))
296}
Note: See TracBrowser for help on using the repository browser.