1 // Copyright 2016 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
13 "golang.org/x/sync/syncmap"
// setup optionally pre-populates (or skips) the map before timing begins.
17 setup func(*testing.B, mapInterface)
// perG is the per-goroutine benchmark body run under b.RunParallel; i is a
// starting counter unique to the goroutine (see benchMap), m is the map under test.
18 perG func(b *testing.B, pb *testing.PB, i int, m mapInterface)
// benchMap runs bench against every map implementation under test, as a
// sub-benchmark named after the implementation's concrete type.
21 func benchMap(b *testing.B, bench bench) {
22 for _, m := range [...]mapInterface{&DeepCopyMap{}, &RWMutexMap{}, &syncmap.Map{}} {
23 b.Run(fmt.Sprintf("%T", m), func(b *testing.B) {
// Allocate a fresh, zero-valued map of the same concrete type for each
// run so earlier sub-benchmarks cannot pollute later ones.
24 m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface)
25 if bench.setup != nil {
32 b.RunParallel(func(pb *testing.PB) {
// Hand each parallel goroutine a distinct zero-based id via an atomic counter.
33 id := int(atomic.AddInt64(&i, 1) - 1)
// Offset by id*b.N so goroutines operate on disjoint key ranges where perG
// uses its counter as a key.
34 bench.perG(b, pb, id*b.N, m)
// BenchmarkLoadMostlyHits measures Load throughput when almost every lookup
// (1023 of every 1024) finds an existing key.
40 func BenchmarkLoadMostlyHits(b *testing.B) {
41 const hits, misses = 1023, 1
44 setup: func(_ *testing.B, m mapInterface) {
// Store the keys that will be hit during the timed loop.
45 for i := 0; i < hits; i++ {
48 // Prime the map to get it into a steady state.
49 for i := 0; i < hits*2; i++ {
54 perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
55 for ; pb.Next(); i++ {
// Keys cycle through [0, hits+misses); only `misses` of them are absent.
56 m.Load(i % (hits + misses))
// BenchmarkLoadMostlyMisses is the mirror of BenchmarkLoadMostlyHits: almost
// every lookup (1023 of every 1024) targets a key that is not present.
62 func BenchmarkLoadMostlyMisses(b *testing.B) {
63 const hits, misses = 1, 1023
66 setup: func(_ *testing.B, m mapInterface) {
// Only `hits` keys are stored, so most lookups in perG will miss.
67 for i := 0; i < hits; i++ {
70 // Prime the map to get it into a steady state.
71 for i := 0; i < hits*2; i++ {
76 perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
77 for ; pb.Next(); i++ {
78 m.Load(i % (hits + misses))
// BenchmarkLoadOrStoreBalanced measures LoadOrStore with an even mix: half the
// calls hit pre-stored keys, half store fresh unique keys.
84 func BenchmarkLoadOrStoreBalanced(b *testing.B) {
85 const hits, misses = 128, 128
88 setup: func(b *testing.B, m mapInterface) {
// DeepCopyMap copies the whole map on every store, so a store-heavy
// benchmark would take quadratic time; skip it.
89 if _, ok := m.(*DeepCopyMap); ok {
90 b.Skip("DeepCopyMap has quadratic running time.")
92 for i := 0; i < hits; i++ {
95 // Prime the map to get it into a steady state.
96 for i := 0; i < hits*2; i++ {
101 perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
102 for ; pb.Next(); i++ {
103 j := i % (hits + misses)
// This branch targets a pre-stored key, so LoadOrStore must report it
// as already present (loaded == true, i.e. ok here).
105 if _, ok := m.LoadOrStore(j, i); !ok {
106 b.Fatalf("unexpected miss for %v", j)
// This branch stores under a fresh key i, so it must NOT find an
// existing value.
109 if v, loaded := m.LoadOrStore(i, i); loaded {
110 b.Fatalf("failed to store %v: existing value %v", i, v)
// BenchmarkLoadOrStoreUnique measures LoadOrStore when every call uses a
// previously-unseen key, so every call performs a store.
118 func BenchmarkLoadOrStoreUnique(b *testing.B) {
120 setup: func(b *testing.B, m mapInterface) {
// All-stores is the worst case for DeepCopyMap's copy-on-write; skip it.
121 if _, ok := m.(*DeepCopyMap); ok {
122 b.Skip("DeepCopyMap has quadratic running time.")
126 perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
127 for ; pb.Next(); i++ {
// BenchmarkLoadOrStoreCollision measures LoadOrStore when all goroutines
// contend on the same key(s), so stores collide.
// NOTE(review): the stored key is on elided lines — confirm against the full file.
134 func BenchmarkLoadOrStoreCollision(b *testing.B) {
136 setup: func(_ *testing.B, m mapInterface) {
140 perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
141 for ; pb.Next(); i++ {
// BenchmarkRange measures a full Range traversal over a map pre-populated
// with mapSize entries; the callback does no work and never stops early.
148 func BenchmarkRange(b *testing.B) {
149 const mapSize = 1 << 10
152 setup: func(_ *testing.B, m mapInterface) {
153 for i := 0; i < mapSize; i++ {
158 perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
159 for ; pb.Next(); i++ {
// Returning true continues iteration, so every entry is visited.
160 m.Range(func(_, _ interface{}) bool { return true })
166 // BenchmarkAdversarialAlloc tests performance when we store a new value
167 // immediately whenever the map is promoted to clean and otherwise load a
168 // unique, missing key.
170 // This forces the Load calls to always acquire the map's mutex.
171 func BenchmarkAdversarialAlloc(b *testing.B) {
173 perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
// Per-goroutine counters; no setup hook, so the map starts empty.
174 var stores, loadsSinceStore int64
175 for ; pb.Next(); i++ {
// Interleave a store once the run of loads exceeds the store count,
// keeping the map perpetually dirty.
177 if loadsSinceStore++; loadsSinceStore > stores {
178 m.LoadOrStore(i, stores)
187 // BenchmarkAdversarialDelete tests performance when we periodically delete
188 // one key and add a different one in a large map.
190 // This forces the Load calls to always acquire the map's mutex and periodically
191 // makes a full copy of the map despite changing only one entry.
192 func BenchmarkAdversarialDelete(b *testing.B) {
193 const mapSize = 1 << 10
196 setup: func(_ *testing.B, m mapInterface) {
// Pre-populate so the map is large before the delete/add churn begins.
197 for i := 0; i < mapSize; i++ {
202 perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
203 for ; pb.Next(); i++ {
// NOTE(review): the Range body (periodic delete + re-add) is on elided
// lines — confirm against the full file.
207 m.Range(func(k, _ interface{}) bool {