// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"sync/atomic"
	"unsafe"
)

// Map is a concurrent map with amortized-constant-time loads, stores, and deletes.
// It is safe for multiple goroutines to call a Map's methods concurrently.
//
// The zero Map is valid and empty.
//
// A Map must not be copied after first use.
type Map struct {
	mu Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Value // readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[interface{}]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}

// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
	m       map[interface{}]*entry
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(interface{}))

// An entry is a slot in the map corresponding to a particular key.
type entry struct {
	// p points to the interface{} value stored for the entry.
	//
	// If p == nil, the entry has been deleted and m.dirty == nil.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer // *interface{}
}

func newEntry(i interface{}) *entry {
	return &entry{p: unsafe.Pointer(&i)}
}

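// entryState is an illustrative helper (a sketch for exposition only, not
// part of the original design): it spells out the three p states that the
// entry documentation above describes.
func entryState(e *entry) string {
	switch p := atomic.LoadPointer(&e.p); p {
	case nil:
		return "deleted and m.dirty == nil"
	case expunged:
		return "deleted; m.dirty != nil and the entry is missing from it"
	default:
		return "valid; present in m.read.m and, if m.dirty != nil, in m.dirty"
	}
}
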
// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}

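// exampleLoad is a minimal sketch of Load (illustrative only): the ok result
// is what distinguishes a stored nil value from an absent key.
func exampleLoad(m *Map) {
	if v, ok := m.Load("key"); ok {
		_ = v // key present; v may legitimately be a stored nil
	}
}
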
func (e *entry) load() (value interface{}, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return *(*interface{})(p), true
}

// Store sets the value for a key.
func (m *Map) Store(key, value interface{}) {
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok && e.tryStore(&value) {
		return
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		e.storeLocked(&value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
	}
	m.mu.Unlock()
}

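// exampleStore is a minimal sketch of Store's two paths (illustrative only):
// a brand-new key always takes the locked slow path, while a key whose entry
// is already visible in read.m is updated lock-free via tryStore.
func exampleStore(m *Map) {
	m.Store("counter", 1) // new key: locks mu and writes to the dirty map
	m.Store("counter", 2) // lock-free once the entry has been promoted into read.m
}
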
// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(i *interface{}) bool {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return false
	}
	for {
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return false
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(i *interface{}) {
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
	// Avoid locking if it's a clean hit.
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}

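// exampleLoadOrStore is a minimal sketch (illustrative only) of the
// get-or-insert idiom that LoadOrStore provides atomically.
func exampleLoadOrStore(m *Map) {
	actual, loaded := m.LoadOrStore("config", "default")
	if loaded {
		_ = actual // the key already existed; actual is the stored value
	} else {
		_ = actual // actual == "default": this call stored it
	}
}
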
// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false
	}
	if p != nil {
		return *(*interface{})(p), true, true
	}

	// Copy the interface after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false, false
		}
		if p != nil {
			return *(*interface{})(p), true, true
		}
	}
}

// Delete deletes the value for a key.
func (m *Map) Delete(key interface{}) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			delete(m.dirty, key)
		}
		m.mu.Unlock()
	}
	if ok {
		e.delete()
	}
}

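// exampleDelete is a minimal sketch of Delete (illustrative only): for a key
// present in read.m the delete is a lock-free swap of the entry's pointer to
// nil, while a key known only to the dirty map is removed under mu.
func exampleDelete(m *Map) {
	m.Store("session", 42)
	m.Delete("session")
	_, ok := m.Load("session")
	_ = ok // false: the mapping is gone
}
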
func (e *entry) delete() (hadValue bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return true
		}
	}
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map) Range(f func(key, value interface{}) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read, _ := m.read.Load().(readOnly)
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		if read.amended {
			read = readOnly{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}

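// exampleRange is a minimal sketch of Range (illustrative only): returning
// false from f stops the iteration, and f must not expect a consistent
// snapshot of the map's contents.
func exampleRange(m *Map) {
	n := 0
	m.Range(func(key, value interface{}) bool {
		n++
		return n < 10 // visit at most ten entries
	})
}
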
func (m *Map) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

func (m *Map) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnly)
	m.dirty = make(map[interface{}]*entry, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}