Source file
src/runtime/map_fast64.go
package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
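
// Editorial sketch, not part of the original file: for a map whose key is an
// 8-byte non-pointer type (for example map[uint64]T or map[int64]T), the
// compiler typically lowers a plain read to a call of mapaccess1_fast64,
// roughly as below. T and the mapType/h arguments stand in for
// compiler-supplied values and are hypothetical names here.
//
//	e := mapaccess1_fast64(mapType, h, k) // never nil; points at zeroVal if k is absent
//	v := *(*T)(e)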

func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
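
// Editorial sketch, not part of the original file: the comma-ok form of a
// read on the same map types goes through mapaccess2_fast64, which also
// reports whether the key was present (names below are hypothetical):
//
//	e, ok := mapaccess2_fast64(mapType, h, k)
//	v := *(*T)(e) // the zero value of T when ok is false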

func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
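
// Editorial sketch, not part of the original file: an assignment m[k] = v on
// such a map is split by the compiler into a call that returns the element
// slot and a store through the returned pointer (hypothetical names):
//
//	p := mapassign_fast64(mapType, h, k) // finds or creates the slot for k
//	*(*T)(p) = v                         // store emitted by the caller's generated code
//
// Note that mapassign_fast64 only writes the key; the element value is
// stored by the caller.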

func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
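
// Editorial note, not part of the original file: mapassign_fast64ptr is the
// variant used when the 8-byte key is itself a pointer (for example map[*T]V
// on 64-bit platforms). Storing the key through *(*unsafe.Pointer) rather
// than *(*uint64) lets the key store get a write barrier, which a raw uint64
// store would not. A hypothetical lowering:
//
//	p := mapassign_fast64ptr(mapType, h, unsafe.Pointer(k))
//	*(*V)(p) = v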

func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.ptrdata != 0 {
				if goarch.PtrSize == 8 {
					*(*unsafe.Pointer)(k) = nil
				} else {
					// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
					// Just call memclrHasPointers instead of trying to handle all cases here.
					memclrHasPointers(k, 8)
				}
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
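
// Editorial sketch, not part of the original file: a delete(m, k) statement
// on a map with an 8-byte key compiles to a direct call of this function:
//
//	mapdelete_fast64(mapType, h, k)
//
// Deleting a key that is not present leaves the map unchanged.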

func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}
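
// Editorial note, not part of the original file: growth is incremental. Each
// assign or delete that runs while h.growing() evacuates at most two old
// buckets: the one the current key hashes to, plus the next not-yet-evacuated
// bucket (h.nevacuate). With noldbuckets old buckets in total, the grow
// therefore finishes after roughly that many writes, keeping any single map
// operation cheap.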

func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.ptrdata != 0 && writeBarrier.enabled {
					if goarch.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
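
// Editorial note, not part of the original file: when the table doubles, each
// key in old bucket i moves either to new bucket i (the "x" destination) or
// to new bucket i+newbit (the "y" destination), where newbit is the old
// bucket count. The choice is made by the freshly exposed hash bit tested
// above. For example, growing from 4 to 8 buckets (newbit == 4), a key in old
// bucket 1 whose hash has bit 2 set ends up in new bucket 5; otherwise it
// stays in bucket 1.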