Source file
src/runtime/map_fast32.go
package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
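
// For reference, lookups on maps whose key is a plain 4-byte memory type
// (for example map[uint32]V or map[int32]V) are lowered by the compiler to
// the two accessors above; roughly:
//
//	m := map[uint32]string{1: "one"}
//	v := m[1]     // one-result lookup -> mapaccess1_fast32
//	v, ok := m[2] // two-result lookup -> mapaccess2_fast32
//	_, _ = v, ok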

func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*uint32)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
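
// As with the read paths, an assignment such as m[k] = v on a map with a
// 4-byte memory-hashed key is compiled into a call to mapassign_fast32,
// which returns a pointer to the element slot; the compiled code then
// stores v through that pointer. A rough sketch:
//
//	m := make(map[uint32]int)
//	m[7] = 42 // p := mapassign_fast32(t, h, 7); *p = 42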

func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			// This can only happen if pointers are 32 bit
			// wide as 64 bit pointers do not fit into a 32 bit key.
			if goarch.PtrSize == 4 && t.key.ptrdata != 0 {
				// The key must be a pointer as we checked pointers are
				// 32 bits wide and the key is 32 bits wide also.
				*(*unsafe.Pointer)(k) = nil
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast32(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast32(t, h, h.nevacuate)
	}
}

func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*4)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*4)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*4)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*4)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if goarch.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
					// Write with a write barrier.
					*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
				} else {
					*(*uint32)(dst.k) = *(*uint32)(k)
				}

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 4)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}

		// Clear the old bucket's keys/elems (and its overflow pointer) to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
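
For reference, here is a minimal standalone sketch (a separate example program, not part of this file) that exercises these fast paths through ordinary map operations on uint32 keys. Growth is incremental: hashGrow only allocates the new bucket array, and later writes evacuate old buckets via growWork_fast32/evacuate_fast32.

	package main

	import "fmt"

	func main() {
		m := make(map[uint32]uint32)

		// Grow the map well past one bucket so assignments trigger hashGrow
		// and incremental evacuation of old buckets on later writes.
		for i := uint32(0); i < 1000; i++ {
			m[i] = i * 2 // lowered to mapassign_fast32
		}

		v, ok := m[123] // lowered to mapaccess2_fast32
		fmt.Println(v, ok)

		delete(m, 123) // lowered to mapdelete_fast32
		_, ok = m[123]
		fmt.Println(ok) // false
	}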