Source file: src/runtime/map_faststr.go
package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

// mapaccess1_faststr is the fast path for one-result reads of string-keyed
// maps. It returns a pointer to the element, or a pointer to the zero object
// if the key is absent; it never returns nil.
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// Short key: doing lots of full comparisons is cheap.
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// Long key: try to avoid full memory comparisons.
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
			}
			// Check first 4 bytes.
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// Check last 4 bytes.
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys match on both ends; resolve with the hash.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
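
// For reference: user code never calls mapaccess1_faststr directly. The
// compiler lowers a one-result read of a string-keyed map to a call of this
// function and loads through the returned pointer. A minimal illustrative
// sketch (not part of the original file):
//
//	m := map[string]int{"a": 1}
//	v := m["a"] // compiled to a mapaccess1_faststr call; v == 1
//	w := m["x"] // absent key: returns &zeroVal[0], so w == 0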

// mapaccess2_faststr is the fast path for two-result (comma-ok) reads of
// string-keyed maps.
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// Short key: doing lots of full comparisons is cheap.
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// Long key: try to avoid full memory comparisons.
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
			}
			// Check first 4 bytes.
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// Check last 4 bytes.
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys match on both ends; resolve with the hash.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
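
// The comma-ok form is lowered to mapaccess2_faststr; the boolean result
// reports whether the key was present. Illustrative sketch:
//
//	m := map[string]int{"a": 1}
//	v, ok := m["a"] // (pointer to elem, true); v == 1, ok == true
//	_, ok = m["x"]  // (&zeroVal[0], false); ok == false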

// mapassign_faststr is the fast path for writes to string-keyed maps. It
// returns a pointer to the element slot for key s; compiled code stores the
// value through that pointer.
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Already have a mapping for key. Update it.
			inserti = i
			insertb = b
			// Overwrite the stored key with ours so the old key's backing
			// bytes can be garbage collected (both compare equal).
			k.str = key.str
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again.
	}

	if insertb == nil {
		// The current bucket and all its overflow buckets are full; allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0
	}
	insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
	// Store the new key at the insert position.
	*((*stringStruct)(insertk)) = *key
	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
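
// mapassign_faststr only returns the element slot; the compiler emits the
// store of the right-hand side through that pointer. Roughly, an assignment
// like
//
//	m["a"] = 10
//
// becomes, in effect,
//
//	p := mapassign_faststr(t, h, "a") // t, h supplied by the compiler
//	*(*int)(p) = 10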

// mapdelete_faststr is the fast path for delete(m, key) on string-keyed maps.
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a run of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers
			// to repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
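
// delete(m, key) on a string-keyed map is lowered to mapdelete_faststr.
// Deleting an absent key is a no-op: the search loop simply finds no match.
// Illustrative sketch:
//
//	m := map[string]int{"a": 1}
//	delete(m, "a") // slot cleared, tophash becomes emptyOne/emptyRest, count drops to 0
//	delete(m, "a") // no matching key; nothing happens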

func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// Make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use.
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// Evacuate one more oldbucket to make progress on growing.
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}
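
// Worked example (assuming a grow from h.B == 3 to h.B == 4, i.e. 8 old
// buckets): a write that hashes to new bucket 13 calls
// growWork_faststr(t, h, 13), which first evacuates old bucket
// 13 & h.oldbucketmask() == 13&7 == 5 (the one this write depends on) and
// then old bucket h.nevacuate to advance the sweep. Each write during a grow
// thus retires at most two old buckets, and a new grow cannot start until
// the current one finishes (mapassign_faststr checks !h.growing() before
// calling hashGrow).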

// evacuate_faststr moves the contents of old bucket oldbucket into the new
// bucket array and marks its slots evacuated.
func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*2*goarch.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*2*goarch.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*2*goarch.PtrSize)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether
					// this key/elem goes to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow
				// pointer at the end of the bucket to protect against
				// pointing past the end of the bucket.
				dst.k = add(dst.k, 2*goarch.PtrSize)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
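
// Worked example of the X/Y split (assuming a grow from 8 to 16 buckets, so
// newbit == 8): every key in old bucket 3 has hash&7 == 3, so after the grow
// it can only land in new bucket 3 or new bucket 3+8 == 11. The single bit
// hash&newbit picks between the two destinations:
//
//	hash&8 == 0  ->  X: new bucket 3  (same index as oldbucket)
//	hash&8 != 0  ->  Y: new bucket 11 (oldbucket + newbit)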