src/runtime/export_test.go

package runtime

import (
	"internal/goarch"
	"internal/goos"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var Fastlog2 = fastlog2

var Atoi = atoi
var Atoi32 = atoi32

var Nanotime = nanotime
var NetpollBreak = netpollBreak
var Usleep = usleep

var PhysPageSize = physPageSize
var PhysHugePageSize = physHugePageSize

var NetpollGenericInit = netpollGenericInit

var Memmove = memmove
var MemclrNoHeapPointers = memclrNoHeapPointers

var LockPartialOrder = lockPartialOrder

type LockRank lockRank

func (l LockRank) String() string {
	return lockRank(l).String()
}

const PreemptMSupported = preemptMSupported

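// LFNode is the exported form of lfnode; LFStackPush and LFStackPop let
// tests exercise the runtime's lock-free stack (lfstack).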
type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}

func Netpoll(delta int64) {
	systemstack(func() {
		netpoll(delta)
	})
}

func GCMask(x any) (ret []byte) {
	systemstack(func() {
		ret = getgcmask(x)
	})
	return
}

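// RunSchedLocalQueueTest exercises the per-P local run queue
// (runqput/runqget) with increasing batch sizes and checks that every
// queued goroutine comes back out and the queue drains completely.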
func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

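// RunSchedLocalQueueStealTest checks that runqsteal moves roughly half of
// P1's local run queue to P2 and that no goroutine is lost or seen twice.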
func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

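// RunSchedLocalQueueEmptyTest checks that runqempty does not spuriously
// report an empty queue while another goroutine concurrently adds to and
// removes from it.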
func RunSchedLocalQueueEmptyTest(iters int) {
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}

var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash

func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

const HashLoad = hashLoad

func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string { return envs }
func SetEnvs(e []string) { envs = e }

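// BenchSetType calls heapBitsSetType n times on the system stack for the
// value x points to (or for a slice's backing array), for benchmarking the
// heap bitmap writer.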
func BenchSetType(n int, x any) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}

const PtrSize = goarch.PtrSize

var ForceGCPeriod = &forcegcperiod

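// SetTracebackEnv sets the traceback level and copies the cached setting
// into traceback_env, as if the level had come from the GOTRACEBACK
// environment variable.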
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

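// CountPagesInUse returns both the heap's pagesInUse counter and an
// independent count obtained by summing the pages of all in-use spans,
// so tests can check that the two agree.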
func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse.Load())

	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}

func Fastrand() uint32 { return fastrand() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }

type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(profBufReadMode(mode))
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}

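// ReadMetricsSlow reads memory statistics and runtime/metrics samples with
// the world stopped, so the two views are consistent with each other.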
func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
	stopTheWorld("ReadMetricsSlow")

	semacquire(&metricsSema)
	initMetrics()
	semrelease(&metricsSema)

	systemstack(func() {
		readmemstats_m(memStats)
	})

	readMetrics(samplesp, len, cap)

	startTheWorld()
}

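// ReadMemStatsSlow returns both the runtime-maintained MemStats and a
// MemStats recomputed from scratch by walking all spans, the accumulated
// heap statistics, and the page allocator, so tests can compare the two.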
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")

	systemstack(func() {
		getg().m.mallocing++

		readmemstats_m(&base)

		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		slow.HeapReleased = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		for _, s := range mheap_.allspans {
			if s.state.get() != mSpanInUse {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		var m heapStatsDelta
		memstats.heapStats.unsafeRead(&m)

		var smallFree uint64
		for i := 0; i < _NumSizeClasses; i++ {
			slow.Frees += uint64(m.smallFreeCount[i])
			bySize[i].Frees += uint64(m.smallFreeCount[i])
			bySize[i].Mallocs += uint64(m.smallFreeCount[i])
			smallFree += uint64(m.smallFreeCount[i]) * uint64(class_to_size[i])
		}
		slow.Frees += uint64(m.tinyAllocCount) + uint64(m.largeFreeCount)
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + uint64(m.largeFree) + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
			slow.HeapReleased += uint64(pg) * pageSize
		}
		for _, p := range allp {
			pg := sys.OnesCount64(p.pcache.scav)
			slow.HeapReleased += uint64(pg) * pageSize
		}

		getg().m.mallocing--
	})

	startTheWorld()
	return
}

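// BlockOnSystemStack switches to the system stack, prints "x" to mark that
// it got there, and then deadlocks by acquiring the same lock twice.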
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}

const RuntimeHmapSize = unsafe.Sizeof(hmap{})

func MapBucketsCount(m map[int]int) int {
	h := *(**hmap)(unsafe.Pointer(&m))
	return 1 << h.B
}

func MapBucketsPointerIsNil(m map[int]int) bool {
	h := *(**hmap)(unsafe.Pointer(&m))
	return h.buckets == nil
}

func LockOSCounts() (external, internal uint32) {
	g := getg()
	if g.m.lockedExt+g.m.lockedInt == 0 {
		if g.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if g.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return g.m.lockedExt, g.m.lockedInt
}

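// TracebackSystemstack captures a traceback after switching to the system
// stack i times, so tests can check that tracebacks jump back across
// systemstack calls (_TraceJumpStack).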
func TracebackSystemstack(stk []uintptr, i int) int {
	if i == 0 {
		pc, sp := getcallerpc(), getcallersp()
		return gentraceback(pc, sp, 0, getg(), 0, &stk[0], len(stk), nil, nil, _TraceJumpStack)
	}
	n := 0
	systemstack(func() {
		n = TracebackSystemstack(stk, i-1)
	})
	return n
}

func KeepNArenaHints(n int) {
	hint := mheap_.arenaHints
	for i := 1; i < n; i++ {
		hint = hint.next
		if hint == nil {
			return
		}
	}
	hint.next = nil
}

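// MapNextArenaHint reserves a page at the next arena growth hint,
// preventing the heap from growing there, and returns the range of
// addresses that is no longer viable as arena space.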
func MapNextArenaHint() (start, end uintptr) {
	hint := mheap_.arenaHints
	addr := hint.addr
	if hint.down {
		start, end = addr-heapArenaBytes, addr
		addr -= physPageSize
	} else {
		start, end = addr, addr+heapArenaBytes
	}
	sysReserve(unsafe.Pointer(addr), physPageSize)
	return
}

func GetNextArenaHint() uintptr {
	return mheap_.arenaHints.addr
}

type G = g

type Sudog = sudog

func Getg() *G {
	return getg()
}

func PanicForTesting(b []byte, i int) byte {
	return unexportedPanicForTesting(b, i)
}

func unexportedPanicForTesting(b []byte, i int) byte {
	return b[i]
}

func G0StackOverflow() {
	systemstack(func() {
		stackOverflow(nil)
	})
}

func stackOverflow(x *byte) {
	var buf [256]byte
	stackOverflow(&buf[0])
}

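// MapTombstoneCheck checks that the emptyRest tombstone markers in m's
// buckets are consistent: within each bucket chain, every emptyRest cell
// must come after all non-emptyRest cells.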
func MapTombstoneCheck(m map[int]int) {
	h := *(**hmap)(unsafe.Pointer(&m))
	i := any(m)
	t := *(**maptype)(unsafe.Pointer(&i))

	for x := 0; x < 1<<h.B; x++ {
		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.bucketsize)))
		n := 0
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if b.tophash[i] != emptyRest {
					n++
				}
			}
		}
		k := 0
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if k < n && b.tophash[i] == emptyRest {
					panic("early emptyRest")
				}
				if k >= n && b.tophash[i] != emptyRest {
					panic("late non-emptyRest")
				}
				if k == n-1 && b.tophash[i] == emptyOne {
					panic("last non-emptyRest entry is emptyOne")
				}
				k++
			}
		}
	}
}

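// RunGetgThreadSwitchTest checks that getg returns the same goroutine
// before and after a channel receive that may move the goroutine to a
// different OS thread.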
func RunGetgThreadSwitchTest() {
	ch := make(chan int)
	go func(ch chan int) {
		ch <- 5
		LockOSThread()
	}(ch)

	g1 := getg()

	<-ch

	g2 := getg()
	if g1 != g2 {
		panic("g1 != g2")
	}

	g3 := getg()
	if g1 != g3 {
		panic("g1 != g3")
	}
}

const (
	PageSize         = pageSize
	PallocChunkPages = pallocChunkPages
	PageAlloc64Bit   = pageAlloc64Bit
	PallocSumBytes   = pallocSumBytes
)

type PallocSum pallocSum

func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
func (m PallocSum) Start() uint { return pallocSum(m).start() }
func (m PallocSum) Max() uint { return pallocSum(m).max() }
func (m PallocSum) End() uint { return pallocSum(m).end() }

type PallocBits pallocBits

func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
	return (*pallocBits)(b).find(npages, searchIdx)
}
func (b *PallocBits) AllocRange(i, n uint) { (*pallocBits)(b).allocRange(i, n) }
func (b *PallocBits) Free(i, n uint) { (*pallocBits)(b).free(i, n) }
func (b *PallocBits) Summarize() PallocSum { return PallocSum((*pallocBits)(b).summarize()) }
func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }

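// SummarizeSlow is a slower but more obviously correct implementation of
// (*pallocBits).summarize, used to cross-check the optimized version.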
func SummarizeSlow(b *PallocBits) PallocSum {
	var start, max, end uint

	const N = uint(len(b)) * 64
	for start < N && (*pageBits)(b).get(start) == 0 {
		start++
	}
	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
		end++
	}
	run := uint(0)
	for i := uint(0); i < N; i++ {
		if (*pageBits)(b).get(i) == 0 {
			run++
		} else {
			run = 0
		}
		if run > max {
			max = run
		}
	}
	return PackPallocSum(start, max, end)
}

func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }

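// DiffPallocBits returns the bit ranges in which a and b differ.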
func DiffPallocBits(a, b *PallocBits) []BitRange {
	ba := (*pageBits)(a)
	bb := (*pageBits)(b)

	var d []BitRange
	base, size := uint(0), uint(0)
	for i := uint(0); i < uint(len(ba))*64; i++ {
		if ba.get(i) != bb.get(i) {
			if size == 0 {
				base = i
			}
			size++
		} else {
			if size != 0 {
				d = append(d, BitRange{base, size})
			}
			size = 0
		}
	}
	if size != 0 {
		d = append(d, BitRange{base, size})
	}
	return d
}

func StringifyPallocBits(b *PallocBits, r BitRange) string {
	str := ""
	for j := r.I; j < r.I+r.N; j++ {
		if (*pageBits)(b).get(j) != 0 {
			str += "1"
		} else {
			str += "0"
		}
	}
	return str
}

type PallocData pallocData

func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
}
func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
func (d *PallocData) ScavengedSetRange(i, n uint) {
	(*pallocData)(d).scavenged.setRange(i, n)
}
func (d *PallocData) PallocBits() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).pallocBits)
}
func (d *PallocData) Scavenged() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).scavenged)
}

func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }

type PageCache pageCache

const PageCachePages = pageCachePages

func NewPageCache(base uintptr, cache, scav uint64) PageCache {
	return PageCache(pageCache{base: base, cache: cache, scav: scav})
}
func (c *PageCache) Empty() bool { return (*pageCache)(c).empty() }
func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
func (c *PageCache) Scav() uint64 { return (*pageCache)(c).scav }
func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
	return (*pageCache)(c).alloc(npages)
}
func (c *PageCache) Flush(s *PageAlloc) {
	cp := (*pageCache)(c)
	sp := (*pageAlloc)(s)

	systemstack(func() {
		lock(sp.mheapLock)
		cp.flush(sp)
		unlock(sp.mheapLock)
	})
}

type ChunkIdx chunkIdx

type PageAlloc pageAlloc

func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
	pp := (*pageAlloc)(p)

	var addr, scav uintptr
	systemstack(func() {
		lock(pp.mheapLock)
		addr, scav = pp.alloc(npages)
		unlock(pp.mheapLock)
	})
	return addr, scav
}
func (p *PageAlloc) AllocToCache() PageCache {
	pp := (*pageAlloc)(p)

	var c PageCache
	systemstack(func() {
		lock(pp.mheapLock)
		c = PageCache(pp.allocToCache())
		unlock(pp.mheapLock)
	})
	return c
}
func (p *PageAlloc) Free(base, npages uintptr) {
	pp := (*pageAlloc)(p)

	systemstack(func() {
		lock(pp.mheapLock)
		pp.free(base, npages, true)
		unlock(pp.mheapLock)
	})
}
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}
func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
	pp := (*pageAlloc)(p)
	systemstack(func() {
		r = pp.scavenge(nbytes)
	})
	return
}
func (p *PageAlloc) InUse() []AddrRange {
	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
	for _, r := range p.inUse.ranges {
		ranges = append(ranges, AddrRange{r})
	}
	return ranges
}

func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
	ci := chunkIdx(i)
	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
}

type AddrRange struct {
	addrRange
}

func MakeAddrRange(base, limit uintptr) AddrRange {
	return AddrRange{makeAddrRange(base, limit)}
}

func (a AddrRange) Base() uintptr {
	return a.addrRange.base.addr()
}

func (a AddrRange) Limit() uintptr {
	return a.addrRange.limit.addr()
}

func (a AddrRange) Equals(b AddrRange) bool {
	return a == b
}

func (a AddrRange) Size() uintptr {
	return a.addrRange.size()
}

type AddrRanges struct {
	addrRanges
	mutable bool
}

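// NewAddrRanges returns an empty AddrRanges that is mutable, i.e. Add may
// be called on it.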
func NewAddrRanges() AddrRanges {
	r := addrRanges{}
	r.init(new(sysMemStat))
	return AddrRanges{r, true}
}

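// MakeAddrRanges returns an immutable AddrRanges built from the given
// ranges; calling Add on the result throws.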
func MakeAddrRanges(a ...AddrRange) AddrRanges {
	ranges := make([]addrRange, 0, len(a))
	total := uintptr(0)
	for _, r := range a {
		ranges = append(ranges, r.addrRange)
		total += r.Size()
	}
	return AddrRanges{addrRanges{
		ranges:     ranges,
		totalBytes: total,
		sysStat:    new(sysMemStat),
	}, false}
}

func (a *AddrRanges) Ranges() []AddrRange {
	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
	for _, r := range a.addrRanges.ranges {
		result = append(result, AddrRange{r})
	}
	return result
}

func (a *AddrRanges) FindSucc(base uintptr) int {
	return a.findSucc(base)
}

func (a *AddrRanges) Add(r AddrRange) {
	if !a.mutable {
		throw("attempt to mutate immutable AddrRanges")
	}
	a.add(r.addrRange)
}

func (a *AddrRanges) TotalBytes() uintptr {
	return a.addrRanges.totalBytes
}

type BitRange struct {
	I, N uint
}

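// NewPageAlloc creates a new page allocator for testing and populates it
// with the given chunks: each BitRange in chunks marks pages as allocated,
// and each BitRange in scav (if non-nil) marks pages as scavenged.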
func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
	p := new(pageAlloc)

	p.init(new(mutex), nil)
	lockInit(p.mheapLock, lockRankMheap)
	p.test = true

	for i, init := range chunks {
		addr := chunkBase(chunkIdx(i))

		systemstack(func() {
			lock(p.mheapLock)
			p.grow(addr, pallocChunkBytes)
			unlock(p.mheapLock)
		})

		chunk := p.chunkOf(chunkIndex(addr))

		chunk.scavenged.clearRange(0, pallocChunkPages)

		if scav != nil {
			if scvg, ok := scav[i]; ok {
				for _, s := range scvg {
					if s.N != 0 {
						chunk.scavenged.setRange(s.I, s.N)
					}
				}
			}
		}

		for _, s := range init {
			if s.N != 0 {
				chunk.allocRange(s.I, s.N)
			}
		}

		systemstack(func() {
			lock(p.mheapLock)
			p.update(addr, pallocChunkPages, false, false)
			unlock(p.mheapLock)
		})
	}

	systemstack(func() {
		lock(p.mheapLock)
		p.scavengeStartGen()
		unlock(p.mheapLock)
	})

	return (*PageAlloc)(p)
}

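// FreePageAlloc releases the system memory backing a PageAlloc created by
// NewPageAlloc: the summary levels and the chunk bitmaps.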
func FreePageAlloc(pp *PageAlloc) {
	p := (*pageAlloc)(pp)

	if pageAlloc64Bit != 0 {
		for l := 0; l < summaryLevels; l++ {
			sysFree(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes, nil)
		}
	} else {
		resSize := uintptr(0)
		for _, s := range p.summary {
			resSize += uintptr(cap(s)) * pallocSumBytes
		}
		sysFree(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize), nil)
	}

	for i := range p.chunks {
		if x := p.chunks[i]; x != nil {
			p.chunks[i] = nil
			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), nil)
		}
	}
}

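// BaseChunkIdx is a convenient base chunk index for page allocator tests.
// It is computed from a fixed address prefix (different for 64-bit and
// 32-bit platforms) and shifted by arenaBaseOffset on AIX.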
var BaseChunkIdx = func() ChunkIdx {
	var prefix uintptr
	if pageAlloc64Bit != 0 {
		prefix = 0xc000
	} else {
		prefix = 0x100
	}
	baseAddr := prefix * pallocChunkBytes
	if goos.IsAix != 0 {
		baseAddr += arenaBaseOffset
	}
	return ChunkIdx(chunkIndex(baseAddr))
}()

func PageBase(c ChunkIdx, pageIdx uint) uintptr {
	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
}

type BitsMismatch struct {
	Base      uintptr
	Got, Want uint64
}

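// CheckScavengedBitsCleared walks the page allocator's chunks and reports,
// via mismatches, 64-page groups in which a page is marked both allocated
// and scavenged, which should never happen.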
func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
	ok = true

	systemstack(func() {
		getg().m.mallocing++

		lock(&mheap_.lock)
	chunkLoop:
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			for j := 0; j < pallocChunkPages/64; j++ {
				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
				got := chunk.scavenged[j]
				if want != got {
					ok = false
					if n >= len(mismatches) {
						break chunkLoop
					}
					mismatches[n] = BitsMismatch{
						Base: chunkBase(i) + uintptr(j)*64*pageSize,
						Got:  got,
						Want: want,
					}
					n++
				}
			}
		}
		unlock(&mheap_.lock)

		getg().m.mallocing--
	})
	return
}

func PageCachePagesLeaked() (leaked uintptr) {
	stopTheWorld("PageCachePagesLeaked")

	deadp := allp[len(allp):cap(allp)]
	for _, p := range deadp {
		if p != nil {
			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
		}
	}

	startTheWorld()
	return
}

var Semacquire = semacquire
var Semrelease1 = semrelease1

func SemNwait(addr *uint32) uint32 {
	root := semroot(addr)
	return atomic.Load(&root.nwait)
}

type MSpan mspan

func AllocMSpan() *MSpan {
	var s *mspan
	systemstack(func() {
		lock(&mheap_.lock)
		s = (*mspan)(mheap_.spanalloc.alloc())
		unlock(&mheap_.lock)
	})
	return (*MSpan)(s)
}

func FreeMSpan(s *MSpan) {
	systemstack(func() {
		lock(&mheap_.lock)
		mheap_.spanalloc.free(unsafe.Pointer(s))
		unlock(&mheap_.lock)
	})
}

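// MSpanCountAlloc sets the span's mark bits to the provided bitmap and
// returns the number of objects the span considers allocated.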
func MSpanCountAlloc(ms *MSpan, bits []byte) int {
	s := (*mspan)(ms)
	s.nelems = uintptr(len(bits) * 8)
	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
	result := s.countAlloc()
	s.gcmarkBits = nil
	return result
}

const (
	TimeHistSubBucketBits   = timeHistSubBucketBits
	TimeHistNumSubBuckets   = timeHistNumSubBuckets
	TimeHistNumSuperBuckets = timeHistNumSuperBuckets
)

type TimeHistogram timeHistogram

func (th *TimeHistogram) Count(bucket, subBucket uint) (uint64, bool) {
	t := (*timeHistogram)(th)
	i := bucket*TimeHistNumSubBuckets + subBucket
	if i >= uint(len(t.counts)) {
		return t.underflow, false
	}
	return t.counts[i], true
}

func (th *TimeHistogram) Record(duration int64) {
	(*timeHistogram)(th).record(duration)
}

var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets

func SetIntArgRegs(a int) int {
	lock(&finlock)
	old := intArgRegs
	if a >= 0 {
		intArgRegs = a
	}
	unlock(&finlock)
	return old
}

func FinalizerGAsleep() bool {
	lock(&finlock)
	result := fingwait
	unlock(&finlock)
	return result
}

var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall

func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
	return gcTestIsReachable(ptrs...)
}

func GCTestPointerClass(p unsafe.Pointer) string {
	return gcTestPointerClass(p)
}

const Raceenabled = raceenabled

const (
	GCBackgroundUtilization = gcBackgroundUtilization
	GCGoalUtilization       = gcGoalUtilization
)

type GCController struct {
	gcControllerState
}

func NewGCController(gcPercent int) *GCController {
	g := escape(new(GCController)).(*GCController)
	g.gcControllerState.test = true
	g.init(int32(gcPercent))
	return g
}

func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
	c.scannableStackSize = stackSize
	c.globalsScan = globalsSize
	c.heapLive = c.trigger
	c.heapScan += uint64(float64(c.trigger-c.heapMarked) * scannableFrac)
	c.startCycle(0, gomaxprocs)
}

func (c *GCController) AssistWorkPerByte() float64 {
	return c.assistWorkPerByte.Load()
}

func (c *GCController) HeapGoal() uint64 {
	return c.heapGoal
}

func (c *GCController) HeapLive() uint64 {
	return c.heapLive
}

func (c *GCController) HeapMarked() uint64 {
	return c.heapMarked
}

func (c *GCController) Trigger() uint64 {
	return c.trigger
}

type GCControllerReviseDelta struct {
	HeapLive        int64
	HeapScan        int64
	HeapScanWork    int64
	StackScanWork   int64
	GlobalsScanWork int64
}

func (c *GCController) Revise(d GCControllerReviseDelta) {
	c.heapLive += uint64(d.HeapLive)
	c.heapScan += uint64(d.HeapScan)
	c.heapScanWork.Add(d.HeapScanWork)
	c.stackScanWork.Add(d.StackScanWork)
	c.globalsScanWork.Add(d.GlobalsScanWork)
	c.revise()
}

func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
	c.assistTime = assistTime
	triggerRatio := c.endCycle(elapsed, gomaxprocs, false)
	c.resetLive(bytesMarked)
	c.commit(triggerRatio)
}

var escapeSink any

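// escape forces x to escape to the heap by briefly publishing it through a
// package-level sink.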
func escape(x any) any {
	escapeSink = x
	escapeSink = nil
	return x
}

func Acquirem() {
	acquirem()
}

func Releasem() {
	releasem(getg().m)
}

var Timediv = timediv

type PIController struct {
	piController
}

func NewPIController(kp, ti, tt, min, max float64) *PIController {
	return &PIController{piController{
		kp:  kp,
		ti:  ti,
		tt:  tt,
		min: min,
		max: max,
	}}
}

func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
	return c.piController.next(input, setpoint, period)
}