src/runtime/stack.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goos"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
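
// Stack layout parameters.
//
// The per-goroutine g.stackguard0 is set to point _StackGuard bytes
// above the bottom of the stack. Each function compares its stack
// pointer against g.stackguard0 to check for overflow. To cut one
// instruction from the check sequence for functions with tiny frames,
// the stack is allowed to protrude _StackSmall bytes below the stack
// guard. Functions with large frames don't bother with the check and
// always call morestack.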
const (
	// _StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and iOS because they do not use a separate stack.
	_StackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024

	// The minimum size of stack used by Go code.
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds _FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 928*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	uintptrMask = 1<<(8*goarch.PtrSize) - 1

	// The values below can be stored to g.stackguard0 to force
	// the next stack check to fail.
	// These are all larger than any real SP.

	// Goroutine preemption request.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking. Causes a split stack check failure.
	// 0xfffffb2e in hex.
	stackFork = uintptrMask & -1234

	// Force a stack movement. Used for debugging.
	// 0xfffffeed in hex.
	stackForceMove = uintptrMask & -275

	// stackPoisonMin is the lowest allowed stack poison value.
	stackPoisonMin = uintptrMask & -4096
)
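
// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//
//	order = log_2(size/FixedStack)
//
// There is a free list for each order.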
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
}
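
// stackpoolItem is the free-span list for a single stack order,
// guarded by its own mutex.
//
//go:notinheap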
type stackpoolItem struct {
	mu   mutex
	span mSpanList
}
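
// Global pool of large stack spans.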
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
}
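
// stackinit initializes the stack allocator: it verifies that the
// stack cache size is a multiple of the page size and initializes
// the free lists and locks of stackpool and stackLarge.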
func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
		lockInit(&stackpool[i].item.mu, lockRankStackpool)
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
		lockInit(&stackLarge.lock, lockRankStackLarge)
	}
}
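
// stacklog2 returns ⌊log_2(n)⌋; for example, stacklog2(8) == 3 and
// stacklog2(9) == 3.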
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
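
// Allocates a stack from the free pool. Must be called with
// stackpool[order].item.mu held.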
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}
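
// Adds stack x to the free pool. Must be called with
// stackpool[order].item.mu held.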
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}
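
// stackcacherefill/stackcacherelease implement a global pool
// of stack segments. The pool is required to prevent
// unlimited growth of per-thread caches.
//
//go:systemstack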
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}
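
// stackcacherelease returns stacks from c's local cache to the global
// pool until the cache is at half capacity.
//
//go:systemstack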
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}
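
// stackcache_clear returns all of c's cached stacks to the global pool.
//
//go:systemstack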
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}
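
// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack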
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if asanenabled {
		asanunpoison(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
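
// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack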
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if asanenabled {
		asanpoison(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return the span
			// to the heap yet (see the comment in
			// stackpoolfree). Add it to the large stack
			// cache instead; freeStackSpans will release it
			// once GC is done.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var maxstackceiling = maxstacksize

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}
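
// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp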
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}
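
// adjustpointer checks whether *vpp is in the old stack described by
// adjinfo. If so, it rewrites *vpp to point into the new stack.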
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
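
// Information from the compiler about the layout of stack frames.
// Note: this type must agree with reflect.bitVector.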
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}
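
// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of a bitvector.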
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}
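
// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.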
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this frame might contain channel receive slots
	// (adjinfo.sghi marks the highest such address), use CAS
	// instructions rather than regular writes to avoid races
	// with concurrent channel operations.
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print("        ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.Ctz8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}
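
// Note: the argument/return area is adjusted by the callee.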
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	if frame.continpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.funcID == funcID_systemstack_switch {
		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}

	locals, args, objs := getStackMap(frame, &adjinfo.cache, true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	// TODO what about arm64 frame pointer adjustment?
	if goarch.ArchFamily == goarch.AMD64 && frame.argp-frame.varp == 2*goarch.PtrSize {
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
	if frame.varp != 0 {
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack bounds check fails and
				// we call into morestack.)
				continue
			}
			ptrdata := obj.ptrdata()
			gcdata := obj.gcdata()
			var s *mspan
			if obj.useGCProg() {
				// unpack the GC program into a temporary bitmap
				s = materializeGCProg(ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
				if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}

	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
	// We need to do this first because we need to adjust the
	// defer.link fields so we always work on the new stack.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

// fillstack writes b over every byte of stk, to poison it.
func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

// findsghi returns the highest address past the end of any sudog
// element on gp's stack within stk.
func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}
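
// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.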
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			// There is a ranking cycle here between gscan bit and
			// hchan locks. Normally, we only allow acquiring hchan
			// locks and then getting a gscan bit. In this case, we
			// already have the gscan bit. We allow acquiring hchan
			// locks here as a special case, using the leaf lock rank.
			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}
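
// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.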
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp
	// Add just the difference to gcController.addScannableStack.
	// g0 stacks never move, so this will never account for them.
	// It's also fine if we have no P, addScannableStack can deal with
	// that case.
	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && atomic.Load8(&gp.parkingOnChan) != 0 {
			// It's not safe for someone to shrink this stack while we're actively
			// parking on a channel, but it is safe to grow since we do that
			// ourselves and explicitly don't want to synchronize with channels
			// since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one.
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}
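
// round x up to a power of 2.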
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
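
// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec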
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry()
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // Include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	preempt := stackguard0 == stackPreempt
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
		// The call to morestack cost a word.
		sp -= goarch.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so
			// do the pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		needed := max + _StackGuard
		used := gp.stack.hi - gp.sched.sp
		for newsize-used < needed {
			newsize *= 2
		}
	}

	if stackguard0 == stackForceMove {
		// Forced stack movement used for debugging.
		// Don't double the stack (or we may quickly run out
		// if this is done repeatedly).
		newsize = oldsize
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}
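
//go:nosplit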
func nilfunc() {
	*(*uint8)(nil) = 0
}
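
// adjust Gobuf as if it executed a call to fn
// and then stopped before the first instruction in fn.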
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}
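
// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack.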
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack and
	// often we don't have precise pointer maps for the
	// innermost frames.
	//
	// We also can't copy the stack if we're at an asynchronous
	// safe-point because we don't have precise pointer maps for
	// all frames.
	//
	// We also can't *shrink* the stack in the window between the
	// goroutine calling gopark to park on a channel and
	// gp.activeStackChans being set.
	return gp.syscallsp == 0 && !gp.asyncSafePoint && atomic.Load8(&gp.parkingOnChan) == 0
}
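
// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack. It may be in
// _Grunning, but only if this is our own user G.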
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Check for self-shrinks while in a libcall. These may have
	// pointers into the stack disguised as uintptrs, but these
	// code paths should all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}
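
// freeStackSpans frees unused stack spans at the end of GC.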
func freeStackSpans() {
	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}
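
// getStackMap returns the locals and arguments live pointer maps, and
// stack object list for frame.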
func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return
	}

	f := frame.fn
	pcdata := int32(-1)
	if targetpc != f.entry() {
		// Back up to the CALL. If we're at the function entry
		// point, we want to use the entry map (-1), even if
		// the first instruction of the function changes the
		// stack map.
		targetpc--
		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Local variables.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch goarch.ArchFamily {
	case goarch.ARM64:
		minsize = sys.StackAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		stackid := pcdata
		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// If nbit == 0, there's no work to do.
		if stkmap.nbit > 0 {
			if stackid < 0 || stackid >= stkmap.n {
				// don't know where we got the wrong stack map index;
				// the symbol table is corrupt.
				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			locals = stackmapdata(stkmap, stackid)
			if stackDebug >= 3 && debug {
				print("      locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
			}
		} else if stackDebug >= 3 && debug {
			print("      no locals to adjust\n")
		}
	}

	// Arguments.
	if frame.arglen > 0 {
		if frame.argmap != nil {
			// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
			// In this case, arglen specifies how much of the args section is actually live.
			// (It could be either all the args + results, or just the args.)
			args = *frame.argmap
			n := int32(frame.arglen / goarch.PtrSize)
			if n < args.n {
				args.n = n // Don't use more of the arguments than arglen.
			}
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we got the wrong stack map index;
				// the symbol table is corrupt.
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			if stackmap.nbit > 0 {
				args = stackmapdata(stackmap, pcdata)
			}
		}
	}

	// stack objects.
	if (GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "ppc64" || GOARCH == "ppc64le") && unsafe.Sizeof(abi.RegArgs{}) > 0 && frame.argmap != nil {
		// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
		// We don't actually use argmap in this case, but we need to fake the stack object
		// record for these frames which contain an internal/abi.RegArgs at a hard-coded offset.
		// This offset matches the assembly code on amd64 and arm64.
		objs = methodValueCallFrameObjs[:]
	} else {
		p := funcdata(f, _FUNCDATA_StackObjects)
		if p != nil {
			n := *(*uintptr)(p)
			p = add(p, goarch.PtrSize)
			*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
			// Note: the noescape above is needed to keep
			// getStackMap from "leaking param content:
			// frame". That leak propagates up to getgcmask, then
			// GCMask, then verifyGCInfo, which converts the stack
			// gcinfo tests into heap gcinfo tests :(
		}
	}

	return
}

var methodValueCallFrameObjs [1]stackObjectRecord // initialized in stkobjinit
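
// stkobjinit fills in methodValueCallFrameObjs with a stack object
// record describing the internal/abi.RegArgs stored in a
// reflect.methodValueCall frame.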
func stkobjinit() {
	var abiRegArgsEface any = abi.RegArgs{}
	abiRegArgsType := efaceOf(&abiRegArgsEface)._type
	if abiRegArgsType.kind&kindGCProg != 0 {
		throw("abiRegArgsType needs GC Prog, update methodValueCallFrameObjs")
	}
	// gcdataoff is stored relative to the containing module's rodata,
	// so find the module that contains methodValueCallFrameObjs.
	ptr := uintptr(unsafe.Pointer(&methodValueCallFrameObjs[0]))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.gofunc <= ptr && ptr < datap.end {
			mod = datap
			break
		}
	}
	if mod == nil {
		throw("methodValueCallFrameObjs is not in a module")
	}
	methodValueCallFrameObjs[0] = stackObjectRecord{
		off:       -int32(alignUp(abiRegArgsType.size, 8)), // It's always the highest address local.
		size:      int32(abiRegArgsType.size),
		_ptrdata:  int32(abiRegArgsType.ptrdata),
		gcdataoff: uint32(uintptr(unsafe.Pointer(abiRegArgsType.gcdata)) - mod.rodata),
	}
}
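
// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects.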
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off       int32
	size      int32
	_ptrdata  int32  // ptrdata, or -ptrdata if a GC program is used
	gcdataoff uint32 // offset to gcdata from moduledata.rodata
}

// useGCProg reports whether the object's pointer data is encoded as
// a GC program (stored as a negative _ptrdata).
func (r *stackObjectRecord) useGCProg() bool {
	return r._ptrdata < 0
}

// ptrdata returns the number of bytes of the object that can contain pointers.
func (r *stackObjectRecord) ptrdata() uintptr {
	x := r._ptrdata
	if x < 0 {
		return uintptr(-x)
	}
	return uintptr(x)
}
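
// gcdata returns the pointer map or GC program for the object.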
func (r *stackObjectRecord) gcdata() *byte {
	ptr := uintptr(unsafe.Pointer(r))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.gofunc <= ptr && ptr < datap.end {
			mod = datap
			break
		}
	}
	// If you get a panic here due to a nil mod,
	// you may have made a copy of a stackObjectRecord.
	// You must use the original pointer.
	res := mod.rodata + uintptr(r.gcdataoff)
	return (*byte)(unsafe.Pointer(res))
}
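
// This is exported as ABI0 via linkname so obj can call it.
//
//go:nosplit
//go:linkname morestackc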
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}