Source file src/runtime/stack.go

     1  // Copyright 2013 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/abi"
     9  	"internal/cpu"
    10  	"internal/goarch"
    11  	"internal/goos"
    12  	"runtime/internal/atomic"
    13  	"runtime/internal/sys"
    14  	"unsafe"
    15  )
    16  
    17  /*
    18  Stack layout parameters.
    19  Included both by runtime (compiled via 6c) and linkers (compiled via gcc).
    20  
    21  The per-goroutine g->stackguard is set to point StackGuard bytes
    22  above the bottom of the stack.  Each function compares its stack
    23  pointer against g->stackguard to check for overflow.  To cut one
    24  instruction from the check sequence for functions with tiny frames,
    25  the stack is allowed to protrude StackSmall bytes below the stack
    26  guard.  Functions with large frames don't bother with the check and
    27  always call morestack.  The sequences are (for amd64, others are
    28  similar):
    29  
    30  	guard = g->stackguard
    31  	frame = function's stack frame size
    32  	argsize = size of function arguments (call + return)
    33  
    34  	stack frame size <= StackSmall:
    35  		CMPQ guard, SP
    36  		JHI 3(PC)
    37  		MOVQ m->morearg, $(argsize << 32)
    38  		CALL morestack(SB)
    39  
    40  	stack frame size > StackSmall but < StackBig:
    41  		LEAQ (frame-StackSmall)(SP), R0
    42  		CMPQ guard, R0
    43  		JHI 3(PC)
    44  		MOVQ m->morearg, $(argsize << 32)
    45  		CALL morestack(SB)
    46  
    47  	stack frame size >= StackBig:
    48  		MOVQ m->morearg, $((argsize << 32) | frame)
    49  		CALL morestack(SB)
    50  
    51  The bottom StackGuard - StackSmall bytes are important: there has
    52  to be enough room to execute functions that refuse to check for
    53  stack overflow, either because they need to be adjacent to the
    54  actual caller's frame (deferproc) or because they handle the imminent
    55  stack overflow (morestack).
    56  
    57  For example, deferproc might call malloc, which does one of the
    58  above checks (without allocating a full frame), which might trigger
    59  a call to morestack.  This sequence needs to fit in the bottom
    60  section of the stack.  On amd64, morestack's frame is 40 bytes, and
    61  deferproc's frame is 56 bytes.  That fits well within the
    62  StackGuard - StackSmall bytes at the bottom.
    63  The linkers explore all possible call traces involving non-splitting
    64  functions to make sure that this limit cannot be violated.
    65  */
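        // Editorial note: an illustrative mapping of frame sizes to the three check
        // sequences above, using the _StackSmall (128) and _StackBig (4096) values
        // defined in the const block below:
        //
        //	frame size   96 bytes: <= StackSmall, single compare of SP against the guard
        //	frame size  512 bytes: > StackSmall and < StackBig, compare with the frame
        //	                       size folded in via LEAQ
        //	frame size 8192 bytes: >= StackBig, no compare; morestack is always called
        //	                       with the frame size passed along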
    66  
    67  const (
    68  	// StackSystem is a number of additional bytes to add
    69  	// to each stack below the usual guard area for OS-specific
    70  	// purposes like signal handling. Used on Windows, Plan 9,
    71  	// and iOS because they do not use a separate stack.
    72  	_StackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
    73  
    74  	// The minimum size of stack used by Go code
    75  	_StackMin = 2048
    76  
    77  	// The minimum stack size to allocate.
    78  	// The hackery here rounds FixedStack0 up to a power of 2 (a worked example follows this const block).
    79  	_FixedStack0 = _StackMin + _StackSystem
    80  	_FixedStack1 = _FixedStack0 - 1
    81  	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
    82  	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
    83  	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
    84  	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
    85  	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
    86  	_FixedStack  = _FixedStack6 + 1
    87  
    88  	// Functions that need frames bigger than this use an extra
    89  	// instruction to do the stack split check, to avoid overflow
    90  	// in case SP - framesize wraps below zero.
    91  	// This value can be no bigger than the size of the unmapped
    92  	// space at zero.
    93  	_StackBig = 4096
    94  
    95  	// The stack guard is a pointer this many bytes above the
    96  	// bottom of the stack.
    97  	//
    98  	// The guard leaves enough room for one _StackSmall frame plus
    99  	// a _StackLimit chain of NOSPLIT calls plus _StackSystem
   100  	// bytes for the OS.
   101  	_StackGuard = 928*sys.StackGuardMultiplier + _StackSystem
   102  
   103  	// After a stack split check the SP is allowed to be this
   104  	// many bytes below the stack guard. This saves an instruction
   105  	// in the checking sequence for tiny frames.
   106  	_StackSmall = 128
   107  
   108  	// The maximum number of bytes that a chain of NOSPLIT
   109  	// functions can use.
   110  	_StackLimit = _StackGuard - _StackSystem - _StackSmall
   111  )
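        // Editorial note: worked values for the constants above, assuming a plain
        // linux/amd64 build (_StackSystem == 0) with StackGuardMultiplier == 1 (the
        // common non-race configuration):
        //
        //	_FixedStack0 = 2048 + 0 = 2048   // already a power of 2
        //	_FixedStack  = 2048
        //	_StackGuard  = 928*1 + 0 = 928
        //	_StackLimit  = 928 - 0 - 128 = 800
        //
        // On windows/amd64, where _StackSystem is 512*8 = 4096, the power-of-2
        // rounding matters: _FixedStack0 = 2048 + 4096 = 6144, and the or/shift
        // cascade rounds it up to _FixedStack = 8192.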
   112  
   113  const (
   114  	// stackDebug == 0: no logging
   115  	//            == 1: logging of per-stack operations
   116  	//            == 2: logging of per-frame operations
   117  	//            == 3: logging of per-word updates
   118  	//            == 4: logging of per-word reads
   119  	stackDebug       = 0
   120  	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
   121  	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
   122  	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
   123  	stackNoCache     = 0 // disable per-P small stack caches
   124  
   125  	// check the BP links during traceback.
   126  	debugCheckBP = false
   127  )
   128  
   129  const (
   130  	uintptrMask = 1<<(8*goarch.PtrSize) - 1
   131  
   132  	// The values below can be stored to g.stackguard0 to force
   133  	// the next stack check to fail.
   134  	// These are all larger than any real SP.
   135  
   136  	// Goroutine preemption request.
   137  	// 0xfffffade in hex.
   138  	stackPreempt = uintptrMask & -1314
   139  
   140  	// Thread is forking. Causes a split stack check failure.
   141  	// 0xfffffb2e in hex.
   142  	stackFork = uintptrMask & -1234
   143  
   144  	// Force a stack movement. Used for debugging.
   145  	// 0xfffffeed in hex.
   146  	stackForceMove = uintptrMask & -275
   147  
   148  	// stackPoisonMin is the lowest allowed stack poison value.
   149  	stackPoisonMin = uintptrMask & -4096
   150  )
   151  
   152  // Global pool of spans that have free stacks.
   153  // Stacks are assigned an order according to size.
   154  //     order = log_2(size/FixedStack)
   155  // There is a free list for each order.
   156  var stackpool [_NumStackOrders]struct {
   157  	item stackpoolItem
   158  	_    [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
   159  }
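        // Editorial note: with _FixedStack == 2048 and four stack orders (the usual
        // linux/amd64 configuration), the fixed-size classes served by stackpool are:
        //
        //	order 0:  2 KiB
        //	order 1:  4 KiB
        //	order 2:  8 KiB
        //	order 3: 16 KiB
        //
        // Requests larger than the biggest order are served by stackLarge below.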
   160  
   161  //go:notinheap
   162  type stackpoolItem struct {
   163  	mu   mutex
   164  	span mSpanList
   165  }
   166  
   167  // Global pool of large stack spans.
   168  var stackLarge struct {
   169  	lock mutex
   170  	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
   171  }
   172  
   173  func stackinit() {
   174  	if _StackCacheSize&_PageMask != 0 {
   175  		throw("cache size must be a multiple of page size")
   176  	}
   177  	for i := range stackpool {
   178  		stackpool[i].item.span.init()
   179  		lockInit(&stackpool[i].item.mu, lockRankStackpool)
   180  	}
   181  	for i := range stackLarge.free {
   182  		stackLarge.free[i].init()
   183  		lockInit(&stackLarge.lock, lockRankStackLarge)
   184  	}
   185  }
   186  
   187  // stacklog2 returns ⌊log_2(n)⌋.
   188  func stacklog2(n uintptr) int {
   189  	log2 := 0
   190  	for n > 1 {
   191  		n >>= 1
   192  		log2++
   193  	}
   194  	return log2
   195  }
   196  
   197  // Allocates a stack from the free pool. Must be called with
   198  // stackpool[order].item.mu held.
   199  func stackpoolalloc(order uint8) gclinkptr {
   200  	list := &stackpool[order].item.span
   201  	s := list.first
   202  	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
   203  	if s == nil {
   204  		// no free stacks. Allocate another span worth.
   205  		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
   206  		if s == nil {
   207  			throw("out of memory")
   208  		}
   209  		if s.allocCount != 0 {
   210  			throw("bad allocCount")
   211  		}
   212  		if s.manualFreeList.ptr() != nil {
   213  			throw("bad manualFreeList")
   214  		}
   215  		osStackAlloc(s)
   216  		s.elemsize = _FixedStack << order
   217  		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
   218  			x := gclinkptr(s.base() + i)
   219  			x.ptr().next = s.manualFreeList
   220  			s.manualFreeList = x
   221  		}
   222  		list.insert(s)
   223  	}
   224  	x := s.manualFreeList
   225  	if x.ptr() == nil {
   226  		throw("span has no free stacks")
   227  	}
   228  	s.manualFreeList = x.ptr().next
   229  	s.allocCount++
   230  	if s.manualFreeList.ptr() == nil {
   231  		// all stacks in s are allocated.
   232  		list.remove(s)
   233  	}
   234  	return x
   235  }
   236  
   237  // Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
   238  func stackpoolfree(x gclinkptr, order uint8) {
   239  	s := spanOfUnchecked(uintptr(x))
   240  	if s.state.get() != mSpanManual {
   241  		throw("freeing stack not in a stack span")
   242  	}
   243  	if s.manualFreeList.ptr() == nil {
   244  		// s will now have a free stack
   245  		stackpool[order].item.span.insert(s)
   246  	}
   247  	x.ptr().next = s.manualFreeList
   248  	s.manualFreeList = x
   249  	s.allocCount--
   250  	if gcphase == _GCoff && s.allocCount == 0 {
   251  		// Span is completely free. Return it to the heap
   252  		// immediately if we're sweeping.
   253  		//
   254  		// If GC is active, we delay the free until the end of
   255  		// GC to avoid the following type of situation:
   256  		//
   257  		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
   258  		// 2) The stack that pointer points to is copied
   259  		// 3) The old stack is freed
   260  		// 4) The containing span is marked free
   261  		// 5) GC attempts to mark the SudoG.elem pointer. The
   262  		//    marking fails because the pointer looks like a
   263  		//    pointer into a free span.
   264  		//
   265  		// By not freeing, we prevent step #4 until GC is done.
   266  		stackpool[order].item.span.remove(s)
   267  		s.manualFreeList = 0
   268  		osStackFree(s)
   269  		mheap_.freeManual(s, spanAllocStack)
   270  	}
   271  }
   272  
   273  // stackcacherefill/stackcacherelease implement a global pool of stack segments.
   274  // The pool is required to prevent unlimited growth of per-thread caches.
   275  //
   276  //go:systemstack
   277  func stackcacherefill(c *mcache, order uint8) {
   278  	if stackDebug >= 1 {
   279  		print("stackcacherefill order=", order, "\n")
   280  	}
   281  
   282  	// Grab some stacks from the global cache.
   283  	// Grab half of the allowed capacity (to prevent thrashing).
   284  	var list gclinkptr
   285  	var size uintptr
   286  	lock(&stackpool[order].item.mu)
   287  	for size < _StackCacheSize/2 {
   288  		x := stackpoolalloc(order)
   289  		x.ptr().next = list
   290  		list = x
   291  		size += _FixedStack << order
   292  	}
   293  	unlock(&stackpool[order].item.mu)
   294  	c.stackcache[order].list = list
   295  	c.stackcache[order].size = size
   296  }
   297  
   298  //go:systemstack
   299  func stackcacherelease(c *mcache, order uint8) {
   300  	if stackDebug >= 1 {
   301  		print("stackcacherelease order=", order, "\n")
   302  	}
   303  	x := c.stackcache[order].list
   304  	size := c.stackcache[order].size
   305  	lock(&stackpool[order].item.mu)
   306  	for size > _StackCacheSize/2 {
   307  		y := x.ptr().next
   308  		stackpoolfree(x, order)
   309  		x = y
   310  		size -= _FixedStack << order
   311  	}
   312  	unlock(&stackpool[order].item.mu)
   313  	c.stackcache[order].list = x
   314  	c.stackcache[order].size = size
   315  }
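        // Editorial note: refill and release form a simple hysteresis around half of
        // the cache capacity. Assuming _StackCacheSize is 32 KiB (its value elsewhere
        // in the runtime) and order 0 (2 KiB stacks):
        //
        //	stackcacherefill:  grabs stacks until size >= 16 KiB, i.e. 8 order-0 stacks
        //	stackcacherelease: frees stacks until size <= 16 KiB
        //
        // so a P's cache bounces around half-full instead of hitting the global pool
        // on every stackalloc/stackfree.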
   316  
   317  //go:systemstack
   318  func stackcache_clear(c *mcache) {
   319  	if stackDebug >= 1 {
   320  		print("stackcache clear\n")
   321  	}
   322  	for order := uint8(0); order < _NumStackOrders; order++ {
   323  		lock(&stackpool[order].item.mu)
   324  		x := c.stackcache[order].list
   325  		for x.ptr() != nil {
   326  			y := x.ptr().next
   327  			stackpoolfree(x, order)
   328  			x = y
   329  		}
   330  		c.stackcache[order].list = 0
   331  		c.stackcache[order].size = 0
   332  		unlock(&stackpool[order].item.mu)
   333  	}
   334  }
   335  
   336  // stackalloc allocates an n byte stack.
   337  //
   338  // stackalloc must run on the system stack because it uses per-P
   339  // resources and must not split the stack.
   340  //
   341  //go:systemstack
   342  func stackalloc(n uint32) stack {
   343  	// Stackalloc must be called on scheduler stack, so that we
   344  	// never try to grow the stack during the code that stackalloc runs.
   345  	// Doing so would cause a deadlock (issue 1547).
   346  	thisg := getg()
   347  	if thisg != thisg.m.g0 {
   348  		throw("stackalloc not on scheduler stack")
   349  	}
   350  	if n&(n-1) != 0 {
   351  		throw("stack size not a power of 2")
   352  	}
   353  	if stackDebug >= 1 {
   354  		print("stackalloc ", n, "\n")
   355  	}
   356  
   357  	if debug.efence != 0 || stackFromSystem != 0 {
   358  		n = uint32(alignUp(uintptr(n), physPageSize))
   359  		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
   360  		if v == nil {
   361  			throw("out of memory (stackalloc)")
   362  		}
   363  		return stack{uintptr(v), uintptr(v) + uintptr(n)}
   364  	}
   365  
   366  	// Small stacks are allocated with a fixed-size free-list allocator.
   367  	// If we need a stack of a bigger size, we fall back on allocating
   368  	// a dedicated span.
   369  	var v unsafe.Pointer
   370  	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
   371  		order := uint8(0)
   372  		n2 := n
   373  		for n2 > _FixedStack {
   374  			order++
   375  			n2 >>= 1
   376  		}
   377  		var x gclinkptr
   378  		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
   379  			// thisg.m.p == 0 can happen in the guts of exitsyscall
   380  			// or procresize. Just get a stack from the global pool.
   381  			// Also don't touch stackcache during gc
   382  			// as it's flushed concurrently.
   383  			lock(&stackpool[order].item.mu)
   384  			x = stackpoolalloc(order)
   385  			unlock(&stackpool[order].item.mu)
   386  		} else {
   387  			c := thisg.m.p.ptr().mcache
   388  			x = c.stackcache[order].list
   389  			if x.ptr() == nil {
   390  				stackcacherefill(c, order)
   391  				x = c.stackcache[order].list
   392  			}
   393  			c.stackcache[order].list = x.ptr().next
   394  			c.stackcache[order].size -= uintptr(n)
   395  		}
   396  		v = unsafe.Pointer(x)
   397  	} else {
   398  		var s *mspan
   399  		npage := uintptr(n) >> _PageShift
   400  		log2npage := stacklog2(npage)
   401  
   402  		// Try to get a stack from the large stack cache.
   403  		lock(&stackLarge.lock)
   404  		if !stackLarge.free[log2npage].isEmpty() {
   405  			s = stackLarge.free[log2npage].first
   406  			stackLarge.free[log2npage].remove(s)
   407  		}
   408  		unlock(&stackLarge.lock)
   409  
   410  		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
   411  
   412  		if s == nil {
   413  			// Allocate a new stack from the heap.
   414  			s = mheap_.allocManual(npage, spanAllocStack)
   415  			if s == nil {
   416  				throw("out of memory")
   417  			}
   418  			osStackAlloc(s)
   419  			s.elemsize = uintptr(n)
   420  		}
   421  		v = unsafe.Pointer(s.base())
   422  	}
   423  
   424  	if raceenabled {
   425  		racemalloc(v, uintptr(n))
   426  	}
   427  	if msanenabled {
   428  		msanmalloc(v, uintptr(n))
   429  	}
   430  	if asanenabled {
   431  		asanunpoison(v, uintptr(n))
   432  	}
   433  	if stackDebug >= 1 {
   434  		print("  allocated ", v, "\n")
   435  	}
   436  	return stack{uintptr(v), uintptr(v) + uintptr(n)}
   437  }
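        // Editorial note: worked examples for the size classing in stackalloc above,
        // assuming linux/amd64 defaults (_FixedStack == 2048, _NumStackOrders == 4,
        // _StackCacheSize == 32 KiB) and the runtime's 8 KiB pages:
        //
        //	n = 4096:  below both 32 KiB limits, so the small path is used; the
        //	           order loop yields order 1 (4096 == _FixedStack<<1).
        //	n = 65536: too large for the small path; npage = 65536>>13 = 8 and
        //	           log2npage = 3, so the stack comes from stackLarge.free[3]
        //	           or, failing that, a fresh 8-page span from the heap.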
   438  
   439  // stackfree frees an n byte stack allocation at stk.
   440  //
   441  // stackfree must run on the system stack because it uses per-P
   442  // resources and must not split the stack.
   443  //
   444  //go:systemstack
   445  func stackfree(stk stack) {
   446  	gp := getg()
   447  	v := unsafe.Pointer(stk.lo)
   448  	n := stk.hi - stk.lo
   449  	if n&(n-1) != 0 {
   450  		throw("stack not a power of 2")
   451  	}
   452  	if stk.lo+n < stk.hi {
   453  		throw("bad stack size")
   454  	}
   455  	if stackDebug >= 1 {
   456  		println("stackfree", v, n)
   457  		memclrNoHeapPointers(v, n) // for testing, clobber stack data
   458  	}
   459  	if debug.efence != 0 || stackFromSystem != 0 {
   460  		if debug.efence != 0 || stackFaultOnFree != 0 {
   461  			sysFault(v, n)
   462  		} else {
   463  			sysFree(v, n, &memstats.stacks_sys)
   464  		}
   465  		return
   466  	}
   467  	if msanenabled {
   468  		msanfree(v, n)
   469  	}
   470  	if asanenabled {
   471  		asanpoison(v, n)
   472  	}
   473  	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
   474  		order := uint8(0)
   475  		n2 := n
   476  		for n2 > _FixedStack {
   477  			order++
   478  			n2 >>= 1
   479  		}
   480  		x := gclinkptr(v)
   481  		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
   482  			lock(&stackpool[order].item.mu)
   483  			stackpoolfree(x, order)
   484  			unlock(&stackpool[order].item.mu)
   485  		} else {
   486  			c := gp.m.p.ptr().mcache
   487  			if c.stackcache[order].size >= _StackCacheSize {
   488  				stackcacherelease(c, order)
   489  			}
   490  			x.ptr().next = c.stackcache[order].list
   491  			c.stackcache[order].list = x
   492  			c.stackcache[order].size += n
   493  		}
   494  	} else {
   495  		s := spanOfUnchecked(uintptr(v))
   496  		if s.state.get() != mSpanManual {
   497  			println(hex(s.base()), v)
   498  			throw("bad span state")
   499  		}
   500  		if gcphase == _GCoff {
   501  			// Free the stack immediately if we're
   502  			// sweeping.
   503  			osStackFree(s)
   504  			mheap_.freeManual(s, spanAllocStack)
   505  		} else {
   506  			// If the GC is running, we can't return a
   507  			// stack span to the heap because it could be
   508  			// reused as a heap span, and this state
   509  			// change would race with GC. Add it to the
   510  			// large stack cache instead.
   511  			log2npage := stacklog2(s.npages)
   512  			lock(&stackLarge.lock)
   513  			stackLarge.free[log2npage].insert(s)
   514  			unlock(&stackLarge.lock)
   515  		}
   516  	}
   517  }
   518  
   519  var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
   520  
   521  var maxstackceiling = maxstacksize
   522  
   523  var ptrnames = []string{
   524  	0: "scalar",
   525  	1: "ptr",
   526  }
   527  
   528  // Stack frame layout
   529  //
   530  // (x86)
   531  // +------------------+
   532  // | args from caller |
   533  // +------------------+ <- frame->argp
   534  // |  return address  |
   535  // +------------------+
   536  // |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
   537  // +------------------+ <- frame->varp
   538  // |     locals       |
   539  // +------------------+
   540  // |  args to callee  |
   541  // +------------------+ <- frame->sp
   542  //
   543  // (arm)
   544  // +------------------+
   545  // | args from caller |
   546  // +------------------+ <- frame->argp
   547  // | caller's retaddr |
   548  // +------------------+ <- frame->varp
   549  // |     locals       |
   550  // +------------------+
   551  // |  args to callee  |
   552  // +------------------+
   553  // |  return address  |
   554  // +------------------+ <- frame->sp
   555  
   556  type adjustinfo struct {
   557  	old   stack
   558  	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
   559  	cache pcvalueCache
   560  
   561  	// sghi is the highest sudog.elem on the stack.
   562  	sghi uintptr
   563  }
   564  
   565  // Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
   566  // If so, it rewrites *vpp to point into the new stack.
   567  func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
   568  	pp := (*uintptr)(vpp)
   569  	p := *pp
   570  	if stackDebug >= 4 {
   571  		print("        ", pp, ":", hex(p), "\n")
   572  	}
   573  	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
   574  		*pp = p + adjinfo.delta
   575  		if stackDebug >= 3 {
   576  			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
   577  		}
   578  	}
   579  }
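        // Editorial note: a worked example of the adjustment above, with hypothetical
        // addresses. Suppose the old stack is [0x1000, 0x2000) and the new one is
        // [0x7000, 0x9000), so copystack sets delta = new.hi - old.hi = 0x7000:
        //
        //	slot holds 0x1f40 (inside the old stack) -> rewritten to 0x1f40 + 0x7000 = 0x8f40
        //	slot holds a heap or global address      -> left alone, outside [old.lo, old.hi)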
   580  
   581  // Information from the compiler about the layout of stack frames.
   582  // Note: this type must agree with reflect.bitVector.
   583  type bitvector struct {
   584  	n        int32 // # of bits
   585  	bytedata *uint8
   586  }
   587  
   588  // ptrbit returns the i'th bit in bv.
   589  // ptrbit is less efficient than iterating directly over bitvector bits,
   590  // and should only be used in non-performance-critical code.
   591  // See adjustpointers for an example of a high-efficiency walk of a bitvector.
   592  func (bv *bitvector) ptrbit(i uintptr) uint8 {
   593  	b := *(addb(bv.bytedata, i/8))
   594  	return (b >> (i % 8)) & 1
   595  }
   596  
   597  // bv describes the memory starting at address scanp.
   598  // Adjust any pointers contained therein.
   599  func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
   600  	minp := adjinfo.old.lo
   601  	maxp := adjinfo.old.hi
   602  	delta := adjinfo.delta
   603  	num := uintptr(bv.n)
   604  	// If this frame might contain channel receive slots, use CAS
   605  	// to adjust pointers. If the slot hasn't been received into
   606  	// yet, it may contain stack pointers and a concurrent send
   607  	// could race with adjusting those pointers. (The sent value
   608  	// itself can never contain stack pointers.)
   609  	useCAS := uintptr(scanp) < adjinfo.sghi
   610  	for i := uintptr(0); i < num; i += 8 {
   611  		if stackDebug >= 4 {
   612  			for j := uintptr(0); j < 8; j++ {
   613  				print("        ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
   614  			}
   615  		}
   616  		b := *(addb(bv.bytedata, i/8))
   617  		for b != 0 {
   618  			j := uintptr(sys.Ctz8(b))
   619  			b &= b - 1
   620  			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
   621  		retry:
   622  			p := *pp
   623  			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
   624  				// Looks like a junk value in a pointer slot.
   625  				// Live analysis wrong?
   626  				getg().m.traceback = 2
   627  				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
   628  				throw("invalid pointer found on stack")
   629  			}
   630  			if minp <= p && p < maxp {
   631  				if stackDebug >= 3 {
   632  					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
   633  				}
   634  				if useCAS {
   635  					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
   636  					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
   637  						goto retry
   638  					}
   639  				} else {
   640  					*pp = p + delta
   641  				}
   642  			}
   643  		}
   644  	}
   645  }
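        // Editorial note: the inner loop above visits only the set bits of each
        // bitmap byte. For b = 0b00100110 (pointer slots 1, 2 and 5 within this
        // 8-word group), the iterations are:
        //
        //	b = 0b00100110: Ctz8 = 1, adjust slot i+1, b &= b-1 -> 0b00100100
        //	b = 0b00100100: Ctz8 = 2, adjust slot i+2, b &= b-1 -> 0b00100000
        //	b = 0b00100000: Ctz8 = 5, adjust slot i+5, b &= b-1 -> 0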
   646  
   647  // Note: the argument/return area is adjusted by the callee.
   648  func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
   649  	adjinfo := (*adjustinfo)(arg)
   650  	if frame.continpc == 0 {
   651  		// Frame is dead.
   652  		return true
   653  	}
   654  	f := frame.fn
   655  	if stackDebug >= 2 {
   656  		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
   657  	}
   658  	if f.funcID == funcID_systemstack_switch {
   659  		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
   660  		// We will allow it to be copied even though we don't
   661  		// have full GC info for it (because it is written in asm).
   662  		return true
   663  	}
   664  
   665  	locals, args, objs := getStackMap(frame, &adjinfo.cache, true)
   666  
   667  	// Adjust local variables if stack frame has been allocated.
   668  	if locals.n > 0 {
   669  		size := uintptr(locals.n) * goarch.PtrSize
   670  		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
   671  	}
   672  
   673  	// Adjust saved base pointer if there is one.
   674  	// TODO what about arm64 frame pointer adjustment?
   675  	if goarch.ArchFamily == goarch.AMD64 && frame.argp-frame.varp == 2*goarch.PtrSize {
   676  		if stackDebug >= 3 {
   677  			print("      saved bp\n")
   678  		}
   679  		if debugCheckBP {
   680  			// Frame pointers should always point to the next higher frame on
   681  			// the Go stack (or be nil, for the top frame on the stack).
   682  			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
   683  			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
   684  				println("runtime: found invalid frame pointer")
   685  				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
   686  				throw("bad frame pointer")
   687  			}
   688  		}
   689  		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
   690  	}
   691  
   692  	// Adjust arguments.
   693  	if args.n > 0 {
   694  		if stackDebug >= 3 {
   695  			print("      args\n")
   696  		}
   697  		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
   698  	}
   699  
   700  	// Adjust pointers in all stack objects (whether they are live or not).
   701  	// See comments in mgcmark.go:scanframeworker.
   702  	if frame.varp != 0 {
   703  		for i := range objs {
   704  			obj := &objs[i]
   705  			off := obj.off
   706  			base := frame.varp // locals base pointer
   707  			if off >= 0 {
   708  				base = frame.argp // arguments and return values base pointer
   709  			}
   710  			p := base + uintptr(off)
   711  			if p < frame.sp {
   712  				// Object hasn't been allocated in the frame yet.
   713  				// (Happens when the stack bounds check fails and
   714  				// we call into morestack.)
   715  				continue
   716  			}
   717  			ptrdata := obj.ptrdata()
   718  			gcdata := obj.gcdata()
   719  			var s *mspan
   720  			if obj.useGCProg() {
   721  				// See comments in mgcmark.go:scanstack
   722  				s = materializeGCProg(ptrdata, gcdata)
   723  				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
   724  			}
   725  			for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
   726  				if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
   727  					adjustpointer(adjinfo, unsafe.Pointer(p+i))
   728  				}
   729  			}
   730  			if s != nil {
   731  				dematerializeGCProg(s)
   732  			}
   733  		}
   734  	}
   735  
   736  	return true
   737  }
   738  
   739  func adjustctxt(gp *g, adjinfo *adjustinfo) {
   740  	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
   741  	if !framepointer_enabled {
   742  		return
   743  	}
   744  	if debugCheckBP {
   745  		bp := gp.sched.bp
   746  		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
   747  			println("runtime: found invalid top frame pointer")
   748  			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
   749  			throw("bad top frame pointer")
   750  		}
   751  	}
   752  	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
   753  }
   754  
   755  func adjustdefers(gp *g, adjinfo *adjustinfo) {
   756  	// Adjust pointers in the Defer structs.
   757  	// We need to do this first because we need to adjust the
   758  	// defer.link fields so we always work on the new stack.
   759  	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
   760  	for d := gp._defer; d != nil; d = d.link {
   761  		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
   762  		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
   763  		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
   764  		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
   765  		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
   766  		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
   767  	}
   768  }
   769  
   770  func adjustpanics(gp *g, adjinfo *adjustinfo) {
   771  	// Panics are on stack and already adjusted.
   772  	// Update pointer to head of list in G.
   773  	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
   774  }
   775  
   776  func adjustsudogs(gp *g, adjinfo *adjustinfo) {
   777  	// the data elements pointed to by a SudoG structure
   778  	// might be in the stack.
   779  	for s := gp.waiting; s != nil; s = s.waitlink {
   780  		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
   781  	}
   782  }
   783  
   784  func fillstack(stk stack, b byte) {
   785  	for p := stk.lo; p < stk.hi; p++ {
   786  		*(*byte)(unsafe.Pointer(p)) = b
   787  	}
   788  }
   789  
   790  func findsghi(gp *g, stk stack) uintptr {
   791  	var sghi uintptr
   792  	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   793  		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
   794  		if stk.lo <= p && p < stk.hi && p > sghi {
   795  			sghi = p
   796  		}
   797  	}
   798  	return sghi
   799  }
   800  
   801  // syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
   802  // stack they refer to while synchronizing with concurrent channel
   803  // operations. It returns the number of bytes of stack copied.
   804  func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
   805  	if gp.waiting == nil {
   806  		return 0
   807  	}
   808  
   809  	// Lock channels to prevent concurrent send/receive.
   810  	var lastc *hchan
   811  	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   812  		if sg.c != lastc {
   813  			// There is a ranking cycle here between gscan bit and
   814  			// hchan locks. Normally, we only allow acquiring hchan
   815  			// locks and then getting a gscan bit. In this case, we
   816  			// already have the gscan bit. We allow acquiring hchan
   817  			// locks here as a special case, since a deadlock can't
   818  			// happen because the G involved must already be
   819  			// suspended. So, we get a special hchan lock rank here
   820  			// that is lower than gscan, but doesn't allow acquiring
   821  			// any locks other than hchan.
   822  			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
   823  		}
   824  		lastc = sg.c
   825  	}
   826  
   827  	// Adjust sudogs.
   828  	adjustsudogs(gp, adjinfo)
   829  
   830  	// Copy the part of the stack the sudogs point into
   831  	// while holding the lock to prevent races on
   832  	// send/receive slots.
   833  	var sgsize uintptr
   834  	if adjinfo.sghi != 0 {
   835  		oldBot := adjinfo.old.hi - used
   836  		newBot := oldBot + adjinfo.delta
   837  		sgsize = adjinfo.sghi - oldBot
   838  		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
   839  	}
   840  
   841  	// Unlock channels.
   842  	lastc = nil
   843  	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   844  		if sg.c != lastc {
   845  			unlock(&sg.c.lock)
   846  		}
   847  		lastc = sg.c
   848  	}
   849  
   850  	return sgsize
   851  }
   852  
   853  // Copies gp's stack to a new stack of a different size.
   854  // Caller must have changed gp status to Gcopystack.
   855  func copystack(gp *g, newsize uintptr) {
   856  	if gp.syscallsp != 0 {
   857  		throw("stack growth not allowed in system call")
   858  	}
   859  	old := gp.stack
   860  	if old.lo == 0 {
   861  		throw("nil stackbase")
   862  	}
   863  	used := old.hi - gp.sched.sp
   864  	// Add just the difference to gcController.addScannableStack.
   865  	// g0 stacks never move, so this will never account for them.
   866  	// It's also fine if we have no P; addScannableStack can deal with
   867  	// that case.
   868  	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))
   869  
   870  	// allocate new stack
   871  	new := stackalloc(uint32(newsize))
   872  	if stackPoisonCopy != 0 {
   873  		fillstack(new, 0xfd)
   874  	}
   875  	if stackDebug >= 1 {
   876  		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
   877  	}
   878  
   879  	// Compute adjustment.
   880  	var adjinfo adjustinfo
   881  	adjinfo.old = old
   882  	adjinfo.delta = new.hi - old.hi
   883  
   884  	// Adjust sudogs, synchronizing with channel ops if necessary.
   885  	ncopy := used
   886  	if !gp.activeStackChans {
   887  		if newsize < old.hi-old.lo && atomic.Load8(&gp.parkingOnChan) != 0 {
   888  			// It's not safe for someone to shrink this stack while we're actively
   889  			// parking on a channel, but it is safe to grow since we do that
   890  			// ourselves and explicitly don't want to synchronize with channels
   891  			// since we could self-deadlock.
   892  			throw("racy sudog adjustment due to parking on channel")
   893  		}
   894  		adjustsudogs(gp, &adjinfo)
   895  	} else {
   896  		// sudogs may be pointing into the stack and gp has
   897  		// released channel locks, so other goroutines could
   898  		// be writing to gp's stack. Find the highest such
   899  		// pointer so we can handle everything there and below
   900  		// carefully. (This shouldn't be far from the bottom
   901  		// of the stack, so there's little cost in handling
   902  		// everything below it carefully.)
   903  		adjinfo.sghi = findsghi(gp, old)
   904  
   905  		// Synchronize with channel ops and copy the part of
   906  		// the stack they may interact with.
   907  		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
   908  	}
   909  
   910  	// Copy the stack (or the rest of it) to the new location
   911  	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)
   912  
   913  	// Adjust remaining structures that have pointers into stacks.
   914  	// We have to do most of these before we traceback the new
   915  	// stack because gentraceback uses them.
   916  	adjustctxt(gp, &adjinfo)
   917  	adjustdefers(gp, &adjinfo)
   918  	adjustpanics(gp, &adjinfo)
   919  	if adjinfo.sghi != 0 {
   920  		adjinfo.sghi += adjinfo.delta
   921  	}
   922  
   923  	// Swap out old stack for new one
   924  	gp.stack = new
   925  	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
   926  	gp.sched.sp = new.hi - used
   927  	gp.stktopsp += adjinfo.delta
   928  
   929  	// Adjust pointers in the new stack.
   930  	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)
   931  
   932  	// free old stack
   933  	if stackPoisonCopy != 0 {
   934  		fillstack(old, 0xfc)
   935  	}
   936  	stackfree(old)
   937  }
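        // Editorial note: a worked sketch of copystack's bookkeeping, with
        // hypothetical numbers. Growing a 4 KiB stack [0x1000, 0x2000) that has
        // 0x300 bytes in use into an 8 KiB stack [0x7000, 0x9000):
        //
        //	used  = old.hi - gp.sched.sp    = 0x300
        //	delta = new.hi - old.hi         = 0x7000
        //	memmove target = new.hi - ncopy = 0x8d00 (when ncopy == used)
        //
        // If the goroutine has active channel ops, syncadjustsudogs first copies the
        // deepest part of the used region (up to the highest sudog pointer) under
        // hchan locks, and ncopy shrinks accordingly.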
   938  
   939  // round x up to a power of 2.
   940  func round2(x int32) int32 {
   941  	s := uint(0)
   942  	for 1<<s < x {
   943  		s++
   944  	}
   945  	return 1 << s
   946  }
   947  
   948  // Called from runtime·morestack when more stack is needed.
   949  // Allocate larger stack and relocate to new stack.
   950  // Stack growth is multiplicative, for constant amortized cost.
   951  //
   952  // g->atomicstatus will be Grunning or Gscanrunning upon entry.
   953  // If the scheduler is trying to stop this g, then it will set preemptStop.
   954  //
   955  // This must be nowritebarrierrec because it can be called as part of
   956  // stack growth from other nowritebarrierrec functions, but the
   957  // compiler doesn't check this.
   958  //
   959  //go:nowritebarrierrec
   960  func newstack() {
   961  	thisg := getg()
   962  	// TODO: double check all gp. shouldn't be getg().
   963  	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
   964  		throw("stack growth after fork")
   965  	}
   966  	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
   967  		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
   968  		morebuf := thisg.m.morebuf
   969  		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
   970  		throw("runtime: wrong goroutine in newstack")
   971  	}
   972  
   973  	gp := thisg.m.curg
   974  
   975  	if thisg.m.curg.throwsplit {
   976  		// Update syscallsp, syscallpc in case traceback uses them.
   977  		morebuf := thisg.m.morebuf
   978  		gp.syscallsp = morebuf.sp
   979  		gp.syscallpc = morebuf.pc
   980  		pcname, pcoff := "(unknown)", uintptr(0)
   981  		f := findfunc(gp.sched.pc)
   982  		if f.valid() {
   983  			pcname = funcname(f)
   984  			pcoff = gp.sched.pc - f.entry()
   985  		}
   986  		print("runtime: newstack at ", pcname, "+", hex(pcoff),
   987  			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
   988  			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
   989  			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
   990  
   991  		thisg.m.traceback = 2 // Include runtime frames
   992  		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
   993  		throw("runtime: stack split at bad time")
   994  	}
   995  
   996  	morebuf := thisg.m.morebuf
   997  	thisg.m.morebuf.pc = 0
   998  	thisg.m.morebuf.lr = 0
   999  	thisg.m.morebuf.sp = 0
  1000  	thisg.m.morebuf.g = 0
  1001  
  1002  	// NOTE: stackguard0 may change underfoot, if another thread
  1003  	// is about to try to preempt gp. Read it just once and use that same
  1004  	// value now and below.
  1005  	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)
  1006  
  1007  	// Be conservative about where we preempt.
  1008  	// We are interested in preempting user Go code, not runtime code.
  1009  	// If we're holding locks, mallocing, or preemption is disabled, don't
  1010  	// preempt.
  1011  	// This check is very early in newstack so that even the status change
  1012  	// from Grunning to Gwaiting and back doesn't happen in this case.
  1013  	// That status change by itself can be viewed as a small preemption,
  1014  	// because the GC might change Gwaiting to Gscanwaiting, and then
  1015  	// this goroutine has to wait for the GC to finish before continuing.
  1016  	// If the GC is in some way dependent on this goroutine (for example,
  1017  	// it needs a lock held by the goroutine), that small preemption turns
  1018  	// into a real deadlock.
  1019  	preempt := stackguard0 == stackPreempt
  1020  	if preempt {
  1021  		if !canPreemptM(thisg.m) {
  1022  			// Let the goroutine keep running for now.
  1023  			// gp->preempt is set, so it will be preempted next time.
  1024  			gp.stackguard0 = gp.stack.lo + _StackGuard
  1025  			gogo(&gp.sched) // never return
  1026  		}
  1027  	}
  1028  
  1029  	if gp.stack.lo == 0 {
  1030  		throw("missing stack in newstack")
  1031  	}
  1032  	sp := gp.sched.sp
  1033  	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
  1034  		// The call to morestack cost a word.
  1035  		sp -= goarch.PtrSize
  1036  	}
  1037  	if stackDebug >= 1 || sp < gp.stack.lo {
  1038  		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
  1039  			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
  1040  			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
  1041  	}
  1042  	if sp < gp.stack.lo {
  1043  		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
  1044  		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
  1045  		throw("runtime: split stack overflow")
  1046  	}
  1047  
  1048  	if preempt {
  1049  		if gp == thisg.m.g0 {
  1050  			throw("runtime: preempt g0")
  1051  		}
  1052  		if thisg.m.p == 0 && thisg.m.locks == 0 {
  1053  			throw("runtime: g is running but p is not")
  1054  		}
  1055  
  1056  		if gp.preemptShrink {
  1057  			// We're at a synchronous safe point now, so
  1058  			// do the pending stack shrink.
  1059  			gp.preemptShrink = false
  1060  			shrinkstack(gp)
  1061  		}
  1062  
  1063  		if gp.preemptStop {
  1064  			preemptPark(gp) // never returns
  1065  		}
  1066  
  1067  		// Act like goroutine called runtime.Gosched.
  1068  		gopreempt_m(gp) // never return
  1069  	}
  1070  
  1071  	// Allocate a bigger segment and move the stack.
  1072  	oldsize := gp.stack.hi - gp.stack.lo
  1073  	newsize := oldsize * 2
  1074  
  1075  	// Make sure we grow at least as much as needed to fit the new frame.
  1076  	// (This is just an optimization - the caller of morestack will
  1077  	// recheck the bounds on return.)
  1078  	if f := findfunc(gp.sched.pc); f.valid() {
  1079  		max := uintptr(funcMaxSPDelta(f))
  1080  		needed := max + _StackGuard
  1081  		used := gp.stack.hi - gp.sched.sp
  1082  		for newsize-used < needed {
  1083  			newsize *= 2
  1084  		}
  1085  	}
  1086  
  1087  	if stackguard0 == stackForceMove {
  1088  		// Forced stack movement used for debugging.
  1089  		// Don't double the stack (or we may quickly run out
  1090  		// if this is done repeatedly).
  1091  		newsize = oldsize
  1092  	}
  1093  
  1094  	if newsize > maxstacksize || newsize > maxstackceiling {
  1095  		if maxstacksize < maxstackceiling {
  1096  			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
  1097  		} else {
  1098  			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
  1099  		}
  1100  		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
  1101  		throw("stack overflow")
  1102  	}
  1103  
  1104  	// The goroutine must be executing in order to call newstack,
  1105  	// so it must be Grunning (or Gscanrunning).
  1106  	casgstatus(gp, _Grunning, _Gcopystack)
  1107  
  1108  	// The concurrent GC will not scan the stack while we are doing the copy since
  1109  	// the gp is in a Gcopystack status.
  1110  	copystack(gp, newsize)
  1111  	if stackDebug >= 1 {
  1112  		print("stack grow done\n")
  1113  	}
  1114  	casgstatus(gp, _Gcopystack, _Grunning)
  1115  	gogo(&gp.sched)
  1116  }
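        // Editorial note: a worked example of the growth sizing in newstack above.
        // Normally newsize is simply 2*oldsize, e.g. 2 KiB -> 4 KiB. If the faulting
        // function has an unusually large frame, the doubling loop keeps going; with
        // oldsize = 2048, used = 1024, funcMaxSPDelta = 10000 and _StackGuard = 928:
        //
        //	needed  = 10000 + 928 = 10928
        //	newsize = 4096 -> 8192 -> 16384   (first size where newsize-used >= needed)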
  1117  
  1118  //go:nosplit
  1119  func nilfunc() {
  1120  	*(*uint8)(nil) = 0
  1121  }
  1122  
  1123  // adjust Gobuf as if it executed a call to fn
  1124  // and then stopped before the first instruction in fn.
  1125  func gostartcallfn(gobuf *gobuf, fv *funcval) {
  1126  	var fn unsafe.Pointer
  1127  	if fv != nil {
  1128  		fn = unsafe.Pointer(fv.fn)
  1129  	} else {
  1130  		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
  1131  	}
  1132  	gostartcall(gobuf, fn, unsafe.Pointer(fv))
  1133  }
  1134  
  1135  // isShrinkStackSafe returns whether it's safe to attempt to shrink
  1136  // gp's stack. Shrinking the stack is only safe when we have precise
  1137  // pointer maps for all frames on the stack.
  1138  func isShrinkStackSafe(gp *g) bool {
  1139  	// We can't copy the stack if we're in a syscall.
  1140  	// The syscall might have pointers into the stack and
  1141  	// often we don't have precise pointer maps for the innermost
  1142  	// frames.
  1143  	//
  1144  	// We also can't copy the stack if we're at an asynchronous
  1145  	// safe-point because we don't have precise pointer maps for
  1146  	// all frames.
  1147  	//
  1148  	// We also can't *shrink* the stack in the window between the
  1149  	// goroutine calling gopark to park on a channel and
  1150  	// gp.activeStackChans being set.
  1151  	return gp.syscallsp == 0 && !gp.asyncSafePoint && atomic.Load8(&gp.parkingOnChan) == 0
  1152  }
  1153  
  1154  // Maybe shrink the stack being used by gp.
  1155  //
  1156  // gp must be stopped and we must own its stack. It may be in
  1157  // _Grunning, but only if this is our own user G.
  1158  func shrinkstack(gp *g) {
  1159  	if gp.stack.lo == 0 {
  1160  		throw("missing stack in shrinkstack")
  1161  	}
  1162  	if s := readgstatus(gp); s&_Gscan == 0 {
  1163  		// We don't own the stack via _Gscan. We could still
  1164  		// own it if this is our own user G and we're on the
  1165  		// system stack.
  1166  		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
  1167  			// We don't own the stack.
  1168  			throw("bad status in shrinkstack")
  1169  		}
  1170  	}
  1171  	if !isShrinkStackSafe(gp) {
  1172  		throw("shrinkstack at bad time")
  1173  	}
  1174  	// Check for self-shrinks while in a libcall. These may have
  1175  	// pointers into the stack disguised as uintptrs, but these
  1176  	// code paths should all be nosplit.
  1177  	if gp == getg().m.curg && gp.m.libcallsp != 0 {
  1178  		throw("shrinking stack in libcall")
  1179  	}
  1180  
  1181  	if debug.gcshrinkstackoff > 0 {
  1182  		return
  1183  	}
  1184  	f := findfunc(gp.startpc)
  1185  	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
  1186  		// We're not allowed to shrink the gcBgMarkWorker
  1187  		// stack (see gcBgMarkWorker for explanation).
  1188  		return
  1189  	}
  1190  
  1191  	oldsize := gp.stack.hi - gp.stack.lo
  1192  	newsize := oldsize / 2
  1193  	// Don't shrink the allocation below the minimum-sized stack
  1194  	// allocation.
  1195  	if newsize < _FixedStack {
  1196  		return
  1197  	}
  1198  	// Compute how much of the stack is currently in use and only
  1199  	// shrink the stack if gp is using less than a quarter of its
  1200  	// current stack. The currently used stack includes everything
  1201  	// down to the SP plus the stack guard space that ensures
  1202  	// there's room for nosplit functions.
  1203  	avail := gp.stack.hi - gp.stack.lo
  1204  	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
  1205  		return
  1206  	}
  1207  
  1208  	if stackDebug > 0 {
  1209  		print("shrinking stack ", oldsize, "->", newsize, "\n")
  1210  	}
  1211  
  1212  	copystack(gp, newsize)
  1213  }
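        // Editorial note: a worked example of the shrink heuristic above. For a
        // goroutine on a 16 KiB stack with its SP 1 KiB below stack.hi, and
        // _StackLimit = 800 (linux/amd64, non-race):
        //
        //	avail = 16384
        //	used  = 1024 + 800 = 1824
        //
        // Since 1824 < avail/4 = 4096, the stack is copied down to newsize = 8 KiB.
        // Had roughly 3.2 KiB or more been in use, shrinkstack would have returned
        // without copying.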
  1214  
  1215  // freeStackSpans frees unused stack spans at the end of GC.
  1216  func freeStackSpans() {
  1217  	// Scan stack pools for empty stack spans.
  1218  	for order := range stackpool {
  1219  		lock(&stackpool[order].item.mu)
  1220  		list := &stackpool[order].item.span
  1221  		for s := list.first; s != nil; {
  1222  			next := s.next
  1223  			if s.allocCount == 0 {
  1224  				list.remove(s)
  1225  				s.manualFreeList = 0
  1226  				osStackFree(s)
  1227  				mheap_.freeManual(s, spanAllocStack)
  1228  			}
  1229  			s = next
  1230  		}
  1231  		unlock(&stackpool[order].item.mu)
  1232  	}
  1233  
  1234  	// Free large stack spans.
  1235  	lock(&stackLarge.lock)
  1236  	for i := range stackLarge.free {
  1237  		for s := stackLarge.free[i].first; s != nil; {
  1238  			next := s.next
  1239  			stackLarge.free[i].remove(s)
  1240  			osStackFree(s)
  1241  			mheap_.freeManual(s, spanAllocStack)
  1242  			s = next
  1243  		}
  1244  	}
  1245  	unlock(&stackLarge.lock)
  1246  }
  1247  
  1248  // getStackMap returns the locals and arguments live pointer maps, and
  1249  // stack object list for frame.
  1250  func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
  1251  	targetpc := frame.continpc
  1252  	if targetpc == 0 {
  1253  		// Frame is dead. Return empty bitvectors.
  1254  		return
  1255  	}
  1256  
  1257  	f := frame.fn
  1258  	pcdata := int32(-1)
  1259  	if targetpc != f.entry() {
  1260  		// Back up to the CALL. If we're at the function entry
  1261  		// point, we want to use the entry map (-1), even if
  1262  		// the first instruction of the function changes the
  1263  		// stack map.
  1264  		targetpc--
  1265  		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
  1266  	}
  1267  	if pcdata == -1 {
  1268  		// We do not have a valid pcdata value but there might be a
  1269  		// stackmap for this function. It is likely that we are looking
  1270  		// at the function prologue; assume so and hope for the best.
  1271  		pcdata = 0
  1272  	}
  1273  
  1274  	// Local variables.
  1275  	size := frame.varp - frame.sp
  1276  	var minsize uintptr
  1277  	switch goarch.ArchFamily {
  1278  	case goarch.ARM64:
  1279  		minsize = sys.StackAlign
  1280  	default:
  1281  		minsize = sys.MinFrameSize
  1282  	}
  1283  	if size > minsize {
  1284  		stackid := pcdata
  1285  		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
  1286  		if stkmap == nil || stkmap.n <= 0 {
  1287  			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
  1288  			throw("missing stackmap")
  1289  		}
  1290  		// If nbit == 0, there's no work to do.
  1291  		if stkmap.nbit > 0 {
  1292  			if stackid < 0 || stackid >= stkmap.n {
  1293  				// don't know where we are
  1294  				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
  1295  				throw("bad symbol table")
  1296  			}
  1297  			locals = stackmapdata(stkmap, stackid)
  1298  			if stackDebug >= 3 && debug {
  1299  				print("      locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
  1300  			}
  1301  		} else if stackDebug >= 3 && debug {
  1302  			print("      no locals to adjust\n")
  1303  		}
  1304  	}
  1305  
  1306  	// Arguments.
  1307  	if frame.arglen > 0 {
  1308  		if frame.argmap != nil {
  1309  			// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
  1310  			// In this case, arglen specifies how much of the args section is actually live.
  1311  			// (It could be either all the args + results, or just the args.)
  1312  			args = *frame.argmap
  1313  			n := int32(frame.arglen / goarch.PtrSize)
  1314  			if n < args.n {
  1315  				args.n = n // Don't use more of the arguments than arglen.
  1316  			}
  1317  		} else {
  1318  			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
  1319  			if stackmap == nil || stackmap.n <= 0 {
  1320  				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
  1321  				throw("missing stackmap")
  1322  			}
  1323  			if pcdata < 0 || pcdata >= stackmap.n {
  1324  				// don't know where we are
  1325  				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
  1326  				throw("bad symbol table")
  1327  			}
  1328  			if stackmap.nbit > 0 {
  1329  				args = stackmapdata(stackmap, pcdata)
  1330  			}
  1331  		}
  1332  	}
  1333  
  1334  	// stack objects.
  1335  	if (GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "ppc64" || GOARCH == "ppc64le") && unsafe.Sizeof(abi.RegArgs{}) > 0 && frame.argmap != nil {
  1336  		// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
  1337  		// We don't actually use argmap in this case, but we need to fake the stack object
  1338  		// record for these frames which contain an internal/abi.RegArgs at a hard-coded offset.
  1339  		// This offset matches the assembly code on amd64 and arm64.
  1340  		objs = methodValueCallFrameObjs[:]
  1341  	} else {
  1342  		p := funcdata(f, _FUNCDATA_StackObjects)
  1343  		if p != nil {
  1344  			n := *(*uintptr)(p)
  1345  			p = add(p, goarch.PtrSize)
  1346  			*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
  1347  			// Note: the noescape above is needed to keep
  1348  			// getStackMap from "leaking param content:
  1349  			// frame".  That leak propagates up to getgcmask, then
  1350  			// GCMask, then verifyGCInfo, which converts the stack
  1351  			// gcinfo tests into heap gcinfo tests :(
  1352  		}
  1353  	}
  1354  
  1355  	return
  1356  }
  1357  
  1358  var methodValueCallFrameObjs [1]stackObjectRecord // initialized in stackobjectinit
  1359  
  1360  func stkobjinit() {
  1361  	var abiRegArgsEface any = abi.RegArgs{}
  1362  	abiRegArgsType := efaceOf(&abiRegArgsEface)._type
  1363  	if abiRegArgsType.kind&kindGCProg != 0 {
  1364  		throw("abiRegArgsType needs GC Prog, update methodValueCallFrameObjs")
  1365  	}
  1366  	// Set methodValueCallFrameObjs[0].gcdataoff so that
  1367  	// stackObjectRecord.gcdata() will work correctly with it.
  1368  	ptr := uintptr(unsafe.Pointer(&methodValueCallFrameObjs[0]))
  1369  	var mod *moduledata
  1370  	for datap := &firstmoduledata; datap != nil; datap = datap.next {
  1371  		if datap.gofunc <= ptr && ptr < datap.end {
  1372  			mod = datap
  1373  			break
  1374  		}
  1375  	}
  1376  	if mod == nil {
  1377  		throw("methodValueCallFrameObjs is not in a module")
  1378  	}
  1379  	methodValueCallFrameObjs[0] = stackObjectRecord{
  1380  		off:       -int32(alignUp(abiRegArgsType.size, 8)), // It's always the highest address local.
  1381  		size:      int32(abiRegArgsType.size),
  1382  		_ptrdata:  int32(abiRegArgsType.ptrdata),
  1383  		gcdataoff: uint32(uintptr(unsafe.Pointer(abiRegArgsType.gcdata)) - mod.rodata),
  1384  	}
  1385  }
  1386  
  1387  // A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
  1388  // This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects.
  1389  type stackObjectRecord struct {
  1390  	// offset in frame
  1391  	// if negative, offset from varp
  1392  	// if non-negative, offset from argp
  1393  	off       int32
  1394  	size      int32
  1395  	_ptrdata  int32  // ptrdata, or -ptrdata if GC prog is used
  1396  	gcdataoff uint32 // offset to gcdata from moduledata.rodata
  1397  }
  1398  
  1399  func (r *stackObjectRecord) useGCProg() bool {
  1400  	return r._ptrdata < 0
  1401  }
  1402  
  1403  func (r *stackObjectRecord) ptrdata() uintptr {
  1404  	x := r._ptrdata
  1405  	if x < 0 {
  1406  		return uintptr(-x)
  1407  	}
  1408  	return uintptr(x)
  1409  }
  1410  
  1411  // gcdata returns pointer map or GC prog of the type.
  1412  func (r *stackObjectRecord) gcdata() *byte {
  1413  	ptr := uintptr(unsafe.Pointer(r))
  1414  	var mod *moduledata
  1415  	for datap := &firstmoduledata; datap != nil; datap = datap.next {
  1416  		if datap.gofunc <= ptr && ptr < datap.end {
  1417  			mod = datap
  1418  			break
  1419  		}
  1420  	}
  1421  	// If you get a panic here due to a nil mod,
  1422  	// you may have made a copy of a stackObjectRecord.
  1423  	// You must use the original pointer.
  1424  	res := mod.rodata + uintptr(r.gcdataoff)
  1425  	return (*byte)(unsafe.Pointer(res))
  1426  }
  1427  
  1428  // This is exported as ABI0 via linkname so obj can call it.
  1429  //
  1430  //go:nosplit
  1431  //go:linkname morestackc
  1432  func morestackc() {
  1433  	throw("attempt to execute system stack code on user stack")
  1434  }
  1435  
