Source file src/runtime/stack_test.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"bytes"
	"fmt"
	"reflect"
	"regexp"
	. "runtime"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	_ "unsafe" // for go:linkname
)

// TestStackMem measures per-thread stack segment cache behavior.
// The test consumed up to 500MB in the past.
func TestStackMem(t *testing.T) {
	const (
		BatchSize      = 32
		BatchCount     = 256
		ArraySize      = 1024
		RecursionDepth = 128
	)
	if testing.Short() {
		return
	}
	defer GOMAXPROCS(GOMAXPROCS(BatchSize))
	s0 := new(MemStats)
	ReadMemStats(s0)
	for b := 0; b < BatchCount; b++ {
		c := make(chan bool, BatchSize)
		for i := 0; i < BatchSize; i++ {
			go func() {
				var f func(k int, a [ArraySize]byte)
				f = func(k int, a [ArraySize]byte) {
					if k == 0 {
						time.Sleep(time.Millisecond)
						return
					}
					f(k-1, a)
				}
				f(RecursionDepth, [ArraySize]byte{})
				c <- true
			}()
		}
		for i := 0; i < BatchSize; i++ {
			<-c
		}

		// The goroutines have signaled via c that they are ready to exit.
		// Give them a chance to exit by sleeping. If we don't wait, we
		// might not reuse them on the next batch.
		time.Sleep(10 * time.Millisecond)
	}
	s1 := new(MemStats)
	ReadMemStats(s1)
	consumed := int64(s1.StackSys - s0.StackSys)
	t.Logf("Consumed %vMB for stack mem", consumed>>20)
	estimate := int64(8 * BatchSize * ArraySize * RecursionDepth) // 8 is to reduce flakiness.
	if consumed > estimate {
		t.Fatalf("Stack mem: want %v, got %v", estimate, consumed)
	}
	// Due to broken stack memory accounting (https://golang.org/issue/7468),
	// StackInuse can decrease during function execution, so we cast the values to int64.
	inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
	t.Logf("Inuse %vMB for stack mem", inuse>>20)
	if inuse > 4<<20 {
		t.Fatalf("Stack inuse: want %v, got %v", 4<<20, inuse)
	}
}
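
// stackSysDelta is an illustrative sketch, not part of the upstream file: a
// hypothetical helper distilling the measurement pattern TestStackMem uses
// above, comparing MemStats.StackSys before and after a workload. The
// subtraction is done on int64 values so an unexpected decrease shows up as
// a negative delta instead of wrapping around.
func stackSysDelta(fn func()) int64 {
	var before, after MemStats
	ReadMemStats(&before)
	fn()
	ReadMemStats(&after)
	return int64(after.StackSys) - int64(before.StackSys)
}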

// Test stack growing in different contexts.
func TestStackGrowth(t *testing.T) {
	if *flagQuick {
		t.Skip("-quick")
	}

	t.Parallel()

	var wg sync.WaitGroup

	// in a normal goroutine
	var growDuration time.Duration // For debugging failures
	wg.Add(1)
	go func() {
		defer wg.Done()
		start := time.Now()
		growStack(nil)
		growDuration = time.Since(start)
	}()
	wg.Wait()
	t.Log("first growStack took", growDuration)

	// in locked goroutine
	wg.Add(1)
	go func() {
		defer wg.Done()
		LockOSThread()
		growStack(nil)
		UnlockOSThread()
	}()
	wg.Wait()

	// in finalizer
	var finalizerStart time.Time
	var started, progress uint32
	wg.Add(1)
	s := new(string) // Must be of a type that avoids the tiny allocator, or else the finalizer might not run.
	SetFinalizer(s, func(ss *string) {
		defer wg.Done()
		finalizerStart = time.Now()
		atomic.StoreUint32(&started, 1)
		growStack(&progress)
	})
	setFinalizerTime := time.Now()
	s = nil

	if d, ok := t.Deadline(); ok {
		// Pad the timeout by an arbitrary 5% to give the AfterFunc time to run.
		timeout := time.Until(d) * 19 / 20
		timer := time.AfterFunc(timeout, func() {
			// Panic — instead of calling t.Error and returning from the test — so
			// that we get a useful goroutine dump if the test times out, especially
			// if GOTRACEBACK=system or GOTRACEBACK=crash is set.
			if atomic.LoadUint32(&started) == 0 {
				panic("finalizer did not start")
			} else {
				panic(fmt.Sprintf("finalizer started %s ago (%s after registration) and ran %d iterations, but did not return", time.Since(finalizerStart), finalizerStart.Sub(setFinalizerTime), atomic.LoadUint32(&progress)))
			}
		})
		defer timer.Stop()
	}

	GC()
	wg.Wait()
	t.Logf("finalizer started after %s and ran %d iterations in %v", finalizerStart.Sub(setFinalizerTime), atomic.LoadUint32(&progress), time.Since(finalizerStart))
}
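
// runFinalizer is an illustrative sketch, not part of the upstream file: a
// hypothetical helper distilling the finalizer pattern used above. The
// object must be of a type the tiny allocator won't batch with other
// objects, or the finalizer might never become runnable.
func runFinalizer(f func()) {
	done := make(chan struct{})
	s := new(string) // not tiny-allocated, so the finalizer is reliable
	SetFinalizer(s, func(*string) {
		f()
		close(done)
	})
	s = nil // drop the only reference
	GC()    // make the object unreachable and queue the finalizer
	<-done
}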

// ... and in init
//func init() {
//	growStack(nil)
//}

func growStack(progress *uint32) {
	n := 1 << 10
	if testing.Short() {
		n = 1 << 8
	}
	for i := 0; i < n; i++ {
		x := 0
		growStackIter(&x, i)
		if x != i+1 {
			panic("stack is corrupted")
		}
		if progress != nil {
			atomic.StoreUint32(progress, uint32(i))
		}
	}
	GC()
}

// This function is not an anonymous func, so that the compiler can do escape
// analysis and place x on the stack (so that stack growth can then update the pointer).
func growStackIter(p *int, n int) {
	if n == 0 {
		*p = n + 1
		GC()
		return
	}
	*p = n + 1
	x := 0
	growStackIter(&x, n-1)
	if x != n {
		panic("stack is corrupted")
	}
}

func TestStackGrowthCallback(t *testing.T) {
	t.Parallel()
	var wg sync.WaitGroup

	// test stack growth at chan op
	wg.Add(1)
	go func() {
		defer wg.Done()
		c := make(chan int, 1)
		growStackWithCallback(func() {
			c <- 1
			<-c
		})
	}()

	// test stack growth at map op
	wg.Add(1)
	go func() {
		defer wg.Done()
		m := make(map[int]int)
		growStackWithCallback(func() {
			_, _ = m[1]
			m[1] = 1
		})
	}()

	// test stack growth at goroutine creation
	wg.Add(1)
	go func() {
		defer wg.Done()
		growStackWithCallback(func() {
			done := make(chan bool)
			go func() {
				done <- true
			}()
			<-done
		})
	}()
	wg.Wait()
}

func growStackWithCallback(cb func()) {
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			cb()
			return
		}
		f(n - 1)
	}
	for i := 0; i < 1<<10; i++ {
		f(i)
	}
}

// TestDeferPtrs tests the adjustment of Defer's argument pointers (p aka &y)
// during a stack copy.
func set(p *int, x int) {
	*p = x
}
func TestDeferPtrs(t *testing.T) {
	var y int

	defer func() {
		if y != 42 {
			t.Errorf("defer's stack references were not adjusted appropriately")
		}
	}()
	defer set(&y, 42)
	growStack(nil)
}

type bigBuf [4 * 1024]byte

// TestDeferPtrsGoexit is like TestDeferPtrs but exercises the possibility that the
// stack grows as part of starting the deferred function. It calls Goexit at various
// stack depths, forcing the deferred function (with >4kB of args) to be run at
// the bottom of the stack. The goal is to find a stack depth less than 4kB from
// the end of the stack. Each trial runs in a different goroutine so that an earlier
// stack growth does not invalidate a later attempt.
func TestDeferPtrsGoexit(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsGoexit(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsGoexit(c chan int, i int) {
	var y int
	defer func() {
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, Goexit)
}

func setBig(p *int, x int, b bigBuf) {
	*p = x
}

// TestDeferPtrsPanic is like TestDeferPtrsGoexit, but it uses panic instead
// of Goexit to run the Defers. Those two are different execution paths
// in the runtime.
func TestDeferPtrsPanic(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsPanic(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsPanic(c chan int, i int) {
	var y int
	defer func() {
		if recover() == nil {
			c <- -1
			return
		}
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, func() { panic(1) })
}

//go:noinline
func testDeferLeafSigpanic1() {
	// Cause a sigpanic to be injected in this frame.
	//
	// This function has to be declared before
	// TestDeferLeafSigpanic so the runtime will crash if we think
	// this function's continuation PC is in
	// TestDeferLeafSigpanic.
	*(*int)(nil) = 0
}

// TestDeferLeafSigpanic tests defer matching around leaf functions
// that sigpanic. This is tricky because on LR machines the outer
// function and the inner function have the same SP, but it's critical
// that we match up the defer correctly to get the right liveness map.
// See issue #25499.
func TestDeferLeafSigpanic(t *testing.T) {
	// Push a defer that will walk the stack.
	defer func() {
		if err := recover(); err == nil {
			t.Fatal("expected panic from nil pointer")
		}
		GC()
	}()
	// Call a leaf function. We must set up the exact call stack:
	//
	//  deferring function -> leaf function -> sigpanic
	//
	// On LR machines, the leaf function will have the same SP as
	// the SP pushed for the defer frame.
	testDeferLeafSigpanic1()
}

// TestPanicUseStack checks that a chain of Panic structs on the stack are
// updated correctly if the stack grows during the deferred execution that
// happens as a result of the panic.
func TestPanicUseStack(t *testing.T) {
	pc := make([]uintptr, 10000)
	defer func() {
		recover()
		Callers(0, pc) // force stack walk
		useStackAndCall(100, func() {
			defer func() {
				recover()
				Callers(0, pc) // force stack walk
				useStackAndCall(200, func() {
					defer func() {
						recover()
						Callers(0, pc) // force stack walk
					}()
					panic(3)
				})
			}()
			panic(2)
		})
	}()
	panic(1)
}

func TestPanicFar(t *testing.T) {
	var xtree *xtreeNode
	pc := make([]uintptr, 10000)
	defer func() {
		// At this point we created a large stack and unwound
		// it via recovery. Force a stack walk, which will
		// check the stack's consistency.
		Callers(0, pc)
	}()
	defer func() {
		recover()
	}()
	useStackAndCall(100, func() {
		// Kick off the GC and make it do something nontrivial.
		// (This used to force stack barriers to stick around.)
		xtree = makeTree(18)
		// Give the GC time to start scanning stacks.
		time.Sleep(time.Millisecond)
		panic(1)
	})
	_ = xtree
}

type xtreeNode struct {
	l, r *xtreeNode
}

func makeTree(d int) *xtreeNode {
	if d == 0 {
		return new(xtreeNode)
	}
	return &xtreeNode{makeTree(d - 1), makeTree(d - 1)}
}

// use about n KB of stack and call f
func useStackAndCall(n int, f func()) {
	if n == 0 {
		f()
		return
	}
	var b [1024]byte // makes frame about 1KB
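	// Reading b[99] below (always zero) makes the recursion depend on b,
	// so the compiler cannot optimize the array out of the frame.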
	useStackAndCall(n-1+int(b[99]), f)
}

func useStack(n int) {
	useStackAndCall(n, func() {})
}

func growing(c chan int, done chan struct{}) {
	for n := range c {
		useStack(n)
		done <- struct{}{}
	}
	done <- struct{}{}
}

func TestStackCache(t *testing.T) {
	// Allocate a bunch of goroutines and grow their stacks.
	// Repeat a few times to test the stack cache.
	const (
		R = 4
		G = 200
		S = 5
	)
	for i := 0; i < R; i++ {
		var reqchans [G]chan int
		done := make(chan struct{})
		for j := 0; j < G; j++ {
			reqchans[j] = make(chan int)
			go growing(reqchans[j], done)
		}
		for s := 0; s < S; s++ {
			for j := 0; j < G; j++ {
				reqchans[j] <- 1 << uint(s)
			}
			for j := 0; j < G; j++ {
				<-done
			}
		}
		for j := 0; j < G; j++ {
			close(reqchans[j])
		}
		for j := 0; j < G; j++ {
			<-done
		}
	}
}

func TestStackOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, false)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackAllOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, true)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}
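
// fullStack is an illustrative sketch, not part of the upstream file: the
// tests above use a fixed 1KB buffer, but Stack truncates when the trace
// doesn't fit, so callers wanting a complete dump typically retry with a
// doubled buffer until the result is smaller than the buffer.
func fullStack(all bool) string {
	buf := make([]byte, 1<<10)
	for {
		n := Stack(buf, all)
		if n < len(buf) {
			return string(buf[:n])
		}
		buf = make([]byte, 2*len(buf))
	}
}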

func TestStackPanic(t *testing.T) {
	// Test that stack copying copies panics correctly. This is difficult
	// to test because it is very unlikely that the stack will be copied
	// in the middle of gopanic. But it can happen.
	// To make this test effective, edit panic.go:gopanic and uncomment
	// the GC() call just before freedefer(d).
	defer func() {
		if x := recover(); x == nil {
			t.Errorf("recover failed")
		}
	}()
	useStack(32)
	panic("test panic")
}

func BenchmarkStackCopyPtr(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			i := 1000000
			countp(&i)
			c <- true
		}()
		<-c
	}
}

func countp(n *int) {
	if *n == 0 {
		return
	}
	*n--
	countp(n)
}

func BenchmarkStackCopy(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count(1000000)
			c <- true
		}()
		<-c
	}
}

func count(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count(n-1)
}

func BenchmarkStackCopyNoCache(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count1(1000000)
			c <- true
		}()
		<-c
	}
}

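// count1 through count23 form a cycle of 24 distinct functions, so the
// recursion's frames keep cycling through different PCs. The point of the
// "NoCache" variant, presumably, is to defeat per-PC caching in the
// runtime's stack-copy metadata lookups, in contrast to count's
// single-function recursion above.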
func count1(n int) int {
	if n <= 0 {
		return 0
	}
	return 1 + count2(n-1)
}

func count2(n int) int  { return 1 + count3(n-1) }
func count3(n int) int  { return 1 + count4(n-1) }
func count4(n int) int  { return 1 + count5(n-1) }
func count5(n int) int  { return 1 + count6(n-1) }
func count6(n int) int  { return 1 + count7(n-1) }
func count7(n int) int  { return 1 + count8(n-1) }
func count8(n int) int  { return 1 + count9(n-1) }
func count9(n int) int  { return 1 + count10(n-1) }
func count10(n int) int { return 1 + count11(n-1) }
func count11(n int) int { return 1 + count12(n-1) }
func count12(n int) int { return 1 + count13(n-1) }
func count13(n int) int { return 1 + count14(n-1) }
func count14(n int) int { return 1 + count15(n-1) }
func count15(n int) int { return 1 + count16(n-1) }
func count16(n int) int { return 1 + count17(n-1) }
func count17(n int) int { return 1 + count18(n-1) }
func count18(n int) int { return 1 + count19(n-1) }
func count19(n int) int { return 1 + count20(n-1) }
func count20(n int) int { return 1 + count21(n-1) }
func count21(n int) int { return 1 + count22(n-1) }
func count22(n int) int { return 1 + count23(n-1) }
func count23(n int) int { return 1 + count1(n-1) }

type stkobjT struct {
	p *stkobjT
	x int64
	y [20]int // consume some stack
}

// Sum creates a linked list of stkobjTs.
func Sum(n int64, p *stkobjT) {
	if n == 0 {
		return
	}
	s := stkobjT{p: p, x: n}
	Sum(n-1, &s)
	p.x += s.x
}

func BenchmarkStackCopyWithStkobj(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			var s stkobjT
			Sum(100000, &s)
			c <- true
		}()
		<-c
	}
}

type structWithMethod struct{}

func (s structWithMethod) caller() string {
	_, file, line, ok := Caller(1)
	if !ok {
		panic("Caller failed")
	}
	return fmt.Sprintf("%s:%d", file, line)
}

func (s structWithMethod) callers() []uintptr {
	pc := make([]uintptr, 16)
	return pc[:Callers(0, pc)]
}

func (s structWithMethod) stack() string {
	buf := make([]byte, 4<<10)
	return string(buf[:Stack(buf, false)])
}

func (s structWithMethod) nop() {}

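// callerLine is an illustrative sketch, not part of the upstream file: a
// hypothetical helper showing the skip convention used by caller above.
// Caller(0) reports the line of the Caller call itself, so the helper
// adds 1 to report its own caller instead.
func callerLine(skip int) string {
	_, file, line, ok := Caller(skip + 1)
	if !ok {
		return "unknown"
	}
	return fmt.Sprintf("%s:%d", file, line)
}
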
func TestStackWrapperCaller(t *testing.T) {
	var d structWithMethod
	// Force the compiler to construct a wrapper method.
	wrapper := (*structWithMethod).caller
	// Check that the wrapper doesn't affect the stack trace.
	if dc, ic := d.caller(), wrapper(&d); dc != ic {
		t.Fatalf("direct caller %q != indirect caller %q", dc, ic)
	}
}

func TestStackWrapperCallers(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).callers
	// Check that <autogenerated> doesn't appear in the stack trace.
	pcs := wrapper(&d)
	frames := CallersFrames(pcs)
	for {
		fr, more := frames.Next()
		if fr.File == "<autogenerated>" {
			t.Fatalf("<autogenerated> appears in stack trace: %+v", fr)
		}
		if !more {
			break
		}
	}
}

func TestStackWrapperStack(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).stack
	// Check that <autogenerated> doesn't appear in the stack trace.
	stk := wrapper(&d)
	if strings.Contains(stk, "<autogenerated>") {
		t.Fatalf("<autogenerated> appears in stack trace:\n%s", stk)
	}
}

type I interface {
	M()
}

func TestStackWrapperStackPanic(t *testing.T) {
	t.Run("sigpanic", func(t *testing.T) {
		// nil calls to interface methods cause a sigpanic.
		testStackWrapperPanic(t, func() { I.M(nil) }, "runtime_test.I.M")
	})
	t.Run("panicwrap", func(t *testing.T) {
		// Nil calls to value method wrappers call panicwrap.
		wrapper := (*structWithMethod).nop
		testStackWrapperPanic(t, func() { wrapper(nil) }, "runtime_test.(*structWithMethod).nop")
	})
}

func testStackWrapperPanic(t *testing.T, cb func(), expect string) {
	// Test that the stack trace from a panicking wrapper includes
	// the wrapper, even though we elide these when they don't panic.
	t.Run("CallersFrames", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			pcs := make([]uintptr, 10)
			n := Callers(0, pcs)
			frames := CallersFrames(pcs[:n])
			for {
				frame, more := frames.Next()
				t.Log(frame.Function)
				if frame.Function == expect {
					return
				}
				if !more {
					break
				}
			}
			t.Fatalf("panicking wrapper %s missing from stack trace", expect)
		}()
		cb()
	})
	t.Run("Stack", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			buf := make([]byte, 4<<10)
			stk := string(buf[:Stack(buf, false)])
			if !strings.Contains(stk, "\n"+expect) {
				t.Fatalf("panicking wrapper %s missing from stack trace:\n%s", expect, stk)
			}
		}()
		cb()
	})
}

func TestCallersFromWrapper(t *testing.T) {
	// Test that invoking CallersFrames on a stack where the first
	// PC is an autogenerated wrapper keeps the wrapper in the
	// trace. Normally we elide these, assuming that the wrapper
	// calls the thing you actually wanted to see, but in this
	// case we need to keep it.
	pc := reflect.ValueOf(I.M).Pointer()
	frames := CallersFrames([]uintptr{pc})
	frame, more := frames.Next()
	if frame.Function != "runtime_test.I.M" {
		t.Fatalf("want function %s, got %s", "runtime_test.I.M", frame.Function)
	}
	if more {
		t.Fatalf("want 1 frame, got > 1")
	}
}
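
// funcName is an illustrative sketch, not part of the upstream file: a
// hypothetical helper using the same mechanism as the test above to turn
// a function value into a PC, then resolving that PC to a symbol name
// with FuncForPC. fn must be a function value.
func funcName(fn any) string {
	pc := reflect.ValueOf(fn).Pointer()
	if f := FuncForPC(pc); f != nil {
		return f.Name()
	}
	return "unknown"
}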

func TestTracebackSystemstack(t *testing.T) {
	if GOARCH == "ppc64" || GOARCH == "ppc64le" {
		t.Skip("systemstack tail call not implemented on ppc64x")
	}

	// Test that profiles correctly jump over systemstack,
	// including nested systemstack calls.
	pcs := make([]uintptr, 20)
	pcs = pcs[:TracebackSystemstack(pcs, 5)]
	// Check that runtime.TracebackSystemstack appears five times
	// and that we see TestTracebackSystemstack.
	countIn, countOut := 0, 0
	frames := CallersFrames(pcs)
	var tb bytes.Buffer
	for {
		frame, more := frames.Next()
		fmt.Fprintf(&tb, "\n%s+0x%x %s:%d", frame.Function, frame.PC-frame.Entry, frame.File, frame.Line)
		switch frame.Function {
		case "runtime.TracebackSystemstack":
			countIn++
		case "runtime_test.TestTracebackSystemstack":
			countOut++
		}
		if !more {
			break
		}
	}
	if countIn != 5 || countOut != 1 {
		t.Fatalf("expected 5 calls to TracebackSystemstack and 1 call to TestTracebackSystemstack, got:%s", tb.String())
	}
}

func TestTracebackAncestors(t *testing.T) {
	goroutineRegex := regexp.MustCompile(`goroutine [0-9]+ \[`)
	for _, tracebackDepth := range []int{0, 1, 5, 50} {
		output := runTestProg(t, "testprog", "TracebackAncestors", fmt.Sprintf("GODEBUG=tracebackancestors=%d", tracebackDepth))

		numGoroutines := 3
		numFrames := 2
		ancestorsExpected := numGoroutines
		if numGoroutines > tracebackDepth {
			ancestorsExpected = tracebackDepth
		}

		matches := goroutineRegex.FindAllStringSubmatch(output, -1)
		if len(matches) != 2 {
			t.Fatalf("want 2 goroutines, got:\n%s", output)
		}

		// Check functions in the traceback.
		fns := []string{"main.recurseThenCallGo", "main.main", "main.printStack", "main.TracebackAncestors"}
		for _, fn := range fns {
			if !strings.Contains(output, "\n"+fn+"(") {
				t.Fatalf("expected %q function in traceback:\n%s", fn, output)
			}
		}

		if want, count := "originating from goroutine", ancestorsExpected; strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}

		if want, count := "main.recurseThenCallGo(...)", ancestorsExpected*(numFrames+1); strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}

		if want, count := "main.recurseThenCallGo(0x", 1; strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}
	}
}

// Test that defer closure is correctly scanned when the stack is scanned.
func TestDeferLiveness(t *testing.T) {
	output := runTestProg(t, "testprog", "DeferLiveness", "GODEBUG=clobberfree=1")
	if output != "" {
		t.Errorf("output:\n%s\n\nwant no output", output)
	}
}

func TestDeferHeapAndStack(t *testing.T) {
	P := 4     // processors
	N := 10000 // iterations
	D := 200   // stack depth

	if testing.Short() {
		P /= 2
		N /= 10
		D /= 10
	}
	c := make(chan bool)
	for p := 0; p < P; p++ {
		go func() {
			for i := 0; i < N; i++ {
				if deferHeapAndStack(D) != 2*D {
					panic("bad result")
				}
			}
			c <- true
		}()
	}
	for p := 0; p < P; p++ {
		<-c
	}
}

// deferHeapAndStack(n) computes 2*n
func deferHeapAndStack(n int) (r int) {
	if n == 0 {
		return 0
	}
	if n%2 == 0 {
		// heap-allocated defers
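		// (a defer in a loop can run a variable number of times, so
		// the runtime must heap-allocate its record)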
		for i := 0; i < 2; i++ {
			defer func() {
				r++
			}()
		}
	} else {
		// stack-allocated defers
		defer func() {
			r++
		}()
		defer func() {
			r++
		}()
	}
	r = deferHeapAndStack(n - 1)
	escapeMe(new([1024]byte)) // force some GCs
	return
}

// Pass a value to escapeMe to force it to escape.
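// Because escapeMe is a mutable package-level func variable, the compiler
// cannot assume the no-op body and must treat the argument as escaping.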
var escapeMe = func(x any) {}

// Test that when F -> G is inlined and F is excluded from stack
// traces, G still appears.
func TestTracebackInlineExcluded(t *testing.T) {
	defer func() {
		recover()
		buf := make([]byte, 4<<10)
		stk := string(buf[:Stack(buf, false)])

		t.Log(stk)

		if not := "tracebackExcluded"; strings.Contains(stk, not) {
			t.Errorf("found but did not expect %q", not)
		}
		if want := "tracebackNotExcluded"; !strings.Contains(stk, want) {
			t.Errorf("expected %q in stack", want)
		}
	}()
	tracebackExcluded()
}

// tracebackExcluded should be excluded from tracebacks. There are
// various ways this could come up. Linking it to a "runtime." name is
// rather synthetic, but it's easy and reliable. See issue #42754 for
// one way this happened in real code.
//
//go:linkname tracebackExcluded runtime.tracebackExcluded
//go:noinline
func tracebackExcluded() {
	// Call an inlined function that should not itself be excluded
	// from tracebacks.
	tracebackNotExcluded()
}

// tracebackNotExcluded should be inlined into tracebackExcluded, but
// should not itself be excluded from the traceback.
func tracebackNotExcluded() {
	var x *int
	*x = 0
}