Source file src/runtime/pprof/pprof_test.go

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build !js
     6  
     7  package pprof
     8  
     9  import (
    10  	"bytes"
    11  	"context"
    12  	"fmt"
    13  	"internal/abi"
    14  	"internal/profile"
    15  	"internal/testenv"
    16  	"io"
    17  	"math"
    18  	"math/big"
    19  	"os"
    20  	"os/exec"
    21  	"regexp"
    22  	"runtime"
    23  	"runtime/debug"
    24  	"strings"
    25  	"sync"
    26  	"sync/atomic"
    27  	"testing"
    28  	"time"
    29  	_ "unsafe"
    30  )
    31  
    32  func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
    33  	// We only need to get one 100 Hz clock tick, so we've got
    34  	// a large safety buffer.
    35  	// But do at least 500 iterations (which should take about 100ms),
    36  	// otherwise TestCPUProfileMultithreaded can fail if only one
    37  	// thread is scheduled during the testing period.
    38  	t0 := time.Now()
    39  	accum := *y
    40  	for i := 0; i < 500 || time.Since(t0) < dur; i++ {
    41  		accum = f(accum)
    42  	}
    43  	*y = accum
    44  }
    45  
// Global sinks for the CPU hog results. Writing the accumulated values
// here keeps the compiler from optimizing the hog loops away.
var (
	salt1 = 0
	salt2 = 0
)
    50  
// The actual CPU hogging function.
// Must not call other functions nor access heap/globals in the loop,
// otherwise under race detector the samples will be in the race runtime.
// (The hot loop itself lives in cpuHog0, shared with inlinedCallee.)
func cpuHog1(x int) int {
	return cpuHog0(x, 1e5)
}
    57  
    58  func cpuHog0(x, n int) int {
    59  	foo := x
    60  	for i := 0; i < n; i++ {
    61  		if foo > 0 {
    62  			foo *= foo
    63  		} else {
    64  			foo *= foo + 1
    65  		}
    66  	}
    67  	return foo
    68  }
    69  
    70  func cpuHog2(x int) int {
    71  	foo := x
    72  	for i := 0; i < 1e5; i++ {
    73  		if foo > 0 {
    74  			foo *= foo
    75  		} else {
    76  			foo *= foo + 2
    77  		}
    78  	}
    79  	return foo
    80  }
    81  
    82  // Return a list of functions that we don't want to ever appear in CPU
    83  // profiles. For gccgo, that list includes the sigprof handler itself.
    84  func avoidFunctions() []string {
    85  	if runtime.Compiler == "gccgo" {
    86  		return []string{"runtime.sigprof"}
    87  	}
    88  	return nil
    89  }
    90  
// TestCPUProfile checks that a single-goroutine CPU hog is attributed to
// runtime/pprof.cpuHog1 in the profile, with none of the avoided
// functions present.
func TestCPUProfile(t *testing.T) {
	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions())
	testCPUProfile(t, matches, func(dur time.Duration) {
		cpuHogger(cpuHog1, &salt1, dur)
	})
}
    97  
// TestCPUProfileMultithreaded runs two distinct hogs (cpuHog1, cpuHog2)
// concurrently with GOMAXPROCS pinned to 2 and expects samples for both
// to appear in the profile.
func TestCPUProfileMultithreaded(t *testing.T) {
	// Pin GOMAXPROCS to 2 for the test; restore the old value on exit.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, avoidFunctions())
	testCPUProfile(t, matches, func(dur time.Duration) {
		c := make(chan int)
		go func() {
			cpuHogger(cpuHog1, &salt1, dur)
			c <- 1
		}()
		cpuHogger(cpuHog2, &salt2, dur)
		<-c
	})
}
   111  
// TestCPUProfileMultithreadMagnitude checks that the total CPU time the
// profiler reports agrees (within maxDiff) with the CPU time the OS
// itself measures around the same workload, for both a serial and a
// parallel run. Linux-only: the motivating bug (issue 35057) is only
// confirmed there, and diffCPUTime needs an OS-specific implementation.
func TestCPUProfileMultithreadMagnitude(t *testing.T) {
	if runtime.GOOS != "linux" {
		t.Skip("issue 35057 is only confirmed on Linux")
	}

	// Linux [5.9,5.16) has a kernel bug that can break CPU timers on newly
	// created threads, breaking our CPU accounting.
	major, minor, patch, err := linuxKernelVersion()
	if err != nil {
		// Errorf, not Fatalf: on error we continue with a 0.0.0 version,
		// which simply skips the builder-only SkipFlaky branch below.
		t.Errorf("Error determining kernel version: %v", err)
	}
	t.Logf("Running on Linux %d.%d.%d", major, minor, patch)
	defer func() {
		if t.Failed() {
			t.Logf("Failure of this test may indicate that your system suffers from a known Linux kernel bug fixed on newer kernels. See https://golang.org/issue/49065.")
		}
	}()

	// Disable on affected builders to avoid flakiness, but otherwise keep
	// it enabled to potentially warn users that they are on a broken
	// kernel.
	if testenv.Builder() != "" && (runtime.GOARCH == "386" || runtime.GOARCH == "amd64") {
		have59 := major > 5 || (major == 5 && minor >= 9)
		have516 := major > 5 || (major == 5 && minor >= 16)
		if have59 && !have516 {
			testenv.SkipFlaky(t, 49065)
		}
	}

	// Run a workload in a single goroutine, then run copies of the same
	// workload in several goroutines. For both the serial and parallel cases,
	// the CPU time the process measures with its own profiler should match the
	// total CPU usage that the OS reports.
	//
	// We could also check that increases in parallelism (GOMAXPROCS) lead to a
	// linear increase in the CPU usage reported by both the OS and the
	// profiler, but without a guarantee of exclusive access to CPU resources
	// that is likely to be a flaky test.

	// Require the smaller value to be within 10%, or 40% in short mode.
	maxDiff := 0.10
	if testing.Short() {
		maxDiff = 0.40
	}

	// compare fails if a and b differ by more than maxDiff as a fraction
	// of the larger of the two.
	compare := func(a, b time.Duration, maxDiff float64) error {
		if a <= 0 || b <= 0 {
			return fmt.Errorf("Expected both time reports to be positive")
		}

		if a < b {
			a, b = b, a
		}

		diff := float64(a-b) / float64(a)
		if diff > maxDiff {
			return fmt.Errorf("CPU usage reports are too different (limit -%.1f%%, got -%.1f%%)", maxDiff*100, diff*100)
		}

		return nil
	}

	for _, tc := range []struct {
		name    string
		workers int
	}{
		{
			name:    "serial",
			workers: 1,
		},
		{
			name:    "parallel",
			workers: runtime.GOMAXPROCS(0),
		},
	} {
		// check that the OS's perspective matches what the Go runtime measures.
		t.Run(tc.name, func(t *testing.T) {
			t.Logf("Running with %d workers", tc.workers)

			var cpuTime time.Duration
			matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions())
			p := testCPUProfile(t, matches, func(dur time.Duration) {
				// Measure OS-reported CPU time spent across all workers.
				cpuTime = diffCPUTime(t, func() {
					var wg sync.WaitGroup
					var once sync.Once
					for i := 0; i < tc.workers; i++ {
						wg.Add(1)
						go func() {
							defer wg.Done()
							var salt = 0
							cpuHogger(cpuHog1, &salt, dur)
							// Publish one worker's result so the hog isn't dead code.
							once.Do(func() { salt1 = salt })
						}()
					}
					wg.Wait()
				})
			})

			// A CPU profile must carry "count" then "nanoseconds" sample types.
			for i, unit := range []string{"count", "nanoseconds"} {
				if have, want := p.SampleType[i].Unit, unit; have != want {
					t.Errorf("pN SampleType[%d]; %q != %q", i, have, want)
				}
			}

			// cpuHog1 called above is the primary source of CPU
			// load, but there may be some background work by the
			// runtime. Since the OS rusage measurement will
			// include all work done by the process, also compare
			// against all samples in our profile.
			var value time.Duration
			for _, sample := range p.Sample {
				value += time.Duration(sample.Value[1]) * time.Nanosecond
			}

			t.Logf("compare %s vs %s", cpuTime, value)
			if err := compare(cpuTime, value, maxDiff); err != nil {
				t.Errorf("compare got %v want nil", err)
			}
		})
	}
}
   233  
// containsInlinedCall reports whether the function body for the function f is
// known to contain an inlined function call within the first maxBytes bytes.
// It is a convenience wrapper around findInlinedCall that discards the PC.
func containsInlinedCall(f any, maxBytes int) bool {
	_, found := findInlinedCall(f, maxBytes)
	return found
}
   240  
// findInlinedCall returns the PC of an inlined function call within
// the function body for the function f if any.
//
// It scans PCs byte by byte from f's entry point: a PC that still maps
// to f's entry but reports a different function name must belong to a
// function inlined into f.
func findInlinedCall(f any, maxBytes int) (pc uint64, found bool) {
	fFunc := runtime.FuncForPC(uintptr(abi.FuncPCABIInternal(f)))
	if fFunc == nil || fFunc.Entry() == 0 {
		panic("failed to locate function entry")
	}

	for offset := 0; offset < maxBytes; offset++ {
		innerPC := fFunc.Entry() + uintptr(offset)
		inner := runtime.FuncForPC(innerPC)
		if inner == nil {
			// No function known for this PC value.
			// It might simply be misaligned, so keep searching.
			continue
		}
		if inner.Entry() != fFunc.Entry() {
			// Scanned past f and didn't find any inlined functions.
			break
		}
		if inner.Name() != fFunc.Name() {
			// This PC has f as its entry-point, but is not f. Therefore, it must be a
			// function inlined into f.
			return uint64(innerPC), true
		}
	}

	return 0, false
}
   270  
// TestCPUProfileInlining checks that an inlined callee is both sampled
// and encoded in the same profile Location as its caller. Skips if the
// compiler did not actually inline inlinedCallee into inlinedCaller.
func TestCPUProfileInlining(t *testing.T) {
	if !containsInlinedCall(inlinedCaller, 4<<10) {
		t.Skip("Can't determine whether inlinedCallee was inlined into inlinedCaller.")
	}

	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, avoidFunctions())
	p := testCPUProfile(t, matches, func(dur time.Duration) {
		cpuHogger(inlinedCaller, &salt1, dur)
	})

	// Check if inlined function locations are encoded correctly. The inlinedCallee and inlinedCaller should be in one location.
	for _, loc := range p.Location {
		hasInlinedCallerAfterInlinedCallee, hasInlinedCallee := false, false
		for _, line := range loc.Line {
			if line.Function.Name == "runtime/pprof.inlinedCallee" {
				hasInlinedCallee = true
			}
			if hasInlinedCallee && line.Function.Name == "runtime/pprof.inlinedCaller" {
				hasInlinedCallerAfterInlinedCallee = true
			}
		}
		if hasInlinedCallee != hasInlinedCallerAfterInlinedCallee {
			t.Fatalf("want inlinedCallee followed by inlinedCaller, got separate Location entries:\n%v", p)
		}
	}
}
   297  
   298  func inlinedCaller(x int) int {
   299  	x = inlinedCallee(x, 1e5)
   300  	return x
   301  }
   302  
// inlinedCallee is a thin wrapper around cpuHog0, small enough for the
// compiler to inline into its callers.
func inlinedCallee(x, n int) int {
	return cpuHog0(x, n)
}
   306  
   307  //go:noinline
   308  func dumpCallers(pcs []uintptr) {
   309  	if pcs == nil {
   310  		return
   311  	}
   312  
   313  	skip := 2 // Callers and dumpCallers
   314  	runtime.Callers(skip, pcs)
   315  }
   316  
// inlinedCallerDump records the call stack via inlinedCalleeDump, which
// is expected to be inlined here; this function itself is kept noinline
// so it appears as a real frame above the inlined callee.
//
//go:noinline
func inlinedCallerDump(pcs []uintptr) {
	inlinedCalleeDump(pcs)
}
   321  
// inlinedCalleeDump captures the callers into pcs; small enough that the
// compiler is expected to inline it into inlinedCallerDump.
func inlinedCalleeDump(pcs []uintptr) {
	dumpCallers(pcs)
}
   325  
// TestCPUProfileRecursion checks that recursive calls do not confuse the
// Location encoding: no single Location may contain more than one frame
// from the recursion (caller or callee).
func TestCPUProfileRecursion(t *testing.T) {
	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.recursionCallee", "runtime/pprof.recursionCaller"}, avoidFunctions())
	p := testCPUProfile(t, matches, func(dur time.Duration) {
		cpuHogger(recursionCaller, &salt1, dur)
	})

	// check the Location encoding was not confused by recursive calls.
	for i, loc := range p.Location {
		recursionFunc := 0
		for _, line := range loc.Line {
			if name := line.Function.Name; name == "runtime/pprof.recursionCaller" || name == "runtime/pprof.recursionCallee" {
				recursionFunc++
			}
		}
		if recursionFunc > 1 {
			t.Fatalf("want at most one recursionCaller or recursionCallee in one Location, got a violating Location (index: %d):\n%v", i, p)
		}
	}
}
   345  
   346  func recursionCaller(x int) int {
   347  	y := recursionCallee(3, x)
   348  	return y
   349  }
   350  
// recursionCallee recurses n levels, burning CPU via inlinedCallee at
// each level and multiplying the results together so none of the work
// can be optimized away.
func recursionCallee(n, x int) int {
	if n == 0 {
		return 1
	}
	y := inlinedCallee(x, 1e4)
	return y * recursionCallee(n-1, x)
}
   358  
// recursionChainTop heads a mutual-recursion chain
// (Top -> Middle -> Bottom -> Top ...) that unwinds once x goes negative.
func recursionChainTop(x int, pcs []uintptr) {
	if x < 0 {
		return
	}
	recursionChainMiddle(x, pcs)
}
   365  
// recursionChainMiddle forwards to recursionChainBottom, contributing a
// middle frame to the mutual-recursion chain.
func recursionChainMiddle(x int, pcs []uintptr) {
	recursionChainBottom(x, pcs)
}
   369  
// recursionChainBottom records the call stack and recurses back to the
// top of the chain with a smaller x.
func recursionChainBottom(x int, pcs []uintptr) {
	// This will be called each time, we only care about the last. We
	// can't make this conditional or this function won't be inlined.
	dumpCallers(pcs)

	recursionChainTop(x-1, pcs)
}
   377  
// parseProfile parses the binary pprof data in valBytes and invokes f
// once per sample with the sample's count (Value[0]), stack, and labels.
// It fails the test on parse errors and returns the parsed profile.
func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) *profile.Profile {
	p, err := profile.Parse(bytes.NewReader(valBytes))
	if err != nil {
		t.Fatal(err)
	}
	for _, sample := range p.Sample {
		count := uintptr(sample.Value[0])
		f(count, sample.Location, sample.Label)
	}
	return p
}
   389  
   390  func cpuProfilingBroken() bool {
   391  	switch runtime.GOOS {
   392  	case "plan9":
   393  		// Profiling unimplemented.
   394  		return true
   395  	case "aix":
   396  		// See https://golang.org/issue/45170.
   397  		return true
   398  	case "ios", "dragonfly", "netbsd", "illumos", "solaris":
   399  		// See https://golang.org/issue/13841.
   400  		return true
   401  	case "openbsd":
   402  		if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
   403  			// See https://golang.org/issue/13841.
   404  			return true
   405  		}
   406  	}
   407  
   408  	return false
   409  }
   410  
// testCPUProfile runs f under the CPU profiler, checking for some conditions specified by need,
// as interpreted by matches, and returns the parsed profile.
//
// Because profiling is timing-dependent, f is re-run with doubling
// durations until the profile passes profileOk or maxDuration is hit;
// on known-broken platforms and under QEMU, a final failure becomes a
// skip rather than a test failure.
func testCPUProfile(t *testing.T, matches profileMatchFunc, f func(dur time.Duration)) *profile.Profile {
	switch runtime.GOOS {
	case "darwin":
		// Log kernel details, useful when diagnosing macOS-specific failures.
		out, err := exec.Command("uname", "-a").CombinedOutput()
		if err != nil {
			t.Fatal(err)
		}
		vers := string(out)
		t.Logf("uname -a: %v", vers)
	case "plan9":
		t.Skip("skipping on plan9")
	}

	broken := cpuProfilingBroken()

	maxDuration := 5 * time.Second
	if testing.Short() && broken {
		// If it's expected to be broken, no point waiting around.
		maxDuration /= 10
	}

	// If we're running a long test, start with a long duration
	// for tests that try to make sure something *doesn't* happen.
	duration := 5 * time.Second
	if testing.Short() {
		duration = 100 * time.Millisecond
	}

	// Profiling tests are inherently flaky, especially on a
	// loaded system, such as when this test is running with
	// several others under go test std. If a test fails in a way
	// that could mean it just didn't run long enough, try with a
	// longer duration.
	for duration <= maxDuration {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		f(duration)
		StopCPUProfile()

		if p, ok := profileOk(t, matches, prof, duration); ok {
			return p
		}

		duration *= 2
		if duration <= maxDuration {
			t.Logf("retrying with %s duration", duration)
		}
	}

	if broken {
		t.Skipf("ignoring failure on %s/%s; see golang.org/issue/13841", runtime.GOOS, runtime.GOARCH)
	}

	// Ignore the failure if the tests are running in a QEMU-based emulator,
	// QEMU is not perfect at emulating everything.
	// IN_QEMU environmental variable is set by some of the Go builders.
	// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
	if os.Getenv("IN_QEMU") == "1" {
		t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
	}
	t.FailNow()
	return nil
}
   478  
// diffCPUTimeImpl, when non-nil, measures the process CPU time consumed
// while running f. It is presumably assigned by OS-specific files not
// visible in this chunk — TODO(review): confirm where it is set.
var diffCPUTimeImpl func(f func()) time.Duration

// diffCPUTime reports the CPU time consumed while running f, failing
// the test on platforms where no implementation was installed.
func diffCPUTime(t *testing.T, f func()) time.Duration {
	if fn := diffCPUTimeImpl; fn != nil {
		return fn(f)
	}
	t.Fatalf("cannot measure CPU time on GOOS=%s GOARCH=%s", runtime.GOOS, runtime.GOARCH)
	return 0
}
   488  
   489  func contains(slice []string, s string) bool {
   490  	for i := range slice {
   491  		if slice[i] == s {
   492  			return true
   493  		}
   494  	}
   495  	return false
   496  }
   497  
   498  // stackContains matches if a function named spec appears anywhere in the stack trace.
   499  func stackContains(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
   500  	for _, loc := range stk {
   501  		for _, line := range loc.Line {
   502  			if strings.Contains(line.Function.Name, spec) {
   503  				return true
   504  			}
   505  		}
   506  	}
   507  	return false
   508  }
   509  
// sampleMatchFunc reports whether a single profile sample — its count,
// stack, and labels — satisfies the given spec string.
type sampleMatchFunc func(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool
   511  
// profileOk parses prof, logs every sample, and reports whether the
// profile looks valid: a reasonable number of samples for the requested
// duration and, if matches is non-nil, acceptance by matches. The parsed
// profile is returned either way so callers can inspect or retry.
func profileOk(t *testing.T, matches profileMatchFunc, prof bytes.Buffer, duration time.Duration) (_ *profile.Profile, ok bool) {
	ok = true

	var samples uintptr
	var buf bytes.Buffer
	p := parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
		fmt.Fprintf(&buf, "%d:", count)
		fprintStack(&buf, stk)
		fmt.Fprintf(&buf, " labels: %v\n", labels)
		samples += count
		fmt.Fprintf(&buf, "\n")
	})
	t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())

	if samples < 10 && runtime.GOOS == "windows" {
		// On some windows machines we end up with
		// not enough samples due to coarse timer
		// resolution. Let it go.
		t.Log("too few samples on Windows (golang.org/issue/10842)")
		return p, false
	}

	// Check that we got a reasonable number of samples.
	// We used to always require at least ideal/4 samples,
	// but that is too hard to guarantee on a loaded system.
	// Now we accept 10 or more samples, which we take to be
	// enough to show that at least some profiling is occurring.
	if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
		t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
		ok = false
	}

	if matches != nil && !matches(t, p) {
		ok = false
	}

	return p, ok
}
   550  
// profileMatchFunc validates a whole parsed profile, logging detail via t.
type profileMatchFunc func(*testing.T, *profile.Profile) bool
   552  
// matchAndAvoidStacks builds a profileMatchFunc that, using matches to
// test individual samples, requires at least one sample for every spec
// in need and zero samples in any function named in avoid.
func matchAndAvoidStacks(matches sampleMatchFunc, need []string, avoid []string) profileMatchFunc {
	return func(t *testing.T, p *profile.Profile) (ok bool) {
		ok = true

		// Check that profile is well formed, contains 'need', and does not contain
		// anything from 'avoid'.
		have := make([]uintptr, len(need))
		avoidSamples := make([]uintptr, len(avoid))

		for _, sample := range p.Sample {
			count := uintptr(sample.Value[0])
			// Credit this sample's count to every need spec it satisfies.
			for i, spec := range need {
				if matches(spec, count, sample.Location, sample.Label) {
					have[i] += count
				}
			}
			// Tally samples landing in any avoided function.
			for i, name := range avoid {
				for _, loc := range sample.Location {
					for _, line := range loc.Line {
						if strings.Contains(line.Function.Name, name) {
							avoidSamples[i] += count
						}
					}
				}
			}
		}

		for i, name := range avoid {
			bad := avoidSamples[i]
			if bad != 0 {
				t.Logf("found %d samples in avoid-function %s\n", bad, name)
				ok = false
			}
		}

		if len(need) == 0 {
			return
		}

		var total uintptr
		for i, name := range need {
			total += have[i]
			t.Logf("%s: %d\n", name, have[i])
		}
		if total == 0 {
			t.Logf("no samples in expected functions")
			ok = false
		}

		// We'd like to check a reasonable minimum, like
		// total / len(have) / smallconstant, but this test is
		// pretty flaky (see bug 7095).  So we'll just test to
		// make sure we got at least one sample.
		min := uintptr(1)
		for i, name := range need {
			if have[i] < min {
				t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
				ok = false
			}
		}
		return
	}
}
   616  
// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we do not do this.
// The test enlarges the heap (making fork slower) and then forks child
// processes repeatedly while the CPU profiler is running.
func TestCPUProfileWithFork(t *testing.T) {
	testenv.MustHaveExec(t)

	heap := 1 << 30
	if runtime.GOOS == "android" {
		// Use smaller size for Android to avoid crash.
		heap = 100 << 20
	}
	if runtime.GOOS == "windows" && runtime.GOARCH == "arm" {
		// Use smaller heap for Windows/ARM to avoid crash.
		heap = 100 << 20
	}
	if testing.Short() {
		heap = 100 << 20
	}
	// This makes fork slower.
	garbage := make([]byte, heap)
	// Need to touch the slice, otherwise it won't be paged in.
	done := make(chan bool)
	go func() {
		for i := range garbage {
			garbage[i] = 42
		}
		done <- true
	}()
	<-done

	var prof bytes.Buffer
	if err := StartCPUProfile(&prof); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	// Fork repeatedly under profiling; output of the children is ignored.
	for i := 0; i < 10; i++ {
		exec.Command(os.Args[0], "-h").CombinedOutput()
	}
}
   656  
// Test that profiler does not observe runtime.gogo as "user" goroutine execution.
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {
	if runtime.Compiler == "gccgo" {
		t.Skip("not applicable for gccgo")
	}
	// How much to try. These defaults take about 1 seconds
	// on a 2012 MacBook Pro. The ones in short mode take
	// about 0.1 seconds.
	tries := 10
	count := 1000000
	if testing.Short() {
		tries = 1
	}
	for try := 0; try < tries; try++ {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		// Force many goroutine switches while profiling.
		for i := 0; i < count; i++ {
			runtime.Gosched()
		}
		StopCPUProfile()

		// Read profile to look for entries for gogo with an attempt at a traceback.
		// "runtime.gogo" is OK, because that's the part of the context switch
		// before the actual switch begins. But we should not see "gogo",
		// aka "gogo<>(SB)", which does the actual switch and is marked SPWRITE.
		parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
			// An entry with two frames with 'System' in its top frame
			// exists to record a PC without a traceback. Those are okay.
			if len(stk) == 2 {
				name := stk[1].Line[0].Function.Name
				if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
					return
				}
			}

			// An entry with just one frame is OK too:
			// it knew to stop at gogo.
			if len(stk) == 1 {
				return
			}

			// Otherwise, should not see gogo.
			// The place we'd see it would be the inner most frame.
			name := stk[0].Line[0].Function.Name
			if name == "gogo" {
				var buf bytes.Buffer
				fprintStack(&buf, stk)
				t.Fatalf("found profile entry for gogo:\n%s", buf.String())
			}
		})
	}
}
   713  
   714  func fprintStack(w io.Writer, stk []*profile.Location) {
   715  	for _, loc := range stk {
   716  		fmt.Fprintf(w, " %#x", loc.Address)
   717  		fmt.Fprintf(w, " (")
   718  		for i, line := range loc.Line {
   719  			if i > 0 {
   720  				fmt.Fprintf(w, " ")
   721  			}
   722  			fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
   723  		}
   724  		fmt.Fprintf(w, ")")
   725  	}
   726  }
   727  
// Test that profiling of division operations is okay, especially on ARM. See issue 6681.
// No particular samples are required (matches is nil); the test passes
// as long as profiling big.Int division does not crash or misbehave.
func TestMathBigDivide(t *testing.T) {
	testCPUProfile(t, nil, func(duration time.Duration) {
		t := time.After(duration)
		pi := new(big.Int)
		// Divide in batches of 100 between deadline checks.
		for {
			for i := 0; i < 100; i++ {
				n := big.NewInt(2646693125139304345)
				d := big.NewInt(842468587426513207)
				pi.Div(n, d)
			}
			select {
			case <-t:
				return
			default:
			}
		}
	})
}
   747  
   748  // stackContainsAll matches if all functions in spec (comma-separated) appear somewhere in the stack trace.
   749  func stackContainsAll(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
   750  	for _, f := range strings.Split(spec, ",") {
   751  		if !stackContains(f, count, stk, labels) {
   752  			return false
   753  		}
   754  	}
   755  	return true
   756  }
   757  
// TestMorestack checks that stack growth (runtime.newstack) triggered by
// growstack is correctly attributed in the CPU profile: a sample must
// exist whose stack contains both runtime.newstack and pprof.growstack.
func TestMorestack(t *testing.T) {
	matches := matchAndAvoidStacks(stackContainsAll, []string{"runtime.newstack,runtime/pprof.growstack"}, avoidFunctions())
	testCPUProfile(t, matches, func(duration time.Duration) {
		t := time.After(duration)
		c := make(chan bool)
		// Keep launching fresh goroutines so each one grows its stack
		// from scratch, until the deadline fires.
		for {
			go func() {
				growstack1()
				c <- true
			}()
			select {
			case <-t:
				return
			case <-c:
			}
		}
	})
}
   776  
// growstack1 is the noinline entry point that triggers deep stack growth
// for TestMorestack.
//
//go:noinline
func growstack1() {
	growstack(10)
}
   781  
// growstack recursively allocates a huge (8<<18-byte) local buffer n+1
// times, forcing the runtime to grow the goroutine stack repeatedly.
//
//go:noinline
func growstack(n int) {
	var buf [8 << 18]byte
	use(buf)
	if n > 0 {
		growstack(n - 1)
	}
}
   790  
// use keeps growstack's buffer (and hence its large frame) from being
// optimized away; noinline so the by-value copy really happens.
//
//go:noinline
func use(x [8 << 18]byte) {}
   793  
// TestBlockProfile exercises each blocking primitive tracked by the
// block profiler (channel recv/send/close, select, mutex, cond) and
// then validates both output formats: the debug=1 text form against
// per-primitive regexps, and the proto form against expected stacks.
func TestBlockProfile(t *testing.T) {
	type TestCase struct {
		name string
		f    func(*testing.T)
		stk  []string
		re   string
	}
	tests := [...]TestCase{
		{
			name: "chan recv",
			f:    blockChanRecv,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanRecv",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanRecv\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan send",
			f:    blockChanSend,
			stk: []string{
				"runtime.chansend1",
				"runtime/pprof.blockChanSend",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chansend1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanSend\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan close",
			f:    blockChanClose,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanClose",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanClose\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select recv async",
			f:    blockSelectRecvAsync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectRecvAsync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select send sync",
			f:    blockSelectSendSync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectSendSync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "mutex",
			f:    blockMutex,
			stk: []string{
				"sync.(*Mutex).Lock",
				"runtime/pprof.blockMutex",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+	.*/src/sync/mutex\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockMutex\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "cond",
			f:    blockCond,
			stk: []string{
				"sync.(*Cond).Wait",
				"runtime/pprof.blockCond",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+	.*/src/sync/cond\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockCond\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
	}

	// Generate block profile
	runtime.SetBlockProfileRate(1)
	defer runtime.SetBlockProfileRate(0)
	for _, test := range tests {
		test.f(t)
	}

	t.Run("debug=1", func(t *testing.T) {
		var w bytes.Buffer
		Lookup("block").WriteTo(&w, 1)
		prof := w.String()

		if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
			t.Fatalf("Bad profile header:\n%v", prof)
		}

		if strings.HasSuffix(prof, "#\t0x0\n\n") {
			t.Errorf("Useless 0 suffix:\n%v", prof)
		}

		// Each expected regexp is relaxed to allow runs of tabs between columns.
		for _, test := range tests {
			if !regexp.MustCompile(strings.ReplaceAll(test.re, "\t", "\t+")).MatchString(prof) {
				t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
			}
		}
	})

	t.Run("proto", func(t *testing.T) {
		// proto format
		var w bytes.Buffer
		Lookup("block").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		stks := stacks(p)
		for _, test := range tests {
			if !containsStack(stks, test.stk) {
				t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
			}
		}
	})

}
   951  
   952  func stacks(p *profile.Profile) (res [][]string) {
   953  	for _, s := range p.Sample {
   954  		var stk []string
   955  		for _, l := range s.Location {
   956  			for _, line := range l.Line {
   957  				stk = append(stk, line.Function.Name)
   958  			}
   959  		}
   960  		res = append(res, stk)
   961  	}
   962  	return res
   963  }
   964  
   965  func containsStack(got [][]string, want []string) bool {
   966  	for _, stk := range got {
   967  		if len(stk) < len(want) {
   968  			continue
   969  		}
   970  		for i, f := range want {
   971  			if f != stk[i] {
   972  				break
   973  			}
   974  			if i == len(want)-1 {
   975  				return true
   976  			}
   977  		}
   978  	}
   979  	return false
   980  }
   981  
   982  // awaitBlockedGoroutine spins on runtime.Gosched until a runtime stack dump
   983  // shows a goroutine in the given state with a stack frame in
   984  // runtime/pprof.<fName>.
   985  func awaitBlockedGoroutine(t *testing.T, state, fName string) {
   986  	re := fmt.Sprintf(`(?m)^goroutine \d+ \[%s\]:\n(?:.+\n\t.+\n)*runtime/pprof\.%s`, regexp.QuoteMeta(state), fName)
   987  	r := regexp.MustCompile(re)
   988  
   989  	if deadline, ok := t.Deadline(); ok {
   990  		if d := time.Until(deadline); d > 1*time.Second {
   991  			timer := time.AfterFunc(d-1*time.Second, func() {
   992  				debug.SetTraceback("all")
   993  				panic(fmt.Sprintf("timed out waiting for %#q", re))
   994  			})
   995  			defer timer.Stop()
   996  		}
   997  	}
   998  
   999  	buf := make([]byte, 64<<10)
  1000  	for {
  1001  		runtime.Gosched()
  1002  		n := runtime.Stack(buf, true)
  1003  		if n == len(buf) {
  1004  			// Buffer wasn't large enough for a full goroutine dump.
  1005  			// Resize it and try again.
  1006  			buf = make([]byte, 2*len(buf))
  1007  			continue
  1008  		}
  1009  		if r.Match(buf[:n]) {
  1010  			return
  1011  		}
  1012  	}
  1013  }
  1014  
  1015  func blockChanRecv(t *testing.T) {
  1016  	c := make(chan bool)
  1017  	go func() {
  1018  		awaitBlockedGoroutine(t, "chan receive", "blockChanRecv")
  1019  		c <- true
  1020  	}()
  1021  	<-c
  1022  }
  1023  
  1024  func blockChanSend(t *testing.T) {
  1025  	c := make(chan bool)
  1026  	go func() {
  1027  		awaitBlockedGoroutine(t, "chan send", "blockChanSend")
  1028  		<-c
  1029  	}()
  1030  	c <- true
  1031  }
  1032  
  1033  func blockChanClose(t *testing.T) {
  1034  	c := make(chan bool)
  1035  	go func() {
  1036  		awaitBlockedGoroutine(t, "chan receive", "blockChanClose")
  1037  		close(c)
  1038  	}()
  1039  	<-c
  1040  }
  1041  
  1042  func blockSelectRecvAsync(t *testing.T) {
  1043  	const numTries = 3
  1044  	c := make(chan bool, 1)
  1045  	c2 := make(chan bool, 1)
  1046  	go func() {
  1047  		for i := 0; i < numTries; i++ {
  1048  			awaitBlockedGoroutine(t, "select", "blockSelectRecvAsync")
  1049  			c <- true
  1050  		}
  1051  	}()
  1052  	for i := 0; i < numTries; i++ {
  1053  		select {
  1054  		case <-c:
  1055  		case <-c2:
  1056  		}
  1057  	}
  1058  }
  1059  
  1060  func blockSelectSendSync(t *testing.T) {
  1061  	c := make(chan bool)
  1062  	c2 := make(chan bool)
  1063  	go func() {
  1064  		awaitBlockedGoroutine(t, "select", "blockSelectSendSync")
  1065  		<-c
  1066  	}()
  1067  	select {
  1068  	case c <- true:
  1069  	case c2 <- true:
  1070  	}
  1071  }
  1072  
  1073  func blockMutex(t *testing.T) {
  1074  	var mu sync.Mutex
  1075  	mu.Lock()
  1076  	go func() {
  1077  		awaitBlockedGoroutine(t, "semacquire", "blockMutex")
  1078  		mu.Unlock()
  1079  	}()
  1080  	// Note: Unlock releases mu before recording the mutex event,
  1081  	// so it's theoretically possible for this to proceed and
  1082  	// capture the profile before the event is recorded. As long
  1083  	// as this is blocked before the unlock happens, it's okay.
  1084  	mu.Lock()
  1085  }
  1086  
  1087  func blockCond(t *testing.T) {
  1088  	var mu sync.Mutex
  1089  	c := sync.NewCond(&mu)
  1090  	mu.Lock()
  1091  	go func() {
  1092  		awaitBlockedGoroutine(t, "sync.Cond.Wait", "blockCond")
  1093  		mu.Lock()
  1094  		c.Signal()
  1095  		mu.Unlock()
  1096  	}()
  1097  	c.Wait()
  1098  	mu.Unlock()
  1099  }
  1100  
  1101  // See http://golang.org/cl/299991.
  1102  func TestBlockProfileBias(t *testing.T) {
  1103  	rate := int(1000) // arbitrary value
  1104  	runtime.SetBlockProfileRate(rate)
  1105  	defer runtime.SetBlockProfileRate(0)
  1106  
  1107  	// simulate blocking events
  1108  	blockFrequentShort(rate)
  1109  	blockInfrequentLong(rate)
  1110  
  1111  	var w bytes.Buffer
  1112  	Lookup("block").WriteTo(&w, 0)
  1113  	p, err := profile.Parse(&w)
  1114  	if err != nil {
  1115  		t.Fatalf("failed to parse profile: %v", err)
  1116  	}
  1117  	t.Logf("parsed proto: %s", p)
  1118  
  1119  	il := float64(-1) // blockInfrequentLong duration
  1120  	fs := float64(-1) // blockFrequentShort duration
  1121  	for _, s := range p.Sample {
  1122  		for _, l := range s.Location {
  1123  			for _, line := range l.Line {
  1124  				if len(s.Value) < 2 {
  1125  					t.Fatal("block profile has less than 2 sample types")
  1126  				}
  1127  
  1128  				if line.Function.Name == "runtime/pprof.blockInfrequentLong" {
  1129  					il = float64(s.Value[1])
  1130  				} else if line.Function.Name == "runtime/pprof.blockFrequentShort" {
  1131  					fs = float64(s.Value[1])
  1132  				}
  1133  			}
  1134  		}
  1135  	}
  1136  	if il == -1 || fs == -1 {
  1137  		t.Fatal("block profile is missing expected functions")
  1138  	}
  1139  
  1140  	// stddev of bias from 100 runs on local machine multiplied by 10x
  1141  	const threshold = 0.2
  1142  	if bias := (il - fs) / il; math.Abs(bias) > threshold {
  1143  		t.Fatalf("bias: abs(%f) > %f", bias, threshold)
  1144  	} else {
  1145  		t.Logf("bias: abs(%f) < %f", bias, threshold)
  1146  	}
  1147  }
  1148  
  1149  // blockFrequentShort produces 100000 block events with an average duration of
  1150  // rate / 10.
  1151  func blockFrequentShort(rate int) {
  1152  	for i := 0; i < 100000; i++ {
  1153  		blockevent(int64(rate/10), 1)
  1154  	}
  1155  }
  1156  
  1157  // blockFrequentShort produces 10000 block events with an average duration of
  1158  // rate.
  1159  func blockInfrequentLong(rate int) {
  1160  	for i := 0; i < 10000; i++ {
  1161  		blockevent(int64(rate), 1)
  1162  	}
  1163  }
  1164  
// blockevent links to runtime.blockevent, letting the test record a
// synthetic blocking event of the given duration (in cycles) in the
// runtime's block profile without actually blocking.
// Used by TestBlockProfileBias.
//go:linkname blockevent runtime.blockevent
func blockevent(cycles int64, skip int)
  1168  
  1169  func TestMutexProfile(t *testing.T) {
  1170  	// Generate mutex profile
  1171  
  1172  	old := runtime.SetMutexProfileFraction(1)
  1173  	defer runtime.SetMutexProfileFraction(old)
  1174  	if old != 0 {
  1175  		t.Fatalf("need MutexProfileRate 0, got %d", old)
  1176  	}
  1177  
  1178  	blockMutex(t)
  1179  
  1180  	t.Run("debug=1", func(t *testing.T) {
  1181  		var w bytes.Buffer
  1182  		Lookup("mutex").WriteTo(&w, 1)
  1183  		prof := w.String()
  1184  		t.Logf("received profile: %v", prof)
  1185  
  1186  		if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
  1187  			t.Errorf("Bad profile header:\n%v", prof)
  1188  		}
  1189  		prof = strings.Trim(prof, "\n")
  1190  		lines := strings.Split(prof, "\n")
  1191  		if len(lines) != 6 {
  1192  			t.Errorf("expected 6 lines, got %d %q\n%s", len(lines), prof, prof)
  1193  		}
  1194  		if len(lines) < 6 {
  1195  			return
  1196  		}
  1197  		// checking that the line is like "35258904 1 @ 0x48288d 0x47cd28 0x458931"
  1198  		r2 := `^\d+ \d+ @(?: 0x[[:xdigit:]]+)+`
  1199  		//r2 := "^[0-9]+ 1 @ 0x[0-9a-f x]+$"
  1200  		if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
  1201  			t.Errorf("%q didn't match %q", lines[3], r2)
  1202  		}
  1203  		r3 := "^#.*runtime/pprof.blockMutex.*$"
  1204  		if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
  1205  			t.Errorf("%q didn't match %q", lines[5], r3)
  1206  		}
  1207  		t.Logf(prof)
  1208  	})
  1209  	t.Run("proto", func(t *testing.T) {
  1210  		// proto format
  1211  		var w bytes.Buffer
  1212  		Lookup("mutex").WriteTo(&w, 0)
  1213  		p, err := profile.Parse(&w)
  1214  		if err != nil {
  1215  			t.Fatalf("failed to parse profile: %v", err)
  1216  		}
  1217  		t.Logf("parsed proto: %s", p)
  1218  		if err := p.CheckValid(); err != nil {
  1219  			t.Fatalf("invalid profile: %v", err)
  1220  		}
  1221  
  1222  		stks := stacks(p)
  1223  		for _, want := range [][]string{
  1224  			{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutex.func1"},
  1225  		} {
  1226  			if !containsStack(stks, want) {
  1227  				t.Errorf("No matching stack entry for %+v", want)
  1228  			}
  1229  		}
  1230  	})
  1231  }
  1232  
// func1 through func4 are identical channel-receive blockers with
// distinct names, so goroutine profiles show distinguishable stacks
// (used by TestGoroutineCounts; func4 presumably by a sibling test —
// not visible in this chunk).
func func1(c chan int) { <-c }
func func2(c chan int) { <-c }
func func3(c chan int) { <-c }
func func4(c chan int) { <-c }
  1237  
  1238  func TestGoroutineCounts(t *testing.T) {
  1239  	// Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
  1240  	// desired blocking point.
  1241  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
  1242  
  1243  	c := make(chan int)
  1244  	for i := 0; i < 100; i++ {
  1245  		switch {
  1246  		case i%10 == 0:
  1247  			go func1(c)
  1248  		case i%2 == 0:
  1249  			go func2(c)
  1250  		default:
  1251  			go func3(c)
  1252  		}
  1253  		// Let goroutines block on channel
  1254  		for j := 0; j < 5; j++ {
  1255  			runtime.Gosched()
  1256  		}
  1257  	}
  1258  	ctx := context.Background()
  1259  
  1260  	// ... and again, with labels this time (just with fewer iterations to keep
  1261  	// sorting deterministic).
  1262  	Do(ctx, Labels("label", "value"), func(context.Context) {
  1263  		for i := 0; i < 89; i++ {
  1264  			switch {
  1265  			case i%10 == 0:
  1266  				go func1(c)
  1267  			case i%2 == 0:
  1268  				go func2(c)
  1269  			default:
  1270  				go func3(c)
  1271  			}
  1272  			// Let goroutines block on channel
  1273  			for j := 0; j < 5; j++ {
  1274  				runtime.Gosched()
  1275  			}
  1276  		}
  1277  	})
  1278  
  1279  	var w bytes.Buffer
  1280  	goroutineProf := Lookup("goroutine")
  1281  
  1282  	// Check debug profile
  1283  	goroutineProf.WriteTo(&w, 1)
  1284  	prof := w.String()
  1285  
  1286  	labels := labelMap{"label": "value"}
  1287  	labelStr := "\n# labels: " + labels.String()
  1288  	if !containsInOrder(prof, "\n50 @ ", "\n44 @", labelStr,
  1289  		"\n40 @", "\n36 @", labelStr, "\n10 @", "\n9 @", labelStr, "\n1 @") {
  1290  		t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
  1291  	}
  1292  
  1293  	// Check proto profile
  1294  	w.Reset()
  1295  	goroutineProf.WriteTo(&w, 0)
  1296  	p, err := profile.Parse(&w)
  1297  	if err != nil {
  1298  		t.Errorf("error parsing protobuf profile: %v", err)
  1299  	}
  1300  	if err := p.CheckValid(); err != nil {
  1301  		t.Errorf("protobuf profile is invalid: %v", err)
  1302  	}
  1303  	expectedLabels := map[int64]map[string]string{
  1304  		50: map[string]string{},
  1305  		44: map[string]string{"label": "value"},
  1306  		40: map[string]string{},
  1307  		36: map[string]string{"label": "value"},
  1308  		10: map[string]string{},
  1309  		9:  map[string]string{"label": "value"},
  1310  		1:  map[string]string{},
  1311  	}
  1312  	if !containsCountsLabels(p, expectedLabels) {
  1313  		t.Errorf("expected count profile to contain goroutines with counts and labels %v, got %v",
  1314  			expectedLabels, p)
  1315  	}
  1316  
  1317  	close(c)
  1318  
  1319  	time.Sleep(10 * time.Millisecond) // let goroutines exit
  1320  }
  1321  
  1322  func containsInOrder(s string, all ...string) bool {
  1323  	for _, t := range all {
  1324  		var ok bool
  1325  		if _, s, ok = strings.Cut(s, t); !ok {
  1326  			return false
  1327  		}
  1328  	}
  1329  	return true
  1330  }
  1331  
  1332  func containsCountsLabels(prof *profile.Profile, countLabels map[int64]map[string]string) bool {
  1333  	m := make(map[int64]int)
  1334  	type nkey struct {
  1335  		count    int64
  1336  		key, val string
  1337  	}
  1338  	n := make(map[nkey]int)
  1339  	for c, kv := range countLabels {
  1340  		m[c]++
  1341  		for k, v := range kv {
  1342  			n[nkey{
  1343  				count: c,
  1344  				key:   k,
  1345  				val:   v,
  1346  			}]++
  1347  
  1348  		}
  1349  	}
  1350  	for _, s := range prof.Sample {
  1351  		// The count is the single value in the sample
  1352  		if len(s.Value) != 1 {
  1353  			return false
  1354  		}
  1355  		m[s.Value[0]]--
  1356  		for k, vs := range s.Label {
  1357  			for _, v := range vs {
  1358  				n[nkey{
  1359  					count: s.Value[0],
  1360  					key:   k,
  1361  					val:   v,
  1362  				}]--
  1363  			}
  1364  		}
  1365  	}
  1366  	for _, n := range m {
  1367  		if n > 0 {
  1368  			return false
  1369  		}
  1370  	}
  1371  	for _, ncnt := range n {
  1372  		if ncnt != 0 {
  1373  			return false
  1374  		}
  1375  	}
  1376  	return true
  1377  }
  1378  
// emptyCallStackTestRun counts invocations of TestEmptyCallStack so that
// each run registers its profile under a unique name.
var emptyCallStackTestRun int64
  1380  
  1381  // Issue 18836.
  1382  func TestEmptyCallStack(t *testing.T) {
  1383  	name := fmt.Sprintf("test18836_%d", emptyCallStackTestRun)
  1384  	emptyCallStackTestRun++
  1385  
  1386  	t.Parallel()
  1387  	var buf bytes.Buffer
  1388  	p := NewProfile(name)
  1389  
  1390  	p.Add("foo", 47674)
  1391  	p.WriteTo(&buf, 1)
  1392  	p.Remove("foo")
  1393  	got := buf.String()
  1394  	prefix := name + " profile: total 1\n"
  1395  	if !strings.HasPrefix(got, prefix) {
  1396  		t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
  1397  	}
  1398  	lostevent := "lostProfileEvent"
  1399  	if !strings.Contains(got, lostevent) {
  1400  		t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
  1401  	}
  1402  }
  1403  
  1404  // stackContainsLabeled takes a spec like funcname;key=value and matches if the stack has that key
  1405  // and value and has funcname somewhere in the stack.
  1406  func stackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
  1407  	base, kv, ok := strings.Cut(spec, ";")
  1408  	if !ok {
  1409  		panic("no semicolon in key/value spec")
  1410  	}
  1411  	k, v, ok := strings.Cut(kv, "=")
  1412  	if !ok {
  1413  		panic("missing = in key/value spec")
  1414  	}
  1415  	if !contains(labels[k], v) {
  1416  		return false
  1417  	}
  1418  	return stackContains(base, count, stk, labels)
  1419  }
  1420  
// TestCPUProfileLabel checks that CPU samples taken inside a Do call
// carry the "key=value" label on the cpuHogger frames.
func TestCPUProfileLabel(t *testing.T) {
	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, avoidFunctions())
	testCPUProfile(t, matches, func(dur time.Duration) {
		// Burn CPU inside a labeled context so the profiler attributes
		// the cpuHogger samples to the label.
		Do(context.Background(), Labels("key", "value"), func(context.Context) {
			cpuHogger(cpuHog1, &salt1, dur)
		})
	})
}
  1429  
// TestLabelRace runs many short-lived labeled goroutines while profiling
// to exercise the race detector annotations around label handoff.
func TestLabelRace(t *testing.T) {
	// Test the race detector annotations for synchronization
	// between settings labels and consuming them from the
	// profile.
	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, nil)
	testCPUProfile(t, matches, func(dur time.Duration) {
		start := time.Now()
		var wg sync.WaitGroup
		for time.Since(start) < dur {
			// Fresh salts each round so the hogs don't share state.
			var salts [10]int
			for i := 0; i < 10; i++ {
				wg.Add(1)
				// Pass i explicitly so each goroutine gets its own index.
				go func(j int) {
					Do(context.Background(), Labels("key", "value"), func(context.Context) {
						cpuHogger(cpuHog1, &salts[j], time.Millisecond)
					})
					wg.Done()
				}(i)
			}
			wg.Wait()
		}
	})
}
  1453  
// TestLabelSystemstack makes sure CPU profiler samples of goroutines running
// on systemstack include the correct pprof labels. See issue #48577
func TestLabelSystemstack(t *testing.T) {
	// Grab and re-set the initial value before continuing to ensure
	// GOGC doesn't actually change following the test.
	gogc := debug.SetGCPercent(100)
	debug.SetGCPercent(gogc)

	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime.systemstack;key=value"}, avoidFunctions())
	p := testCPUProfile(t, matches, func(dur time.Duration) {
		Do(context.Background(), Labels("key", "value"), func(ctx context.Context) {
			parallelLabelHog(ctx, dur, gogc)
		})
	})

	// Two conditions to check:
	// * labelHog should always be labeled.
	// * The label should _only_ appear on labelHog and the Do call above.
	for _, s := range p.Sample {
		isLabeled := s.Label != nil && contains(s.Label["key"], "value")
		var (
			mayBeLabeled     bool
			mustBeLabeled    bool
			mustNotBeLabeled bool
		)
		// Classify the sample by the functions on its stack.
		for _, loc := range s.Location {
			for _, l := range loc.Line {
				switch l.Function.Name {
				case "runtime/pprof.labelHog", "runtime/pprof.parallelLabelHog", "runtime/pprof.parallelLabelHog.func1":
					mustBeLabeled = true
				case "runtime/pprof.Do":
					// Do sets the labels, so samples may
					// or may not be labeled depending on
					// which part of the function they are
					// at.
					mayBeLabeled = true
				case "runtime.bgsweep", "runtime.bgscavenge", "runtime.forcegchelper", "runtime.gcBgMarkWorker", "runtime.runfinq", "runtime.sysmon":
					// Runtime system goroutines or threads
					// (such as those identified by
					// runtime.isSystemGoroutine). These
					// should never be labeled.
					mustNotBeLabeled = true
				case "gogo", "gosave_systemstack_switch", "racecall":
					// These are context switch/race
					// critical that we can't do a full
					// traceback from. Typically this would
					// be covered by the runtime check
					// below, but these symbols don't have
					// the package name.
					mayBeLabeled = true
				}

				if strings.HasPrefix(l.Function.Name, "runtime.") {
					// There are many places in the runtime
					// where we can't do a full traceback.
					// Ideally we'd list them all, but
					// barring that allow anything in the
					// runtime, unless explicitly excluded
					// above.
					mayBeLabeled = true
				}
			}
		}
		if mustNotBeLabeled {
			// If this must not be labeled, then mayBeLabeled hints
			// are not relevant.
			mayBeLabeled = false
		}
		if mustBeLabeled && !isLabeled {
			var buf bytes.Buffer
			fprintStack(&buf, s.Location)
			t.Errorf("Sample labeled got false want true: %s", buf.String())
		}
		if mustNotBeLabeled && isLabeled {
			var buf bytes.Buffer
			fprintStack(&buf, s.Location)
			t.Errorf("Sample labeled got true want false: %s", buf.String())
		}
		// A labeled sample with no frame that could carry the label
		// means the label leaked somewhere it should not be.
		if isLabeled && !(mayBeLabeled || mustBeLabeled) {
			var buf bytes.Buffer
			fprintStack(&buf, s.Location)
			t.Errorf("Sample labeled got true want false: %s", buf.String())
		}
	}
}
  1539  
  1540  // labelHog is designed to burn CPU time in a way that a high number of CPU
  1541  // samples end up running on systemstack.
  1542  func labelHog(stop chan struct{}, gogc int) {
  1543  	// Regression test for issue 50032. We must give GC an opportunity to
  1544  	// be initially triggered by a labelled goroutine.
  1545  	runtime.GC()
  1546  
  1547  	for i := 0; ; i++ {
  1548  		select {
  1549  		case <-stop:
  1550  			return
  1551  		default:
  1552  			debug.SetGCPercent(gogc)
  1553  		}
  1554  	}
  1555  }
  1556  
  1557  // parallelLabelHog runs GOMAXPROCS goroutines running labelHog.
  1558  func parallelLabelHog(ctx context.Context, dur time.Duration, gogc int) {
  1559  	var wg sync.WaitGroup
  1560  	stop := make(chan struct{})
  1561  	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
  1562  		wg.Add(1)
  1563  		go func() {
  1564  			defer wg.Done()
  1565  			labelHog(stop, gogc)
  1566  		}()
  1567  	}
  1568  
  1569  	time.Sleep(dur)
  1570  	close(stop)
  1571  	wg.Wait()
  1572  }
  1573  
  1574  // Check that there is no deadlock when the program receives SIGPROF while in
  1575  // 64bit atomics' critical section. Used to happen on mips{,le}. See #20146.
  1576  func TestAtomicLoadStore64(t *testing.T) {
  1577  	f, err := os.CreateTemp("", "profatomic")
  1578  	if err != nil {
  1579  		t.Fatalf("TempFile: %v", err)
  1580  	}
  1581  	defer os.Remove(f.Name())
  1582  	defer f.Close()
  1583  
  1584  	if err := StartCPUProfile(f); err != nil {
  1585  		t.Fatal(err)
  1586  	}
  1587  	defer StopCPUProfile()
  1588  
  1589  	var flag uint64
  1590  	done := make(chan bool, 1)
  1591  
  1592  	go func() {
  1593  		for atomic.LoadUint64(&flag) == 0 {
  1594  			runtime.Gosched()
  1595  		}
  1596  		done <- true
  1597  	}()
  1598  	time.Sleep(50 * time.Millisecond)
  1599  	atomic.StoreUint64(&flag, 1)
  1600  	<-done
  1601  }
  1602  
  1603  func TestTracebackAll(t *testing.T) {
  1604  	// With gccgo, if a profiling signal arrives at the wrong time
  1605  	// during traceback, it may crash or hang. See issue #29448.
  1606  	f, err := os.CreateTemp("", "proftraceback")
  1607  	if err != nil {
  1608  		t.Fatalf("TempFile: %v", err)
  1609  	}
  1610  	defer os.Remove(f.Name())
  1611  	defer f.Close()
  1612  
  1613  	if err := StartCPUProfile(f); err != nil {
  1614  		t.Fatal(err)
  1615  	}
  1616  	defer StopCPUProfile()
  1617  
  1618  	ch := make(chan int)
  1619  	defer close(ch)
  1620  
  1621  	count := 10
  1622  	for i := 0; i < count; i++ {
  1623  		go func() {
  1624  			<-ch // block
  1625  		}()
  1626  	}
  1627  
  1628  	N := 10000
  1629  	if testing.Short() {
  1630  		N = 500
  1631  	}
  1632  	buf := make([]byte, 10*1024)
  1633  	for i := 0; i < N; i++ {
  1634  		runtime.Stack(buf, true)
  1635  	}
  1636  }
  1637  
  1638  // TestTryAdd tests the cases that are hard to test with real program execution.
  1639  //
  1640  // For example, the current go compilers may not always inline functions
  1641  // involved in recursion but that may not be true in the future compilers. This
  1642  // tests such cases by using fake call sequences and forcing the profile build
  1643  // utilizing translateCPUProfile defined in proto_test.go
  1644  func TestTryAdd(t *testing.T) {
  1645  	if _, found := findInlinedCall(inlinedCallerDump, 4<<10); !found {
  1646  		t.Skip("Can't determine whether anything was inlined into inlinedCallerDump.")
  1647  	}
  1648  
  1649  	// inlinedCallerDump
  1650  	//   inlinedCalleeDump
  1651  	pcs := make([]uintptr, 2)
  1652  	inlinedCallerDump(pcs)
  1653  	inlinedCallerStack := make([]uint64, 2)
  1654  	for i := range pcs {
  1655  		inlinedCallerStack[i] = uint64(pcs[i])
  1656  	}
  1657  
  1658  	if _, found := findInlinedCall(recursionChainBottom, 4<<10); !found {
  1659  		t.Skip("Can't determine whether anything was inlined into recursionChainBottom.")
  1660  	}
  1661  
  1662  	// recursionChainTop
  1663  	//   recursionChainMiddle
  1664  	//     recursionChainBottom
  1665  	//       recursionChainTop
  1666  	//         recursionChainMiddle
  1667  	//           recursionChainBottom
  1668  	pcs = make([]uintptr, 6)
  1669  	recursionChainTop(1, pcs)
  1670  	recursionStack := make([]uint64, len(pcs))
  1671  	for i := range pcs {
  1672  		recursionStack[i] = uint64(pcs[i])
  1673  	}
  1674  
  1675  	period := int64(2000 * 1000) // 1/500*1e9 nanosec.
  1676  
  1677  	testCases := []struct {
  1678  		name        string
  1679  		input       []uint64          // following the input format assumed by profileBuilder.addCPUData.
  1680  		count       int               // number of records in input.
  1681  		wantLocs    [][]string        // ordered location entries with function names.
  1682  		wantSamples []*profile.Sample // ordered samples, we care only about Value and the profile location IDs.
  1683  	}{{
  1684  		// Sanity test for a normal, complete stack trace.
  1685  		name: "full_stack_trace",
  1686  		input: []uint64{
  1687  			3, 0, 500, // hz = 500. Must match the period.
  1688  			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
  1689  		},
  1690  		count: 2,
  1691  		wantLocs: [][]string{
  1692  			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
  1693  		},
  1694  		wantSamples: []*profile.Sample{
  1695  			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
  1696  		},
  1697  	}, {
  1698  		name: "bug35538",
  1699  		input: []uint64{
  1700  			3, 0, 500, // hz = 500. Must match the period.
  1701  			// Fake frame: tryAdd will have inlinedCallerDump
  1702  			// (stack[1]) on the deck when it encounters the next
  1703  			// inline function. It should accept this.
  1704  			7, 0, 10, inlinedCallerStack[0], inlinedCallerStack[1], inlinedCallerStack[0], inlinedCallerStack[1],
  1705  			5, 0, 20, inlinedCallerStack[0], inlinedCallerStack[1],
  1706  		},
  1707  		count:    3,
  1708  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
  1709  		wantSamples: []*profile.Sample{
  1710  			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
  1711  			{Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
  1712  		},
  1713  	}, {
  1714  		name: "bug38096",
  1715  		input: []uint64{
  1716  			3, 0, 500, // hz = 500. Must match the period.
  1717  			// count (data[2]) == 0 && len(stk) == 1 is an overflow
  1718  			// entry. The "stk" entry is actually the count.
  1719  			4, 0, 0, 4242,
  1720  		},
  1721  		count:    2,
  1722  		wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
  1723  		wantSamples: []*profile.Sample{
  1724  			{Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
  1725  		},
  1726  	}, {
  1727  		// If a function is directly called recursively then it must
  1728  		// not be inlined in the caller.
  1729  		//
  1730  		// N.B. We're generating an impossible profile here, with a
  1731  		// recursive inlineCalleeDump call. This is simulating a non-Go
  1732  		// function that looks like an inlined Go function other than
  1733  		// its recursive property. See pcDeck.tryAdd.
  1734  		name: "directly_recursive_func_is_not_inlined",
  1735  		input: []uint64{
  1736  			3, 0, 500, // hz = 500. Must match the period.
  1737  			5, 0, 30, inlinedCallerStack[0], inlinedCallerStack[0],
  1738  			4, 0, 40, inlinedCallerStack[0],
  1739  		},
  1740  		count: 3,
  1741  		// inlinedCallerDump shows up here because
  1742  		// runtime_expandFinalInlineFrame adds it to the stack frame.
  1743  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump"}, {"runtime/pprof.inlinedCallerDump"}},
  1744  		wantSamples: []*profile.Sample{
  1745  			{Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}},
  1746  			{Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
  1747  		},
  1748  	}, {
  1749  		name: "recursion_chain_inline",
  1750  		input: []uint64{
  1751  			3, 0, 500, // hz = 500. Must match the period.
  1752  			9, 0, 10, recursionStack[0], recursionStack[1], recursionStack[2], recursionStack[3], recursionStack[4], recursionStack[5],
  1753  		},
  1754  		count: 2,
  1755  		wantLocs: [][]string{
  1756  			{"runtime/pprof.recursionChainBottom"},
  1757  			{
  1758  				"runtime/pprof.recursionChainMiddle",
  1759  				"runtime/pprof.recursionChainTop",
  1760  				"runtime/pprof.recursionChainBottom",
  1761  			},
  1762  			{
  1763  				"runtime/pprof.recursionChainMiddle",
  1764  				"runtime/pprof.recursionChainTop",
  1765  				"runtime/pprof.TestTryAdd", // inlined into the test.
  1766  			},
  1767  		},
  1768  		wantSamples: []*profile.Sample{
  1769  			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}, {ID: 3}}},
  1770  		},
  1771  	}, {
  1772  		name: "truncated_stack_trace_later",
  1773  		input: []uint64{
  1774  			3, 0, 500, // hz = 500. Must match the period.
  1775  			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
  1776  			4, 0, 60, inlinedCallerStack[0],
  1777  		},
  1778  		count:    3,
  1779  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
  1780  		wantSamples: []*profile.Sample{
  1781  			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
  1782  			{Value: []int64{60, 60 * period}, Location: []*profile.Location{{ID: 1}}},
  1783  		},
  1784  	}, {
  1785  		name: "truncated_stack_trace_first",
  1786  		input: []uint64{
  1787  			3, 0, 500, // hz = 500. Must match the period.
  1788  			4, 0, 70, inlinedCallerStack[0],
  1789  			5, 0, 80, inlinedCallerStack[0], inlinedCallerStack[1],
  1790  		},
  1791  		count:    3,
  1792  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
  1793  		wantSamples: []*profile.Sample{
  1794  			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
  1795  			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}},
  1796  		},
  1797  	}, {
  1798  		// We can recover the inlined caller from a truncated stack.
  1799  		name: "truncated_stack_trace_only",
  1800  		input: []uint64{
  1801  			3, 0, 500, // hz = 500. Must match the period.
  1802  			4, 0, 70, inlinedCallerStack[0],
  1803  		},
  1804  		count:    2,
  1805  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
  1806  		wantSamples: []*profile.Sample{
  1807  			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
  1808  		},
  1809  	}, {
  1810  		// The same location is used for duplicated stacks.
  1811  		name: "truncated_stack_trace_twice",
  1812  		input: []uint64{
  1813  			3, 0, 500, // hz = 500. Must match the period.
  1814  			4, 0, 70, inlinedCallerStack[0],
  1815  			// Fake frame: add a fake call to
  1816  			// inlinedCallerDump to prevent this sample
  1817  			// from getting merged into above.
  1818  			5, 0, 80, inlinedCallerStack[1], inlinedCallerStack[0],
  1819  		},
  1820  		count: 3,
  1821  		wantLocs: [][]string{
  1822  			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
  1823  			{"runtime/pprof.inlinedCallerDump"},
  1824  		},
  1825  		wantSamples: []*profile.Sample{
  1826  			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
  1827  			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}},
  1828  		},
  1829  	}}
  1830  
  1831  	for _, tc := range testCases {
  1832  		t.Run(tc.name, func(t *testing.T) {
  1833  			p, err := translateCPUProfile(tc.input, tc.count)
  1834  			if err != nil {
  1835  				t.Fatalf("translating profile: %v", err)
  1836  			}
  1837  			t.Logf("Profile: %v\n", p)
  1838  
  1839  			// One location entry with all inlined functions.
  1840  			var gotLoc [][]string
  1841  			for _, loc := range p.Location {
  1842  				var names []string
  1843  				for _, line := range loc.Line {
  1844  					names = append(names, line.Function.Name)
  1845  				}
  1846  				gotLoc = append(gotLoc, names)
  1847  			}
  1848  			if got, want := fmtJSON(gotLoc), fmtJSON(tc.wantLocs); got != want {
  1849  				t.Errorf("Got Location = %+v\n\twant %+v", got, want)
  1850  			}
  1851  			// All samples should point to one location.
  1852  			var gotSamples []*profile.Sample
  1853  			for _, sample := range p.Sample {
  1854  				var locs []*profile.Location
  1855  				for _, loc := range sample.Location {
  1856  					locs = append(locs, &profile.Location{ID: loc.ID})
  1857  				}
  1858  				gotSamples = append(gotSamples, &profile.Sample{Value: sample.Value, Location: locs})
  1859  			}
  1860  			if got, want := fmtJSON(gotSamples), fmtJSON(tc.wantSamples); got != want {
  1861  				t.Errorf("Got Samples = %+v\n\twant %+v", got, want)
  1862  			}
  1863  		})
  1864  	}
  1865  }
  1866  
  1867  func TestTimeVDSO(t *testing.T) {
  1868  	// Test that time functions have the right stack trace. In particular,
  1869  	// it shouldn't be recursive.
  1870  
  1871  	if runtime.GOOS == "android" {
  1872  		// Flaky on Android, issue 48655. VDSO may not be enabled.
  1873  		testenv.SkipFlaky(t, 48655)
  1874  	}
  1875  
  1876  	matches := matchAndAvoidStacks(stackContains, []string{"time.now"}, avoidFunctions())
  1877  	p := testCPUProfile(t, matches, func(dur time.Duration) {
  1878  		t0 := time.Now()
  1879  		for {
  1880  			t := time.Now()
  1881  			if t.Sub(t0) >= dur {
  1882  				return
  1883  			}
  1884  		}
  1885  	})
  1886  
  1887  	// Check for recursive time.now sample.
  1888  	for _, sample := range p.Sample {
  1889  		var seenNow bool
  1890  		for _, loc := range sample.Location {
  1891  			for _, line := range loc.Line {
  1892  				if line.Function.Name == "time.now" {
  1893  					if seenNow {
  1894  						t.Fatalf("unexpected recursive time.now")
  1895  					}
  1896  					seenNow = true
  1897  				}
  1898  			}
  1899  		}
  1900  	}
  1901  }
  1902  

View as plain text