Source file src/runtime/pprof/pprof_test.go

Documentation: runtime/pprof

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // +build !js
     6  
     7  package pprof
     8  
     9  import (
    10  	"bytes"
    11  	"context"
    12  	"fmt"
    13  	"internal/profile"
    14  	"internal/testenv"
    15  	"io"
    16  	"io/ioutil"
    17  	"math/big"
    18  	"os"
    19  	"os/exec"
    20  	"regexp"
    21  	"runtime"
    22  	"strings"
    23  	"sync"
    24  	"sync/atomic"
    25  	"testing"
    26  	"time"
    27  )
    28  
    29  func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
    30  	// We only need to get one 100 Hz clock tick, so we've got
    31  	// a large safety buffer.
    32  	// But do at least 500 iterations (which should take about 100ms),
    33  	// otherwise TestCPUProfileMultithreaded can fail if only one
    34  	// thread is scheduled during the testing period.
    35  	t0 := time.Now()
    36  	accum := *y
    37  	for i := 0; i < 500 || time.Since(t0) < dur; i++ {
    38  		accum = f(accum)
    39  	}
    40  	*y = accum
    41  }
    42  
    43  var (
    44  	salt1 = 0
    45  	salt2 = 0
    46  )
    47  
    48  // The actual CPU hogging function.
    49  // Must not call other functions nor access heap/globals in the loop;
    50  // otherwise, under the race detector, the samples will be in the race runtime.
    51  func cpuHog1(x int) int {
    52  	return cpuHog0(x, 1e5)
    53  }
    54  
    55  func cpuHog0(x, n int) int {
    56  	foo := x
    57  	for i := 0; i < n; i++ {
    58  		if foo > 0 {
    59  			foo *= foo
    60  		} else {
    61  			foo *= foo + 1
    62  		}
    63  	}
    64  	return foo
    65  }
    66  
    67  func cpuHog2(x int) int {
    68  	foo := x
    69  	for i := 0; i < 1e5; i++ {
    70  		if foo > 0 {
    71  			foo *= foo
    72  		} else {
    73  			foo *= foo + 2
    74  		}
    75  	}
    76  	return foo
    77  }
    78  
    79  // Return a list of functions that we don't want to ever appear in CPU
    80  // profiles. For gccgo, that list includes the sigprof handler itself.
    81  func avoidFunctions() []string {
    82  	if runtime.Compiler == "gccgo" {
    83  		return []string{"runtime.sigprof"}
    84  	}
    85  	return nil
    86  }
    87  
    88  func TestCPUProfile(t *testing.T) {
    89  	testCPUProfile(t, stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions(), func(dur time.Duration) {
    90  		cpuHogger(cpuHog1, &salt1, dur)
    91  	})
    92  }
    93  
    94  func TestCPUProfileMultithreaded(t *testing.T) {
    95  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
    96  	testCPUProfile(t, stackContains, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, avoidFunctions(), func(dur time.Duration) {
    97  		c := make(chan int)
    98  		go func() {
    99  			cpuHogger(cpuHog1, &salt1, dur)
   100  			c <- 1
   101  		}()
   102  		cpuHogger(cpuHog2, &salt2, dur)
   103  		<-c
   104  	})
   105  }
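
These tests drive StartCPUProfile and StopCPUProfile through the testCPUProfile harness defined further down. For reference, a minimal sketch of the same API used from an ordinary program outside this package might look like the following (the output file name cpu.prof is illustrative):

	package main

	import (
		"log"
		"os"
		"runtime/pprof"
	)

	func main() {
		// Illustrative output path; any io.Writer works.
		f, err := os.Create("cpu.prof")
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal(err)
		}
		defer pprof.StopCPUProfile()

		// ... run the workload to be profiled ...
	}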
   106  
   107  // containsInlinedCall reports whether the function body for the function f is
   108  // known to contain an inlined function call within the first maxBytes bytes.
   109  func containsInlinedCall(f interface{}, maxBytes int) bool {
   110  	_, found := findInlinedCall(f, maxBytes)
   111  	return found
   112  }
   113  
   114  // findInlinedCall returns the PC of an inlined function call within
   115  // the function body for the function f if any.
   116  func findInlinedCall(f interface{}, maxBytes int) (pc uint64, found bool) {
   117  	fFunc := runtime.FuncForPC(uintptr(funcPC(f)))
   118  	if fFunc == nil || fFunc.Entry() == 0 {
   119  		panic("failed to locate function entry")
   120  	}
   121  
   122  	for offset := 0; offset < maxBytes; offset++ {
   123  		innerPC := fFunc.Entry() + uintptr(offset)
   124  		inner := runtime.FuncForPC(innerPC)
   125  		if inner == nil {
   126  			// No function known for this PC value.
   127  			// It might simply be misaligned, so keep searching.
   128  			continue
   129  		}
   130  		if inner.Entry() != fFunc.Entry() {
   131  			// Scanned past f and didn't find any inlined functions.
   132  			break
   133  		}
   134  		if inner.Name() != fFunc.Name() {
   135  			// This PC has f as its entry-point, but is not f. Therefore, it must be a
   136  			// function inlined into f.
   137  			return uint64(innerPC), true
   138  		}
   139  	}
   140  
   141  	return 0, false
   142  }
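
findInlinedCall locates inlining by probing the function's code bytes with runtime.FuncForPC. Another way to observe inlined frames at run time, sketched below on the assumption that some calls on the current stack were in fact inlined, is runtime.CallersFrames, which reports inlined callees as separate frames:

	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		pcs := make([]uintptr, 32)
		n := runtime.Callers(1, pcs) // skip the runtime.Callers frame itself
		frames := runtime.CallersFrames(pcs[:n])
		for {
			frame, more := frames.Next()
			// Inlined callees show up as their own frames here, even though
			// they do not have distinct entry points in the binary.
			fmt.Println(frame.Function, frame.Line)
			if !more {
				break
			}
		}
	}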
   143  
   144  func TestCPUProfileInlining(t *testing.T) {
   145  	if !containsInlinedCall(inlinedCaller, 4<<10) {
   146  		t.Skip("Can't determine whether inlinedCallee was inlined into inlinedCaller.")
   147  	}
   148  
   149  	p := testCPUProfile(t, stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, avoidFunctions(), func(dur time.Duration) {
   150  		cpuHogger(inlinedCaller, &salt1, dur)
   151  	})
   152  
   153  	// Check that inlined function locations are encoded correctly. The inlinedCallee and inlinedCaller should be in one location.
   154  	for _, loc := range p.Location {
   155  		hasInlinedCallerAfterInlinedCallee, hasInlinedCallee := false, false
   156  		for _, line := range loc.Line {
   157  			if line.Function.Name == "runtime/pprof.inlinedCallee" {
   158  				hasInlinedCallee = true
   159  			}
   160  			if hasInlinedCallee && line.Function.Name == "runtime/pprof.inlinedCaller" {
   161  				hasInlinedCallerAfterInlinedCallee = true
   162  			}
   163  		}
   164  		if hasInlinedCallee != hasInlinedCallerAfterInlinedCallee {
   165  			t.Fatalf("want inlinedCallee followed by inlinedCaller, got separate Location entries:\n%v", p)
   166  		}
   167  	}
   168  }
   169  
   170  func inlinedCaller(x int) int {
   171  	x = inlinedCallee(x, 1e5)
   172  	return x
   173  }
   174  
   175  func inlinedCallee(x, n int) int {
   176  	return cpuHog0(x, n)
   177  }
   178  
   179  //go:noinline
   180  func dumpCallers(pcs []uintptr) {
   181  	if pcs == nil {
   182  		return
   183  	}
   184  
   185  	skip := 2 // Callers and dumpCallers
   186  	runtime.Callers(skip, pcs)
   187  }
   188  
   189  //go:noinline
   190  func inlinedCallerDump(pcs []uintptr) {
   191  	inlinedCalleeDump(pcs)
   192  }
   193  
   194  func inlinedCalleeDump(pcs []uintptr) {
   195  	dumpCallers(pcs)
   196  }
   197  
   198  func TestCPUProfileRecursion(t *testing.T) {
   199  	p := testCPUProfile(t, stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.recursionCallee", "runtime/pprof.recursionCaller"}, avoidFunctions(), func(dur time.Duration) {
   200  		cpuHogger(recursionCaller, &salt1, dur)
   201  	})
   202  
   203  	// Check that the Location encoding was not confused by recursive calls.
   204  	for i, loc := range p.Location {
   205  		recursionFunc := 0
   206  		for _, line := range loc.Line {
   207  			if name := line.Function.Name; name == "runtime/pprof.recursionCaller" || name == "runtime/pprof.recursionCallee" {
   208  				recursionFunc++
   209  			}
   210  		}
   211  		if recursionFunc > 1 {
   212  			t.Fatalf("want at most one recursionCaller or recursionCallee in one Location, got a violating Location (index: %d):\n%v", i, p)
   213  		}
   214  	}
   215  }
   216  
   217  func recursionCaller(x int) int {
   218  	y := recursionCallee(3, x)
   219  	return y
   220  }
   221  
   222  func recursionCallee(n, x int) int {
   223  	if n == 0 {
   224  		return 1
   225  	}
   226  	y := inlinedCallee(x, 1e4)
   227  	return y * recursionCallee(n-1, x)
   228  }
   229  
   230  func recursionChainTop(x int, pcs []uintptr) {
   231  	if x < 0 {
   232  		return
   233  	}
   234  	recursionChainMiddle(x, pcs)
   235  }
   236  
   237  func recursionChainMiddle(x int, pcs []uintptr) {
   238  	recursionChainBottom(x, pcs)
   239  }
   240  
   241  func recursionChainBottom(x int, pcs []uintptr) {
   242  	// This will be called on each iteration; we only care about the last call. We
   243  	// can't make this conditional or this function won't be inlined.
   244  	dumpCallers(pcs)
   245  
   246  	recursionChainTop(x-1, pcs)
   247  }
   248  
   249  func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) *profile.Profile {
   250  	p, err := profile.Parse(bytes.NewReader(valBytes))
   251  	if err != nil {
   252  		t.Fatal(err)
   253  	}
   254  	for _, sample := range p.Sample {
   255  		count := uintptr(sample.Value[0])
   256  		f(count, sample.Location, sample.Label)
   257  	}
   258  	return p
   259  }
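
parseProfile relies on the internal/profile package, which can only be imported from within the standard library. Code outside the Go tree usually parses the same protobuf profile format with github.com/google/pprof/profile; a sketch under that assumption (the input file name is illustrative):

	package main

	import (
		"fmt"
		"log"
		"os"

		"github.com/google/pprof/profile"
	)

	func main() {
		f, err := os.Open("cpu.prof") // illustrative path
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		p, err := profile.Parse(f)
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range p.Sample {
			fmt.Println(s.Value, len(s.Location))
		}
	}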
   260  
   261  // testCPUProfile runs f under the CPU profiler, checking for some conditions specified by need,
   262  // as interpreted by matches, and returns the parsed profile.
   263  func testCPUProfile(t *testing.T, matches matchFunc, need []string, avoid []string, f func(dur time.Duration)) *profile.Profile {
   264  	switch runtime.GOOS {
   265  	case "darwin":
   266  		switch runtime.GOARCH {
   267  		case "arm64":
   268  			// nothing
   269  		default:
   270  			out, err := exec.Command("uname", "-a").CombinedOutput()
   271  			if err != nil {
   272  				t.Fatal(err)
   273  			}
   274  			vers := string(out)
   275  			t.Logf("uname -a: %v", vers)
   276  		}
   277  	case "plan9":
   278  		t.Skip("skipping on plan9")
   279  	}
   280  
   281  	broken := false
   282  	switch runtime.GOOS {
   283  	case "darwin", "dragonfly", "netbsd", "illumos", "solaris":
   284  		broken = true
   285  	case "openbsd":
   286  		if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
   287  			broken = true
   288  		}
   289  	}
   290  
   291  	maxDuration := 5 * time.Second
   292  	if testing.Short() && broken {
   293  		// If it's expected to be broken, no point waiting around.
   294  		maxDuration /= 10
   295  	}
   296  
   297  	// If we're running a long test, start with a long duration
   298  	// for tests that try to make sure something *doesn't* happen.
   299  	duration := 5 * time.Second
   300  	if testing.Short() {
   301  		duration = 100 * time.Millisecond
   302  	}
   303  
   304  	// Profiling tests are inherently flaky, especially on a
   305  	// loaded system, such as when this test is running with
   306  	// several others under go test std. If a test fails in a way
   307  	// that could mean it just didn't run long enough, try with a
   308  	// longer duration.
   309  	for duration <= maxDuration {
   310  		var prof bytes.Buffer
   311  		if err := StartCPUProfile(&prof); err != nil {
   312  			t.Fatal(err)
   313  		}
   314  		f(duration)
   315  		StopCPUProfile()
   316  
   317  		if p, ok := profileOk(t, matches, need, avoid, prof, duration); ok {
   318  			return p
   319  		}
   320  
   321  		duration *= 2
   322  		if duration <= maxDuration {
   323  			t.Logf("retrying with %s duration", duration)
   324  		}
   325  	}
   326  
   327  	if broken {
   328  		t.Skipf("ignoring failure on %s/%s; see golang.org/issue/13841", runtime.GOOS, runtime.GOARCH)
   329  	}
   330  
   331  	// Ignore the failure if the tests are running in a QEMU-based emulator;
   332  	// QEMU is not perfect at emulating everything.
   333  	// The IN_QEMU environment variable is set by some of the Go builders.
   334  	// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
   335  	if os.Getenv("IN_QEMU") == "1" {
   336  		t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
   337  	}
   338  	t.FailNow()
   339  	return nil
   340  }
   341  
   342  func contains(slice []string, s string) bool {
   343  	for i := range slice {
   344  		if slice[i] == s {
   345  			return true
   346  		}
   347  	}
   348  	return false
   349  }
   350  
   351  // stackContains matches if a function named spec appears anywhere in the stack trace.
   352  func stackContains(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
   353  	for _, loc := range stk {
   354  		for _, line := range loc.Line {
   355  			if strings.Contains(line.Function.Name, spec) {
   356  				return true
   357  			}
   358  		}
   359  	}
   360  	return false
   361  }
   362  
   363  type matchFunc func(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool
   364  
   365  func profileOk(t *testing.T, matches matchFunc, need []string, avoid []string, prof bytes.Buffer, duration time.Duration) (_ *profile.Profile, ok bool) {
   366  	ok = true
   367  
   368  	// Check that profile is well formed, contains 'need', and does not contain
   369  	// anything from 'avoid'.
   370  	have := make([]uintptr, len(need))
   371  	avoidSamples := make([]uintptr, len(avoid))
   372  	var samples uintptr
   373  	var buf bytes.Buffer
   374  	p := parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
   375  		fmt.Fprintf(&buf, "%d:", count)
   376  		fprintStack(&buf, stk)
   377  		samples += count
   378  		for i, spec := range need {
   379  			if matches(spec, count, stk, labels) {
   380  				have[i] += count
   381  			}
   382  		}
   383  		for i, name := range avoid {
   384  			for _, loc := range stk {
   385  				for _, line := range loc.Line {
   386  					if strings.Contains(line.Function.Name, name) {
   387  						avoidSamples[i] += count
   388  					}
   389  				}
   390  			}
   391  		}
   392  		fmt.Fprintf(&buf, "\n")
   393  	})
   394  	t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())
   395  
   396  	if samples < 10 && runtime.GOOS == "windows" {
   397  		// On some Windows machines we end up with
   398  		// not enough samples due to coarse timer
   399  		// resolution. Let it go.
   400  		t.Log("too few samples on Windows (golang.org/issue/10842)")
   401  		return p, false
   402  	}
   403  
   404  	// Check that we got a reasonable number of samples.
   405  	// We used to always require at least ideal/4 samples,
   406  	// but that is too hard to guarantee on a loaded system.
   407  	// Now we accept 10 or more samples, which we take to be
   408  	// enough to show that at least some profiling is occurring.
   409  	if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
   410  		t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
   411  		ok = false
   412  	}
   413  
   414  	for i, name := range avoid {
   415  		bad := avoidSamples[i]
   416  		if bad != 0 {
   417  			t.Logf("found %d samples in avoid-function %s\n", bad, name)
   418  			ok = false
   419  		}
   420  	}
   421  
   422  	if len(need) == 0 {
   423  		return p, ok
   424  	}
   425  
   426  	var total uintptr
   427  	for i, name := range need {
   428  		total += have[i]
   429  		t.Logf("%s: %d\n", name, have[i])
   430  	}
   431  	if total == 0 {
   432  		t.Logf("no samples in expected functions")
   433  		ok = false
   434  	}
   435  	// We'd like to check a reasonable minimum, like
   436  	// total / len(have) / smallconstant, but this test is
   437  	// pretty flaky (see bug 7095).  So we'll just test to
   438  	// make sure we got at least one sample.
   439  	min := uintptr(1)
   440  	for i, name := range need {
   441  		if have[i] < min {
   442  			t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
   443  			ok = false
   444  		}
   445  	}
   446  	return p, ok
   447  }
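
For concreteness, the sample-count check above works out as follows. At the default 100 Hz sampling rate,

	ideal = duration * 100 / time.Second

so a 5 s run ideally collects 500 samples, and profileOk passes once it has seen at least one sample and either samples >= ideal/4 or samples >= 10.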
   448  
   449  // Fork can hang if preempted with signals frequently enough (see issue 5517).
   450  // Ensure that we do not do this.
   451  func TestCPUProfileWithFork(t *testing.T) {
   452  	testenv.MustHaveExec(t)
   453  
   454  	heap := 1 << 30
   455  	if runtime.GOOS == "android" {
   456  		// Use smaller size for Android to avoid crash.
   457  		heap = 100 << 20
   458  	}
   459  	if runtime.GOOS == "windows" && runtime.GOARCH == "arm" {
   460  		// Use smaller heap for Windows/ARM to avoid crash.
   461  		heap = 100 << 20
   462  	}
   463  	if testing.Short() {
   464  		heap = 100 << 20
   465  	}
   466  	// This makes fork slower.
   467  	garbage := make([]byte, heap)
   468  	// Need to touch the slice; otherwise it won't be paged in.
   469  	done := make(chan bool)
   470  	go func() {
   471  		for i := range garbage {
   472  			garbage[i] = 42
   473  		}
   474  		done <- true
   475  	}()
   476  	<-done
   477  
   478  	var prof bytes.Buffer
   479  	if err := StartCPUProfile(&prof); err != nil {
   480  		t.Fatal(err)
   481  	}
   482  	defer StopCPUProfile()
   483  
   484  	for i := 0; i < 10; i++ {
   485  		exec.Command(os.Args[0], "-h").CombinedOutput()
   486  	}
   487  }
   488  
   489  // Test that the profiler does not observe runtime.gogo as "user" goroutine execution.
   490  // If it did, it would see inconsistent state and would either record an incorrect stack
   491  // or crash because the stack was malformed.
   492  func TestGoroutineSwitch(t *testing.T) {
   493  	if runtime.Compiler == "gccgo" {
   494  		t.Skip("not applicable for gccgo")
   495  	}
   496  	// How much to try. These defaults take about 1 second
   497  	// on a 2012 MacBook Pro. The ones in short mode take
   498  	// about 0.1 seconds.
   499  	tries := 10
   500  	count := 1000000
   501  	if testing.Short() {
   502  		tries = 1
   503  	}
   504  	for try := 0; try < tries; try++ {
   505  		var prof bytes.Buffer
   506  		if err := StartCPUProfile(&prof); err != nil {
   507  			t.Fatal(err)
   508  		}
   509  		for i := 0; i < count; i++ {
   510  			runtime.Gosched()
   511  		}
   512  		StopCPUProfile()
   513  
   514  		// Read profile to look for entries for runtime.gogo with an attempt at a traceback.
   515  		// The special entries that record a PC without a traceback are handled below.
   516  		parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
   517  			// An entry with two frames with 'System' in its top frame
   518  			// exists to record a PC without a traceback. Those are okay.
   519  			if len(stk) == 2 {
   520  				name := stk[1].Line[0].Function.Name
   521  				if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
   522  					return
   523  				}
   524  			}
   525  
   526  			// Otherwise, should not see runtime.gogo.
   527  			// The place we'd see it would be the innermost frame.
   528  			name := stk[0].Line[0].Function.Name
   529  			if name == "runtime.gogo" {
   530  				var buf bytes.Buffer
   531  				fprintStack(&buf, stk)
   532  				t.Fatalf("found profile entry for runtime.gogo:\n%s", buf.String())
   533  			}
   534  		})
   535  	}
   536  }
   537  
   538  func fprintStack(w io.Writer, stk []*profile.Location) {
   539  	for _, loc := range stk {
   540  		fmt.Fprintf(w, " %#x", loc.Address)
   541  		fmt.Fprintf(w, " (")
   542  		for i, line := range loc.Line {
   543  			if i > 0 {
   544  				fmt.Fprintf(w, " ")
   545  			}
   546  			fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
   547  		}
   548  		fmt.Fprintf(w, ")")
   549  	}
   550  	fmt.Fprintf(w, "\n")
   551  }
   552  
   553  // Test that profiling of division operations is okay, especially on ARM. See issue 6681.
   554  func TestMathBigDivide(t *testing.T) {
   555  	testCPUProfile(t, nil, nil, nil, func(duration time.Duration) {
   556  		t := time.After(duration)
   557  		pi := new(big.Int)
   558  		for {
   559  			for i := 0; i < 100; i++ {
   560  				n := big.NewInt(2646693125139304345)
   561  				d := big.NewInt(842468587426513207)
   562  				pi.Div(n, d)
   563  			}
   564  			select {
   565  			case <-t:
   566  				return
   567  			default:
   568  			}
   569  		}
   570  	})
   571  }
   572  
   573  // stackContainsAll matches if all functions in spec (comma-separated) appear somewhere in the stack trace.
   574  func stackContainsAll(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
   575  	for _, f := range strings.Split(spec, ",") {
   576  		if !stackContains(f, count, stk, labels) {
   577  			return false
   578  		}
   579  	}
   580  	return true
   581  }
   582  
   583  func TestMorestack(t *testing.T) {
   584  	testCPUProfile(t, stackContainsAll, []string{"runtime.newstack,runtime/pprof.growstack"}, avoidFunctions(), func(duration time.Duration) {
   585  		t := time.After(duration)
   586  		c := make(chan bool)
   587  		for {
   588  			go func() {
   589  				growstack1()
   590  				c <- true
   591  			}()
   592  			select {
   593  			case <-t:
   594  				return
   595  			case <-c:
   596  			}
   597  		}
   598  	})
   599  }
   600  
   601  //go:noinline
   602  func growstack1() {
   603  	growstack()
   604  }
   605  
   606  //go:noinline
   607  func growstack() {
   608  	var buf [8 << 10]byte
   609  	use(buf)
   610  }
   611  
   612  //go:noinline
   613  func use(x [8 << 10]byte) {}
   614  
   615  func TestBlockProfile(t *testing.T) {
   616  	type TestCase struct {
   617  		name string
   618  		f    func()
   619  		stk  []string
   620  		re   string
   621  	}
   622  	tests := [...]TestCase{
   623  		{
   624  			name: "chan recv",
   625  			f:    blockChanRecv,
   626  			stk: []string{
   627  				"runtime.chanrecv1",
   628  				"runtime/pprof.blockChanRecv",
   629  				"runtime/pprof.TestBlockProfile",
   630  			},
   631  			re: `
   632  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   633  #	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
   634  #	0x[0-9a-f]+	runtime/pprof\.blockChanRecv\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   635  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   636  `},
   637  		{
   638  			name: "chan send",
   639  			f:    blockChanSend,
   640  			stk: []string{
   641  				"runtime.chansend1",
   642  				"runtime/pprof.blockChanSend",
   643  				"runtime/pprof.TestBlockProfile",
   644  			},
   645  			re: `
   646  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   647  #	0x[0-9a-f]+	runtime\.chansend1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
   648  #	0x[0-9a-f]+	runtime/pprof\.blockChanSend\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   649  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   650  `},
   651  		{
   652  			name: "chan close",
   653  			f:    blockChanClose,
   654  			stk: []string{
   655  				"runtime.chanrecv1",
   656  				"runtime/pprof.blockChanClose",
   657  				"runtime/pprof.TestBlockProfile",
   658  			},
   659  			re: `
   660  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   661  #	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
   662  #	0x[0-9a-f]+	runtime/pprof\.blockChanClose\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   663  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   664  `},
   665  		{
   666  			name: "select recv async",
   667  			f:    blockSelectRecvAsync,
   668  			stk: []string{
   669  				"runtime.selectgo",
   670  				"runtime/pprof.blockSelectRecvAsync",
   671  				"runtime/pprof.TestBlockProfile",
   672  			},
   673  			re: `
   674  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   675  #	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
   676  #	0x[0-9a-f]+	runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   677  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   678  `},
   679  		{
   680  			name: "select send sync",
   681  			f:    blockSelectSendSync,
   682  			stk: []string{
   683  				"runtime.selectgo",
   684  				"runtime/pprof.blockSelectSendSync",
   685  				"runtime/pprof.TestBlockProfile",
   686  			},
   687  			re: `
   688  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   689  #	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
   690  #	0x[0-9a-f]+	runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   691  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   692  `},
   693  		{
   694  			name: "mutex",
   695  			f:    blockMutex,
   696  			stk: []string{
   697  				"sync.(*Mutex).Lock",
   698  				"runtime/pprof.blockMutex",
   699  				"runtime/pprof.TestBlockProfile",
   700  			},
   701  			re: `
   702  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   703  #	0x[0-9a-f]+	sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+	.*/src/sync/mutex\.go:[0-9]+
   704  #	0x[0-9a-f]+	runtime/pprof\.blockMutex\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   705  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   706  `},
   707  		{
   708  			name: "cond",
   709  			f:    blockCond,
   710  			stk: []string{
   711  				"sync.(*Cond).Wait",
   712  				"runtime/pprof.blockCond",
   713  				"runtime/pprof.TestBlockProfile",
   714  			},
   715  			re: `
   716  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   717  #	0x[0-9a-f]+	sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+	.*/src/sync/cond\.go:[0-9]+
   718  #	0x[0-9a-f]+	runtime/pprof\.blockCond\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   719  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   720  `},
   721  	}
   722  
   723  	// Generate block profile
   724  	runtime.SetBlockProfileRate(1)
   725  	defer runtime.SetBlockProfileRate(0)
   726  	for _, test := range tests {
   727  		test.f()
   728  	}
   729  
   730  	t.Run("debug=1", func(t *testing.T) {
   731  		var w bytes.Buffer
   732  		Lookup("block").WriteTo(&w, 1)
   733  		prof := w.String()
   734  
   735  		if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
   736  			t.Fatalf("Bad profile header:\n%v", prof)
   737  		}
   738  
   739  		if strings.HasSuffix(prof, "#\t0x0\n\n") {
   740  			t.Errorf("Useless 0 suffix:\n%v", prof)
   741  		}
   742  
   743  		for _, test := range tests {
   744  			if !regexp.MustCompile(strings.ReplaceAll(test.re, "\t", "\t+")).MatchString(prof) {
   745  				t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
   746  			}
   747  		}
   748  	})
   749  
   750  	t.Run("proto", func(t *testing.T) {
   751  		// proto format
   752  		var w bytes.Buffer
   753  		Lookup("block").WriteTo(&w, 0)
   754  		p, err := profile.Parse(&w)
   755  		if err != nil {
   756  			t.Fatalf("failed to parse profile: %v", err)
   757  		}
   758  		t.Logf("parsed proto: %s", p)
   759  		if err := p.CheckValid(); err != nil {
   760  			t.Fatalf("invalid profile: %v", err)
   761  		}
   762  
   763  		stks := stacks(p)
   764  		for _, test := range tests {
   765  			if !containsStack(stks, test.stk) {
   766  				t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
   767  			}
   768  		}
   769  	})
   770  
   771  }
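
TestBlockProfile enables block profiling with runtime.SetBlockProfileRate(1) and reads it back through Lookup("block") in both text and proto forms. A minimal sketch of the same flow in a regular program (writing to standard output for illustration):

	package main

	import (
		"log"
		"os"
		"runtime"
		"runtime/pprof"
	)

	func main() {
		// Record every blocking event (rate 1); larger rates record fewer events.
		runtime.SetBlockProfileRate(1)

		// ... run code that blocks on channels, mutexes, etc. ...

		if err := pprof.Lookup("block").WriteTo(os.Stdout, 1); err != nil {
			log.Fatal(err)
		}
	}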
   772  
   773  func stacks(p *profile.Profile) (res [][]string) {
   774  	for _, s := range p.Sample {
   775  		var stk []string
   776  		for _, l := range s.Location {
   777  			for _, line := range l.Line {
   778  				stk = append(stk, line.Function.Name)
   779  			}
   780  		}
   781  		res = append(res, stk)
   782  	}
   783  	return res
   784  }
   785  
   786  func containsStack(got [][]string, want []string) bool {
   787  	for _, stk := range got {
   788  		if len(stk) < len(want) {
   789  			continue
   790  		}
   791  		for i, f := range want {
   792  			if f != stk[i] {
   793  				break
   794  			}
   795  			if i == len(want)-1 {
   796  				return true
   797  			}
   798  		}
   799  	}
   800  	return false
   801  }
   802  
   803  const blockDelay = 10 * time.Millisecond
   804  
   805  func blockChanRecv() {
   806  	c := make(chan bool)
   807  	go func() {
   808  		time.Sleep(blockDelay)
   809  		c <- true
   810  	}()
   811  	<-c
   812  }
   813  
   814  func blockChanSend() {
   815  	c := make(chan bool)
   816  	go func() {
   817  		time.Sleep(blockDelay)
   818  		<-c
   819  	}()
   820  	c <- true
   821  }
   822  
   823  func blockChanClose() {
   824  	c := make(chan bool)
   825  	go func() {
   826  		time.Sleep(blockDelay)
   827  		close(c)
   828  	}()
   829  	<-c
   830  }
   831  
   832  func blockSelectRecvAsync() {
   833  	const numTries = 3
   834  	c := make(chan bool, 1)
   835  	c2 := make(chan bool, 1)
   836  	go func() {
   837  		for i := 0; i < numTries; i++ {
   838  			time.Sleep(blockDelay)
   839  			c <- true
   840  		}
   841  	}()
   842  	for i := 0; i < numTries; i++ {
   843  		select {
   844  		case <-c:
   845  		case <-c2:
   846  		}
   847  	}
   848  }
   849  
   850  func blockSelectSendSync() {
   851  	c := make(chan bool)
   852  	c2 := make(chan bool)
   853  	go func() {
   854  		time.Sleep(blockDelay)
   855  		<-c
   856  	}()
   857  	select {
   858  	case c <- true:
   859  	case c2 <- true:
   860  	}
   861  }
   862  
   863  func blockMutex() {
   864  	var mu sync.Mutex
   865  	mu.Lock()
   866  	go func() {
   867  		time.Sleep(blockDelay)
   868  		mu.Unlock()
   869  	}()
   870  	// Note: Unlock releases mu before recording the mutex event,
   871  	// so it's theoretically possible for this to proceed and
   872  	// capture the profile before the event is recorded. As long
   873  	// as this is blocked before the unlock happens, it's okay.
   874  	mu.Lock()
   875  }
   876  
   877  func blockCond() {
   878  	var mu sync.Mutex
   879  	c := sync.NewCond(&mu)
   880  	mu.Lock()
   881  	go func() {
   882  		time.Sleep(blockDelay)
   883  		mu.Lock()
   884  		c.Signal()
   885  		mu.Unlock()
   886  	}()
   887  	c.Wait()
   888  	mu.Unlock()
   889  }
   890  
   891  func TestMutexProfile(t *testing.T) {
   892  	// Generate mutex profile
   893  
   894  	old := runtime.SetMutexProfileFraction(1)
   895  	defer runtime.SetMutexProfileFraction(old)
   896  	if old != 0 {
   897  		t.Fatalf("need MutexProfileRate 0, got %d", old)
   898  	}
   899  
   900  	blockMutex()
   901  
   902  	t.Run("debug=1", func(t *testing.T) {
   903  		var w bytes.Buffer
   904  		Lookup("mutex").WriteTo(&w, 1)
   905  		prof := w.String()
   906  		t.Logf("received profile: %v", prof)
   907  
   908  		if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
   909  			t.Errorf("Bad profile header:\n%v", prof)
   910  		}
   911  		prof = strings.Trim(prof, "\n")
   912  		lines := strings.Split(prof, "\n")
   913  		if len(lines) != 6 {
   914  			t.Errorf("expected 6 lines, got %d %q\n%s", len(lines), prof, prof)
   915  		}
   916  		if len(lines) < 6 {
   917  			return
   918  		}
   919  		// checking that the line is like "35258904 1 @ 0x48288d 0x47cd28 0x458931"
   920  		r2 := `^\d+ \d+ @(?: 0x[[:xdigit:]]+)+`
   921  		//r2 := "^[0-9]+ 1 @ 0x[0-9a-f x]+$"
   922  		if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
   923  			t.Errorf("%q didn't match %q", lines[3], r2)
   924  		}
   925  		r3 := "^#.*runtime/pprof.blockMutex.*$"
   926  		if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
   927  			t.Errorf("%q didn't match %q", lines[5], r3)
   928  		}
   929  		t.Logf(prof)
   930  	})
   931  	t.Run("proto", func(t *testing.T) {
   932  		// proto format
   933  		var w bytes.Buffer
   934  		Lookup("mutex").WriteTo(&w, 0)
   935  		p, err := profile.Parse(&w)
   936  		if err != nil {
   937  			t.Fatalf("failed to parse profile: %v", err)
   938  		}
   939  		t.Logf("parsed proto: %s", p)
   940  		if err := p.CheckValid(); err != nil {
   941  			t.Fatalf("invalid profile: %v", err)
   942  		}
   943  
   944  		stks := stacks(p)
   945  		for _, want := range [][]string{
   946  			{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutex.func1"},
   947  		} {
   948  			if !containsStack(stks, want) {
   949  				t.Errorf("No matching stack entry for %+v", want)
   950  			}
   951  		}
   952  	})
   953  }
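
TestMutexProfile uses SetMutexProfileFraction(1) so that every contention event is recorded. In a long-running program a larger fraction is more typical; a hedged sketch of that usage, again writing to standard output for illustration:

	package main

	import (
		"log"
		"os"
		"runtime"
		"runtime/pprof"
	)

	func main() {
		// Sample roughly 1 in 5 mutex contention events; 0 disables sampling.
		runtime.SetMutexProfileFraction(5)
		defer runtime.SetMutexProfileFraction(0)

		// ... run code that contends on sync.Mutex ...

		if err := pprof.Lookup("mutex").WriteTo(os.Stdout, 1); err != nil {
			log.Fatal(err)
		}
	}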
   954  
   955  func func1(c chan int) { <-c }
   956  func func2(c chan int) { <-c }
   957  func func3(c chan int) { <-c }
   958  func func4(c chan int) { <-c }
   959  
   960  func TestGoroutineCounts(t *testing.T) {
   961  	// Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
   962  	// desired blocking point.
   963  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
   964  
   965  	c := make(chan int)
   966  	for i := 0; i < 100; i++ {
   967  		switch {
   968  		case i%10 == 0:
   969  			go func1(c)
   970  		case i%2 == 0:
   971  			go func2(c)
   972  		default:
   973  			go func3(c)
   974  		}
   975  		// Let goroutines block on channel
   976  		for j := 0; j < 5; j++ {
   977  			runtime.Gosched()
   978  		}
   979  	}
   980  	ctx := context.Background()
   981  
   982  	// ... and again, with labels this time (just with fewer iterations to keep
   983  	// sorting deterministic).
   984  	Do(ctx, Labels("label", "value"), func(context.Context) {
   985  		for i := 0; i < 89; i++ {
   986  			switch {
   987  			case i%10 == 0:
   988  				go func1(c)
   989  			case i%2 == 0:
   990  				go func2(c)
   991  			default:
   992  				go func3(c)
   993  			}
   994  			// Let goroutines block on channel
   995  			for j := 0; j < 5; j++ {
   996  				runtime.Gosched()
   997  			}
   998  		}
   999  	})
  1000  
  1001  	var w bytes.Buffer
  1002  	goroutineProf := Lookup("goroutine")
  1003  
  1004  	// Check debug profile
  1005  	goroutineProf.WriteTo(&w, 1)
  1006  	prof := w.String()
  1007  
  1008  	labels := labelMap{"label": "value"}
  1009  	labelStr := "\n# labels: " + labels.String()
  1010  	if !containsInOrder(prof, "\n50 @ ", "\n44 @", labelStr,
  1011  		"\n40 @", "\n36 @", labelStr, "\n10 @", "\n9 @", labelStr, "\n1 @") {
  1012  		t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
  1013  	}
  1014  
  1015  	// Check proto profile
  1016  	w.Reset()
  1017  	goroutineProf.WriteTo(&w, 0)
  1018  	p, err := profile.Parse(&w)
  1019  	if err != nil {
  1020  		t.Errorf("error parsing protobuf profile: %v", err)
  1021  	}
  1022  	if err := p.CheckValid(); err != nil {
  1023  		t.Errorf("protobuf profile is invalid: %v", err)
  1024  	}
  1025  	expectedLabels := map[int64]map[string]string{
  1026  		50: map[string]string{},
  1027  		44: map[string]string{"label": "value"},
  1028  		40: map[string]string{},
  1029  		36: map[string]string{"label": "value"},
  1030  		10: map[string]string{},
  1031  		9:  map[string]string{"label": "value"},
  1032  		1:  map[string]string{},
  1033  	}
  1034  	if !containsCountsLabels(p, expectedLabels) {
  1035  		t.Errorf("expected count profile to contain goroutines with counts and labels %v, got %v",
  1036  			expectedLabels, p)
  1037  	}
  1038  
  1039  	close(c)
  1040  
  1041  	time.Sleep(10 * time.Millisecond) // let goroutines exit
  1042  }
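
TestGoroutineCounts reads the goroutine profile at debug=1 (grouped, human-readable stack counts with "# labels:" annotations) and debug=0 (protobuf). Dumping the same profile from an ordinary program can be as small as the sketch below (output destination illustrative):

	package main

	import (
		"log"
		"os"
		"runtime/pprof"
	)

	func main() {
		// debug=1: one entry per unique stack with a count, plus label lines
		// for goroutines that carry pprof labels.
		if err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1); err != nil {
			log.Fatal(err)
		}
	}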
  1043  
  1044  func containsInOrder(s string, all ...string) bool {
  1045  	for _, t := range all {
  1046  		i := strings.Index(s, t)
  1047  		if i < 0 {
  1048  			return false
  1049  		}
  1050  		s = s[i+len(t):]
  1051  	}
  1052  	return true
  1053  }
  1054  
  1055  func containsCountsLabels(prof *profile.Profile, countLabels map[int64]map[string]string) bool {
  1056  	m := make(map[int64]int)
  1057  	type nkey struct {
  1058  		count    int64
  1059  		key, val string
  1060  	}
  1061  	n := make(map[nkey]int)
  1062  	for c, kv := range countLabels {
  1063  		m[c]++
  1064  		for k, v := range kv {
  1065  			n[nkey{
  1066  				count: c,
  1067  				key:   k,
  1068  				val:   v,
  1069  			}]++
  1070  
  1071  		}
  1072  	}
  1073  	for _, s := range prof.Sample {
  1074  		// The count is the single value in the sample
  1075  		if len(s.Value) != 1 {
  1076  			return false
  1077  		}
  1078  		m[s.Value[0]]--
  1079  		for k, vs := range s.Label {
  1080  			for _, v := range vs {
  1081  				n[nkey{
  1082  					count: s.Value[0],
  1083  					key:   k,
  1084  					val:   v,
  1085  				}]--
  1086  			}
  1087  		}
  1088  	}
  1089  	for _, n := range m {
  1090  		if n > 0 {
  1091  			return false
  1092  		}
  1093  	}
  1094  	for _, ncnt := range n {
  1095  		if ncnt != 0 {
  1096  			return false
  1097  		}
  1098  	}
  1099  	return true
  1100  }
  1101  
  1102  var emptyCallStackTestRun int64
  1103  
  1104  // Issue 18836.
  1105  func TestEmptyCallStack(t *testing.T) {
  1106  	name := fmt.Sprintf("test18836_%d", emptyCallStackTestRun)
  1107  	emptyCallStackTestRun++
  1108  
  1109  	t.Parallel()
  1110  	var buf bytes.Buffer
  1111  	p := NewProfile(name)
  1112  
  1113  	p.Add("foo", 47674)
  1114  	p.WriteTo(&buf, 1)
  1115  	p.Remove("foo")
  1116  	got := buf.String()
  1117  	prefix := name + " profile: total 1\n"
  1118  	if !strings.HasPrefix(got, prefix) {
  1119  		t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
  1120  	}
  1121  	lostevent := "lostProfileEvent"
  1122  	if !strings.Contains(got, lostevent) {
  1123  		t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
  1124  	}
  1125  }
  1126  
  1127  // stackContainsLabeled takes a spec like funcname;key=value and matches if the stack has that key
  1128  // and value and has funcname somewhere in the stack.
  1129  func stackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
  1130  	semi := strings.Index(spec, ";")
  1131  	if semi == -1 {
  1132  		panic("no semicolon in key/value spec")
  1133  	}
  1134  	kv := strings.SplitN(spec[semi+1:], "=", 2)
  1135  	if len(kv) != 2 {
  1136  		panic("missing = in key/value spec")
  1137  	}
  1138  	if !contains(labels[kv[0]], kv[1]) {
  1139  		return false
  1140  	}
  1141  	return stackContains(spec[:semi], count, stk, labels)
  1142  }
  1143  
  1144  func TestCPUProfileLabel(t *testing.T) {
  1145  	testCPUProfile(t, stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, avoidFunctions(), func(dur time.Duration) {
  1146  		Do(context.Background(), Labels("key", "value"), func(context.Context) {
  1147  			cpuHogger(cpuHog1, &salt1, dur)
  1148  		})
  1149  	})
  1150  }
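
TestCPUProfileLabel verifies that labels attached with Do appear on CPU samples. For reference, a minimal sketch of labeling a workload from outside the pprof package (the key/value strings and the work function are illustrative):

	package main

	import (
		"context"
		"runtime/pprof"
	)

	func work() {
		// ... CPU-bound work whose samples should carry the labels ...
	}

	func main() {
		ctx := context.Background()
		pprof.Do(ctx, pprof.Labels("request", "index"), func(ctx context.Context) {
			work()
		})
	}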
  1151  
  1152  func TestLabelRace(t *testing.T) {
  1153  	// Test the race detector annotations for synchronization
  1154  	// between setting labels and consuming them from the
  1155  	// profile.
  1156  	testCPUProfile(t, stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, nil, func(dur time.Duration) {
  1157  		start := time.Now()
  1158  		var wg sync.WaitGroup
  1159  		for time.Since(start) < dur {
  1160  			var salts [10]int
  1161  			for i := 0; i < 10; i++ {
  1162  				wg.Add(1)
  1163  				go func(j int) {
  1164  					Do(context.Background(), Labels("key", "value"), func(context.Context) {
  1165  						cpuHogger(cpuHog1, &salts[j], time.Millisecond)
  1166  					})
  1167  					wg.Done()
  1168  				}(i)
  1169  			}
  1170  			wg.Wait()
  1171  		}
  1172  	})
  1173  }
  1174  
  1175  // Check that there is no deadlock when the program receives SIGPROF while in
  1176  // the 64-bit atomics' critical section. This used to happen on mips{,le}. See #20146.
  1177  func TestAtomicLoadStore64(t *testing.T) {
  1178  	f, err := ioutil.TempFile("", "profatomic")
  1179  	if err != nil {
  1180  		t.Fatalf("TempFile: %v", err)
  1181  	}
  1182  	defer os.Remove(f.Name())
  1183  	defer f.Close()
  1184  
  1185  	if err := StartCPUProfile(f); err != nil {
  1186  		t.Fatal(err)
  1187  	}
  1188  	defer StopCPUProfile()
  1189  
  1190  	var flag uint64
  1191  	done := make(chan bool, 1)
  1192  
  1193  	go func() {
  1194  		for atomic.LoadUint64(&flag) == 0 {
  1195  			runtime.Gosched()
  1196  		}
  1197  		done <- true
  1198  	}()
  1199  	time.Sleep(50 * time.Millisecond)
  1200  	atomic.StoreUint64(&flag, 1)
  1201  	<-done
  1202  }
  1203  
  1204  func TestTracebackAll(t *testing.T) {
  1205  	// With gccgo, if a profiling signal arrives at the wrong time
  1206  	// during traceback, it may crash or hang. See issue #29448.
  1207  	f, err := ioutil.TempFile("", "proftraceback")
  1208  	if err != nil {
  1209  		t.Fatalf("TempFile: %v", err)
  1210  	}
  1211  	defer os.Remove(f.Name())
  1212  	defer f.Close()
  1213  
  1214  	if err := StartCPUProfile(f); err != nil {
  1215  		t.Fatal(err)
  1216  	}
  1217  	defer StopCPUProfile()
  1218  
  1219  	ch := make(chan int)
  1220  	defer close(ch)
  1221  
  1222  	count := 10
  1223  	for i := 0; i < count; i++ {
  1224  		go func() {
  1225  			<-ch // block
  1226  		}()
  1227  	}
  1228  
  1229  	N := 10000
  1230  	if testing.Short() {
  1231  		N = 500
  1232  	}
  1233  	buf := make([]byte, 10*1024)
  1234  	for i := 0; i < N; i++ {
  1235  		runtime.Stack(buf, true)
  1236  	}
  1237  }
  1238  
  1239  // TestTryAdd tests the cases that are hard to test with real program execution.
  1240  //
  1241  // For example, the current Go compilers may not always inline functions
  1242  // involved in recursion, but that may not be true in future compilers. This
  1243  // tests such cases by using fake call sequences and building the profile
  1244  // with translateCPUProfile, defined in proto_test.go.
  1245  func TestTryAdd(t *testing.T) {
  1246  	if _, found := findInlinedCall(inlinedCallerDump, 4<<10); !found {
  1247  		t.Skip("Can't determine whether anything was inlined into inlinedCallerDump.")
  1248  	}
  1249  
  1250  	// inlinedCallerDump
  1251  	//   inlinedCalleeDump
  1252  	pcs := make([]uintptr, 2)
  1253  	inlinedCallerDump(pcs)
  1254  	inlinedCallerStack := make([]uint64, 2)
  1255  	for i := range pcs {
  1256  		inlinedCallerStack[i] = uint64(pcs[i])
  1257  	}
  1258  
  1259  	if _, found := findInlinedCall(recursionChainBottom, 4<<10); !found {
  1260  		t.Skip("Can't determine whether anything was inlined into recursionChainBottom.")
  1261  	}
  1262  
  1263  	// recursionChainTop
  1264  	//   recursionChainMiddle
  1265  	//     recursionChainBottom
  1266  	//       recursionChainTop
  1267  	//         recursionChainMiddle
  1268  	//           recursionChainBottom
  1269  	pcs = make([]uintptr, 6)
  1270  	recursionChainTop(1, pcs)
  1271  	recursionStack := make([]uint64, len(pcs))
  1272  	for i := range pcs {
  1273  		recursionStack[i] = uint64(pcs[i])
  1274  	}
  1275  
  1276  	period := int64(2000 * 1000) // 1/500*1e9 nanosec.
  1277  
  1278  	testCases := []struct {
  1279  		name        string
  1280  		input       []uint64          // following the input format assumed by profileBuilder.addCPUData.
  1281  		wantLocs    [][]string        // ordered location entries with function names.
  1282  		wantSamples []*profile.Sample // ordered samples, we care only about Value and the profile location IDs.
  1283  	}{{
  1284  		// Sanity test for a normal, complete stack trace.
  1285  		name: "full_stack_trace",
  1286  		input: []uint64{
  1287  			3, 0, 500, // hz = 500. Must match the period.
  1288  			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
  1289  		},
  1290  		wantLocs: [][]string{
  1291  			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
  1292  		},
  1293  		wantSamples: []*profile.Sample{
  1294  			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
  1295  		},
  1296  	}, {
  1297  		name: "bug35538",
  1298  		input: []uint64{
  1299  			3, 0, 500, // hz = 500. Must match the period.
  1300  			// Fake frame: tryAdd will have inlinedCallerDump
  1301  			// (stack[1]) on the deck when it encounters the next
  1302  			// inline function. It should accept this.
  1303  			7, 0, 10, inlinedCallerStack[0], inlinedCallerStack[1], inlinedCallerStack[0], inlinedCallerStack[1],
  1304  			5, 0, 20, inlinedCallerStack[0], inlinedCallerStack[1],
  1305  		},
  1306  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
  1307  		wantSamples: []*profile.Sample{
  1308  			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
  1309  			{Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
  1310  		},
  1311  	}, {
  1312  		name: "bug38096",
  1313  		input: []uint64{
  1314  			3, 0, 500, // hz = 500. Must match the period.
  1315  			// count (data[2]) == 0 && len(stk) == 1 is an overflow
  1316  			// entry. The "stk" entry is actually the count.
  1317  			4, 0, 0, 4242,
  1318  		},
  1319  		wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
  1320  		wantSamples: []*profile.Sample{
  1321  			{Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
  1322  		},
  1323  	}, {
  1324  		// If a function is directly called recursively then it must
  1325  		// not be inlined in the caller.
  1326  		//
  1327  		// N.B. We're generating an impossible profile here, with a
  1328  		// recursive inlineCalleeDump call. This is simulating a non-Go
  1329  		// function that looks like an inlined Go function other than
  1330  		// its recursive property. See pcDeck.tryAdd.
  1331  		name: "directly_recursive_func_is_not_inlined",
  1332  		input: []uint64{
  1333  			3, 0, 500, // hz = 500. Must match the period.
  1334  			5, 0, 30, inlinedCallerStack[0], inlinedCallerStack[0],
  1335  			4, 0, 40, inlinedCallerStack[0],
  1336  		},
  1337  		// inlinedCallerDump shows up here because
  1338  		// runtime_expandFinalInlineFrame adds it to the stack frame.
  1339  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump"}, {"runtime/pprof.inlinedCallerDump"}},
  1340  		wantSamples: []*profile.Sample{
  1341  			{Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}},
  1342  			{Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
  1343  		},
  1344  	}, {
  1345  		name: "recursion_chain_inline",
  1346  		input: []uint64{
  1347  			3, 0, 500, // hz = 500. Must match the period.
  1348  			9, 0, 10, recursionStack[0], recursionStack[1], recursionStack[2], recursionStack[3], recursionStack[4], recursionStack[5],
  1349  		},
  1350  		wantLocs: [][]string{
  1351  			{"runtime/pprof.recursionChainBottom"},
  1352  			{
  1353  				"runtime/pprof.recursionChainMiddle",
  1354  				"runtime/pprof.recursionChainTop",
  1355  				"runtime/pprof.recursionChainBottom",
  1356  			},
  1357  			{
  1358  				"runtime/pprof.recursionChainMiddle",
  1359  				"runtime/pprof.recursionChainTop",
  1360  				"runtime/pprof.TestTryAdd", // inlined into the test.
  1361  			},
  1362  		},
  1363  		wantSamples: []*profile.Sample{
  1364  			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}, {ID: 3}}},
  1365  		},
  1366  	}, {
  1367  		name: "truncated_stack_trace_later",
  1368  		input: []uint64{
  1369  			3, 0, 500, // hz = 500. Must match the period.
  1370  			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
  1371  			4, 0, 60, inlinedCallerStack[0],
  1372  		},
  1373  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
  1374  		wantSamples: []*profile.Sample{
  1375  			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
  1376  			{Value: []int64{60, 60 * period}, Location: []*profile.Location{{ID: 1}}},
  1377  		},
  1378  	}, {
  1379  		name: "truncated_stack_trace_first",
  1380  		input: []uint64{
  1381  			3, 0, 500, // hz = 500. Must match the period.
  1382  			4, 0, 70, inlinedCallerStack[0],
  1383  			5, 0, 80, inlinedCallerStack[0], inlinedCallerStack[1],
  1384  		},
  1385  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
  1386  		wantSamples: []*profile.Sample{
  1387  			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
  1388  			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}},
  1389  		},
  1390  	}, {
  1391  		// We can recover the inlined caller from a truncated stack.
  1392  		name: "truncated_stack_trace_only",
  1393  		input: []uint64{
  1394  			3, 0, 500, // hz = 500. Must match the period.
  1395  			4, 0, 70, inlinedCallerStack[0],
  1396  		},
  1397  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
  1398  		wantSamples: []*profile.Sample{
  1399  			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
  1400  		},
  1401  	}, {
  1402  		// The same location is used for duplicated stacks.
  1403  		name: "truncated_stack_trace_twice",
  1404  		input: []uint64{
  1405  			3, 0, 500, // hz = 500. Must match the period.
  1406  			4, 0, 70, inlinedCallerStack[0],
  1407  			// Fake frame: add a fake call to
  1408  			// inlinedCallerDump to prevent this sample
  1409  			// from getting merged into above.
  1410  			5, 0, 80, inlinedCallerStack[1], inlinedCallerStack[0],
  1411  		},
  1412  		wantLocs: [][]string{
  1413  			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
  1414  			{"runtime/pprof.inlinedCallerDump"},
  1415  		},
  1416  		wantSamples: []*profile.Sample{
  1417  			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
  1418  			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}},
  1419  		},
  1420  	}}
  1421  
  1422  	for _, tc := range testCases {
  1423  		t.Run(tc.name, func(t *testing.T) {
  1424  			p, err := translateCPUProfile(tc.input)
  1425  			if err != nil {
  1426  				t.Fatalf("translating profile: %v", err)
  1427  			}
  1428  			t.Logf("Profile: %v\n", p)
  1429  
  1430  			// One location entry with all inlined functions.
  1431  			var gotLoc [][]string
  1432  			for _, loc := range p.Location {
  1433  				var names []string
  1434  				for _, line := range loc.Line {
  1435  					names = append(names, line.Function.Name)
  1436  				}
  1437  				gotLoc = append(gotLoc, names)
  1438  			}
  1439  			if got, want := fmtJSON(gotLoc), fmtJSON(tc.wantLocs); got != want {
  1440  				t.Errorf("Got Location = %+v\n\twant %+v", got, want)
  1441  			}
  1442  			// All samples should point to one location.
  1443  			var gotSamples []*profile.Sample
  1444  			for _, sample := range p.Sample {
  1445  				var locs []*profile.Location
  1446  				for _, loc := range sample.Location {
  1447  					locs = append(locs, &profile.Location{ID: loc.ID})
  1448  				}
  1449  				gotSamples = append(gotSamples, &profile.Sample{Value: sample.Value, Location: locs})
  1450  			}
  1451  			if got, want := fmtJSON(gotSamples), fmtJSON(tc.wantSamples); got != want {
  1452  				t.Errorf("Got Samples = %+v\n\twant %+v", got, want)
  1453  			}
  1454  		})
  1455  	}
  1456  }
  1457  
