Source file src/runtime/pprof/pprof_test.go

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build !js
     6  
     7  package pprof
     8  
     9  import (
    10  	"bytes"
    11  	"context"
    12  	"fmt"
    13  	"internal/abi"
    14  	"internal/profile"
    15  	"internal/syscall/unix"
    16  	"internal/testenv"
    17  	"io"
    18  	"math"
    19  	"math/big"
    20  	"os"
    21  	"regexp"
    22  	"runtime"
    23  	"runtime/debug"
    24  	"strings"
    25  	"sync"
    26  	"sync/atomic"
    27  	"testing"
    28  	"time"
    29  	_ "unsafe"
    30  )
    31  
    32  func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
    33  	// We only need to get one 100 Hz clock tick, so we've got
    34  	// a large safety buffer.
    35  	// But do at least 500 iterations (which should take about 100ms),
    36  	// otherwise TestCPUProfileMultithreaded can fail if only one
    37  	// thread is scheduled during the testing period.
    38  	t0 := time.Now()
    39  	accum := *y
    40  	for i := 0; i < 500 || time.Since(t0) < dur; i++ {
    41  		accum = f(accum)
    42  	}
    43  	*y = accum
    44  }
    45  
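         // salt1 and salt2 are global sinks for the hog functions' results.
         // Writing the accumulated value back through a global keeps the
         // compiler from optimizing the hog loops away as dead code.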
    46  var (
    47  	salt1 = 0
    48  	salt2 = 0
    49  )
    50  
    51  // The actual CPU hogging function.
     52  // Must not call other functions nor access heap/globals in the loop;
     53  // otherwise, under the race detector, the samples will be in the race runtime.
    54  func cpuHog1(x int) int {
    55  	return cpuHog0(x, 1e5)
    56  }
    57  
    58  func cpuHog0(x, n int) int {
    59  	foo := x
    60  	for i := 0; i < n; i++ {
    61  		if foo > 0 {
    62  			foo *= foo
    63  		} else {
    64  			foo *= foo + 1
    65  		}
    66  	}
    67  	return foo
    68  }
    69  
    70  func cpuHog2(x int) int {
    71  	foo := x
    72  	for i := 0; i < 1e5; i++ {
    73  		if foo > 0 {
    74  			foo *= foo
    75  		} else {
    76  			foo *= foo + 2
    77  		}
    78  	}
    79  	return foo
    80  }
    81  
     82  // avoidFunctions returns a list of functions that must never appear in
     83  // CPU profiles. For gccgo, that list includes the sigprof handler itself.
    84  func avoidFunctions() []string {
    85  	if runtime.Compiler == "gccgo" {
    86  		return []string{"runtime.sigprof"}
    87  	}
    88  	return nil
    89  }
    90  
    91  func TestCPUProfile(t *testing.T) {
    92  	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions())
    93  	testCPUProfile(t, matches, func(dur time.Duration) {
    94  		cpuHogger(cpuHog1, &salt1, dur)
    95  	})
    96  }
    97  
    98  func TestCPUProfileMultithreaded(t *testing.T) {
    99  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
   100  	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, avoidFunctions())
   101  	testCPUProfile(t, matches, func(dur time.Duration) {
   102  		c := make(chan int)
   103  		go func() {
   104  			cpuHogger(cpuHog1, &salt1, dur)
   105  			c <- 1
   106  		}()
   107  		cpuHogger(cpuHog2, &salt2, dur)
   108  		<-c
   109  	})
   110  }
   111  
   112  func TestCPUProfileMultithreadMagnitude(t *testing.T) {
   113  	if runtime.GOOS != "linux" {
   114  		t.Skip("issue 35057 is only confirmed on Linux")
   115  	}
   116  
   117  	// Linux [5.9,5.16) has a kernel bug that can break CPU timers on newly
   118  	// created threads, breaking our CPU accounting.
   119  	major, minor := unix.KernelVersion()
   120  	t.Logf("Running on Linux %d.%d", major, minor)
   121  	defer func() {
   122  		if t.Failed() {
   123  			t.Logf("Failure of this test may indicate that your system suffers from a known Linux kernel bug fixed on newer kernels. See https://golang.org/issue/49065.")
   124  		}
   125  	}()
   126  
   127  	// Disable on affected builders to avoid flakiness, but otherwise keep
   128  	// it enabled to potentially warn users that they are on a broken
   129  	// kernel.
   130  	if testenv.Builder() != "" && (runtime.GOARCH == "386" || runtime.GOARCH == "amd64") {
   131  		have59 := major > 5 || (major == 5 && minor >= 9)
   132  		have516 := major > 5 || (major == 5 && minor >= 16)
   133  		if have59 && !have516 {
   134  			testenv.SkipFlaky(t, 49065)
   135  		}
   136  	}
   137  
   138  	// Run a workload in a single goroutine, then run copies of the same
   139  	// workload in several goroutines. For both the serial and parallel cases,
   140  	// the CPU time the process measures with its own profiler should match the
   141  	// total CPU usage that the OS reports.
   142  	//
   143  	// We could also check that increases in parallelism (GOMAXPROCS) lead to a
   144  	// linear increase in the CPU usage reported by both the OS and the
   145  	// profiler, but without a guarantee of exclusive access to CPU resources
   146  	// that is likely to be a flaky test.
   147  
   148  	// Require the smaller value to be within 10%, or 40% in short mode.
   149  	maxDiff := 0.10
   150  	if testing.Short() {
   151  		maxDiff = 0.40
   152  	}
   153  
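         	// For example, with maxDiff = 0.10, reports of 90ms and 100ms
         	// normalize to diff = (100-90)/100 = 0.10 and just pass, while
         	// 89ms vs 100ms gives diff = 0.11 and fails.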
   154  	compare := func(a, b time.Duration, maxDiff float64) error {
   155  		if a <= 0 || b <= 0 {
    156  			return fmt.Errorf("expected both time reports to be positive")
   157  		}
   158  
   159  		if a < b {
   160  			a, b = b, a
   161  		}
   162  
   163  		diff := float64(a-b) / float64(a)
   164  		if diff > maxDiff {
   165  			return fmt.Errorf("CPU usage reports are too different (limit -%.1f%%, got -%.1f%%)", maxDiff*100, diff*100)
   166  		}
   167  
   168  		return nil
   169  	}
   170  
   171  	for _, tc := range []struct {
   172  		name    string
   173  		workers int
   174  	}{
   175  		{
   176  			name:    "serial",
   177  			workers: 1,
   178  		},
   179  		{
   180  			name:    "parallel",
   181  			workers: runtime.GOMAXPROCS(0),
   182  		},
   183  	} {
   184  		// check that the OS's perspective matches what the Go runtime measures.
   185  		t.Run(tc.name, func(t *testing.T) {
   186  			t.Logf("Running with %d workers", tc.workers)
   187  
   188  			var userTime, systemTime time.Duration
   189  			matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions())
   190  			acceptProfile := func(t *testing.T, p *profile.Profile) bool {
   191  				if !matches(t, p) {
   192  					return false
   193  				}
   194  
   195  				ok := true
   196  				for i, unit := range []string{"count", "nanoseconds"} {
   197  					if have, want := p.SampleType[i].Unit, unit; have != want {
   198  						t.Logf("pN SampleType[%d]; %q != %q", i, have, want)
   199  						ok = false
   200  					}
   201  				}
   202  
   203  				// cpuHog1 called below is the primary source of CPU
   204  				// load, but there may be some background work by the
   205  				// runtime. Since the OS rusage measurement will
   206  				// include all work done by the process, also compare
   207  				// against all samples in our profile.
   208  				var value time.Duration
   209  				for _, sample := range p.Sample {
   210  					value += time.Duration(sample.Value[1]) * time.Nanosecond
   211  				}
   212  
   213  				totalTime := userTime + systemTime
   214  				t.Logf("compare %s user + %s system = %s vs %s", userTime, systemTime, totalTime, value)
   215  				if err := compare(totalTime, value, maxDiff); err != nil {
   216  					t.Logf("compare got %v want nil", err)
   217  					ok = false
   218  				}
   219  
   220  				return ok
   221  			}
   222  
   223  			testCPUProfile(t, acceptProfile, func(dur time.Duration) {
   224  				userTime, systemTime = diffCPUTime(t, func() {
   225  					var wg sync.WaitGroup
   226  					var once sync.Once
   227  					for i := 0; i < tc.workers; i++ {
   228  						wg.Add(1)
   229  						go func() {
   230  							defer wg.Done()
   231  							var salt = 0
   232  							cpuHogger(cpuHog1, &salt, dur)
   233  							once.Do(func() { salt1 = salt })
   234  						}()
   235  					}
   236  					wg.Wait()
   237  				})
   238  			})
   239  		})
   240  	}
   241  }
   242  
   243  // containsInlinedCall reports whether the function body for the function f is
   244  // known to contain an inlined function call within the first maxBytes bytes.
   245  func containsInlinedCall(f any, maxBytes int) bool {
   246  	_, found := findInlinedCall(f, maxBytes)
   247  	return found
   248  }
   249  
   250  // findInlinedCall returns the PC of an inlined function call within
   251  // the function body for the function f if any.
   252  func findInlinedCall(f any, maxBytes int) (pc uint64, found bool) {
   253  	fFunc := runtime.FuncForPC(uintptr(abi.FuncPCABIInternal(f)))
   254  	if fFunc == nil || fFunc.Entry() == 0 {
   255  		panic("failed to locate function entry")
   256  	}
   257  
   258  	for offset := 0; offset < maxBytes; offset++ {
   259  		innerPC := fFunc.Entry() + uintptr(offset)
   260  		inner := runtime.FuncForPC(innerPC)
   261  		if inner == nil {
   262  			// No function known for this PC value.
   263  			// It might simply be misaligned, so keep searching.
   264  			continue
   265  		}
   266  		if inner.Entry() != fFunc.Entry() {
   267  			// Scanned past f and didn't find any inlined functions.
   268  			break
   269  		}
   270  		if inner.Name() != fFunc.Name() {
   271  			// This PC has f as its entry-point, but is not f. Therefore, it must be a
   272  			// function inlined into f.
   273  			return uint64(innerPC), true
   274  		}
   275  	}
   276  
   277  	return 0, false
   278  }
   279  
   280  func TestCPUProfileInlining(t *testing.T) {
   281  	if !containsInlinedCall(inlinedCaller, 4<<10) {
   282  		t.Skip("Can't determine whether inlinedCallee was inlined into inlinedCaller.")
   283  	}
   284  
   285  	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, avoidFunctions())
   286  	p := testCPUProfile(t, matches, func(dur time.Duration) {
   287  		cpuHogger(inlinedCaller, &salt1, dur)
   288  	})
   289  
    290  	// Check that inlined function locations are encoded correctly. The inlinedCallee and inlinedCaller should be in one location.
   291  	for _, loc := range p.Location {
   292  		hasInlinedCallerAfterInlinedCallee, hasInlinedCallee := false, false
   293  		for _, line := range loc.Line {
   294  			if line.Function.Name == "runtime/pprof.inlinedCallee" {
   295  				hasInlinedCallee = true
   296  			}
   297  			if hasInlinedCallee && line.Function.Name == "runtime/pprof.inlinedCaller" {
   298  				hasInlinedCallerAfterInlinedCallee = true
   299  			}
   300  		}
   301  		if hasInlinedCallee != hasInlinedCallerAfterInlinedCallee {
   302  			t.Fatalf("want inlinedCallee followed by inlinedCaller, got separate Location entries:\n%v", p)
   303  		}
   304  	}
   305  }
   306  
   307  func inlinedCaller(x int) int {
   308  	x = inlinedCallee(x, 1e5)
   309  	return x
   310  }
   311  
   312  func inlinedCallee(x, n int) int {
   313  	return cpuHog0(x, n)
   314  }
   315  
   316  //go:noinline
   317  func dumpCallers(pcs []uintptr) {
   318  	if pcs == nil {
   319  		return
   320  	}
   321  
   322  	skip := 2 // Callers and dumpCallers
   323  	runtime.Callers(skip, pcs)
   324  }
   325  
   326  //go:noinline
   327  func inlinedCallerDump(pcs []uintptr) {
   328  	inlinedCalleeDump(pcs)
   329  }
   330  
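         // inlinedCalleeDump carries no //go:noinline directive, so the
         // compiler is expected to inline it into inlinedCallerDump; tests
         // that use these helpers rely on that inlining.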
   331  func inlinedCalleeDump(pcs []uintptr) {
   332  	dumpCallers(pcs)
   333  }
   334  
   335  type inlineWrapperInterface interface {
   336  	dump(stack []uintptr)
   337  }
   338  
   339  type inlineWrapper struct {
   340  }
   341  
   342  func (h inlineWrapper) dump(pcs []uintptr) {
   343  	dumpCallers(pcs)
   344  }
   345  
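         // inlinedWrapperCallerDump calls dump through an interface value, so
         // the call is dispatched via a compiler-generated method wrapper;
         // this presumably exists to exercise wrapper frames in profiles.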
   346  func inlinedWrapperCallerDump(pcs []uintptr) {
   347  	var h inlineWrapperInterface
   348  	h = &inlineWrapper{}
   349  	h.dump(pcs)
   350  }
   351  
   352  func TestCPUProfileRecursion(t *testing.T) {
   353  	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.recursionCallee", "runtime/pprof.recursionCaller"}, avoidFunctions())
   354  	p := testCPUProfile(t, matches, func(dur time.Duration) {
   355  		cpuHogger(recursionCaller, &salt1, dur)
   356  	})
   357  
   358  	// check the Location encoding was not confused by recursive calls.
   359  	for i, loc := range p.Location {
   360  		recursionFunc := 0
   361  		for _, line := range loc.Line {
   362  			if name := line.Function.Name; name == "runtime/pprof.recursionCaller" || name == "runtime/pprof.recursionCallee" {
   363  				recursionFunc++
   364  			}
   365  		}
   366  		if recursionFunc > 1 {
   367  			t.Fatalf("want at most one recursionCaller or recursionCallee in one Location, got a violating Location (index: %d):\n%v", i, p)
   368  		}
   369  	}
   370  }
   371  
   372  func recursionCaller(x int) int {
   373  	y := recursionCallee(3, x)
   374  	return y
   375  }
   376  
   377  func recursionCallee(n, x int) int {
   378  	if n == 0 {
   379  		return 1
   380  	}
   381  	y := inlinedCallee(x, 1e4)
   382  	return y * recursionCallee(n-1, x)
   383  }
   384  
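         // recursionChainTop, recursionChainMiddle, and recursionChainBottom
         // call each other in a cycle. Each is small enough to be inlined, so
         // the recorded stacks can contain repeated inlined frames across a
         // recursive chain.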
   385  func recursionChainTop(x int, pcs []uintptr) {
   386  	if x < 0 {
   387  		return
   388  	}
   389  	recursionChainMiddle(x, pcs)
   390  }
   391  
   392  func recursionChainMiddle(x int, pcs []uintptr) {
   393  	recursionChainBottom(x, pcs)
   394  }
   395  
   396  func recursionChainBottom(x int, pcs []uintptr) {
    397  	// This will be called on each iteration; we only care about the last
    398  	// call. We can't make it conditional, or this function won't be inlined.
   399  	dumpCallers(pcs)
   400  
   401  	recursionChainTop(x-1, pcs)
   402  }
   403  
   404  func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) *profile.Profile {
   405  	p, err := profile.Parse(bytes.NewReader(valBytes))
   406  	if err != nil {
   407  		t.Fatal(err)
   408  	}
   409  	for _, sample := range p.Sample {
   410  		count := uintptr(sample.Value[0])
   411  		f(count, sample.Location, sample.Label)
   412  	}
   413  	return p
   414  }
   415  
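         // cpuProfilingBroken reports whether CPU profiling is known to be
         // unreliable or unimplemented on the current GOOS/GOARCH.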
   416  func cpuProfilingBroken() bool {
   417  	switch runtime.GOOS {
   418  	case "plan9":
   419  		// Profiling unimplemented.
   420  		return true
   421  	case "aix":
   422  		// See https://golang.org/issue/45170.
   423  		return true
   424  	case "ios", "dragonfly", "netbsd", "illumos", "solaris":
   425  		// See https://golang.org/issue/13841.
   426  		return true
   427  	case "openbsd":
   428  		if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
   429  			// See https://golang.org/issue/13841.
   430  			return true
   431  		}
   432  	}
   433  
   434  	return false
   435  }
   436  
    437  // testCPUProfile runs f under the CPU profiler, checks the collected
    438  // profile with matches, and returns the parsed profile.
   439  func testCPUProfile(t *testing.T, matches profileMatchFunc, f func(dur time.Duration)) *profile.Profile {
   440  	switch runtime.GOOS {
   441  	case "darwin":
   442  		out, err := testenv.Command(t, "uname", "-a").CombinedOutput()
   443  		if err != nil {
   444  			t.Fatal(err)
   445  		}
   446  		vers := string(out)
   447  		t.Logf("uname -a: %v", vers)
   448  	case "plan9":
   449  		t.Skip("skipping on plan9")
   450  	case "wasip1":
   451  		t.Skip("skipping on wasip1")
   452  	}
   453  
   454  	broken := cpuProfilingBroken()
   455  
   456  	deadline, ok := t.Deadline()
   457  	if broken || !ok {
   458  		if broken && testing.Short() {
   459  			// If it's expected to be broken, no point waiting around.
   460  			deadline = time.Now().Add(1 * time.Second)
   461  		} else {
   462  			deadline = time.Now().Add(10 * time.Second)
   463  		}
   464  	}
   465  
   466  	// If we're running a long test, start with a long duration
   467  	// for tests that try to make sure something *doesn't* happen.
   468  	duration := 5 * time.Second
   469  	if testing.Short() {
   470  		duration = 100 * time.Millisecond
   471  	}
   472  
   473  	// Profiling tests are inherently flaky, especially on a
   474  	// loaded system, such as when this test is running with
   475  	// several others under go test std. If a test fails in a way
   476  	// that could mean it just didn't run long enough, try with a
   477  	// longer duration.
   478  	for {
   479  		var prof bytes.Buffer
   480  		if err := StartCPUProfile(&prof); err != nil {
   481  			t.Fatal(err)
   482  		}
   483  		f(duration)
   484  		StopCPUProfile()
   485  
   486  		if p, ok := profileOk(t, matches, prof, duration); ok {
   487  			return p
   488  		}
   489  
   490  		duration *= 2
   491  		if time.Until(deadline) < duration {
   492  			break
   493  		}
   494  		t.Logf("retrying with %s duration", duration)
   495  	}
   496  
   497  	if broken {
   498  		t.Skipf("ignoring failure on %s/%s; see golang.org/issue/13841", runtime.GOOS, runtime.GOARCH)
   499  	}
   500  
    501  	// Ignore the failure if the tests are running in a QEMU-based emulator;
    502  	// QEMU is not perfect at emulating everything.
    503  	// The IN_QEMU environment variable is set by some of the Go builders.
   504  	// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
   505  	if os.Getenv("IN_QEMU") == "1" {
   506  		t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
   507  	}
   508  	t.FailNow()
   509  	return nil
   510  }
   511  
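         // diffCPUTimeImpl is installed by OS-specific test files on platforms
         // that can measure per-process CPU time (assumption: e.g. via
         // getrusage on Unix-like systems); it is nil elsewhere.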
   512  var diffCPUTimeImpl func(f func()) (user, system time.Duration)
   513  
   514  func diffCPUTime(t *testing.T, f func()) (user, system time.Duration) {
   515  	if fn := diffCPUTimeImpl; fn != nil {
   516  		return fn(f)
   517  	}
   518  	t.Fatalf("cannot measure CPU time on GOOS=%s GOARCH=%s", runtime.GOOS, runtime.GOARCH)
   519  	return 0, 0
   520  }
   521  
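         // contains reports whether slice contains the string s.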
   522  func contains(slice []string, s string) bool {
   523  	for i := range slice {
   524  		if slice[i] == s {
   525  			return true
   526  		}
   527  	}
   528  	return false
   529  }
   530  
   531  // stackContains matches if a function named spec appears anywhere in the stack trace.
   532  func stackContains(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
   533  	for _, loc := range stk {
   534  		for _, line := range loc.Line {
   535  			if strings.Contains(line.Function.Name, spec) {
   536  				return true
   537  			}
   538  		}
   539  	}
   540  	return false
   541  }
   542  
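         // A sampleMatchFunc reports whether a single profile sample matches
         // spec; stackContains and stackContainsAll are implementations.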
   543  type sampleMatchFunc func(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool
   544  
   545  func profileOk(t *testing.T, matches profileMatchFunc, prof bytes.Buffer, duration time.Duration) (_ *profile.Profile, ok bool) {
   546  	ok = true
   547  
   548  	var samples uintptr
   549  	var buf strings.Builder
   550  	p := parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
   551  		fmt.Fprintf(&buf, "%d:", count)
   552  		fprintStack(&buf, stk)
   553  		fmt.Fprintf(&buf, " labels: %v\n", labels)
   554  		samples += count
   555  		fmt.Fprintf(&buf, "\n")
   556  	})
   557  	t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())
   558  
   559  	if samples < 10 && runtime.GOOS == "windows" {
    560  		// On some Windows machines we end up with
   561  		// not enough samples due to coarse timer
   562  		// resolution. Let it go.
   563  		t.Log("too few samples on Windows (golang.org/issue/10842)")
   564  		return p, false
   565  	}
   566  
   567  	// Check that we got a reasonable number of samples.
   568  	// We used to always require at least ideal/4 samples,
   569  	// but that is too hard to guarantee on a loaded system.
   570  	// Now we accept 10 or more samples, which we take to be
   571  	// enough to show that at least some profiling is occurring.
   572  	if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
   573  		t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
   574  		ok = false
   575  	}
   576  
   577  	if matches != nil && !matches(t, p) {
   578  		ok = false
   579  	}
   580  
   581  	return p, ok
   582  }
   583  
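         // A profileMatchFunc checks a parsed profile against a test's
         // expectations, logging any mismatches through t.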
   584  type profileMatchFunc func(*testing.T, *profile.Profile) bool
   585  
   586  func matchAndAvoidStacks(matches sampleMatchFunc, need []string, avoid []string) profileMatchFunc {
   587  	return func(t *testing.T, p *profile.Profile) (ok bool) {
   588  		ok = true
   589  
   590  		// Check that profile is well formed, contains 'need', and does not contain
   591  		// anything from 'avoid'.
   592  		have := make([]uintptr, len(need))
   593  		avoidSamples := make([]uintptr, len(avoid))
   594  
   595  		for _, sample := range p.Sample {
   596  			count := uintptr(sample.Value[0])
   597  			for i, spec := range need {
   598  				if matches(spec, count, sample.Location, sample.Label) {
   599  					have[i] += count
   600  				}
   601  			}
   602  			for i, name := range avoid {
   603  				for _, loc := range sample.Location {
   604  					for _, line := range loc.Line {
   605  						if strings.Contains(line.Function.Name, name) {
   606  							avoidSamples[i] += count
   607  						}
   608  					}
   609  				}
   610  			}
   611  		}
   612  
   613  		for i, name := range avoid {
   614  			bad := avoidSamples[i]
   615  			if bad != 0 {
   616  				t.Logf("found %d samples in avoid-function %s\n", bad, name)
   617  				ok = false
   618  			}
   619  		}
   620  
   621  		if len(need) == 0 {
   622  			return
   623  		}
   624  
   625  		var total uintptr
   626  		for i, name := range need {
   627  			total += have[i]
   628  			t.Logf("found %d samples in expected function %s\n", have[i], name)
   629  		}
   630  		if total == 0 {
   631  			t.Logf("no samples in expected functions")
   632  			ok = false
   633  		}
   634  
   635  		// We'd like to check a reasonable minimum, like
   636  		// total / len(have) / smallconstant, but this test is
   637  		// pretty flaky (see bug 7095).  So we'll just test to
   638  		// make sure we got at least one sample.
   639  		min := uintptr(1)
   640  		for i, name := range need {
   641  			if have[i] < min {
   642  				t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
   643  				ok = false
   644  			}
   645  		}
   646  		return
   647  	}
   648  }
   649  
   650  // Fork can hang if preempted with signals frequently enough (see issue 5517).
   651  // Ensure that we do not do this.
   652  func TestCPUProfileWithFork(t *testing.T) {
   653  	testenv.MustHaveExec(t)
   654  
   655  	exe, err := os.Executable()
   656  	if err != nil {
   657  		t.Fatal(err)
   658  	}
   659  
   660  	heap := 1 << 30
   661  	if runtime.GOOS == "android" {
   662  		// Use smaller size for Android to avoid crash.
   663  		heap = 100 << 20
   664  	}
   665  	if runtime.GOOS == "windows" && runtime.GOARCH == "arm" {
   666  		// Use smaller heap for Windows/ARM to avoid crash.
   667  		heap = 100 << 20
   668  	}
   669  	if testing.Short() {
   670  		heap = 100 << 20
   671  	}
   672  	// This makes fork slower.
   673  	garbage := make([]byte, heap)
   674  	// Need to touch the slice, otherwise it won't be paged in.
   675  	done := make(chan bool)
   676  	go func() {
   677  		for i := range garbage {
   678  			garbage[i] = 42
   679  		}
   680  		done <- true
   681  	}()
   682  	<-done
   683  
   684  	var prof bytes.Buffer
   685  	if err := StartCPUProfile(&prof); err != nil {
   686  		t.Fatal(err)
   687  	}
   688  	defer StopCPUProfile()
   689  
   690  	for i := 0; i < 10; i++ {
   691  		testenv.Command(t, exe, "-h").CombinedOutput()
   692  	}
   693  }
   694  
   695  // Test that profiler does not observe runtime.gogo as "user" goroutine execution.
   696  // If it did, it would see inconsistent state and would either record an incorrect stack
   697  // or crash because the stack was malformed.
   698  func TestGoroutineSwitch(t *testing.T) {
   699  	if runtime.Compiler == "gccgo" {
   700  		t.Skip("not applicable for gccgo")
   701  	}
    702  	// How much to try. These defaults take about 1 second
   703  	// on a 2012 MacBook Pro. The ones in short mode take
   704  	// about 0.1 seconds.
   705  	tries := 10
   706  	count := 1000000
   707  	if testing.Short() {
   708  		tries = 1
   709  	}
   710  	for try := 0; try < tries; try++ {
   711  		var prof bytes.Buffer
   712  		if err := StartCPUProfile(&prof); err != nil {
   713  			t.Fatal(err)
   714  		}
   715  		for i := 0; i < count; i++ {
   716  			runtime.Gosched()
   717  		}
   718  		StopCPUProfile()
   719  
   720  		// Read profile to look for entries for gogo with an attempt at a traceback.
   721  		// "runtime.gogo" is OK, because that's the part of the context switch
   722  		// before the actual switch begins. But we should not see "gogo",
   723  		// aka "gogo<>(SB)", which does the actual switch and is marked SPWRITE.
   724  		parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
   725  			// An entry with two frames with 'System' in its top frame
   726  			// exists to record a PC without a traceback. Those are okay.
   727  			if len(stk) == 2 {
   728  				name := stk[1].Line[0].Function.Name
   729  				if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
   730  					return
   731  				}
   732  			}
   733  
   734  			// An entry with just one frame is OK too:
   735  			// it knew to stop at gogo.
   736  			if len(stk) == 1 {
   737  				return
   738  			}
   739  
   740  			// Otherwise, should not see gogo.
   741  			// The place we'd see it would be the inner most frame.
   742  			name := stk[0].Line[0].Function.Name
   743  			if name == "gogo" {
   744  				var buf strings.Builder
   745  				fprintStack(&buf, stk)
   746  				t.Fatalf("found profile entry for gogo:\n%s", buf.String())
   747  			}
   748  		})
   749  	}
   750  }
   751  
   752  func fprintStack(w io.Writer, stk []*profile.Location) {
   753  	if len(stk) == 0 {
   754  		fmt.Fprintf(w, " (stack empty)")
   755  	}
   756  	for _, loc := range stk {
   757  		fmt.Fprintf(w, " %#x", loc.Address)
   758  		fmt.Fprintf(w, " (")
   759  		for i, line := range loc.Line {
   760  			if i > 0 {
   761  				fmt.Fprintf(w, " ")
   762  			}
   763  			fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
   764  		}
   765  		fmt.Fprintf(w, ")")
   766  	}
   767  }
   768  
   769  // Test that profiling of division operations is okay, especially on ARM. See issue 6681.
   770  func TestMathBigDivide(t *testing.T) {
   771  	testCPUProfile(t, nil, func(duration time.Duration) {
   772  		t := time.After(duration)
   773  		pi := new(big.Int)
   774  		for {
   775  			for i := 0; i < 100; i++ {
   776  				n := big.NewInt(2646693125139304345)
   777  				d := big.NewInt(842468587426513207)
   778  				pi.Div(n, d)
   779  			}
   780  			select {
   781  			case <-t:
   782  				return
   783  			default:
   784  			}
   785  		}
   786  	})
   787  }
   788  
   789  // stackContainsAll matches if all functions in spec (comma-separated) appear somewhere in the stack trace.
   790  func stackContainsAll(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
   791  	for _, f := range strings.Split(spec, ",") {
   792  		if !stackContains(f, count, stk, labels) {
   793  			return false
   794  		}
   795  	}
   796  	return true
   797  }
   798  
   799  func TestMorestack(t *testing.T) {
   800  	matches := matchAndAvoidStacks(stackContainsAll, []string{"runtime.newstack,runtime/pprof.growstack"}, avoidFunctions())
   801  	testCPUProfile(t, matches, func(duration time.Duration) {
   802  		t := time.After(duration)
   803  		c := make(chan bool)
   804  		for {
   805  			go func() {
   806  				growstack1()
   807  				c <- true
   808  			}()
   809  			select {
   810  			case <-t:
   811  				return
   812  			case <-c:
   813  			}
   814  		}
   815  	})
   816  }
   817  
   818  //go:noinline
   819  func growstack1() {
   820  	growstack(10)
   821  }
   822  
   823  //go:noinline
   824  func growstack(n int) {
   825  	var buf [8 << 18]byte
   826  	use(buf)
   827  	if n > 0 {
   828  		growstack(n - 1)
   829  	}
   830  }
   831  
   832  //go:noinline
   833  func use(x [8 << 18]byte) {}
   834  
   835  func TestBlockProfile(t *testing.T) {
   836  	type TestCase struct {
   837  		name string
   838  		f    func(*testing.T)
   839  		stk  []string
   840  		re   string
   841  	}
   842  	tests := [...]TestCase{
   843  		{
   844  			name: "chan recv",
   845  			f:    blockChanRecv,
   846  			stk: []string{
   847  				"runtime.chanrecv1",
   848  				"runtime/pprof.blockChanRecv",
   849  				"runtime/pprof.TestBlockProfile",
   850  			},
   851  			re: `
   852  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   853  #	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*runtime/chan.go:[0-9]+
   854  #	0x[0-9a-f]+	runtime/pprof\.blockChanRecv\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
   855  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
   856  `},
   857  		{
   858  			name: "chan send",
   859  			f:    blockChanSend,
   860  			stk: []string{
   861  				"runtime.chansend1",
   862  				"runtime/pprof.blockChanSend",
   863  				"runtime/pprof.TestBlockProfile",
   864  			},
   865  			re: `
   866  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   867  #	0x[0-9a-f]+	runtime\.chansend1\+0x[0-9a-f]+	.*runtime/chan.go:[0-9]+
   868  #	0x[0-9a-f]+	runtime/pprof\.blockChanSend\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
   869  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
   870  `},
   871  		{
   872  			name: "chan close",
   873  			f:    blockChanClose,
   874  			stk: []string{
   875  				"runtime.chanrecv1",
   876  				"runtime/pprof.blockChanClose",
   877  				"runtime/pprof.TestBlockProfile",
   878  			},
   879  			re: `
   880  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   881  #	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*runtime/chan.go:[0-9]+
   882  #	0x[0-9a-f]+	runtime/pprof\.blockChanClose\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
   883  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
   884  `},
   885  		{
   886  			name: "select recv async",
   887  			f:    blockSelectRecvAsync,
   888  			stk: []string{
   889  				"runtime.selectgo",
   890  				"runtime/pprof.blockSelectRecvAsync",
   891  				"runtime/pprof.TestBlockProfile",
   892  			},
   893  			re: `
   894  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   895  #	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*runtime/select.go:[0-9]+
   896  #	0x[0-9a-f]+	runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
   897  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
   898  `},
   899  		{
   900  			name: "select send sync",
   901  			f:    blockSelectSendSync,
   902  			stk: []string{
   903  				"runtime.selectgo",
   904  				"runtime/pprof.blockSelectSendSync",
   905  				"runtime/pprof.TestBlockProfile",
   906  			},
   907  			re: `
   908  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   909  #	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*runtime/select.go:[0-9]+
   910  #	0x[0-9a-f]+	runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
   911  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
   912  `},
   913  		{
   914  			name: "mutex",
   915  			f:    blockMutex,
   916  			stk: []string{
   917  				"sync.(*Mutex).Lock",
   918  				"runtime/pprof.blockMutex",
   919  				"runtime/pprof.TestBlockProfile",
   920  			},
   921  			re: `
   922  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   923  #	0x[0-9a-f]+	sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+	.*sync/mutex\.go:[0-9]+
   924  #	0x[0-9a-f]+	runtime/pprof\.blockMutex\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
   925  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
   926  `},
   927  		{
   928  			name: "cond",
   929  			f:    blockCond,
   930  			stk: []string{
   931  				"sync.(*Cond).Wait",
   932  				"runtime/pprof.blockCond",
   933  				"runtime/pprof.TestBlockProfile",
   934  			},
   935  			re: `
   936  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   937  #	0x[0-9a-f]+	sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+	.*sync/cond\.go:[0-9]+
   938  #	0x[0-9a-f]+	runtime/pprof\.blockCond\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
   939  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
   940  `},
   941  	}
   942  
   943  	// Generate block profile
   944  	runtime.SetBlockProfileRate(1)
   945  	defer runtime.SetBlockProfileRate(0)
   946  	for _, test := range tests {
   947  		test.f(t)
   948  	}
   949  
   950  	t.Run("debug=1", func(t *testing.T) {
   951  		var w strings.Builder
   952  		Lookup("block").WriteTo(&w, 1)
   953  		prof := w.String()
   954  
   955  		if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
   956  			t.Fatalf("Bad profile header:\n%v", prof)
   957  		}
   958  
   959  		if strings.HasSuffix(prof, "#\t0x0\n\n") {
   960  			t.Errorf("Useless 0 suffix:\n%v", prof)
   961  		}
   962  
   963  		for _, test := range tests {
   964  			if !regexp.MustCompile(strings.ReplaceAll(test.re, "\t", "\t+")).MatchString(prof) {
   965  				t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
   966  			}
   967  		}
   968  	})
   969  
   970  	t.Run("proto", func(t *testing.T) {
   971  		// proto format
   972  		var w bytes.Buffer
   973  		Lookup("block").WriteTo(&w, 0)
   974  		p, err := profile.Parse(&w)
   975  		if err != nil {
   976  			t.Fatalf("failed to parse profile: %v", err)
   977  		}
   978  		t.Logf("parsed proto: %s", p)
   979  		if err := p.CheckValid(); err != nil {
   980  			t.Fatalf("invalid profile: %v", err)
   981  		}
   982  
   983  		stks := stacks(p)
   984  		for _, test := range tests {
   985  			if !containsStack(stks, test.stk) {
   986  				t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
   987  			}
   988  		}
   989  	})
   990  
   991  }
   992  
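         // stacks flattens each sample in p into a slice of function names,
         // ordered leaf-first, matching the frame order containsStack expects.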
   993  func stacks(p *profile.Profile) (res [][]string) {
   994  	for _, s := range p.Sample {
   995  		var stk []string
   996  		for _, l := range s.Location {
   997  			for _, line := range l.Line {
   998  				stk = append(stk, line.Function.Name)
   999  			}
  1000  		}
  1001  		res = append(res, stk)
  1002  	}
  1003  	return res
  1004  }
  1005  
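         // containsStack reports whether any stack in got begins with the
         // frames in want, in order.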
  1006  func containsStack(got [][]string, want []string) bool {
  1007  	for _, stk := range got {
  1008  		if len(stk) < len(want) {
  1009  			continue
  1010  		}
  1011  		for i, f := range want {
  1012  			if f != stk[i] {
  1013  				break
  1014  			}
  1015  			if i == len(want)-1 {
  1016  				return true
  1017  			}
  1018  		}
  1019  	}
  1020  	return false
  1021  }
  1022  
  1023  // awaitBlockedGoroutine spins on runtime.Gosched until a runtime stack dump
   1024  // shows at least count goroutines in the given state with a stack frame
   1025  // in runtime/pprof.<fName>.
  1026  func awaitBlockedGoroutine(t *testing.T, state, fName string, count int) {
  1027  	re := fmt.Sprintf(`(?m)^goroutine \d+ \[%s\]:\n(?:.+\n\t.+\n)*runtime/pprof\.%s`, regexp.QuoteMeta(state), fName)
  1028  	r := regexp.MustCompile(re)
  1029  
  1030  	if deadline, ok := t.Deadline(); ok {
  1031  		if d := time.Until(deadline); d > 1*time.Second {
  1032  			timer := time.AfterFunc(d-1*time.Second, func() {
  1033  				debug.SetTraceback("all")
  1034  				panic(fmt.Sprintf("timed out waiting for %#q", re))
  1035  			})
  1036  			defer timer.Stop()
  1037  		}
  1038  	}
  1039  
  1040  	buf := make([]byte, 64<<10)
  1041  	for {
  1042  		runtime.Gosched()
  1043  		n := runtime.Stack(buf, true)
  1044  		if n == len(buf) {
  1045  			// Buffer wasn't large enough for a full goroutine dump.
  1046  			// Resize it and try again.
  1047  			buf = make([]byte, 2*len(buf))
  1048  			continue
  1049  		}
  1050  		if len(r.FindAll(buf[:n], -1)) >= count {
  1051  			return
  1052  		}
  1053  	}
  1054  }
  1055  
  1056  func blockChanRecv(t *testing.T) {
  1057  	c := make(chan bool)
  1058  	go func() {
  1059  		awaitBlockedGoroutine(t, "chan receive", "blockChanRecv", 1)
  1060  		c <- true
  1061  	}()
  1062  	<-c
  1063  }
  1064  
  1065  func blockChanSend(t *testing.T) {
  1066  	c := make(chan bool)
  1067  	go func() {
  1068  		awaitBlockedGoroutine(t, "chan send", "blockChanSend", 1)
  1069  		<-c
  1070  	}()
  1071  	c <- true
  1072  }
  1073  
  1074  func blockChanClose(t *testing.T) {
  1075  	c := make(chan bool)
  1076  	go func() {
  1077  		awaitBlockedGoroutine(t, "chan receive", "blockChanClose", 1)
  1078  		close(c)
  1079  	}()
  1080  	<-c
  1081  }
  1082  
  1083  func blockSelectRecvAsync(t *testing.T) {
  1084  	const numTries = 3
  1085  	c := make(chan bool, 1)
  1086  	c2 := make(chan bool, 1)
  1087  	go func() {
  1088  		for i := 0; i < numTries; i++ {
  1089  			awaitBlockedGoroutine(t, "select", "blockSelectRecvAsync", 1)
  1090  			c <- true
  1091  		}
  1092  	}()
  1093  	for i := 0; i < numTries; i++ {
  1094  		select {
  1095  		case <-c:
  1096  		case <-c2:
  1097  		}
  1098  	}
  1099  }
  1100  
  1101  func blockSelectSendSync(t *testing.T) {
  1102  	c := make(chan bool)
  1103  	c2 := make(chan bool)
  1104  	go func() {
  1105  		awaitBlockedGoroutine(t, "select", "blockSelectSendSync", 1)
  1106  		<-c
  1107  	}()
  1108  	select {
  1109  	case c <- true:
  1110  	case c2 <- true:
  1111  	}
  1112  }
  1113  
  1114  func blockMutex(t *testing.T) {
  1115  	var mu sync.Mutex
  1116  	mu.Lock()
  1117  	go func() {
  1118  		awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutex", 1)
  1119  		mu.Unlock()
  1120  	}()
  1121  	// Note: Unlock releases mu before recording the mutex event,
  1122  	// so it's theoretically possible for this to proceed and
  1123  	// capture the profile before the event is recorded. As long
  1124  	// as this is blocked before the unlock happens, it's okay.
  1125  	mu.Lock()
  1126  }
  1127  
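         // blockMutexN is like blockMutex, but launches n goroutines that each
         // block on mu while it is held for at least d, so the mutex profile
         // should record roughly n*d of total contention.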
  1128  func blockMutexN(t *testing.T, n int, d time.Duration) {
  1129  	var wg sync.WaitGroup
  1130  	var mu sync.Mutex
  1131  	mu.Lock()
  1132  	go func() {
  1133  		awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutex", n)
  1134  		time.Sleep(d)
  1135  		mu.Unlock()
  1136  	}()
  1137  	// Note: Unlock releases mu before recording the mutex event,
  1138  	// so it's theoretically possible for this to proceed and
  1139  	// capture the profile before the event is recorded. As long
  1140  	// as this is blocked before the unlock happens, it's okay.
  1141  	for i := 0; i < n; i++ {
  1142  		wg.Add(1)
  1143  		go func() {
  1144  			defer wg.Done()
  1145  			mu.Lock()
  1146  			mu.Unlock()
  1147  		}()
  1148  	}
  1149  	wg.Wait()
  1150  }
  1151  
  1152  func blockCond(t *testing.T) {
  1153  	var mu sync.Mutex
  1154  	c := sync.NewCond(&mu)
  1155  	mu.Lock()
  1156  	go func() {
  1157  		awaitBlockedGoroutine(t, "sync.Cond.Wait", "blockCond", 1)
  1158  		mu.Lock()
  1159  		c.Signal()
  1160  		mu.Unlock()
  1161  	}()
  1162  	c.Wait()
  1163  	mu.Unlock()
  1164  }
  1165  
  1166  // See http://golang.org/cl/299991.
  1167  func TestBlockProfileBias(t *testing.T) {
  1168  	rate := int(1000) // arbitrary value
  1169  	runtime.SetBlockProfileRate(rate)
  1170  	defer runtime.SetBlockProfileRate(0)
  1171  
   1172  	// Simulate blocking events. The two functions below block for the same total number of cycles (100000*(rate/10) == 10000*rate), so an unbiased profile should report nearly equal durations for each.
  1173  	blockFrequentShort(rate)
  1174  	blockInfrequentLong(rate)
  1175  
  1176  	var w bytes.Buffer
  1177  	Lookup("block").WriteTo(&w, 0)
  1178  	p, err := profile.Parse(&w)
  1179  	if err != nil {
  1180  		t.Fatalf("failed to parse profile: %v", err)
  1181  	}
  1182  	t.Logf("parsed proto: %s", p)
  1183  
  1184  	il := float64(-1) // blockInfrequentLong duration
  1185  	fs := float64(-1) // blockFrequentShort duration
  1186  	for _, s := range p.Sample {
  1187  		for _, l := range s.Location {
  1188  			for _, line := range l.Line {
  1189  				if len(s.Value) < 2 {
  1190  					t.Fatal("block profile has less than 2 sample types")
  1191  				}
  1192  
  1193  				if line.Function.Name == "runtime/pprof.blockInfrequentLong" {
  1194  					il = float64(s.Value[1])
  1195  				} else if line.Function.Name == "runtime/pprof.blockFrequentShort" {
  1196  					fs = float64(s.Value[1])
  1197  				}
  1198  			}
  1199  		}
  1200  	}
  1201  	if il == -1 || fs == -1 {
  1202  		t.Fatal("block profile is missing expected functions")
  1203  	}
  1204  
   1205  	// threshold is 10x the standard deviation of the bias observed across 100 runs on a local machine.
  1206  	const threshold = 0.2
  1207  	if bias := (il - fs) / il; math.Abs(bias) > threshold {
  1208  		t.Fatalf("bias: abs(%f) > %f", bias, threshold)
  1209  	} else {
  1210  		t.Logf("bias: abs(%f) < %f", bias, threshold)
  1211  	}
  1212  }
  1213  
  1214  // blockFrequentShort produces 100000 block events with an average duration of
  1215  // rate / 10.
  1216  func blockFrequentShort(rate int) {
  1217  	for i := 0; i < 100000; i++ {
  1218  		blockevent(int64(rate/10), 1)
  1219  	}
  1220  }
  1221  
   1222  // blockInfrequentLong produces 10000 block events with an average duration of
  1223  // rate.
  1224  func blockInfrequentLong(rate int) {
  1225  	for i := 0; i < 10000; i++ {
  1226  		blockevent(int64(rate), 1)
  1227  	}
  1228  }
  1229  
  1230  // Used by TestBlockProfileBias.
  1231  //
  1232  //go:linkname blockevent runtime.blockevent
  1233  func blockevent(cycles int64, skip int)
  1234  
  1235  func TestMutexProfile(t *testing.T) {
  1236  	// Generate mutex profile
  1237  
  1238  	old := runtime.SetMutexProfileFraction(1)
  1239  	defer runtime.SetMutexProfileFraction(old)
  1240  	if old != 0 {
  1241  		t.Fatalf("need MutexProfileRate 0, got %d", old)
  1242  	}
  1243  
  1244  	const (
  1245  		N = 100
  1246  		D = 100 * time.Millisecond
  1247  	)
  1248  	start := time.Now()
  1249  	blockMutexN(t, N, D)
  1250  	blockMutexNTime := time.Since(start)
  1251  
  1252  	t.Run("debug=1", func(t *testing.T) {
  1253  		var w strings.Builder
  1254  		Lookup("mutex").WriteTo(&w, 1)
  1255  		prof := w.String()
  1256  		t.Logf("received profile: %v", prof)
  1257  
  1258  		if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
  1259  			t.Errorf("Bad profile header:\n%v", prof)
  1260  		}
  1261  		prof = strings.Trim(prof, "\n")
  1262  		lines := strings.Split(prof, "\n")
  1263  		if len(lines) < 6 {
  1264  			t.Fatalf("expected >=6 lines, got %d %q\n%s", len(lines), prof, prof)
  1265  		}
  1266  		// checking that the line is like "35258904 1 @ 0x48288d 0x47cd28 0x458931"
  1267  		r2 := `^\d+ \d+ @(?: 0x[[:xdigit:]]+)+`
  1268  		if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
  1269  			t.Errorf("%q didn't match %q", lines[3], r2)
  1270  		}
  1271  		r3 := "^#.*runtime/pprof.blockMutex.*$"
  1272  		if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
  1273  			t.Errorf("%q didn't match %q", lines[5], r3)
  1274  		}
   1275  		t.Log(prof)
  1276  	})
  1277  	t.Run("proto", func(t *testing.T) {
  1278  		// proto format
  1279  		var w bytes.Buffer
  1280  		Lookup("mutex").WriteTo(&w, 0)
  1281  		p, err := profile.Parse(&w)
  1282  		if err != nil {
  1283  			t.Fatalf("failed to parse profile: %v", err)
  1284  		}
  1285  		t.Logf("parsed proto: %s", p)
  1286  		if err := p.CheckValid(); err != nil {
  1287  			t.Fatalf("invalid profile: %v", err)
  1288  		}
  1289  
  1290  		stks := stacks(p)
  1291  		for _, want := range [][]string{
  1292  			{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1"},
  1293  		} {
  1294  			if !containsStack(stks, want) {
  1295  				t.Errorf("No matching stack entry for %+v", want)
  1296  			}
  1297  		}
  1298  
  1299  		i := 0
  1300  		for ; i < len(p.SampleType); i++ {
  1301  			if p.SampleType[i].Unit == "nanoseconds" {
  1302  				break
  1303  			}
  1304  		}
  1305  		if i >= len(p.SampleType) {
  1306  			t.Fatalf("profile did not contain nanoseconds sample")
  1307  		}
  1308  		total := int64(0)
  1309  		for _, s := range p.Sample {
  1310  			total += s.Value[i]
  1311  		}
  1312  		// Want d to be at least N*D, but give some wiggle-room to avoid
  1313  		// a test flaking. Set an upper-bound proportional to the total
  1314  		// wall time spent in blockMutexN. Generally speaking, the total
  1315  		// contention time could be arbitrarily high when considering
  1316  		// OS scheduler delays, or any other delays from the environment:
  1317  		// time keeps ticking during these delays. By making the upper
  1318  		// bound proportional to the wall time in blockMutexN, in theory
  1319  		// we're accounting for all these possible delays.
  1320  		d := time.Duration(total)
  1321  		lo := time.Duration(N * D * 9 / 10)
  1322  		hi := time.Duration(N) * blockMutexNTime * 11 / 10
  1323  		if d < lo || d > hi {
  1324  			for _, s := range p.Sample {
  1325  				t.Logf("sample: %s", time.Duration(s.Value[i]))
  1326  			}
  1327  			t.Fatalf("profile samples total %v, want within range [%v, %v] (target: %v)", d, lo, hi, N*D)
  1328  		}
  1329  	})
  1330  }
  1331  
  1332  func TestMutexProfileRateAdjust(t *testing.T) {
  1333  	old := runtime.SetMutexProfileFraction(1)
  1334  	defer runtime.SetMutexProfileFraction(old)
  1335  	if old != 0 {
  1336  		t.Fatalf("need MutexProfileRate 0, got %d", old)
  1337  	}
  1338  
  1339  	readProfile := func() (contentions int64, delay int64) {
  1340  		var w bytes.Buffer
  1341  		Lookup("mutex").WriteTo(&w, 0)
  1342  		p, err := profile.Parse(&w)
  1343  		if err != nil {
  1344  			t.Fatalf("failed to parse profile: %v", err)
  1345  		}
  1346  		t.Logf("parsed proto: %s", p)
  1347  		if err := p.CheckValid(); err != nil {
  1348  			t.Fatalf("invalid profile: %v", err)
  1349  		}
  1350  
  1351  		for _, s := range p.Sample {
  1352  			for _, l := range s.Location {
  1353  				for _, line := range l.Line {
  1354  					if line.Function.Name == "runtime/pprof.blockMutex.func1" {
  1355  						contentions += s.Value[0]
  1356  						delay += s.Value[1]
  1357  					}
  1358  				}
  1359  			}
  1360  		}
  1361  		return
  1362  	}
  1363  
  1364  	blockMutex(t)
  1365  	contentions, delay := readProfile()
  1366  	if contentions == 0 || delay == 0 {
  1367  		t.Fatal("did not see expected function in profile")
  1368  	}
  1369  	runtime.SetMutexProfileFraction(0)
  1370  	newContentions, newDelay := readProfile()
  1371  	if newContentions != contentions || newDelay != delay {
  1372  		t.Fatalf("sample value changed: got [%d, %d], want [%d, %d]", newContentions, newDelay, contentions, delay)
  1373  	}
  1374  }
  1375  
  1376  func func1(c chan int) { <-c }
  1377  func func2(c chan int) { <-c }
  1378  func func3(c chan int) { <-c }
  1379  func func4(c chan int) { <-c }
  1380  
  1381  func TestGoroutineCounts(t *testing.T) {
  1382  	// Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
  1383  	// desired blocking point.
  1384  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
  1385  
  1386  	c := make(chan int)
  1387  	for i := 0; i < 100; i++ {
  1388  		switch {
  1389  		case i%10 == 0:
  1390  			go func1(c)
  1391  		case i%2 == 0:
  1392  			go func2(c)
  1393  		default:
  1394  			go func3(c)
  1395  		}
  1396  		// Let goroutines block on channel
  1397  		for j := 0; j < 5; j++ {
  1398  			runtime.Gosched()
  1399  		}
  1400  	}
  1401  	ctx := context.Background()
  1402  
  1403  	// ... and again, with labels this time (just with fewer iterations to keep
  1404  	// sorting deterministic).
  1405  	Do(ctx, Labels("label", "value"), func(context.Context) {
  1406  		for i := 0; i < 89; i++ {
  1407  			switch {
  1408  			case i%10 == 0:
  1409  				go func1(c)
  1410  			case i%2 == 0:
  1411  				go func2(c)
  1412  			default:
  1413  				go func3(c)
  1414  			}
  1415  			// Let goroutines block on channel
  1416  			for j := 0; j < 5; j++ {
  1417  				runtime.Gosched()
  1418  			}
  1419  		}
  1420  	})
  1421  
  1422  	SetGoroutineLabels(WithLabels(context.Background(), Labels("self-label", "self-value")))
  1423  	defer SetGoroutineLabels(context.Background())
  1424  
  1425  	garbage := new(*int)
  1426  	fingReady := make(chan struct{})
  1427  	runtime.SetFinalizer(garbage, func(v **int) {
  1428  		Do(context.Background(), Labels("fing-label", "fing-value"), func(ctx context.Context) {
  1429  			close(fingReady)
  1430  			<-c
  1431  		})
  1432  	})
  1433  	garbage = nil
  1434  	for i := 0; i < 2; i++ {
  1435  		runtime.GC()
  1436  	}
  1437  	<-fingReady
  1438  
  1439  	var w bytes.Buffer
  1440  	goroutineProf := Lookup("goroutine")
  1441  
  1442  	// Check debug profile
  1443  	goroutineProf.WriteTo(&w, 1)
  1444  	prof := w.String()
  1445  
  1446  	labels := labelMap{"label": "value"}
  1447  	labelStr := "\n# labels: " + labels.String()
  1448  	selfLabel := labelMap{"self-label": "self-value"}
  1449  	selfLabelStr := "\n# labels: " + selfLabel.String()
  1450  	fingLabel := labelMap{"fing-label": "fing-value"}
  1451  	fingLabelStr := "\n# labels: " + fingLabel.String()
  1452  	orderedPrefix := []string{
  1453  		"\n50 @ ",
  1454  		"\n44 @", labelStr,
  1455  		"\n40 @",
  1456  		"\n36 @", labelStr,
  1457  		"\n10 @",
  1458  		"\n9 @", labelStr,
  1459  		"\n1 @"}
  1460  	if !containsInOrder(prof, append(orderedPrefix, selfLabelStr)...) {
  1461  		t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
  1462  	}
  1463  	if !containsInOrder(prof, append(orderedPrefix, fingLabelStr)...) {
  1464  		t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
  1465  	}
  1466  
  1467  	// Check proto profile
  1468  	w.Reset()
  1469  	goroutineProf.WriteTo(&w, 0)
  1470  	p, err := profile.Parse(&w)
  1471  	if err != nil {
  1472  		t.Errorf("error parsing protobuf profile: %v", err)
  1473  	}
  1474  	if err := p.CheckValid(); err != nil {
  1475  		t.Errorf("protobuf profile is invalid: %v", err)
  1476  	}
  1477  	expectedLabels := map[int64]map[string]string{
  1478  		50: {},
  1479  		44: {"label": "value"},
  1480  		40: {},
  1481  		36: {"label": "value"},
  1482  		10: {},
  1483  		9:  {"label": "value"},
  1484  		1:  {"self-label": "self-value", "fing-label": "fing-value"},
  1485  	}
  1486  	if !containsCountsLabels(p, expectedLabels) {
  1487  		t.Errorf("expected count profile to contain goroutines with counts and labels %v, got %v",
  1488  			expectedLabels, p)
  1489  	}
  1490  
  1491  	close(c)
  1492  
  1493  	time.Sleep(10 * time.Millisecond) // let goroutines exit
  1494  }
  1495  
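         // containsInOrder reports whether the strings in all occur in s as
         // non-overlapping substrings, in the given order.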
  1496  func containsInOrder(s string, all ...string) bool {
  1497  	for _, t := range all {
  1498  		var ok bool
  1499  		if _, s, ok = strings.Cut(s, t); !ok {
  1500  			return false
  1501  		}
  1502  	}
  1503  	return true
  1504  }
  1505  
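         // containsCountsLabels reports whether the goroutine counts and label
         // sets in prof match the expectations described by countLabels.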
  1506  func containsCountsLabels(prof *profile.Profile, countLabels map[int64]map[string]string) bool {
  1507  	m := make(map[int64]int)
  1508  	type nkey struct {
  1509  		count    int64
  1510  		key, val string
  1511  	}
  1512  	n := make(map[nkey]int)
  1513  	for c, kv := range countLabels {
  1514  		m[c]++
  1515  		for k, v := range kv {
  1516  			n[nkey{
  1517  				count: c,
  1518  				key:   k,
  1519  				val:   v,
  1520  			}]++
  1521  
  1522  		}
  1523  	}
  1524  	for _, s := range prof.Sample {
  1525  		// The count is the single value in the sample
  1526  		if len(s.Value) != 1 {
  1527  			return false
  1528  		}
  1529  		m[s.Value[0]]--
  1530  		for k, vs := range s.Label {
  1531  			for _, v := range vs {
  1532  				n[nkey{
  1533  					count: s.Value[0],
  1534  					key:   k,
  1535  					val:   v,
  1536  				}]--
  1537  			}
  1538  		}
  1539  	}
  1540  	for _, n := range m {
  1541  		if n > 0 {
  1542  			return false
  1543  		}
  1544  	}
  1545  	for _, ncnt := range n {
  1546  		if ncnt != 0 {
  1547  			return false
  1548  		}
  1549  	}
  1550  	return true
  1551  }
  1552  
  1553  func TestGoroutineProfileConcurrency(t *testing.T) {
  1554  	testenv.MustHaveParallelism(t)
  1555  
  1556  	goroutineProf := Lookup("goroutine")
  1557  
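         	// profilerCalls counts the goroutines in a debug-format profile that
         	// are themselves inside the goroutine profiler; two or more means
         	// that profile requests overlapped.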
  1558  	profilerCalls := func(s string) int {
  1559  		return strings.Count(s, "\truntime/pprof.runtime_goroutineProfileWithLabels+")
  1560  	}
  1561  
  1562  	includesFinalizer := func(s string) bool {
  1563  		return strings.Contains(s, "runtime.runfinq")
  1564  	}
  1565  
  1566  	// Concurrent calls to the goroutine profiler should not trigger data races
  1567  	// or corruption.
  1568  	t.Run("overlapping profile requests", func(t *testing.T) {
  1569  		ctx := context.Background()
  1570  		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
  1571  		defer cancel()
  1572  
  1573  		var wg sync.WaitGroup
  1574  		for i := 0; i < 2; i++ {
  1575  			wg.Add(1)
  1576  			Do(ctx, Labels("i", fmt.Sprint(i)), func(context.Context) {
  1577  				go func() {
  1578  					defer wg.Done()
  1579  					for ctx.Err() == nil {
  1580  						var w strings.Builder
  1581  						goroutineProf.WriteTo(&w, 1)
  1582  						prof := w.String()
  1583  						count := profilerCalls(prof)
  1584  						if count >= 2 {
  1585  							t.Logf("prof %d\n%s", count, prof)
  1586  							cancel()
  1587  						}
  1588  					}
  1589  				}()
  1590  			})
  1591  		}
  1592  		wg.Wait()
  1593  	})
  1594  
  1595  	// The finalizer goroutine should not show up in most profiles, since it's
  1596  	// marked as a system goroutine when idle.
  1597  	t.Run("finalizer not present", func(t *testing.T) {
  1598  		var w strings.Builder
  1599  		goroutineProf.WriteTo(&w, 1)
  1600  		prof := w.String()
  1601  		if includesFinalizer(prof) {
  1602  			t.Errorf("profile includes finalizer (but finalizer should be marked as system):\n%s", prof)
  1603  		}
  1604  	})
  1605  
  1606  	// The finalizer goroutine should show up when it's running user code.
  1607  	t.Run("finalizer present", func(t *testing.T) {
  1608  		obj := new(byte)
  1609  		ch1, ch2 := make(chan int), make(chan int)
  1610  		defer close(ch2)
  1611  		runtime.SetFinalizer(obj, func(_ interface{}) {
  1612  			close(ch1)
  1613  			<-ch2
  1614  		})
  1615  		obj = nil
  1616  		for i := 10; i >= 0; i-- {
  1617  			select {
  1618  			case <-ch1:
  1619  			default:
  1620  				if i == 0 {
  1621  					t.Fatalf("finalizer did not run")
  1622  				}
  1623  				runtime.GC()
  1624  			}
  1625  		}
  1626  		var w strings.Builder
  1627  		goroutineProf.WriteTo(&w, 1)
  1628  		prof := w.String()
  1629  		if !includesFinalizer(prof) {
  1630  			t.Errorf("profile does not include finalizer (and it should be marked as user):\n%s", prof)
  1631  		}
  1632  	})
  1633  
  1634  	// Check that new goroutines only show up in order.
  1635  	testLaunches := func(t *testing.T) {
  1636  		var done sync.WaitGroup
  1637  		defer done.Wait()
  1638  
  1639  		ctx := context.Background()
  1640  		ctx, cancel := context.WithCancel(ctx)
  1641  		defer cancel()
  1642  
  1643  		ch := make(chan int)
  1644  		defer close(ch)
  1645  
  1646  		var ready sync.WaitGroup
  1647  
  1648  		// These goroutines all survive until the end of the subtest, so we can
  1649  		// check that a (numbered) goroutine appearing in the profile implies
  1650  		// that all older goroutines also appear in the profile.
  1651  		ready.Add(1)
  1652  		done.Add(1)
  1653  		go func() {
  1654  			defer done.Done()
  1655  			for i := 0; ctx.Err() == nil; i++ {
   1656  				// Use SetGoroutineLabels rather than Do so we can always expect
   1657  				// an extra goroutine (this one) with the most recent label.
  1658  				SetGoroutineLabels(WithLabels(ctx, Labels(t.Name()+"-loop-i", fmt.Sprint(i))))
  1659  				done.Add(1)
  1660  				go func() {
  1661  					<-ch
  1662  					done.Done()
  1663  				}()
  1664  				for j := 0; j < i; j++ {
  1665  					// Spin for longer and longer as the test goes on. This
  1666  					// goroutine will do O(N^2) work with the number of
  1667  					// goroutines it launches. This should be slow relative to
  1668  					// the work involved in collecting a goroutine profile,
  1669  					// which is O(N) with the high-water mark of the number of
  1670  					// goroutines in this process (in the allgs slice).
  1671  					runtime.Gosched()
  1672  				}
  1673  				if i == 0 {
  1674  					ready.Done()
  1675  				}
  1676  			}
  1677  		}()
  1678  
  1679  		// Short-lived goroutines exercise different code paths (goroutines with
  1680  		// status _Gdead, for instance). This churn doesn't have behavior that
  1681  		// we can test directly, but does help to shake out data races.
  1682  		ready.Add(1)
  1683  		var churn func(i int)
  1684  		churn = func(i int) {
  1685  			SetGoroutineLabels(WithLabels(ctx, Labels(t.Name()+"-churn-i", fmt.Sprint(i))))
  1686  			if i == 0 {
  1687  				ready.Done()
  1688  			} else if i%16 == 0 {
  1689  				// Yield on occasion so this sequence of goroutine launches
  1690  				// doesn't monopolize a P. See issue #52934.
  1691  				runtime.Gosched()
  1692  			}
  1693  			if ctx.Err() == nil {
  1694  				go churn(i + 1)
  1695  			}
  1696  		}
  1697  		go func() {
  1698  			churn(0)
  1699  		}()
  1700  
  1701  		ready.Wait()
  1702  
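        		// Take a few goroutine profiles back to back; each one must
        		// independently satisfy the ordering invariant checked below.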
  1703  		var w [3]bytes.Buffer
  1704  		for i := range w {
  1705  			goroutineProf.WriteTo(&w[i], 0)
  1706  		}
  1707  		for i := range w {
  1708  			p, err := profile.Parse(bytes.NewReader(w[i].Bytes()))
  1709  			if err != nil {
  1710  				t.Errorf("error parsing protobuf profile: %v", err)
  1711  			}
  1712  
  1713  			// High-numbered loop-i goroutines imply that every lower-numbered
  1714  			// loop-i goroutine should be present in the profile too.
  1715  			counts := make(map[string]int)
  1716  			for _, s := range p.Sample {
  1717  				label := s.Label[t.Name()+"-loop-i"]
  1718  				if len(label) > 0 {
  1719  					counts[label[0]]++
  1720  				}
  1721  			}
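        			// Each label should appear exactly once, except that the
        			// launching goroutine itself also carries the most recent
        			// label (via SetGoroutineLabels above), so the last entry
        			// may appear twice.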
  1722  			for j, max := 0, len(counts)-1; j <= max; j++ {
  1723  				n := counts[fmt.Sprint(j)]
  1724  				if n == 1 || (n == 2 && j == max) {
  1725  					continue
  1726  				}
  1727  				t.Errorf("profile #%d has %d goroutines with label loop-i:%d; want 1 (or 2 for the last entry, %d)",
  1728  					i+1, n, j, max)
  1729  				t.Logf("counts %v", counts)
  1730  				break
  1731  			}
  1732  		}
  1733  	}
  1734  
  1735  	runs := 100
  1736  	if testing.Short() {
  1737  		runs = 5
  1738  	}
  1739  	for i := 0; i < runs; i++ {
  1740  		// Run multiple times to shake out data races
  1741  		t.Run("goroutine launches", testLaunches)
  1742  	}
  1743  }
  1744  
  1745  func BenchmarkGoroutine(b *testing.B) {
  1746  	withIdle := func(n int, fn func(b *testing.B)) func(b *testing.B) {
  1747  		return func(b *testing.B) {
  1748  			c := make(chan int)
  1749  			var ready, done sync.WaitGroup
  1750  			defer func() {
  1751  				close(c)
  1752  				done.Wait()
  1753  			}()
  1754  
  1755  			for i := 0; i < n; i++ {
  1756  				ready.Add(1)
  1757  				done.Add(1)
  1758  				go func() {
  1759  					ready.Done()
  1760  					<-c
  1761  					done.Done()
  1762  				}()
  1763  			}
  1764  			// Let goroutines block on channel
  1765  			ready.Wait()
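        			// Yield a few times so the newly started goroutines can
        			// actually park in the channel receive.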
  1766  			for i := 0; i < 5; i++ {
  1767  				runtime.Gosched()
  1768  			}
  1769  
  1770  			fn(b)
  1771  		}
  1772  	}
  1773  
  1774  	withChurn := func(fn func(b *testing.B)) func(b *testing.B) {
  1775  		return func(b *testing.B) {
  1776  			ctx := context.Background()
  1777  			ctx, cancel := context.WithCancel(ctx)
  1778  			defer cancel()
  1779  
  1780  			var ready sync.WaitGroup
  1781  			ready.Add(1)
  1782  			var count int64
  1783  			var churn func(i int)
  1784  			churn = func(i int) {
  1785  				SetGoroutineLabels(WithLabels(ctx, Labels("churn-i", fmt.Sprint(i))))
  1786  				atomic.AddInt64(&count, 1)
  1787  				if i == 0 {
  1788  					ready.Done()
  1789  				}
  1790  				if ctx.Err() == nil {
  1791  					go churn(i + 1)
  1792  				}
  1793  			}
  1794  			go func() {
  1795  				churn(0)
  1796  			}()
  1797  			ready.Wait()
  1798  
  1799  			fn(b)
  1800  			b.ReportMetric(float64(atomic.LoadInt64(&count))/float64(b.N), "concurrent_launches/op")
  1801  		}
  1802  	}
  1803  
  1804  	benchWriteTo := func(b *testing.B) {
  1805  		goroutineProf := Lookup("goroutine")
  1806  		b.ResetTimer()
  1807  		for i := 0; i < b.N; i++ {
  1808  			goroutineProf.WriteTo(io.Discard, 0)
  1809  		}
  1810  		b.StopTimer()
  1811  	}
  1812  
  1813  	benchGoroutineProfile := func(b *testing.B) {
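        		// 10000 records is comfortably larger than the goroutine counts
        		// used below, so a single GoroutineProfile call can succeed.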
  1814  		p := make([]runtime.StackRecord, 10000)
  1815  		b.ResetTimer()
  1816  		for i := 0; i < b.N; i++ {
  1817  			runtime.GoroutineProfile(p)
  1818  		}
  1819  		b.StopTimer()
  1820  	}
  1821  
  1822  	// Note that some costs of collecting a goroutine profile depend on the
  1823  	// length of the runtime.allgs slice, which never shrinks. Stay within the
  1824  	// race detector's 8k-goroutine limit.
  1825  	for _, n := range []int{50, 500, 5000} {
  1826  		b.Run(fmt.Sprintf("Profile.WriteTo idle %d", n), withIdle(n, benchWriteTo))
  1827  		b.Run(fmt.Sprintf("Profile.WriteTo churn %d", n), withIdle(n, withChurn(benchWriteTo)))
  1828  		b.Run(fmt.Sprintf("runtime.GoroutineProfile churn %d", n), withIdle(n, withChurn(benchGoroutineProfile)))
  1829  	}
  1830  }
  1831  
  1832  var emptyCallStackTestRun int64
  1833  
  1834  // Issue 18836.
  1835  func TestEmptyCallStack(t *testing.T) {
  1836  	name := fmt.Sprintf("test18836_%d", emptyCallStackTestRun)
  1837  	emptyCallStackTestRun++
  1838  
  1839  	t.Parallel()
  1840  	var buf strings.Builder
  1841  	p := NewProfile(name)
  1842  
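        	// The absurdly large skip count leaves the recorded call stack empty,
        	// which WriteTo reports as a lostProfileEvent.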
  1843  	p.Add("foo", 47674)
  1844  	p.WriteTo(&buf, 1)
  1845  	p.Remove("foo")
  1846  	got := buf.String()
  1847  	prefix := name + " profile: total 1\n"
  1848  	if !strings.HasPrefix(got, prefix) {
  1849  		t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
  1850  	}
  1851  	lostevent := "lostProfileEvent"
  1852  	if !strings.Contains(got, lostevent) {
  1853  		t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
  1854  	}
  1855  }
  1856  
  1857  // stackContainsLabeled takes a spec like funcname;key=value and matches if the
  1858  // sample is labeled with that key and value and has funcname somewhere in its stack.
  1859  func stackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
  1860  	base, kv, ok := strings.Cut(spec, ";")
  1861  	if !ok {
  1862  		panic("no semicolon in key/value spec")
  1863  	}
  1864  	k, v, ok := strings.Cut(kv, "=")
  1865  	if !ok {
  1866  		panic("missing = in key/value spec")
  1867  	}
  1868  	if !contains(labels[k], v) {
  1869  		return false
  1870  	}
  1871  	return stackContains(base, count, stk, labels)
  1872  }
  1873  
  1874  func TestCPUProfileLabel(t *testing.T) {
  1875  	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, avoidFunctions())
  1876  	testCPUProfile(t, matches, func(dur time.Duration) {
  1877  		Do(context.Background(), Labels("key", "value"), func(context.Context) {
  1878  			cpuHogger(cpuHog1, &salt1, dur)
  1879  		})
  1880  	})
  1881  }
  1882  
  1883  func TestLabelRace(t *testing.T) {
  1884  	testenv.MustHaveParallelism(t)
  1885  	// Test the race detector annotations for synchronization
  1886  	// between setting labels and consuming them from the
  1887  	// profile.
  1888  	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, nil)
  1889  	testCPUProfile(t, matches, func(dur time.Duration) {
  1890  		start := time.Now()
  1891  		var wg sync.WaitGroup
  1892  		for time.Since(start) < dur {
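        			// Give each goroutine its own salt so the hog loops don't
        			// write to shared memory; the race under test is between
        			// setting labels and reading them from the profile.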
  1893  			var salts [10]int
  1894  			for i := 0; i < 10; i++ {
  1895  				wg.Add(1)
  1896  				go func(j int) {
  1897  					Do(context.Background(), Labels("key", "value"), func(context.Context) {
  1898  						cpuHogger(cpuHog1, &salts[j], time.Millisecond)
  1899  					})
  1900  					wg.Done()
  1901  				}(i)
  1902  			}
  1903  			wg.Wait()
  1904  		}
  1905  	})
  1906  }
  1907  
  1908  func TestGoroutineProfileLabelRace(t *testing.T) {
  1909  	testenv.MustHaveParallelism(t)
  1910  	// Test the race detector annotations for synchronization
  1911  	// between setting labels and consuming them from the
  1912  	// goroutine profile. See issue #50292.
  1913  
  1914  	t.Run("reset", func(t *testing.T) {
  1915  		ctx := context.Background()
  1916  		ctx, cancel := context.WithCancel(ctx)
  1917  		defer cancel()
  1918  
  1919  		go func() {
  1920  			goroutineProf := Lookup("goroutine")
  1921  			for ctx.Err() == nil {
  1922  				var w strings.Builder
  1923  				goroutineProf.WriteTo(&w, 1)
  1924  				prof := w.String()
  1925  				if strings.Contains(prof, "loop-i") {
  1926  					cancel()
  1927  				}
  1928  			}
  1929  		}()
  1930  
  1931  		for i := 0; ctx.Err() == nil; i++ {
  1932  			Do(ctx, Labels("loop-i", fmt.Sprint(i)), func(ctx context.Context) {
  1933  			})
  1934  		}
  1935  	})
  1936  
  1937  	t.Run("churn", func(t *testing.T) {
  1938  		ctx := context.Background()
  1939  		ctx, cancel := context.WithCancel(ctx)
  1940  		defer cancel()
  1941  
  1942  		var ready sync.WaitGroup
  1943  		ready.Add(1)
  1944  		var churn func(i int)
  1945  		churn = func(i int) {
  1946  			SetGoroutineLabels(WithLabels(ctx, Labels("churn-i", fmt.Sprint(i))))
  1947  			if i == 0 {
  1948  				ready.Done()
  1949  			}
  1950  			if ctx.Err() == nil {
  1951  				go churn(i + 1)
  1952  			}
  1953  		}
  1954  		go func() {
  1955  			churn(0)
  1956  		}()
  1957  		ready.Wait()
  1958  
  1959  		goroutineProf := Lookup("goroutine")
  1960  		for i := 0; i < 10; i++ {
  1961  			goroutineProf.WriteTo(io.Discard, 1)
  1962  		}
  1963  	})
  1964  }
  1965  
  1966  // TestLabelSystemstack makes sure CPU profiler samples of goroutines running
  1967  // on the system stack include the correct pprof labels. See issue #48577.
  1968  func TestLabelSystemstack(t *testing.T) {
  1969  	// Grab and re-set the initial value before continuing to ensure
  1970  	// GOGC doesn't actually change following the test.
  1971  	gogc := debug.SetGCPercent(100)
  1972  	debug.SetGCPercent(gogc)
  1973  
  1974  	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime.systemstack;key=value"}, avoidFunctions())
  1975  	p := testCPUProfile(t, matches, func(dur time.Duration) {
  1976  		Do(context.Background(), Labels("key", "value"), func(ctx context.Context) {
  1977  			parallelLabelHog(ctx, dur, gogc)
  1978  		})
  1979  	})
  1980  
  1981  	// Two conditions to check:
  1982  	// * labelHog should always be labeled.
  1983  	// * The label should _only_ appear on labelHog and the Do call above.
  1984  	for _, s := range p.Sample {
  1985  		isLabeled := s.Label != nil && contains(s.Label["key"], "value")
  1986  		var (
  1987  			mayBeLabeled     bool
  1988  			mustBeLabeled    string
  1989  			mustNotBeLabeled string
  1990  		)
  1991  		for _, loc := range s.Location {
  1992  			for _, l := range loc.Line {
  1993  				switch l.Function.Name {
  1994  				case "runtime/pprof.labelHog", "runtime/pprof.parallelLabelHog", "runtime/pprof.parallelLabelHog.func1":
  1995  					mustBeLabeled = l.Function.Name
  1996  				case "runtime/pprof.Do":
  1997  					// Do sets the labels, so samples may
  1998  					// or may not be labeled depending on
  1999  					// which part of the function they are
  2000  					// at.
  2001  					mayBeLabeled = true
  2002  				case "runtime.bgsweep", "runtime.bgscavenge", "runtime.forcegchelper", "runtime.gcBgMarkWorker", "runtime.runfinq", "runtime.sysmon":
  2003  					// Runtime system goroutines or threads
  2004  					// (such as those identified by
  2005  					// runtime.isSystemGoroutine). These
  2006  					// should never be labeled.
  2007  					mustNotBeLabeled = l.Function.Name
  2008  				case "gogo", "gosave_systemstack_switch", "racecall":
  2009  					// These are context-switch and race
  2010  					// runtime critical sections that we
  2011  					// can't do a full traceback from.
  2012  					// Typically this would be covered by
  2013  					// the runtime check below, but these
  2014  					// symbols don't have the package name.
  2015  					mayBeLabeled = true
  2016  				}
  2017  
  2018  				if strings.HasPrefix(l.Function.Name, "runtime.") {
  2019  					// There are many places in the runtime
  2020  					// where we can't do a full traceback.
  2021  					// Ideally we'd list them all, but
  2022  					// barring that allow anything in the
  2023  					// runtime, unless explicitly excluded
  2024  					// above.
  2025  					mayBeLabeled = true
  2026  				}
  2027  			}
  2028  		}
  2029  		errorStack := func(f string, args ...any) {
  2030  			var buf strings.Builder
  2031  			fprintStack(&buf, s.Location)
  2032  			t.Errorf("%s: %s", fmt.Sprintf(f, args...), buf.String())
  2033  		}
  2034  		if mustBeLabeled != "" && mustNotBeLabeled != "" {
  2035  			errorStack("sample contains both %s, which must be labeled, and %s, which must not be labeled", mustBeLabeled, mustNotBeLabeled)
  2036  			continue
  2037  		}
  2038  		if mustBeLabeled != "" || mustNotBeLabeled != "" {
  2039  			// We found a definitive frame, so mayBeLabeled hints are not relevant.
  2040  			mayBeLabeled = false
  2041  		}
  2042  		if mayBeLabeled {
  2043  			// This sample may or may not be labeled, so there's nothing we can check.
  2044  			continue
  2045  		}
  2046  		if mustBeLabeled != "" && !isLabeled {
  2047  			errorStack("sample must be labeled because of %s, but is not", mustBeLabeled)
  2048  		}
  2049  		if mustNotBeLabeled != "" && isLabeled {
  2050  			errorStack("sample must not be labeled because of %s, but is", mustNotBeLabeled)
  2051  		}
  2052  	}
  2053  }
  2054  
  2055  // labelHog is designed to burn CPU time so that a large share of the CPU
  2056  // samples end up running on the system stack.
  2057  func labelHog(stop chan struct{}, gogc int) {
  2058  	// Regression test for issue 50032. We must give the GC an opportunity to
  2059  	// be initially triggered by a labeled goroutine.
  2060  	runtime.GC()
  2061  
  2062  	for i := 0; ; i++ {
  2063  		select {
  2064  		case <-stop:
  2065  			return
  2066  		default:
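        			// SetGCPercent does its work on the system stack (it takes
        			// the heap lock there), so hammering it in a loop produces
        			// the system-stack samples this test needs.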
  2067  			debug.SetGCPercent(gogc)
  2068  		}
  2069  	}
  2070  }
  2071  
  2072  // parallelLabelHog runs GOMAXPROCS goroutines running labelHog.
  2073  func parallelLabelHog(ctx context.Context, dur time.Duration, gogc int) {
  2074  	var wg sync.WaitGroup
  2075  	stop := make(chan struct{})
  2076  	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
  2077  		wg.Add(1)
  2078  		go func() {
  2079  			defer wg.Done()
  2080  			labelHog(stop, gogc)
  2081  		}()
  2082  	}
  2083  
  2084  	time.Sleep(dur)
  2085  	close(stop)
  2086  	wg.Wait()
  2087  }
  2088  
  2089  // Check that there is no deadlock when the program receives SIGPROF while in
  2090  // a 64-bit atomic operation's critical section. Used to happen on mips{,le}. See #20146.
  2091  func TestAtomicLoadStore64(t *testing.T) {
  2092  	f, err := os.CreateTemp("", "profatomic")
  2093  	if err != nil {
  2094  		t.Fatalf("TempFile: %v", err)
  2095  	}
  2096  	defer os.Remove(f.Name())
  2097  	defer f.Close()
  2098  
  2099  	if err := StartCPUProfile(f); err != nil {
  2100  		t.Fatal(err)
  2101  	}
  2102  	defer StopCPUProfile()
  2103  
  2104  	var flag uint64
  2105  	done := make(chan bool, 1)
  2106  
  2107  	go func() {
  2108  		for atomic.LoadUint64(&flag) == 0 {
  2109  			runtime.Gosched()
  2110  		}
  2111  		done <- true
  2112  	}()
  2113  	time.Sleep(50 * time.Millisecond)
  2114  	atomic.StoreUint64(&flag, 1)
  2115  	<-done
  2116  }
  2117  
  2118  func TestTracebackAll(t *testing.T) {
  2119  	// With gccgo, if a profiling signal arrives at the wrong time
  2120  	// during traceback, it may crash or hang. See issue #29448.
  2121  	f, err := os.CreateTemp("", "proftraceback")
  2122  	if err != nil {
  2123  		t.Fatalf("TempFile: %v", err)
  2124  	}
  2125  	defer os.Remove(f.Name())
  2126  	defer f.Close()
  2127  
  2128  	if err := StartCPUProfile(f); err != nil {
  2129  		t.Fatal(err)
  2130  	}
  2131  	defer StopCPUProfile()
  2132  
  2133  	ch := make(chan int)
  2134  	defer close(ch)
  2135  
  2136  	count := 10
  2137  	for i := 0; i < count; i++ {
  2138  		go func() {
  2139  			<-ch // block
  2140  		}()
  2141  	}
  2142  
  2143  	N := 10000
  2144  	if testing.Short() {
  2145  		N = 500
  2146  	}
  2147  	buf := make([]byte, 10*1024)
  2148  	for i := 0; i < N; i++ {
  2149  		runtime.Stack(buf, true)
  2150  	}
  2151  }
  2152  
  2153  // TestTryAdd tests the cases that are hard to test with real program execution.
  2154  //
  2155  // For example, the current Go compilers may not always inline functions
  2156  // involved in recursion, but that may not be true of future compilers. This
  2157  // tests such cases by using fake call sequences and forcing the profile build
  2158  // via translateCPUProfile, defined in proto_test.go.
  2159  func TestTryAdd(t *testing.T) {
  2160  	if _, found := findInlinedCall(inlinedCallerDump, 4<<10); !found {
  2161  		t.Skip("Can't determine whether anything was inlined into inlinedCallerDump.")
  2162  	}
  2163  
  2164  	// inlinedCallerDump
  2165  	//   inlinedCalleeDump
  2166  	pcs := make([]uintptr, 2)
  2167  	inlinedCallerDump(pcs)
  2168  	inlinedCallerStack := make([]uint64, 2)
  2169  	for i := range pcs {
  2170  		inlinedCallerStack[i] = uint64(pcs[i])
  2171  	}
  2172  	wrapperPCs := make([]uintptr, 1)
  2173  	inlinedWrapperCallerDump(wrapperPCs)
  2174  
  2175  	if _, found := findInlinedCall(recursionChainBottom, 4<<10); !found {
  2176  		t.Skip("Can't determine whether anything was inlined into recursionChainBottom.")
  2177  	}
  2178  
  2179  	// recursionChainTop
  2180  	//   recursionChainMiddle
  2181  	//     recursionChainBottom
  2182  	//       recursionChainTop
  2183  	//         recursionChainMiddle
  2184  	//           recursionChainBottom
  2185  	pcs = make([]uintptr, 6)
  2186  	recursionChainTop(1, pcs)
  2187  	recursionStack := make([]uint64, len(pcs))
  2188  	for i := range pcs {
  2189  		recursionStack[i] = uint64(pcs[i])
  2190  	}
  2191  
  2192  	period := int64(2000 * 1000) // 2e6 ns: the period of a 500 Hz sampler (1/500 * 1e9).
  2193  
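        	// Each input record below is length-prefixed: data[0] is the total
        	// number of words in the record, data[1] is a timestamp (unused
        	// here), data[2] is the sample count (or hz for the initial record),
        	// and the remaining words are stack PCs.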
  2194  	testCases := []struct {
  2195  		name        string
  2196  		input       []uint64          // following the input format assumed by profileBuilder.addCPUData.
  2197  		count       int               // number of records in input.
  2198  		wantLocs    [][]string        // ordered location entries with function names.
  2199  		wantSamples []*profile.Sample // ordered samples, we care only about Value and the profile location IDs.
  2200  	}{{
  2201  		// Sanity test for a normal, complete stack trace.
  2202  		name: "full_stack_trace",
  2203  		input: []uint64{
  2204  			3, 0, 500, // hz = 500. Must match the period.
  2205  			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
  2206  		},
  2207  		count: 2,
  2208  		wantLocs: [][]string{
  2209  			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
  2210  		},
  2211  		wantSamples: []*profile.Sample{
  2212  			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
  2213  		},
  2214  	}, {
  2215  		name: "bug35538",
  2216  		input: []uint64{
  2217  			3, 0, 500, // hz = 500. Must match the period.
  2218  			// Fake frame: tryAdd will have inlinedCallerDump
  2219  			// (stack[1]) on the deck when it encounters the next
  2220  			// inline function. It should accept this.
  2221  			7, 0, 10, inlinedCallerStack[0], inlinedCallerStack[1], inlinedCallerStack[0], inlinedCallerStack[1],
  2222  			5, 0, 20, inlinedCallerStack[0], inlinedCallerStack[1],
  2223  		},
  2224  		count:    3,
  2225  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
  2226  		wantSamples: []*profile.Sample{
  2227  			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
  2228  			{Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
  2229  		},
  2230  	}, {
  2231  		name: "bug38096",
  2232  		input: []uint64{
  2233  			3, 0, 500, // hz = 500. Must match the period.
  2234  			// count (data[2]) == 0 && len(stk) == 1 is an overflow
  2235  			// entry. The "stk" entry is actually the count.
  2236  			4, 0, 0, 4242,
  2237  		},
  2238  		count:    2,
  2239  		wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
  2240  		wantSamples: []*profile.Sample{
  2241  			{Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
  2242  		},
  2243  	}, {
  2244  		// If a function is directly called recursively then it must
  2245  		// not be inlined in the caller.
  2246  		//
  2247  		// N.B. We're generating an impossible profile here, with a
  2248  		// recursive inlinedCalleeDump call. This is simulating a non-Go
  2249  		// function that looks like an inlined Go function other than
  2250  		// its recursive property. See pcDeck.tryAdd.
  2251  		name: "directly_recursive_func_is_not_inlined",
  2252  		input: []uint64{
  2253  			3, 0, 500, // hz = 500. Must match the period.
  2254  			5, 0, 30, inlinedCallerStack[0], inlinedCallerStack[0],
  2255  			4, 0, 40, inlinedCallerStack[0],
  2256  		},
  2257  		count: 3,
  2258  		// inlinedCallerDump shows up here because
  2259  		// runtime_expandFinalInlineFrame adds it to the stack frame.
  2260  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump"}, {"runtime/pprof.inlinedCallerDump"}},
  2261  		wantSamples: []*profile.Sample{
  2262  			{Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}},
  2263  			{Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
  2264  		},
  2265  	}, {
  2266  		name: "recursion_chain_inline",
  2267  		input: []uint64{
  2268  			3, 0, 500, // hz = 500. Must match the period.
  2269  			9, 0, 10, recursionStack[0], recursionStack[1], recursionStack[2], recursionStack[3], recursionStack[4], recursionStack[5],
  2270  		},
  2271  		count: 2,
  2272  		wantLocs: [][]string{
  2273  			{"runtime/pprof.recursionChainBottom"},
  2274  			{
  2275  				"runtime/pprof.recursionChainMiddle",
  2276  				"runtime/pprof.recursionChainTop",
  2277  				"runtime/pprof.recursionChainBottom",
  2278  			},
  2279  			{
  2280  				"runtime/pprof.recursionChainMiddle",
  2281  				"runtime/pprof.recursionChainTop",
  2282  				"runtime/pprof.TestTryAdd", // inlined into the test.
  2283  			},
  2284  		},
  2285  		wantSamples: []*profile.Sample{
  2286  			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}, {ID: 3}}},
  2287  		},
  2288  	}, {
  2289  		name: "truncated_stack_trace_later",
  2290  		input: []uint64{
  2291  			3, 0, 500, // hz = 500. Must match the period.
  2292  			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
  2293  			4, 0, 60, inlinedCallerStack[0],
  2294  		},
  2295  		count:    3,
  2296  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
  2297  		wantSamples: []*profile.Sample{
  2298  			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
  2299  			{Value: []int64{60, 60 * period}, Location: []*profile.Location{{ID: 1}}},
  2300  		},
  2301  	}, {
  2302  		name: "truncated_stack_trace_first",
  2303  		input: []uint64{
  2304  			3, 0, 500, // hz = 500. Must match the period.
  2305  			4, 0, 70, inlinedCallerStack[0],
  2306  			5, 0, 80, inlinedCallerStack[0], inlinedCallerStack[1],
  2307  		},
  2308  		count:    3,
  2309  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
  2310  		wantSamples: []*profile.Sample{
  2311  			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
  2312  			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}},
  2313  		},
  2314  	}, {
  2315  		// We can recover the inlined caller from a truncated stack.
  2316  		name: "truncated_stack_trace_only",
  2317  		input: []uint64{
  2318  			3, 0, 500, // hz = 500. Must match the period.
  2319  			4, 0, 70, inlinedCallerStack[0],
  2320  		},
  2321  		count:    2,
  2322  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
  2323  		wantSamples: []*profile.Sample{
  2324  			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
  2325  		},
  2326  	}, {
  2327  		// The same location is used for duplicated stacks.
  2328  		name: "truncated_stack_trace_twice",
  2329  		input: []uint64{
  2330  			3, 0, 500, // hz = 500. Must match the period.
  2331  			4, 0, 70, inlinedCallerStack[0],
  2332  			// Fake frame: add a fake call to
  2333  			// inlinedCallerDump to prevent this sample
  2334  			// from getting merged into the sample above.
  2335  			5, 0, 80, inlinedCallerStack[1], inlinedCallerStack[0],
  2336  		},
  2337  		count: 3,
  2338  		wantLocs: [][]string{
  2339  			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
  2340  			{"runtime/pprof.inlinedCallerDump"},
  2341  		},
  2342  		wantSamples: []*profile.Sample{
  2343  			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
  2344  			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}},
  2345  		},
  2346  	}, {
  2347  		name: "expand_wrapper_function",
  2348  		input: []uint64{
  2349  			3, 0, 500, // hz = 500. Must match the period.
  2350  			4, 0, 50, uint64(wrapperPCs[0]),
  2351  		},
  2352  		count:    2,
  2353  		wantLocs: [][]string{{"runtime/pprof.inlineWrapper.dump"}},
  2354  		wantSamples: []*profile.Sample{
  2355  			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
  2356  		},
  2357  	}}
  2358  
  2359  	for _, tc := range testCases {
  2360  		t.Run(tc.name, func(t *testing.T) {
  2361  			p, err := translateCPUProfile(tc.input, tc.count)
  2362  			if err != nil {
  2363  				t.Fatalf("translating profile: %v", err)
  2364  			}
  2365  			t.Logf("Profile: %v\n", p)
  2366  
  2367  			// Each location entry should hold all functions inlined at that PC.
  2368  			var gotLoc [][]string
  2369  			for _, loc := range p.Location {
  2370  				var names []string
  2371  				for _, line := range loc.Line {
  2372  					names = append(names, line.Function.Name)
  2373  				}
  2374  				gotLoc = append(gotLoc, names)
  2375  			}
  2376  			if got, want := fmtJSON(gotLoc), fmtJSON(tc.wantLocs); got != want {
  2377  				t.Errorf("Got Location = %+v\n\twant %+v", got, want)
  2378  			}
  2379  			// Compare samples by their Values and the IDs of their Locations.
  2380  			var gotSamples []*profile.Sample
  2381  			for _, sample := range p.Sample {
  2382  				var locs []*profile.Location
  2383  				for _, loc := range sample.Location {
  2384  					locs = append(locs, &profile.Location{ID: loc.ID})
  2385  				}
  2386  				gotSamples = append(gotSamples, &profile.Sample{Value: sample.Value, Location: locs})
  2387  			}
  2388  			if got, want := fmtJSON(gotSamples), fmtJSON(tc.wantSamples); got != want {
  2389  				t.Errorf("Got Samples = %+v\n\twant %+v", got, want)
  2390  			}
  2391  		})
  2392  	}
  2393  }
  2394  
  2395  func TestTimeVDSO(t *testing.T) {
  2396  	// Test that time functions have the right stack trace. In particular,
  2397  	// time.now should not appear to call itself recursively.
  2398  
  2399  	if runtime.GOOS == "android" {
  2400  		// Flaky on Android, issue 48655. VDSO may not be enabled.
  2401  		testenv.SkipFlaky(t, 48655)
  2402  	}
  2403  
  2404  	matches := matchAndAvoidStacks(stackContains, []string{"time.now"}, avoidFunctions())
  2405  	p := testCPUProfile(t, matches, func(dur time.Duration) {
  2406  		t0 := time.Now()
  2407  		for {
  2408  			t := time.Now()
  2409  			if t.Sub(t0) >= dur {
  2410  				return
  2411  			}
  2412  		}
  2413  	})
  2414  
  2415  	// Check for recursive time.now sample.
  2416  	for _, sample := range p.Sample {
  2417  		var seenNow bool
  2418  		for _, loc := range sample.Location {
  2419  			for _, line := range loc.Line {
  2420  				if line.Function.Name == "time.now" {
  2421  					if seenNow {
  2422  						t.Fatalf("unexpected recursive time.now")
  2423  					}
  2424  					seenNow = true
  2425  				}
  2426  			}
  2427  		}
  2428  	}
  2429  }
  2430  