
Source file src/runtime/stack_test.go

Documentation: runtime

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"bytes"
	"fmt"
	"os"
	"reflect"
	"regexp"
	. "runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

// TestStackMem measures per-thread stack segment cache behavior.
// The test consumed up to 500MB in the past.
func TestStackMem(t *testing.T) {
	const (
		BatchSize      = 32
		BatchCount     = 256
		ArraySize      = 1024
		RecursionDepth = 128
	)
	if testing.Short() {
		return
	}
	defer GOMAXPROCS(GOMAXPROCS(BatchSize))
	s0 := new(MemStats)
	ReadMemStats(s0)
	for b := 0; b < BatchCount; b++ {
		c := make(chan bool, BatchSize)
		for i := 0; i < BatchSize; i++ {
			go func() {
				var f func(k int, a [ArraySize]byte)
				f = func(k int, a [ArraySize]byte) {
					if k == 0 {
						time.Sleep(time.Millisecond)
						return
					}
					f(k-1, a)
				}
				f(RecursionDepth, [ArraySize]byte{})
				c <- true
			}()
		}
		for i := 0; i < BatchSize; i++ {
			<-c
		}

		// The goroutines have signaled via c that they are ready to exit.
		// Give them a chance to exit by sleeping. If we don't wait, we
		// might not reuse them on the next batch.
		time.Sleep(10 * time.Millisecond)
	}
	s1 := new(MemStats)
	ReadMemStats(s1)
	consumed := int64(s1.StackSys - s0.StackSys)
	t.Logf("Consumed %vMB for stack mem", consumed>>20)
	estimate := int64(8 * BatchSize * ArraySize * RecursionDepth) // 8 is to reduce flakiness.
	if consumed > estimate {
		t.Fatalf("Stack mem: want %v, got %v", estimate, consumed)
	}
	// Due to broken stack memory accounting (https://golang.org/issue/7468),
	// StackInuse can decrease during function execution, so we cast the values to int64.
	inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
	t.Logf("Inuse %vMB for stack mem", inuse>>20)
	if inuse > 4<<20 {
		t.Fatalf("Stack inuse: want %v, got %v", 4<<20, inuse)
	}
}

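// Illustrative sketch (not part of the upstream file): the measurement
// pattern TestStackMem relies on is diffing MemStats snapshots taken
// around a workload. The helper name reportStackUsage is hypothetical.
func reportStackUsage(workload func()) (sys, inuse int64) {
	s0, s1 := new(MemStats), new(MemStats)
	ReadMemStats(s0)
	workload()
	ReadMemStats(s1)
	// Convert to int64 before subtracting: StackInuse can shrink
	// (https://golang.org/issue/7468), and unsigned subtraction
	// would wrap around to a huge bogus value.
	sys = int64(s1.StackSys) - int64(s0.StackSys)
	inuse = int64(s1.StackInuse) - int64(s0.StackInuse)
	return
}
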
// Test stack growing in different contexts.
func TestStackGrowth(t *testing.T) {
	if GOARCH == "wasm" {
		t.Skip("fails on wasm (too slow?)")
	}

	// Don't make this test parallel as this makes the 20 second
	// timeout unreliable on slow builders. (See issue #19381.)

	var wg sync.WaitGroup

	// in a normal goroutine
	var growDuration time.Duration // For debugging failures
	wg.Add(1)
	go func() {
		defer wg.Done()
		start := time.Now()
		growStack(nil)
		growDuration = time.Since(start)
	}()
	wg.Wait()

	// in locked goroutine
	wg.Add(1)
	go func() {
		defer wg.Done()
		LockOSThread()
		growStack(nil)
		UnlockOSThread()
	}()
	wg.Wait()

	// in finalizer
	wg.Add(1)
	go func() {
		defer wg.Done()
		done := make(chan bool)
		var startTime time.Time
		var started, progress uint32
		go func() {
			s := new(string)
			SetFinalizer(s, func(ss *string) {
				startTime = time.Now()
				atomic.StoreUint32(&started, 1)
				growStack(&progress)
				done <- true
			})
			s = nil
			done <- true
		}()
		<-done
		GC()

		timeout := 20 * time.Second
		if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" {
			scale, err := strconv.Atoi(s)
			if err == nil {
				timeout *= time.Duration(scale)
			}
		}

		select {
		case <-done:
		case <-time.After(timeout):
			if atomic.LoadUint32(&started) == 0 {
				t.Log("finalizer did not start")
			} else {
				t.Logf("finalizer started %s ago and finished %d iterations", time.Since(startTime), atomic.LoadUint32(&progress))
			}
			t.Log("first growStack took", growDuration)
			t.Error("finalizer did not run")
			return
		}
	}()
	wg.Wait()
}

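// Illustrative sketch (not part of the upstream file) of the SetFinalizer
// idiom the finalizer case above uses to run a callback on the finalizer
// goroutine. The helper name runOnFinalizer is hypothetical.
func runOnFinalizer(cb func()) {
	done := make(chan bool)
	go func() {
		// Allocate in a goroutine that exits, so no live stack slot
		// keeps the object reachable when GC runs.
		s := new(string)
		SetFinalizer(s, func(*string) {
			cb()
			done <- true
		})
		s = nil
		done <- true
	}()
	<-done
	GC() // the object is now unreachable, so GC queues its finalizer
	<-done
}
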
// ... and in init
//func init() {
//	growStack()
//}

func growStack(progress *uint32) {
	n := 1 << 10
	if testing.Short() {
		n = 1 << 8
	}
	for i := 0; i < n; i++ {
		x := 0
		growStackIter(&x, i)
		if x != i+1 {
			panic("stack is corrupted")
		}
		if progress != nil {
			atomic.StoreUint32(progress, uint32(i))
		}
	}
	GC()
}

// This function is not an anonymous func, so that the compiler can do escape
// analysis and place x on the stack (and stack growth can subsequently update the pointer).
func growStackIter(p *int, n int) {
	if n == 0 {
		*p = n + 1
		GC()
		return
	}
	*p = n + 1
	x := 0
	growStackIter(&x, n-1)
	if x != n {
		panic("stack is corrupted")
	}
}

func TestStackGrowthCallback(t *testing.T) {
	t.Parallel()
	var wg sync.WaitGroup

	// test stack growth at chan op
	wg.Add(1)
	go func() {
		defer wg.Done()
		c := make(chan int, 1)
		growStackWithCallback(func() {
			c <- 1
			<-c
		})
	}()

	// test stack growth at map op
	wg.Add(1)
	go func() {
		defer wg.Done()
		m := make(map[int]int)
		growStackWithCallback(func() {
			_, _ = m[1]
			m[1] = 1
		})
	}()

	// test stack growth at goroutine creation
	wg.Add(1)
	go func() {
		defer wg.Done()
		growStackWithCallback(func() {
			done := make(chan bool)
			go func() {
				done <- true
			}()
			<-done
		})
	}()
	wg.Wait()
}

func growStackWithCallback(cb func()) {
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			cb()
			return
		}
		f(n - 1)
	}
	for i := 0; i < 1<<10; i++ {
		f(i)
	}
}

// TestDeferPtrs tests the adjustment of Defer's argument pointers (p aka &y)
// during a stack copy.
func set(p *int, x int) {
	*p = x
}
func TestDeferPtrs(t *testing.T) {
	var y int

	defer func() {
		if y != 42 {
			t.Errorf("defer's stack references were not adjusted appropriately")
		}
	}()
	defer set(&y, 42)
	growStack(nil)
}

type bigBuf [4 * 1024]byte

// TestDeferPtrsGoexit is like TestDeferPtrs but exercises the possibility that the
// stack grows as part of starting the deferred function. It calls Goexit at various
// stack depths, forcing the deferred function (with >4kB of args) to be run at
// the bottom of the stack. The goal is to find a stack depth less than 4kB from
// the end of the stack. Each trial runs in a different goroutine so that an earlier
// stack growth does not invalidate a later attempt.
func TestDeferPtrsGoexit(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsGoexit(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsGoexit(c chan int, i int) {
	var y int
	defer func() {
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, Goexit)
}

func setBig(p *int, x int, b bigBuf) {
	*p = x
}

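// Illustrative sketch (not part of the upstream file): Goexit terminates
// the calling goroutine only after running its deferred calls, which is
// why testDeferPtrsGoexit above still delivers y on c. The function name
// goexitRunsDefers is hypothetical.
func goexitRunsDefers() int {
	c := make(chan int, 1)
	go func() {
		y := 0
		defer func() { c <- y }() // still runs when the goroutine Goexits
		y = 42
		Goexit()
	}()
	return <-c // 42
}
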
// TestDeferPtrsPanic is like TestDeferPtrsGoexit, but it uses panic instead
// of Goexit to run the defers. Those two are different execution paths
// in the runtime.
func TestDeferPtrsPanic(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsPanic(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsPanic(c chan int, i int) {
	var y int
	defer func() {
		if recover() == nil {
			c <- -1
			return
		}
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, func() { panic(1) })
}

//go:noinline
func testDeferLeafSigpanic1() {
	// Cause a sigpanic to be injected in this frame.
	//
	// This function has to be declared before
	// TestDeferLeafSigpanic so the runtime will crash if we think
	// this function's continuation PC is in
	// TestDeferLeafSigpanic.
	*(*int)(nil) = 0
}

// TestDeferLeafSigpanic tests defer matching around leaf functions
// that sigpanic. This is tricky because on LR machines the outer
// function and the inner function have the same SP, but it's critical
// that we match up the defer correctly to get the right liveness map.
// See issue #25499.
func TestDeferLeafSigpanic(t *testing.T) {
	// Push a defer that will walk the stack.
	defer func() {
		if err := recover(); err == nil {
			t.Fatal("expected panic from nil pointer")
		}
		GC()
	}()
	// Call a leaf function. We must set up the exact call stack:
	//
	//  deferring function -> leaf function -> sigpanic
	//
	// On LR machines, the leaf function will have the same SP as
	// the SP pushed for the defer frame.
	testDeferLeafSigpanic1()
}

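// Illustrative sketch (not part of the upstream file): the fault above is
// turned by the runtime into an ordinary panic carrying a runtime error,
// so it can be observed with recover. recoverSigpanic is a hypothetical name.
func recoverSigpanic() (err error) {
	defer func() {
		// The injected panic value implements error, e.g.
		// "runtime error: invalid memory address or nil pointer dereference".
		if e, ok := recover().(error); ok {
			err = e
		}
	}()
	testDeferLeafSigpanic1()
	return nil
}
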
// TestPanicUseStack checks that a chain of Panic structs on the stack is
// updated correctly if the stack grows during the deferred execution that
// happens as a result of the panic.
func TestPanicUseStack(t *testing.T) {
	pc := make([]uintptr, 10000)
	defer func() {
		recover()
		Callers(0, pc) // force stack walk
		useStackAndCall(100, func() {
			defer func() {
				recover()
				Callers(0, pc) // force stack walk
				useStackAndCall(200, func() {
					defer func() {
						recover()
						Callers(0, pc) // force stack walk
					}()
					panic(3)
				})
			}()
			panic(2)
		})
	}()
	panic(1)
}

func TestPanicFar(t *testing.T) {
	var xtree *xtreeNode
	pc := make([]uintptr, 10000)
	defer func() {
		// At this point we created a large stack and unwound
		// it via recovery. Force a stack walk, which will
		// check the stack's consistency.
		Callers(0, pc)
	}()
	defer func() {
		recover()
	}()
	useStackAndCall(100, func() {
		// Kick off the GC and make it do something nontrivial.
		// (This used to force stack barriers to stick around.)
		xtree = makeTree(18)
		// Give the GC time to start scanning stacks.
		time.Sleep(time.Millisecond)
		panic(1)
	})
	_ = xtree
}

type xtreeNode struct {
	l, r *xtreeNode
}

func makeTree(d int) *xtreeNode {
	if d == 0 {
		return new(xtreeNode)
	}
	return &xtreeNode{makeTree(d - 1), makeTree(d - 1)}
}

// use about n KB of stack and call f
func useStackAndCall(n int, f func()) {
	if n == 0 {
		f()
		return
	}
	var b [1024]byte // makes frame about 1KB
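	// Presumably b[99] (always 0) is mixed into n below so that b stays
	// live and the compiler cannot optimize the 1KB frame away.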
	useStackAndCall(n-1+int(b[99]), f)
}

func useStack(n int) {
	useStackAndCall(n, func() {})
}

func growing(c chan int, done chan struct{}) {
	for n := range c {
		useStack(n)
		done <- struct{}{}
	}
	done <- struct{}{}
}

func TestStackCache(t *testing.T) {
	// Allocate a bunch of goroutines and grow their stacks.
	// Repeat a few times to test the stack cache.
	const (
		R = 4
		G = 200
		S = 5
	)
	for i := 0; i < R; i++ {
		var reqchans [G]chan int
		done := make(chan struct{})
		for j := 0; j < G; j++ {
			reqchans[j] = make(chan int)
			go growing(reqchans[j], done)
		}
		for s := 0; s < S; s++ {
			for j := 0; j < G; j++ {
				reqchans[j] <- 1 << uint(s)
			}
			for j := 0; j < G; j++ {
				<-done
			}
		}
		for j := 0; j < G; j++ {
			close(reqchans[j])
		}
		for j := 0; j < G; j++ {
			<-done
		}
	}
}

func TestStackOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, false)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackAllOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, true)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

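// Illustrative sketch (not part of the upstream file): Stack truncates its
// output to len(buf), so callers that need a complete dump grow the buffer
// until the result fits. The helper name fullStack is hypothetical.
func fullStack(all bool) string {
	buf := make([]byte, 1024)
	for {
		n := Stack(buf, all)
		if n < len(buf) {
			return string(buf[:n]) // the whole trace fit
		}
		buf = make([]byte, 2*len(buf)) // truncated; retry with more room
	}
}
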
func TestStackPanic(t *testing.T) {
	// Test that stack copying copies panics correctly. This is difficult
	// to test because it is very unlikely that the stack will be copied
	// in the middle of gopanic. But it can happen.
	// To make this test effective, edit panic.go:gopanic and uncomment
	// the GC() call just before freedefer(d).
	defer func() {
		if x := recover(); x == nil {
			t.Errorf("recover failed")
		}
	}()
	useStack(32)
	panic("test panic")
}

func BenchmarkStackCopyPtr(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			i := 1000000
			countp(&i)
			c <- true
		}()
		<-c
	}
}

func countp(n *int) {
	if *n == 0 {
		return
	}
	*n--
	countp(n)
}

func BenchmarkStackCopy(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count(1000000)
			c <- true
		}()
		<-c
	}
}

func count(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count(n-1)
}

func BenchmarkStackCopyNoCache(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count1(1000000)
			c <- true
		}()
		<-c
	}
}

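// The count1..count23 chain below cycles through 24 distinct functions,
// presumably so the per-PC metadata lookups done while copying the stack
// stay cold (hence "NoCache"), unlike the self-recursive count above.
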
func count1(n int) int {
	if n <= 0 {
		return 0
	}
	return 1 + count2(n-1)
}

func count2(n int) int  { return 1 + count3(n-1) }
func count3(n int) int  { return 1 + count4(n-1) }
func count4(n int) int  { return 1 + count5(n-1) }
func count5(n int) int  { return 1 + count6(n-1) }
func count6(n int) int  { return 1 + count7(n-1) }
func count7(n int) int  { return 1 + count8(n-1) }
func count8(n int) int  { return 1 + count9(n-1) }
func count9(n int) int  { return 1 + count10(n-1) }
func count10(n int) int { return 1 + count11(n-1) }
func count11(n int) int { return 1 + count12(n-1) }
func count12(n int) int { return 1 + count13(n-1) }
func count13(n int) int { return 1 + count14(n-1) }
func count14(n int) int { return 1 + count15(n-1) }
func count15(n int) int { return 1 + count16(n-1) }
func count16(n int) int { return 1 + count17(n-1) }
func count17(n int) int { return 1 + count18(n-1) }
func count18(n int) int { return 1 + count19(n-1) }
func count19(n int) int { return 1 + count20(n-1) }
func count20(n int) int { return 1 + count21(n-1) }
func count21(n int) int { return 1 + count22(n-1) }
func count22(n int) int { return 1 + count23(n-1) }
func count23(n int) int { return 1 + count1(n-1) }

type structWithMethod struct{}

func (s structWithMethod) caller() string {
	_, file, line, ok := Caller(1)
	if !ok {
		panic("Caller failed")
	}
	return fmt.Sprintf("%s:%d", file, line)
}

func (s structWithMethod) callers() []uintptr {
	pc := make([]uintptr, 16)
	return pc[:Callers(0, pc)]
}

func (s structWithMethod) stack() string {
	buf := make([]byte, 4<<10)
	return string(buf[:Stack(buf, false)])
}

func (s structWithMethod) nop() {}

func TestStackWrapperCaller(t *testing.T) {
	var d structWithMethod
	// Force the compiler to construct a wrapper method.
	wrapper := (*structWithMethod).caller
	// Check that the wrapper doesn't affect the stack trace.
	if dc, ic := d.caller(), wrapper(&d); dc != ic {
		t.Fatalf("direct caller %q != indirect caller %q", dc, ic)
	}
}

func TestStackWrapperCallers(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).callers
	// Check that <autogenerated> doesn't appear in the stack trace.
	pcs := wrapper(&d)
	frames := CallersFrames(pcs)
	for {
		fr, more := frames.Next()
		if fr.File == "<autogenerated>" {
			t.Fatalf("<autogenerated> appears in stack trace: %+v", fr)
		}
		if !more {
			break
		}
	}
}

func TestStackWrapperStack(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).stack
	// Check that <autogenerated> doesn't appear in the stack trace.
	stk := wrapper(&d)
	if strings.Contains(stk, "<autogenerated>") {
		t.Fatalf("<autogenerated> appears in stack trace:\n%s", stk)
	}
}

type I interface {
	M()
}

func TestStackWrapperStackPanic(t *testing.T) {
	t.Run("sigpanic", func(t *testing.T) {
		// Nil calls to interface methods cause a sigpanic.
		testStackWrapperPanic(t, func() { I.M(nil) }, "runtime_test.I.M")
	})
	t.Run("panicwrap", func(t *testing.T) {
		// Nil calls to value method wrappers call panicwrap.
		wrapper := (*structWithMethod).nop
		testStackWrapperPanic(t, func() { wrapper(nil) }, "runtime_test.(*structWithMethod).nop")
	})
}

func testStackWrapperPanic(t *testing.T, cb func(), expect string) {
	// Test that the stack trace from a panicking wrapper includes
	// the wrapper, even though we elide these when they don't panic.
	t.Run("CallersFrames", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			pcs := make([]uintptr, 10)
			n := Callers(0, pcs)
			frames := CallersFrames(pcs[:n])
			for {
				frame, more := frames.Next()
				t.Log(frame.Function)
				if frame.Function == expect {
					return
				}
				if !more {
					break
				}
			}
			t.Fatalf("panicking wrapper %s missing from stack trace", expect)
		}()
		cb()
	})
	t.Run("Stack", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			buf := make([]byte, 4<<10)
			stk := string(buf[:Stack(buf, false)])
			if !strings.Contains(stk, "\n"+expect) {
				t.Fatalf("panicking wrapper %s missing from stack trace:\n%s", expect, stk)
			}
		}()
		cb()
	})
}

func TestCallersFromWrapper(t *testing.T) {
	// Test that invoking CallersFrames on a stack where the first
	// PC is an autogenerated wrapper keeps the wrapper in the
	// trace. Normally we elide these, assuming that the wrapper
	// calls the thing you actually wanted to see, but in this
	// case we need to keep it.
	pc := reflect.ValueOf(I.M).Pointer()
	frames := CallersFrames([]uintptr{pc})
	frame, more := frames.Next()
	if frame.Function != "runtime_test.I.M" {
		t.Fatalf("want function %s, got %s", "runtime_test.I.M", frame.Function)
	}
	if more {
		t.Fatalf("want 1 frame, got > 1")
	}
}

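// Illustrative sketch (not part of the upstream file): the PC extracted
// via reflect above can be mapped back to a function name with FuncForPC.
// The helper name nameOfFunc is hypothetical.
func nameOfFunc(f interface{}) string {
	pc := reflect.ValueOf(f).Pointer() // entry PC of the func value
	return FuncForPC(pc).Name()        // e.g. "runtime_test.I.M"
}
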
func TestTracebackSystemstack(t *testing.T) {
	if GOARCH == "ppc64" || GOARCH == "ppc64le" {
		t.Skip("systemstack tail call not implemented on ppc64x")
	}

	// Test that profiles correctly jump over systemstack,
	// including nested systemstack calls.
	pcs := make([]uintptr, 20)
	pcs = pcs[:TracebackSystemstack(pcs, 5)]
	// Check that runtime.TracebackSystemstack appears five times
	// and that we see TestTracebackSystemstack.
	countIn, countOut := 0, 0
	frames := CallersFrames(pcs)
	var tb bytes.Buffer
	for {
		frame, more := frames.Next()
		fmt.Fprintf(&tb, "\n%s+0x%x %s:%d", frame.Function, frame.PC-frame.Entry, frame.File, frame.Line)
		switch frame.Function {
		case "runtime.TracebackSystemstack":
			countIn++
		case "runtime_test.TestTracebackSystemstack":
			countOut++
		}
		if !more {
			break
		}
	}
	if countIn != 5 || countOut != 1 {
		t.Fatalf("expected 5 calls to TracebackSystemstack and 1 call to TestTracebackSystemstack, got:%s", tb.String())
	}
}

func TestTracebackAncestors(t *testing.T) {
	goroutineRegex := regexp.MustCompile(`goroutine [0-9]+ \[`)
	for _, tracebackDepth := range []int{0, 1, 5, 50} {
		output := runTestProg(t, "testprog", "TracebackAncestors", fmt.Sprintf("GODEBUG=tracebackancestors=%d", tracebackDepth))

		numGoroutines := 3
		numFrames := 2
		ancestorsExpected := numGoroutines
		if numGoroutines > tracebackDepth {
			ancestorsExpected = tracebackDepth
		}

		matches := goroutineRegex.FindAllStringSubmatch(output, -1)
		if len(matches) != 2 {
			t.Fatalf("want 2 goroutines, got:\n%s", output)
		}

		// Check functions in the traceback.
		fns := []string{"main.recurseThenCallGo", "main.main", "main.printStack", "main.TracebackAncestors"}
		for _, fn := range fns {
			if !strings.Contains(output, "\n"+fn+"(") {
				t.Fatalf("expected %q function in traceback:\n%s", fn, output)
			}
		}

		if want, count := "originating from goroutine", ancestorsExpected; strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}

		if want, count := "main.recurseThenCallGo(...)", ancestorsExpected*(numFrames+1); strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}

		if want, count := "main.recurseThenCallGo(0x", 1; strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}
	}
}

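// Illustrative sketch (not part of the upstream file): runTestProg, defined
// in another file of this package, boils down to running a child process
// with the extra GODEBUG setting, roughly:
//
//	cmd := exec.Command(exe, "TracebackAncestors")
//	cmd.Env = append(os.Environ(), "GODEBUG=tracebackancestors=5")
//	out, _ := cmd.CombinedOutput()
//
// With tracebackancestors=N set, tracebacks of the child include up to N
// "originating from goroutine" sections for the creating goroutines.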