
Source file src/runtime/stack.go


     1  // Copyright 2013 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/cpu"
     9  	"runtime/internal/atomic"
    10  	"runtime/internal/sys"
    11  	"unsafe"
    12  )
    13  
    14  /*
    15  Stack layout parameters.
    16  Included both by the runtime and by the linkers.
    17  
    18  The per-goroutine g->stackguard is set to point StackGuard bytes
    19  above the bottom of the stack.  Each function compares its stack
    20  pointer against g->stackguard to check for overflow.  To cut one
    21  instruction from the check sequence for functions with tiny frames,
    22  the stack is allowed to protrude StackSmall bytes below the stack
    23  guard.  Functions with large frames don't bother with the check and
    24  always call morestack.  The sequences are (for amd64, others are
    25  similar):
    26  
    27  	guard = g->stackguard
    28  	frame = function's stack frame size
    29  	argsize = size of function arguments (call + return)
    30  
    31  	stack frame size <= StackSmall:
    32  		CMPQ guard, SP
    33  		JHI 3(PC)
    34  		MOVQ m->morearg, $(argsize << 32)
    35  		CALL morestack(SB)
    36  
    37  	stack frame size > StackSmall but < StackBig:
    38  		LEAQ (frame-StackSmall)(SP), R0
    39  		CMPQ guard, R0
    40  		JHI 3(PC)
    41  		MOVQ m->morearg, $(argsize << 32)
    42  		CALL morestack(SB)
    43  
    44  	stack frame size >= StackBig:
    45  		MOVQ m->morearg, $((argsize << 32) | frame)
    46  		CALL morestack(SB)
    47  
    48  The bottom StackGuard - StackSmall bytes are important: there has
    49  to be enough room to execute functions that refuse to check for
    50  stack overflow, either because they need to be adjacent to the
    51  actual caller's frame (deferproc) or because they handle the imminent
    52  stack overflow (morestack).
    53  
    54  For example, deferproc might call malloc, which does one of the
    55  above checks (without allocating a full frame), which might trigger
    56  a call to morestack.  This sequence needs to fit in the bottom
    57  section of the stack.  On amd64, morestack's frame is 40 bytes, and
    58  deferproc's frame is 56 bytes.  That fits well within the
    59  StackGuard - StackSmall bytes at the bottom.
    60  The linkers explore all possible call traces involving non-splitting
    61  functions to make sure that this limit cannot be violated.
    62  */
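
As an aside for readers less familiar with the prologue sequences described above, here is a minimal standalone Go sketch of the same decision logic. It is illustrative only: needMoreStack and its parameters are made-up names, not runtime state, and the real checks are emitted by the compiler in assembly.

	package main

	import "fmt"

	// needMoreStack is a plain-Go rendering of the prologue checks shown
	// in the comment above; all parameters are ordinary values here, not
	// runtime state, and the names are illustrative only.
	func needMoreStack(sp, guard, frame, stackSmall, stackBig uintptr) bool {
		switch {
		case frame <= stackSmall:
			// Tiny frames: one compare, allowed to dip up to stackSmall
			// bytes below the guard before asking for more stack.
			return sp <= guard
		case frame < stackBig:
			// Medium frames: check that the frame (minus the allowance)
			// still fits above the guard. This subtraction is why huge
			// frames take a different path: sp-frame could wrap past zero.
			return sp-(frame-stackSmall) <= guard
		default:
			// Huge frames: as described above, skip the check and always
			// call morestack.
			return true
		}
	}

	func main() {
		const guard, small, big = 0x1000, 128, 4096
		fmt.Println(needMoreStack(0x1100, guard, 64, small, big))  // false: room to spare
		fmt.Println(needMoreStack(0x1100, guard, 512, small, big)) // true: frame would cross the guard
	}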
    63  
    64  const (
    65  	// StackSystem is a number of additional bytes to add
    66  	// to each stack below the usual guard area for OS-specific
    67  	// purposes like signal handling. Used on Windows, Plan 9,
    68  	// and iOS because they do not use a separate stack.
    69  	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024 + sys.GoosDarwin*sys.GoarchArm64*1024
    70  
    71  	// The minimum size of stack used by Go code
    72  	_StackMin = 2048
    73  
    74  	// The minimum stack size to allocate.
    75  	// The hackery here rounds FixedStack0 up to a power of 2.
    76  	_FixedStack0 = _StackMin + _StackSystem
    77  	_FixedStack1 = _FixedStack0 - 1
    78  	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
    79  	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
    80  	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
    81  	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
    82  	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
    83  	_FixedStack  = _FixedStack6 + 1
    84  
    85  	// Functions that need frames bigger than this use an extra
    86  	// instruction to do the stack split check, to avoid overflow
    87  	// in case SP - framesize wraps below zero.
    88  	// This value can be no bigger than the size of the unmapped
    89  	// space at zero.
    90  	_StackBig = 4096
    91  
    92  	// The stack guard is a pointer this many bytes above the
    93  	// bottom of the stack.
    94  	_StackGuard = 928*sys.StackGuardMultiplier + _StackSystem
    95  
    96  	// After a stack split check the SP is allowed to be this
    97  	// many bytes below the stack guard. This saves an instruction
    98  	// in the checking sequence for tiny frames.
    99  	_StackSmall = 128
   100  
   101  	// The maximum number of bytes that a chain of NOSPLIT
   102  	// functions can use.
   103  	_StackLimit = _StackGuard - _StackSystem - _StackSmall
   104  )
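
The _FixedStack0.._FixedStack6 "hackery" above is the classic bit-smearing trick for rounding up to a power of two. A minimal standalone sketch, not runtime code; the second sample input merely assumes a hypothetical non-zero _StackSystem added to a 2048-byte _StackMin.

	package main

	import "fmt"

	// roundUpPow2 mirrors the _FixedStack0.._FixedStack6 steps above:
	// subtract one, smear the top set bit into every lower bit, then add
	// one. Illustrative only.
	func roundUpPow2(n uint64) uint64 {
		n--
		n |= n >> 1
		n |= n >> 2
		n |= n >> 4
		n |= n >> 8
		n |= n >> 16
		n |= n >> 32
		return n + 1
	}

	func main() {
		fmt.Println(roundUpPow2(2048)) // 2048: already a power of two
		fmt.Println(roundUpPow2(6144)) // 8192: e.g. 2048 plus a hypothetical _StackSystem of 4096
	}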
   105  
   106  const (
   107  	// stackDebug == 0: no logging
   108  	//            == 1: logging of per-stack operations
   109  	//            == 2: logging of per-frame operations
   110  	//            == 3: logging of per-word updates
   111  	//            == 4: logging of per-word reads
   112  	stackDebug       = 0
   113  	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
   114  	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
   115  	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
   116  	stackNoCache     = 0 // disable per-P small stack caches
   117  
   118  	// check the BP links during traceback.
   119  	debugCheckBP = false
   120  )
   121  
   122  const (
   123  	uintptrMask = 1<<(8*sys.PtrSize) - 1
   124  
   125  	// Goroutine preemption request.
   126  	// Stored into g->stackguard0 to cause split stack check failure.
   127  	// Must be greater than any real sp.
   128  	// 0xfffffade in hex.
   129  	stackPreempt = uintptrMask & -1314
   130  
   131  	// Thread is forking.
   132  	// Stored into g->stackguard0 to cause split stack check failure.
   133  	// Must be greater than any real sp.
   134  	stackFork = uintptrMask & -1234
   135  )
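
For readers wondering how "uintptrMask & -1314" becomes the 0xfffffade mentioned in the comment: the mask truncates the two's-complement form of -1314 to pointer width. A tiny standalone demo, assuming 8-byte pointers; on 32-bit targets the value is 0xfffffade, as the comment says.

	package main

	import "fmt"

	func main() {
		// Two's-complement view of the stackPreempt constant above.
		const uintptrMask = 1<<(8*8) - 1 // assuming 8-byte pointers here
		const stackPreempt = uintptrMask & -1314
		fmt.Printf("%#x\n", uint64(stackPreempt)) // 0xfffffffffffffade
	}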
   136  
   137  // Global pool of spans that have free stacks.
   138  // Stacks are assigned an order according to size.
   139  //     order = log_2(size/FixedStack)
   140  // There is a free list for each order.
   141  var stackpool [_NumStackOrders]struct {
   142  	item stackpoolItem
   143  	_    [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
   144  }
   145  
   146  //go:notinheap
   147  type stackpoolItem struct {
   148  	mu   mutex
   149  	span mSpanList
   150  }
   151  
   152  // Global pool of large stack spans.
   153  var stackLarge struct {
   154  	lock mutex
   155  	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
   156  }
   157  
   158  func stackinit() {
   159  	if _StackCacheSize&_PageMask != 0 {
   160  		throw("cache size must be a multiple of page size")
   161  	}
   162  	for i := range stackpool {
   163  		stackpool[i].item.span.init()
   164  		lockInit(&stackpool[i].item.mu, lockRankStackpool)
   165  	}
   166  	for i := range stackLarge.free {
   167  		stackLarge.free[i].init()
   168  		lockInit(&stackLarge.lock, lockRankStackLarge)
   169  	}
   170  }
   171  
   172  // stacklog2 returns ⌊log_2(n)⌋.
   173  func stacklog2(n uintptr) int {
   174  	log2 := 0
   175  	for n > 1 {
   176  		n >>= 1
   177  		log2++
   178  	}
   179  	return log2
   180  }
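
stacklog2 is the usual shift-count loop; for n >= 1 it agrees with math/bits.Len(n)-1. A quick standalone check (the local stacklog2 below is a copy for comparison, not the runtime's):

	package main

	import (
		"fmt"
		"math/bits"
	)

	// stacklog2 copies the shape of the runtime function above.
	func stacklog2(n uintptr) int {
		log2 := 0
		for n > 1 {
			n >>= 1
			log2++
		}
		return log2
	}

	func main() {
		for _, n := range []uintptr{1, 2, 3, 8, 24, 1 << 20} {
			fmt.Println(n, stacklog2(n), bits.Len(uint(n))-1) // last two columns agree
		}
	}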
   181  
   182  // Allocates a stack from the free pool. Must be called with
   183  // stackpool[order].item.mu held.
   184  func stackpoolalloc(order uint8) gclinkptr {
   185  	list := &stackpool[order].item.span
   186  	s := list.first
   187  	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
   188  	if s == nil {
   189  		// no free stacks. Allocate another span worth.
   190  		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
   191  		if s == nil {
   192  			throw("out of memory")
   193  		}
   194  		if s.allocCount != 0 {
   195  			throw("bad allocCount")
   196  		}
   197  		if s.manualFreeList.ptr() != nil {
   198  			throw("bad manualFreeList")
   199  		}
   200  		osStackAlloc(s)
   201  		s.elemsize = _FixedStack << order
   202  		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
   203  			x := gclinkptr(s.base() + i)
   204  			x.ptr().next = s.manualFreeList
   205  			s.manualFreeList = x
   206  		}
   207  		list.insert(s)
   208  	}
   209  	x := s.manualFreeList
   210  	if x.ptr() == nil {
   211  		throw("span has no free stacks")
   212  	}
   213  	s.manualFreeList = x.ptr().next
   214  	s.allocCount++
   215  	if s.manualFreeList.ptr() == nil {
   216  		// all stacks in s are allocated.
   217  		list.remove(s)
   218  	}
   219  	return x
   220  }
   221  
   222  // Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
   223  func stackpoolfree(x gclinkptr, order uint8) {
   224  	s := spanOfUnchecked(uintptr(x))
   225  	if s.state.get() != mSpanManual {
   226  		throw("freeing stack not in a stack span")
   227  	}
   228  	if s.manualFreeList.ptr() == nil {
   229  		// s will now have a free stack
   230  		stackpool[order].item.span.insert(s)
   231  	}
   232  	x.ptr().next = s.manualFreeList
   233  	s.manualFreeList = x
   234  	s.allocCount--
   235  	if gcphase == _GCoff && s.allocCount == 0 {
   236  		// Span is completely free. Return it to the heap
   237  		// immediately if we're sweeping.
   238  		//
   239  		// If GC is active, we delay the free until the end of
   240  		// GC to avoid the following type of situation:
   241  		//
   242  		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
   243  		// 2) The stack that pointer points to is copied
   244  		// 3) The old stack is freed
   245  		// 4) The containing span is marked free
   246  		// 5) GC attempts to mark the SudoG.elem pointer. The
   247  		//    marking fails because the pointer looks like a
   248  		//    pointer into a free span.
   249  		//
   250  		// By not freeing, we prevent step #4 until GC is done.
   251  		stackpool[order].item.span.remove(s)
   252  		s.manualFreeList = 0
   253  		osStackFree(s)
   254  		mheap_.freeManual(s, &memstats.stacks_inuse)
   255  	}
   256  }
   257  
   258  // stackcacherefill/stackcacherelease move stacks between the per-P stack caches
   259  // and the global pool. The global pool is required to prevent unlimited growth of the per-P caches.
   260  //
   261  //go:systemstack
   262  func stackcacherefill(c *mcache, order uint8) {
   263  	if stackDebug >= 1 {
   264  		print("stackcacherefill order=", order, "\n")
   265  	}
   266  
   267  	// Grab some stacks from the global cache.
   268  	// Grab half of the allowed capacity (to prevent thrashing).
   269  	var list gclinkptr
   270  	var size uintptr
   271  	lock(&stackpool[order].item.mu)
   272  	for size < _StackCacheSize/2 {
   273  		x := stackpoolalloc(order)
   274  		x.ptr().next = list
   275  		list = x
   276  		size += _FixedStack << order
   277  	}
   278  	unlock(&stackpool[order].item.mu)
   279  	c.stackcache[order].list = list
   280  	c.stackcache[order].size = size
   281  }
   282  
   283  //go:systemstack
   284  func stackcacherelease(c *mcache, order uint8) {
   285  	if stackDebug >= 1 {
   286  		print("stackcacherelease order=", order, "\n")
   287  	}
   288  	x := c.stackcache[order].list
   289  	size := c.stackcache[order].size
   290  	lock(&stackpool[order].item.mu)
   291  	for size > _StackCacheSize/2 {
   292  		y := x.ptr().next
   293  		stackpoolfree(x, order)
   294  		x = y
   295  		size -= _FixedStack << order
   296  	}
   297  	unlock(&stackpool[order].item.mu)
   298  	c.stackcache[order].list = x
   299  	c.stackcache[order].size = size
   300  }
   301  
   302  //go:systemstack
   303  func stackcache_clear(c *mcache) {
   304  	if stackDebug >= 1 {
   305  		print("stackcache clear\n")
   306  	}
   307  	for order := uint8(0); order < _NumStackOrders; order++ {
   308  		lock(&stackpool[order].item.mu)
   309  		x := c.stackcache[order].list
   310  		for x.ptr() != nil {
   311  			y := x.ptr().next
   312  			stackpoolfree(x, order)
   313  			x = y
   314  		}
   315  		c.stackcache[order].list = 0
   316  		c.stackcache[order].size = 0
   317  		unlock(&stackpool[order].item.mu)
   318  	}
   319  }
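
stackcacherefill fills the local cache to half of _StackCacheSize, and stackcacherelease trims it back down to half, so a goroutine that allocates and frees stacks right at a cache boundary does not visit the global pool on every operation. A toy model of that hysteresis follows; the cache type and constants are inventions for illustration, not runtime code.

	package main

	import "fmt"

	const (
		stackSize = 2048  // bytes per cached stack in this toy
		cacheSize = 32768 // per-P cache capacity in this toy
	)

	type cache struct {
		size        int // bytes currently cached locally
		globalTrips int // how often we had to visit the "global pool"
	}

	func (c *cache) alloc() {
		if c.size == 0 {
			c.size = cacheSize / 2 // refill to half the capacity
			c.globalTrips++
		}
		c.size -= stackSize
	}

	func (c *cache) free() {
		if c.size >= cacheSize {
			c.size = cacheSize / 2 // release down to half the capacity
			c.globalTrips++
		}
		c.size += stackSize
	}

	func main() {
		var c cache
		for i := 0; i < 1000; i++ { // alloc/free ping-pong at the empty boundary
			c.alloc()
			c.free()
		}
		fmt.Println("global pool visits:", c.globalTrips) // 1, not ~1000
	}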
   320  
   321  // stackalloc allocates an n byte stack.
   322  //
   323  // stackalloc must run on the system stack because it uses per-P
   324  // resources and must not split the stack.
   325  //
   326  //go:systemstack
   327  func stackalloc(n uint32) stack {
   328  	// Stackalloc must be called on scheduler stack, so that we
   329  	// never try to grow the stack during the code that stackalloc runs.
   330  	// Doing so would cause a deadlock (issue 1547).
   331  	thisg := getg()
   332  	if thisg != thisg.m.g0 {
   333  		throw("stackalloc not on scheduler stack")
   334  	}
   335  	if n&(n-1) != 0 {
   336  		throw("stack size not a power of 2")
   337  	}
   338  	if stackDebug >= 1 {
   339  		print("stackalloc ", n, "\n")
   340  	}
   341  
   342  	if debug.efence != 0 || stackFromSystem != 0 {
   343  		n = uint32(alignUp(uintptr(n), physPageSize))
   344  		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
   345  		if v == nil {
   346  			throw("out of memory (stackalloc)")
   347  		}
   348  		return stack{uintptr(v), uintptr(v) + uintptr(n)}
   349  	}
   350  
   351  	// Small stacks are allocated with a fixed-size free-list allocator.
   352  	// If we need a stack of a bigger size, we fall back on allocating
   353  	// a dedicated span.
   354  	var v unsafe.Pointer
   355  	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
   356  		order := uint8(0)
   357  		n2 := n
   358  		for n2 > _FixedStack {
   359  			order++
   360  			n2 >>= 1
   361  		}
   362  		var x gclinkptr
   363  		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
   364  			// thisg.m.p == 0 can happen in the guts of exitsyscall
   365  			// or procresize. Just get a stack from the global pool.
   366  			// Also don't touch stackcache during gc
   367  			// as it's flushed concurrently.
   368  			lock(&stackpool[order].item.mu)
   369  			x = stackpoolalloc(order)
   370  			unlock(&stackpool[order].item.mu)
   371  		} else {
   372  			c := thisg.m.p.ptr().mcache
   373  			x = c.stackcache[order].list
   374  			if x.ptr() == nil {
   375  				stackcacherefill(c, order)
   376  				x = c.stackcache[order].list
   377  			}
   378  			c.stackcache[order].list = x.ptr().next
   379  			c.stackcache[order].size -= uintptr(n)
   380  		}
   381  		v = unsafe.Pointer(x)
   382  	} else {
   383  		var s *mspan
   384  		npage := uintptr(n) >> _PageShift
   385  		log2npage := stacklog2(npage)
   386  
   387  		// Try to get a stack from the large stack cache.
   388  		lock(&stackLarge.lock)
   389  		if !stackLarge.free[log2npage].isEmpty() {
   390  			s = stackLarge.free[log2npage].first
   391  			stackLarge.free[log2npage].remove(s)
   392  		}
   393  		unlock(&stackLarge.lock)
   394  
   395  		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
   396  
   397  		if s == nil {
   398  			// Allocate a new stack from the heap.
   399  			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
   400  			if s == nil {
   401  				throw("out of memory")
   402  			}
   403  			osStackAlloc(s)
   404  			s.elemsize = uintptr(n)
   405  		}
   406  		v = unsafe.Pointer(s.base())
   407  	}
   408  
   409  	if raceenabled {
   410  		racemalloc(v, uintptr(n))
   411  	}
   412  	if msanenabled {
   413  		msanmalloc(v, uintptr(n))
   414  	}
   415  	if stackDebug >= 1 {
   416  		print("  allocated ", v, "\n")
   417  	}
   418  	return stack{uintptr(v), uintptr(v) + uintptr(n)}
   419  }
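
For the small-stack path above, the order is just the number of doublings of _FixedStack needed to reach n. A standalone sketch, assuming _FixedStack is 2048 (its value when _StackSystem is 0):

	package main

	import "fmt"

	const fixedStack = 2048 // assumption; matches a build with _StackSystem == 0

	// order mirrors the loop in stackalloc that picks a size class.
	func order(n uint32) uint8 {
		o := uint8(0)
		for n2 := n; n2 > fixedStack; n2 >>= 1 {
			o++
		}
		return o
	}

	func main() {
		for _, n := range []uint32{2048, 4096, 8192, 16384} {
			fmt.Println(n, "-> order", order(n)) // 0, 1, 2, 3
		}
	}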
   420  
   421  // stackfree frees an n byte stack allocation at stk.
   422  //
   423  // stackfree must run on the system stack because it uses per-P
   424  // resources and must not split the stack.
   425  //
   426  //go:systemstack
   427  func stackfree(stk stack) {
   428  	gp := getg()
   429  	v := unsafe.Pointer(stk.lo)
   430  	n := stk.hi - stk.lo
   431  	if n&(n-1) != 0 {
   432  		throw("stack not a power of 2")
   433  	}
   434  	if stk.lo+n < stk.hi {
   435  		throw("bad stack size")
   436  	}
   437  	if stackDebug >= 1 {
   438  		println("stackfree", v, n)
   439  		memclrNoHeapPointers(v, n) // for testing, clobber stack data
   440  	}
   441  	if debug.efence != 0 || stackFromSystem != 0 {
   442  		if debug.efence != 0 || stackFaultOnFree != 0 {
   443  			sysFault(v, n)
   444  		} else {
   445  			sysFree(v, n, &memstats.stacks_sys)
   446  		}
   447  		return
   448  	}
   449  	if msanenabled {
   450  		msanfree(v, n)
   451  	}
   452  	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
   453  		order := uint8(0)
   454  		n2 := n
   455  		for n2 > _FixedStack {
   456  			order++
   457  			n2 >>= 1
   458  		}
   459  		x := gclinkptr(v)
   460  		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
   461  			lock(&stackpool[order].item.mu)
   462  			stackpoolfree(x, order)
   463  			unlock(&stackpool[order].item.mu)
   464  		} else {
   465  			c := gp.m.p.ptr().mcache
   466  			if c.stackcache[order].size >= _StackCacheSize {
   467  				stackcacherelease(c, order)
   468  			}
   469  			x.ptr().next = c.stackcache[order].list
   470  			c.stackcache[order].list = x
   471  			c.stackcache[order].size += n
   472  		}
   473  	} else {
   474  		s := spanOfUnchecked(uintptr(v))
   475  		if s.state.get() != mSpanManual {
   476  			println(hex(s.base()), v)
   477  			throw("bad span state")
   478  		}
   479  		if gcphase == _GCoff {
   480  			// Free the stack immediately if we're
   481  			// sweeping.
   482  			osStackFree(s)
   483  			mheap_.freeManual(s, &memstats.stacks_inuse)
   484  		} else {
   485  			// If the GC is running, we can't return a
   486  			// stack span to the heap because it could be
   487  			// reused as a heap span, and this state
   488  			// change would race with GC. Add it to the
   489  			// large stack cache instead.
   490  			log2npage := stacklog2(s.npages)
   491  			lock(&stackLarge.lock)
   492  			stackLarge.free[log2npage].insert(s)
   493  			unlock(&stackLarge.lock)
   494  		}
   495  	}
   496  }
   497  
   498  var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
   499  
   500  var ptrnames = []string{
   501  	0: "scalar",
   502  	1: "ptr",
   503  }
   504  
   505  // Stack frame layout
   506  //
   507  // (x86)
   508  // +------------------+
   509  // | args from caller |
   510  // +------------------+ <- frame->argp
   511  // |  return address  |
   512  // +------------------+
   513  // |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
   514  // +------------------+ <- frame->varp
   515  // |     locals       |
   516  // +------------------+
   517  // |  args to callee  |
   518  // +------------------+ <- frame->sp
   519  //
   520  // (arm)
   521  // +------------------+
   522  // | args from caller |
   523  // +------------------+ <- frame->argp
   524  // | caller's retaddr |
   525  // +------------------+ <- frame->varp
   526  // |     locals       |
   527  // +------------------+
   528  // |  args to callee  |
   529  // +------------------+
   530  // |  return address  |
   531  // +------------------+ <- frame->sp
   532  
   533  type adjustinfo struct {
   534  	old   stack
   535  	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
   536  	cache pcvalueCache
   537  
   538  	// sghi is the highest sudog.elem on the stack.
   539  	sghi uintptr
   540  }
   541  
   542  // Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
   543  // If so, it rewrites *vpp to point into the new stack.
   544  func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
   545  	pp := (*uintptr)(vpp)
   546  	p := *pp
   547  	if stackDebug >= 4 {
   548  		print("        ", pp, ":", hex(p), "\n")
   549  	}
   550  	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
   551  		*pp = p + adjinfo.delta
   552  		if stackDebug >= 3 {
   553  			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
   554  		}
   555  	}
   556  }
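
The adjustment rule is purely an interval test plus a constant offset: only values that land inside the old stack's [lo, hi) range move, and they all move by the same delta. A standalone illustration with made-up addresses (not runtime code):

	package main

	import "fmt"

	// adjust applies the same rule as adjustpointer, on plain values.
	func adjust(p, oldLo, oldHi, delta uintptr) uintptr {
		if oldLo <= p && p < oldHi {
			return p + delta
		}
		return p // not a pointer into the old stack; leave it alone
	}

	func main() {
		const (
			oldLo, oldHi = 0x1000, 0x2000
			newLo, newHi = 0x7000, 0x8000
			delta        = newHi - oldHi
		)
		fmt.Printf("%#x\n", adjust(0x1f80, oldLo, oldHi, delta)) // 0x7f80: moved with the stack
		fmt.Printf("%#x\n", adjust(0x4000, oldLo, oldHi, delta)) // 0x4000: outside the old range, untouched
	}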
   557  
   558  // Information from the compiler about the layout of stack frames.
   559  // Note: this type must agree with reflect.bitVector.
   560  type bitvector struct {
   561  	n        int32 // # of bits
   562  	bytedata *uint8
   563  }
   564  
   565  // ptrbit returns the i'th bit in bv.
   566  // ptrbit is less efficient than iterating directly over bitvector bits,
   567  // and should only be used in non-performance-critical code.
   568  // See adjustpointers for an example of a high-efficiency walk of a bitvector.
   569  func (bv *bitvector) ptrbit(i uintptr) uint8 {
   570  	b := *(addb(bv.bytedata, i/8))
   571  	return (b >> (i % 8)) & 1
   572  }
   573  
   574  // bv describes the memory starting at address scanp.
   575  // Adjust any pointers contained therein.
   576  func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
   577  	minp := adjinfo.old.lo
   578  	maxp := adjinfo.old.hi
   579  	delta := adjinfo.delta
   580  	num := uintptr(bv.n)
   581  	// If this frame might contain channel receive slots, use CAS
   582  	// to adjust pointers. If the slot hasn't been received into
   583  	// yet, it may contain stack pointers and a concurrent send
   584  	// could race with adjusting those pointers. (The sent value
   585  	// itself can never contain stack pointers.)
   586  	useCAS := uintptr(scanp) < adjinfo.sghi
   587  	for i := uintptr(0); i < num; i += 8 {
   588  		if stackDebug >= 4 {
   589  			for j := uintptr(0); j < 8; j++ {
   590  				print("        ", add(scanp, (i+j)*sys.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*sys.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
   591  			}
   592  		}
   593  		b := *(addb(bv.bytedata, i/8))
   594  		for b != 0 {
   595  			j := uintptr(sys.Ctz8(b))
   596  			b &= b - 1
   597  			pp := (*uintptr)(add(scanp, (i+j)*sys.PtrSize))
   598  		retry:
   599  			p := *pp
   600  			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
   601  				// Looks like a junk value in a pointer slot.
   602  				// Live analysis wrong?
   603  				getg().m.traceback = 2
   604  				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
   605  				throw("invalid pointer found on stack")
   606  			}
   607  			if minp <= p && p < maxp {
   608  				if stackDebug >= 3 {
   609  					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
   610  				}
   611  				if useCAS {
   612  					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
   613  					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
   614  						goto retry
   615  					}
   616  				} else {
   617  					*pp = p + delta
   618  				}
   619  			}
   620  		}
   621  	}
   622  }
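
The inner loop above visits only the set bits of each bitmap byte: sys.Ctz8 finds the lowest set bit and b &= b-1 clears it, so the body runs once per pointer slot. The same idiom written against the standard library, with math/bits.TrailingZeros8 standing in for sys.Ctz8:

	package main

	import (
		"fmt"
		"math/bits"
	)

	func main() {
		b := uint8(0b10110010) // pointer bitmap byte: bits 1, 4, 5, 7 set
		for b != 0 {
			j := bits.TrailingZeros8(b)
			b &= b - 1 // clear the lowest set bit
			fmt.Println("pointer slot at bit", j) // prints 1, 4, 5, 7
		}
	}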
   623  
   624  // Note: the argument/return area is adjusted by the callee.
   625  func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
   626  	adjinfo := (*adjustinfo)(arg)
   627  	if frame.continpc == 0 {
   628  		// Frame is dead.
   629  		return true
   630  	}
   631  	f := frame.fn
   632  	if stackDebug >= 2 {
   633  		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
   634  	}
   635  	if f.funcID == funcID_systemstack_switch {
   636  		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
   637  		// We will allow it to be copied even though we don't
   638  		// have full GC info for it (because it is written in asm).
   639  		return true
   640  	}
   641  
   642  	locals, args, objs := getStackMap(frame, &adjinfo.cache, true)
   643  
   644  	// Adjust local variables if stack frame has been allocated.
   645  	if locals.n > 0 {
   646  		size := uintptr(locals.n) * sys.PtrSize
   647  		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
   648  	}
   649  
   650  	// Adjust saved base pointer if there is one.
   651  	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
   652  		if !framepointer_enabled {
   653  			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
   654  			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
   655  			throw("bad frame layout")
   656  		}
   657  		if stackDebug >= 3 {
   658  			print("      saved bp\n")
   659  		}
   660  		if debugCheckBP {
   661  			// Frame pointers should always point to the next higher frame on
   662  			// the Go stack (or be nil, for the top frame on the stack).
   663  			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
   664  			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
   665  				println("runtime: found invalid frame pointer")
   666  				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
   667  				throw("bad frame pointer")
   668  			}
   669  		}
   670  		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
   671  	}
   672  
   673  	// Adjust arguments.
   674  	if args.n > 0 {
   675  		if stackDebug >= 3 {
   676  			print("      args\n")
   677  		}
   678  		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
   679  	}
   680  
   681  	// Adjust pointers in all stack objects (whether they are live or not).
   682  	// See comments in mgcmark.go:scanframeworker.
   683  	if frame.varp != 0 {
   684  		for _, obj := range objs {
   685  			off := obj.off
   686  			base := frame.varp // locals base pointer
   687  			if off >= 0 {
   688  				base = frame.argp // arguments and return values base pointer
   689  			}
   690  			p := base + uintptr(off)
   691  			if p < frame.sp {
   692  				// Object hasn't been allocated in the frame yet.
   693  				// (Happens when the stack bounds check fails and
   694  				// we call into morestack.)
   695  				continue
   696  			}
   697  			t := obj.typ
   698  			gcdata := t.gcdata
   699  			var s *mspan
   700  			if t.kind&kindGCProg != 0 {
   701  				// See comments in mgcmark.go:scanstack
   702  				s = materializeGCProg(t.ptrdata, gcdata)
   703  				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
   704  			}
   705  			for i := uintptr(0); i < t.ptrdata; i += sys.PtrSize {
   706  				if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 {
   707  					adjustpointer(adjinfo, unsafe.Pointer(p+i))
   708  				}
   709  			}
   710  			if s != nil {
   711  				dematerializeGCProg(s)
   712  			}
   713  		}
   714  	}
   715  
   716  	return true
   717  }
   718  
   719  func adjustctxt(gp *g, adjinfo *adjustinfo) {
   720  	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
   721  	if !framepointer_enabled {
   722  		return
   723  	}
   724  	if debugCheckBP {
   725  		bp := gp.sched.bp
   726  		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
   727  			println("runtime: found invalid top frame pointer")
   728  			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
   729  			throw("bad top frame pointer")
   730  		}
   731  	}
   732  	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
   733  }
   734  
   735  func adjustdefers(gp *g, adjinfo *adjustinfo) {
   736  	// Adjust pointers in the Defer structs.
   737  	// We need to do this first because we need to adjust the
   738  	// defer.link fields so we always work on the new stack.
   739  	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
   740  	for d := gp._defer; d != nil; d = d.link {
   741  		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
   742  		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
   743  		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
   744  		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
   745  		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
   746  		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
   747  	}
   748  
   749  	// Adjust defer argument blocks the same way we adjust active stack frames.
   750  	// Note: this code is after the loop above, so that if a defer record is
   751  	// stack allocated, we work on the copy in the new stack.
   752  	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
   753  }
   754  
   755  func adjustpanics(gp *g, adjinfo *adjustinfo) {
   756  	// Panics are on stack and already adjusted.
   757  	// Update pointer to head of list in G.
   758  	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
   759  }
   760  
   761  func adjustsudogs(gp *g, adjinfo *adjustinfo) {
   762  	// the data elements pointed to by a SudoG structure
   763  	// might be in the stack.
   764  	for s := gp.waiting; s != nil; s = s.waitlink {
   765  		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
   766  	}
   767  }
   768  
   769  func fillstack(stk stack, b byte) {
   770  	for p := stk.lo; p < stk.hi; p++ {
   771  		*(*byte)(unsafe.Pointer(p)) = b
   772  	}
   773  }
   774  
   775  func findsghi(gp *g, stk stack) uintptr {
   776  	var sghi uintptr
   777  	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   778  		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
   779  		if stk.lo <= p && p < stk.hi && p > sghi {
   780  			sghi = p
   781  		}
   782  	}
   783  	return sghi
   784  }
   785  
   786  // syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
   787  // stack they refer to while synchronizing with concurrent channel
   788  // operations. It returns the number of bytes of stack copied.
   789  func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
   790  	if gp.waiting == nil {
   791  		return 0
   792  	}
   793  
   794  	// Lock channels to prevent concurrent send/receive.
   795  	var lastc *hchan
   796  	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   797  		if sg.c != lastc {
   798  			// There is a ranking cycle here between gscan bit and
   799  			// hchan locks. Normally, we only allow acquiring hchan
   800  			// locks and then getting a gscan bit. In this case, we
   801  			// already have the gscan bit. We allow acquiring hchan
   802  			// locks here as a special case, since a deadlock can't
   803  			// happen because the G involved must already be
   804  			// suspended. So, we get a special hchan lock rank here
   805  			// that is lower than gscan, but doesn't allow acquiring
   806  			// any other locks other than hchan.
   807  			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
   808  		}
   809  		lastc = sg.c
   810  	}
   811  
   812  	// Adjust sudogs.
   813  	adjustsudogs(gp, adjinfo)
   814  
   815  	// Copy the part of the stack the sudogs point into
   816  	// while holding the lock to prevent races on
   817  	// send/receive slots.
   818  	var sgsize uintptr
   819  	if adjinfo.sghi != 0 {
   820  		oldBot := adjinfo.old.hi - used
   821  		newBot := oldBot + adjinfo.delta
   822  		sgsize = adjinfo.sghi - oldBot
   823  		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
   824  	}
   825  
   826  	// Unlock channels.
   827  	lastc = nil
   828  	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   829  		if sg.c != lastc {
   830  			unlock(&sg.c.lock)
   831  		}
   832  		lastc = sg.c
   833  	}
   834  
   835  	return sgsize
   836  }
   837  
   838  // Copies gp's stack to a new stack of a different size.
   839  // Caller must have changed gp status to Gcopystack.
   840  func copystack(gp *g, newsize uintptr) {
   841  	if gp.syscallsp != 0 {
   842  		throw("stack growth not allowed in system call")
   843  	}
   844  	old := gp.stack
   845  	if old.lo == 0 {
   846  		throw("nil stackbase")
   847  	}
   848  	used := old.hi - gp.sched.sp
   849  
   850  	// allocate new stack
   851  	new := stackalloc(uint32(newsize))
   852  	if stackPoisonCopy != 0 {
   853  		fillstack(new, 0xfd)
   854  	}
   855  	if stackDebug >= 1 {
   856  		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
   857  	}
   858  
   859  	// Compute adjustment.
   860  	var adjinfo adjustinfo
   861  	adjinfo.old = old
   862  	adjinfo.delta = new.hi - old.hi
   863  
   864  	// Adjust sudogs, synchronizing with channel ops if necessary.
   865  	ncopy := used
   866  	if !gp.activeStackChans {
   867  		if newsize < old.hi-old.lo && atomic.Load8(&gp.parkingOnChan) != 0 {
   868  			// It's not safe for someone to shrink this stack while we're actively
   869  			// parking on a channel, but it is safe to grow since we do that
   870  			// ourselves and explicitly don't want to synchronize with channels
   871  			// since we could self-deadlock.
   872  			throw("racy sudog adjustment due to parking on channel")
   873  		}
   874  		adjustsudogs(gp, &adjinfo)
   875  	} else {
   876  		// sudogs may be pointing in to the stack and gp has
   877  		// released channel locks, so other goroutines could
   878  		// be writing to gp's stack. Find the highest such
   879  		// pointer so we can handle everything there and below
   880  		// carefully. (This shouldn't be far from the bottom
   881  		// of the stack, so there's little cost in handling
   882  		// everything below it carefully.)
   883  		adjinfo.sghi = findsghi(gp, old)
   884  
   885  		// Synchronize with channel ops and copy the part of
   886  		// the stack they may interact with.
   887  		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
   888  	}
   889  
   890  	// Copy the stack (or the rest of it) to the new location
   891  	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)
   892  
   893  	// Adjust remaining structures that have pointers into stacks.
   894  	// We have to do most of these before we traceback the new
   895  	// stack because gentraceback uses them.
   896  	adjustctxt(gp, &adjinfo)
   897  	adjustdefers(gp, &adjinfo)
   898  	adjustpanics(gp, &adjinfo)
   899  	if adjinfo.sghi != 0 {
   900  		adjinfo.sghi += adjinfo.delta
   901  	}
   902  
   903  	// Swap out old stack for new one
   904  	gp.stack = new
   905  	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
   906  	gp.sched.sp = new.hi - used
   907  	gp.stktopsp += adjinfo.delta
   908  
   909  	// Adjust pointers in the new stack.
   910  	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)
   911  
   912  	// free old stack
   913  	if stackPoisonCopy != 0 {
   914  		fillstack(old, 0xfc)
   915  	}
   916  	stackfree(old)
   917  }
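
Because stacks grow down, only the top `used` bytes of the old stack are live, and they are copied so that they end up at the top of the new stack; everything therefore shifts by delta = new.hi - old.hi. A toy picture of that geometry using byte slices (not runtime code):

	package main

	import "fmt"

	func main() {
		old := []byte("................ABCDEFGH") // hi end on the right, 8 bytes in use
		used := 8
		newStack := make([]byte, 2*len(old)) // grow the stack, e.g. newsize = oldsize*2
		copy(newStack[len(newStack)-used:], old[len(old)-used:])
		fmt.Printf("%q\n", newStack[len(newStack)-used:]) // "ABCDEFGH" now at the top of the new stack
	}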
   918  
   919  // round x up to a power of 2.
   920  func round2(x int32) int32 {
   921  	s := uint(0)
   922  	for 1<<s < x {
   923  		s++
   924  	}
   925  	return 1 << s
   926  }
   927  
   928  // Called from runtime·morestack when more stack is needed.
   929  // Allocate larger stack and relocate to new stack.
   930  // Stack growth is multiplicative, for constant amortized cost.
   931  //
   932  // g->atomicstatus will be Grunning or Gscanrunning upon entry.
   933  // If the scheduler is trying to stop this g, then it will set preemptStop.
   934  //
   935  // This must be nowritebarrierrec because it can be called as part of
   936  // stack growth from other nowritebarrierrec functions, but the
   937  // compiler doesn't check this.
   938  //
   939  //go:nowritebarrierrec
   940  func newstack() {
   941  	thisg := getg()
   942  	// TODO: double check all gp. shouldn't be getg().
   943  	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
   944  		throw("stack growth after fork")
   945  	}
   946  	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
   947  		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
   948  		morebuf := thisg.m.morebuf
   949  		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
   950  		throw("runtime: wrong goroutine in newstack")
   951  	}
   952  
   953  	gp := thisg.m.curg
   954  
   955  	if thisg.m.curg.throwsplit {
   956  		// Update syscallsp, syscallpc in case traceback uses them.
   957  		morebuf := thisg.m.morebuf
   958  		gp.syscallsp = morebuf.sp
   959  		gp.syscallpc = morebuf.pc
   960  		pcname, pcoff := "(unknown)", uintptr(0)
   961  		f := findfunc(gp.sched.pc)
   962  		if f.valid() {
   963  			pcname = funcname(f)
   964  			pcoff = gp.sched.pc - f.entry
   965  		}
   966  		print("runtime: newstack at ", pcname, "+", hex(pcoff),
   967  			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
   968  			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
   969  			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
   970  
   971  		thisg.m.traceback = 2 // Include runtime frames
   972  		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
   973  		throw("runtime: stack split at bad time")
   974  	}
   975  
   976  	morebuf := thisg.m.morebuf
   977  	thisg.m.morebuf.pc = 0
   978  	thisg.m.morebuf.lr = 0
   979  	thisg.m.morebuf.sp = 0
   980  	thisg.m.morebuf.g = 0
   981  
   982  	// NOTE: stackguard0 may change underfoot, if another thread
   983  	// is about to try to preempt gp. Read it just once and use that same
   984  	// value now and below.
   985  	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt
   986  
   987  	// Be conservative about where we preempt.
   988  	// We are interested in preempting user Go code, not runtime code.
   989  	// If we're holding locks, mallocing, or preemption is disabled, don't
   990  	// preempt.
   991  	// This check is very early in newstack so that even the status change
   992  	// from Grunning to Gwaiting and back doesn't happen in this case.
   993  	// That status change by itself can be viewed as a small preemption,
   994  	// because the GC might change Gwaiting to Gscanwaiting, and then
   995  	// this goroutine has to wait for the GC to finish before continuing.
   996  	// If the GC is in some way dependent on this goroutine (for example,
   997  	// it needs a lock held by the goroutine), that small preemption turns
   998  	// into a real deadlock.
   999  	if preempt {
  1000  		if !canPreemptM(thisg.m) {
  1001  			// Let the goroutine keep running for now.
  1002  			// gp->preempt is set, so it will be preempted next time.
  1003  			gp.stackguard0 = gp.stack.lo + _StackGuard
  1004  			gogo(&gp.sched) // never return
  1005  		}
  1006  	}
  1007  
  1008  	if gp.stack.lo == 0 {
  1009  		throw("missing stack in newstack")
  1010  	}
  1011  	sp := gp.sched.sp
  1012  	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 || sys.ArchFamily == sys.WASM {
  1013  		// The call to morestack cost a word.
  1014  		sp -= sys.PtrSize
  1015  	}
  1016  	if stackDebug >= 1 || sp < gp.stack.lo {
  1017  		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
  1018  			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
  1019  			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
  1020  	}
  1021  	if sp < gp.stack.lo {
  1022  		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
  1023  		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
  1024  		throw("runtime: split stack overflow")
  1025  	}
  1026  
  1027  	if preempt {
  1028  		if gp == thisg.m.g0 {
  1029  			throw("runtime: preempt g0")
  1030  		}
  1031  		if thisg.m.p == 0 && thisg.m.locks == 0 {
  1032  			throw("runtime: g is running but p is not")
  1033  		}
  1034  
  1035  		if gp.preemptShrink {
  1036  			// We're at a synchronous safe point now, so
  1037  			// do the pending stack shrink.
  1038  			gp.preemptShrink = false
  1039  			shrinkstack(gp)
  1040  		}
  1041  
  1042  		if gp.preemptStop {
  1043  			preemptPark(gp) // never returns
  1044  		}
  1045  
  1046  		// Act like goroutine called runtime.Gosched.
  1047  		gopreempt_m(gp) // never return
  1048  	}
  1049  
  1050  	// Allocate a bigger segment and move the stack.
  1051  	oldsize := gp.stack.hi - gp.stack.lo
  1052  	newsize := oldsize * 2
  1053  
  1054  	// Make sure we grow at least as much as needed to fit the new frame.
  1055  	// (This is just an optimization - the caller of morestack will
  1056  	// recheck the bounds on return.)
  1057  	if f := findfunc(gp.sched.pc); f.valid() {
  1058  		max := uintptr(funcMaxSPDelta(f))
  1059  		for newsize-oldsize < max+_StackGuard {
  1060  			newsize *= 2
  1061  		}
  1062  	}
  1063  
  1064  	if newsize > maxstacksize {
  1065  		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
  1066  		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
  1067  		throw("stack overflow")
  1068  	}
  1069  
  1070  	// The goroutine must be executing in order to call newstack,
  1071  	// so it must be Grunning (or Gscanrunning).
  1072  	casgstatus(gp, _Grunning, _Gcopystack)
  1073  
  1074  	// The concurrent GC will not scan the stack while we are doing the copy since
  1075  	// the gp is in a Gcopystack status.
  1076  	copystack(gp, newsize)
  1077  	if stackDebug >= 1 {
  1078  		print("stack grow done\n")
  1079  	}
  1080  	casgstatus(gp, _Gcopystack, _Grunning)
  1081  	gogo(&gp.sched)
  1082  }
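
The growth policy above is: double, then keep doubling until the added space covers the largest frame the faulting function might need plus the guard. A standalone sketch, with _StackGuard assumed to be 928 (its value with a stack guard multiplier of 1 and _StackSystem of 0):

	package main

	import "fmt"

	// grow mirrors the newsize loop in newstack, on plain values.
	func grow(oldsize, maxFrame uintptr) uintptr {
		const stackGuard = 928 // assumption, see _StackGuard above
		newsize := oldsize * 2
		for newsize-oldsize < maxFrame+stackGuard {
			newsize *= 2
		}
		return newsize
	}

	func main() {
		fmt.Println(grow(2048, 100))   // 4096: one doubling is plenty
		fmt.Println(grow(2048, 16384)) // 32768: a huge frame forces several doublings
	}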
  1083  
  1084  //go:nosplit
  1085  func nilfunc() {
  1086  	*(*uint8)(nil) = 0
  1087  }
  1088  
  1089  // adjust Gobuf as if it executed a call to fn
  1090  // and then did an immediate gosave.
  1091  func gostartcallfn(gobuf *gobuf, fv *funcval) {
  1092  	var fn unsafe.Pointer
  1093  	if fv != nil {
  1094  		fn = unsafe.Pointer(fv.fn)
  1095  	} else {
  1096  		fn = unsafe.Pointer(funcPC(nilfunc))
  1097  	}
  1098  	gostartcall(gobuf, fn, unsafe.Pointer(fv))
  1099  }
  1100  
  1101  // isShrinkStackSafe returns whether it's safe to attempt to shrink
  1102  // gp's stack. Shrinking the stack is only safe when we have precise
  1103  // pointer maps for all frames on the stack.
  1104  func isShrinkStackSafe(gp *g) bool {
  1105  	// We can't copy the stack if we're in a syscall.
  1106  	// The syscall might have pointers into the stack and
  1107  	// often we don't have precise pointer maps for the innermost
  1108  	// frames.
  1109  	//
  1110  	// We also can't copy the stack if we're at an asynchronous
  1111  	// safe-point because we don't have precise pointer maps for
  1112  	// all frames.
  1113  	//
  1114  	// We also can't *shrink* the stack in the window between the
  1115  	// goroutine calling gopark to park on a channel and
  1116  	// gp.activeStackChans being set.
  1117  	return gp.syscallsp == 0 && !gp.asyncSafePoint && atomic.Load8(&gp.parkingOnChan) == 0
  1118  }
  1119  
  1120  // Maybe shrink the stack being used by gp.
  1121  //
  1122  // gp must be stopped and we must own its stack. It may be in
  1123  // _Grunning, but only if this is our own user G.
  1124  func shrinkstack(gp *g) {
  1125  	if gp.stack.lo == 0 {
  1126  		throw("missing stack in shrinkstack")
  1127  	}
  1128  	if s := readgstatus(gp); s&_Gscan == 0 {
  1129  		// We don't own the stack via _Gscan. We could still
  1130  		// own it if this is our own user G and we're on the
  1131  		// system stack.
  1132  		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
  1133  			// We don't own the stack.
  1134  			throw("bad status in shrinkstack")
  1135  		}
  1136  	}
  1137  	if !isShrinkStackSafe(gp) {
  1138  		throw("shrinkstack at bad time")
  1139  	}
  1140  	// Check for self-shrinks while in a libcall. These may have
  1141  	// pointers into the stack disguised as uintptrs, but these
  1142  	// code paths should all be nosplit.
  1143  	if gp == getg().m.curg && gp.m.libcallsp != 0 {
  1144  		throw("shrinking stack in libcall")
  1145  	}
  1146  
  1147  	if debug.gcshrinkstackoff > 0 {
  1148  		return
  1149  	}
  1150  	f := findfunc(gp.startpc)
  1151  	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
  1152  		// We're not allowed to shrink the gcBgMarkWorker
  1153  		// stack (see gcBgMarkWorker for explanation).
  1154  		return
  1155  	}
  1156  
  1157  	oldsize := gp.stack.hi - gp.stack.lo
  1158  	newsize := oldsize / 2
  1159  	// Don't shrink the allocation below the minimum-sized stack
  1160  	// allocation.
  1161  	if newsize < _FixedStack {
  1162  		return
  1163  	}
  1164  	// Compute how much of the stack is currently in use and only
  1165  	// shrink the stack if gp is using less than a quarter of its
  1166  	// current stack. The currently used stack includes everything
  1167  	// down to the SP plus the stack guard space that ensures
  1168  	// there's room for nosplit functions.
  1169  	avail := gp.stack.hi - gp.stack.lo
  1170  	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
  1171  		return
  1172  	}
  1173  
  1174  	if stackDebug > 0 {
  1175  		print("shrinking stack ", oldsize, "->", newsize, "\n")
  1176  	}
  1177  
  1178  	copystack(gp, newsize)
  1179  }
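
The shrink policy above halves the stack only when less than a quarter of it is in use (counting _StackLimit of guard room for nosplit functions) and never drops below the minimum allocation. A standalone sketch; the constants assume a build where _StackSystem is 0, so _FixedStack is 2048 and _StackLimit is 800:

	package main

	import "fmt"

	const (
		fixedStack = 2048 // assumed minimum stack allocation
		stackLimit = 800  // assumed _StackGuard(928) - _StackSmall(128)
	)

	// shrinkTarget reports the new size and whether a shrink would happen.
	func shrinkTarget(avail, spUsed uintptr) (uintptr, bool) {
		newsize := avail / 2
		if newsize < fixedStack {
			return avail, false // already at the minimum allocation
		}
		if used := spUsed + stackLimit; used >= avail/4 {
			return avail, false // more than a quarter in use: keep it
		}
		return newsize, true
	}

	func main() {
		fmt.Println(shrinkTarget(32768, 1024))  // 16384 true: mostly idle stack
		fmt.Println(shrinkTarget(32768, 12000)) // 32768 false: too much in use
		fmt.Println(shrinkTarget(2048, 0))      // 2048 false: already minimal
	}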
  1180  
  1181  // freeStackSpans frees unused stack spans at the end of GC.
  1182  func freeStackSpans() {
  1183  
  1184  	// Scan stack pools for empty stack spans.
  1185  	for order := range stackpool {
  1186  		lock(&stackpool[order].item.mu)
  1187  		list := &stackpool[order].item.span
  1188  		for s := list.first; s != nil; {
  1189  			next := s.next
  1190  			if s.allocCount == 0 {
  1191  				list.remove(s)
  1192  				s.manualFreeList = 0
  1193  				osStackFree(s)
  1194  				mheap_.freeManual(s, &memstats.stacks_inuse)
  1195  			}
  1196  			s = next
  1197  		}
  1198  		unlock(&stackpool[order].item.mu)
  1199  	}
  1200  
  1201  	// Free large stack spans.
  1202  	lock(&stackLarge.lock)
  1203  	for i := range stackLarge.free {
  1204  		for s := stackLarge.free[i].first; s != nil; {
  1205  			next := s.next
  1206  			stackLarge.free[i].remove(s)
  1207  			osStackFree(s)
  1208  			mheap_.freeManual(s, &memstats.stacks_inuse)
  1209  			s = next
  1210  		}
  1211  	}
  1212  	unlock(&stackLarge.lock)
  1213  }
  1214  
  1215  // getStackMap returns the locals and arguments live pointer maps, and
  1216  // stack object list for frame.
  1217  func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
  1218  	targetpc := frame.continpc
  1219  	if targetpc == 0 {
  1220  		// Frame is dead. Return empty bitvectors.
  1221  		return
  1222  	}
  1223  
  1224  	f := frame.fn
  1225  	pcdata := int32(-1)
  1226  	if targetpc != f.entry {
  1227  		// Back up to the CALL. If we're at the function entry
  1228  		// point, we want to use the entry map (-1), even if
  1229  		// the first instruction of the function changes the
  1230  		// stack map.
  1231  		targetpc--
  1232  		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
  1233  	}
  1234  	if pcdata == -1 {
  1235  		// We do not have a valid pcdata value but there might be a
  1236  		// stackmap for this function. It is likely that we are looking
  1237  		// at the function prologue, assume so and hope for the best.
  1238  		pcdata = 0
  1239  	}
  1240  
  1241  	// Local variables.
  1242  	size := frame.varp - frame.sp
  1243  	var minsize uintptr
  1244  	switch sys.ArchFamily {
  1245  	case sys.ARM64:
  1246  		minsize = sys.SpAlign
  1247  	default:
  1248  		minsize = sys.MinFrameSize
  1249  	}
  1250  	if size > minsize {
  1251  		stackid := pcdata
  1252  		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
  1253  		if stkmap == nil || stkmap.n <= 0 {
  1254  			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
  1255  			throw("missing stackmap")
  1256  		}
  1257  		// If nbit == 0, there's no work to do.
  1258  		if stkmap.nbit > 0 {
  1259  			if stackid < 0 || stackid >= stkmap.n {
  1260  				// don't know where we are
  1261  				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
  1262  				throw("bad symbol table")
  1263  			}
  1264  			locals = stackmapdata(stkmap, stackid)
  1265  			if stackDebug >= 3 && debug {
  1266  				print("      locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
  1267  			}
  1268  		} else if stackDebug >= 3 && debug {
  1269  			print("      no locals to adjust\n")
  1270  		}
  1271  	}
  1272  
  1273  	// Arguments.
  1274  	if frame.arglen > 0 {
  1275  		if frame.argmap != nil {
  1276  			// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
  1277  			// In this case, arglen specifies how much of the args section is actually live.
  1278  			// (It could be either all the args + results, or just the args.)
  1279  			args = *frame.argmap
  1280  			n := int32(frame.arglen / sys.PtrSize)
  1281  			if n < args.n {
  1282  				args.n = n // Don't use more of the arguments than arglen.
  1283  			}
  1284  		} else {
  1285  			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
  1286  			if stackmap == nil || stackmap.n <= 0 {
  1287  				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
  1288  				throw("missing stackmap")
  1289  			}
  1290  			if pcdata < 0 || pcdata >= stackmap.n {
  1291  				// don't know where we are
  1292  				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
  1293  				throw("bad symbol table")
  1294  			}
  1295  			if stackmap.nbit > 0 {
  1296  				args = stackmapdata(stackmap, pcdata)
  1297  			}
  1298  		}
  1299  	}
  1300  
  1301  	// stack objects.
  1302  	p := funcdata(f, _FUNCDATA_StackObjects)
  1303  	if p != nil {
  1304  		n := *(*uintptr)(p)
  1305  		p = add(p, sys.PtrSize)
  1306  		*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
  1307  		// Note: the noescape above is needed to keep
  1308  		// getStackMap from "leaking param content:
  1309  		// frame".  That leak propagates up to getgcmask, then
  1310  		// GCMask, then verifyGCInfo, which converts the stack
  1311  		// gcinfo tests into heap gcinfo tests :(
  1312  	}
  1313  
  1314  	return
  1315  }
  1316  
  1317  // A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
  1318  // This record must match the generator code in cmd/compile/internal/gc/ssa.go:emitStackObjects.
  1319  type stackObjectRecord struct {
  1320  	// offset in frame
  1321  	// if negative, offset from varp
  1322  	// if non-negative, offset from argp
  1323  	off int
  1324  	typ *_type
  1325  }
  1326  
  1327  // This is exported as ABI0 via linkname so obj can call it.
  1328  //
  1329  //go:nosplit
  1330  //go:linkname morestackc
  1331  func morestackc() {
  1332  	throw("attempt to execute system stack code on user stack")
  1333  }
  1334  
