Source file src/runtime/stack.go

     1  // Copyright 2013 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/cpu"
     9  	"runtime/internal/atomic"
    10  	"runtime/internal/sys"
    11  	"unsafe"
    12  )
    13  
    14  /*
    15  Stack layout parameters.
    16  Included both by runtime (compiled via 6c) and linkers (compiled via gcc).
    17  
    18  The per-goroutine g->stackguard is set to point StackGuard bytes
    19  above the bottom of the stack.  Each function compares its stack
    20  pointer against g->stackguard to check for overflow.  To cut one
    21  instruction from the check sequence for functions with tiny frames,
    22  the stack is allowed to protrude StackSmall bytes below the stack
    23  guard.  Functions with large frames don't bother with the check and
    24  always call morestack.  The sequences are (for amd64, others are
    25  similar):
    26  
    27  	guard = g->stackguard
    28  	frame = function's stack frame size
    29  	argsize = size of function arguments (call + return)
    30  
    31  	stack frame size <= StackSmall:
    32  		CMPQ guard, SP
    33  		JHI 3(PC)
    34  		MOVQ m->morearg, $(argsize << 32)
    35  		CALL morestack(SB)
    36  
    37  	stack frame size > StackSmall but < StackBig:
    38  		LEAQ (frame-StackSmall)(SP), R0
    39  		CMPQ guard, R0
    40  		JHI 3(PC)
    41  		MOVQ m->morearg, $(argsize << 32)
    42  		CALL morestack(SB)
    43  
    44  	stack frame size >= StackBig:
    45  		MOVQ m->morearg, $((argsize << 32) | frame)
    46  		CALL morestack(SB)
    47  
    48  The bottom StackGuard - StackSmall bytes are important: there has
    49  to be enough room to execute functions that refuse to check for
    50  stack overflow, either because they need to be adjacent to the
    51  actual caller's frame (deferproc) or because they handle the imminent
    52  stack overflow (morestack).
    53  
    54  For example, deferproc might call malloc, which does one of the
    55  above checks (without allocating a full frame), which might trigger
    56  a call to morestack.  This sequence needs to fit in the bottom
    57  section of the stack.  On amd64, morestack's frame is 40 bytes, and
    58  deferproc's frame is 56 bytes.  That fits well within the
    59  StackGuard - StackSmall bytes at the bottom.
    60  The linkers explore all possible call traces involving non-splitting
    61  functions to make sure that this limit cannot be violated.
    62  */
    63  
    64  const (
    65  	// StackSystem is a number of additional bytes to add
    66  	// to each stack below the usual guard area for OS-specific
    67  	// purposes like signal handling. Used on Windows, Plan 9,
    68  	// and iOS because they do not use a separate stack.
    69  	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024 + sys.GoosDarwin*sys.GoarchArm64*1024
    70  
    71  	// The minimum size of stack used by Go code
    72  	_StackMin = 2048
    73  
    74  	// The minimum stack size to allocate.
    75  	// The hackery here rounds FixedStack0 up to a power of 2.
    76  	_FixedStack0 = _StackMin + _StackSystem
    77  	_FixedStack1 = _FixedStack0 - 1
    78  	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
    79  	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
    80  	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
    81  	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
    82  	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
    83  	_FixedStack  = _FixedStack6 + 1
    84  
    85  	// Functions that need frames bigger than this use an extra
    86  	// instruction to do the stack split check, to avoid overflow
    87  	// in case SP - framesize wraps below zero.
    88  	// This value can be no bigger than the size of the unmapped
    89  	// space at zero.
    90  	_StackBig = 4096
    91  
    92  	// The stack guard is a pointer this many bytes above the
    93  	// bottom of the stack.
    94  	_StackGuard = 928*sys.StackGuardMultiplier + _StackSystem
    95  
    96  	// After a stack split check the SP is allowed to be this
    97  	// many bytes below the stack guard. This saves an instruction
    98  	// in the checking sequence for tiny frames.
    99  	_StackSmall = 128
   100  
   101  	// The maximum number of bytes that a chain of NOSPLIT
   102  	// functions can use.
   103  	_StackLimit = _StackGuard - _StackSystem - _StackSmall
   104  )
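
A minimal standalone sketch of the power-of-two rounding used by the _FixedStack constants above; the function name roundUpPow2 and the sample inputs are illustrative, not part of the runtime:

package main

import "fmt"

// roundUpPow2 rounds n up to the next power of two using the same
// shift-and-OR cascade as _FixedStack1.._FixedStack6: subtract one,
// smear the highest set bit into every lower bit, then add one back.
func roundUpPow2(n uint32) uint32 {
        n--
        n |= n >> 1
        n |= n >> 2
        n |= n >> 4
        n |= n >> 8
        n |= n >> 16
        return n + 1
}

func main() {
        fmt.Println(roundUpPow2(2048))       // 2048: already a power of two
        fmt.Println(roundUpPow2(2048 + 512)) // 4096: _StackMin plus a Plan 9-style _StackSystem
}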
   105  
   106  const (
   107  	// stackDebug == 0: no logging
   108  	//            == 1: logging of per-stack operations
   109  	//            == 2: logging of per-frame operations
   110  	//            == 3: logging of per-word updates
   111  	//            == 4: logging of per-word reads
   112  	stackDebug       = 0
   113  	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
   114  	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
   115  	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
   116  	stackNoCache     = 0 // disable per-P small stack caches
   117  
   118  	// check the BP links during traceback.
   119  	debugCheckBP = false
   120  )
   121  
   122  const (
   123  	uintptrMask = 1<<(8*sys.PtrSize) - 1
   124  
   125  	// Goroutine preemption request.
   126  	// Stored into g->stackguard0 to cause split stack check failure.
   127  	// Must be greater than any real sp.
   128  	// 0xfffffade in hex.
   129  	stackPreempt = uintptrMask & -1314
   130  
   131  	// Thread is forking.
   132  	// Stored into g->stackguard0 to cause split stack check failure.
   133  	// Must be greater than any real sp.
   134  	stackFork = uintptrMask & -1234
   135  )
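
As a quick illustration of what these sentinels evaluate to (the pointer sizes below are hard-coded; the runtime derives uintptrMask from sys.PtrSize), both values sit just under the top of the address space, which is why they compare greater than any real stack pointer:

package main

import "fmt"

func main() {
        // Pointer sizes hard-coded for illustration; the runtime uses
        // 1<<(8*sys.PtrSize) - 1 for uintptrMask.
        const mask32 = 1<<(8*4) - 1
        const mask64 = 1<<(8*8) - 1

        fmt.Printf("%#x\n", uint32(mask32&-1314)) // 0xfffffade (stackPreempt on 32-bit)
        fmt.Printf("%#x\n", uint64(mask64&-1314)) // 0xfffffffffffffade (stackPreempt on 64-bit)
        fmt.Printf("%#x\n", uint64(mask64&-1234)) // 0xfffffffffffffb2e (stackFork on 64-bit)
}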
   136  
   137  // Global pool of spans that have free stacks.
   138  // Stacks are assigned an order according to size.
   139  //     order = log_2(size/FixedStack)
   140  // There is a free list for each order.
   141  var stackpool [_NumStackOrders]struct {
   142  	item stackpoolItem
   143  	_    [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
   144  }
   145  
   146  //go:notinheap
   147  type stackpoolItem struct {
   148  	mu   mutex
   149  	span mSpanList
   150  }
   151  
   152  // Global pool of large stack spans.
   153  var stackLarge struct {
   154  	lock mutex
   155  	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
   156  }
   157  
   158  func stackinit() {
   159  	if _StackCacheSize&_PageMask != 0 {
   160  		throw("cache size must be a multiple of page size")
   161  	}
   162  	for i := range stackpool {
   163  		stackpool[i].item.span.init()
   164  		lockInit(&stackpool[i].item.mu, lockRankStackpool)
   165  	}
   166  	for i := range stackLarge.free {
   167  		stackLarge.free[i].init()
   168  		lockInit(&stackLarge.lock, lockRankStackLarge)
   169  	}
   170  }
   171  
   172  // stacklog2 returns ⌊log_2(n)⌋.
   173  func stacklog2(n uintptr) int {
   174  	log2 := 0
   175  	for n > 1 {
   176  		n >>= 1
   177  		log2++
   178  	}
   179  	return log2
   180  }
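
To make the order arithmetic concrete, here is a self-contained sketch of how a power-of-two stack size maps to a stackpool order (order = log_2(size/FixedStack)), using the same halving loop that stackalloc and stackfree use below. The constants fixedStack and numStackOrders are assumed Linux/amd64-like values, not imports from the runtime:

package main

import "fmt"

const (
        fixedStack     = 2048 // assumed _FixedStack
        numStackOrders = 4    // assumed _NumStackOrders
)

// orderForSize returns the stackpool order for a power-of-two stack
// size, i.e. log2(size/fixedStack), with ok=false when the size falls
// outside the pooled range and would be served by stackLarge or the heap.
func orderForSize(n uintptr) (order uint8, ok bool) {
        if n < fixedStack || n&(n-1) != 0 {
                return 0, false // too small or not a power of two
        }
        for n2 := n; n2 > fixedStack; n2 >>= 1 {
                order++
        }
        if order >= numStackOrders {
                return 0, false // too big for the fixed-size pools
        }
        return order, true
}

func main() {
        for _, size := range []uintptr{2048, 4096, 8192, 16384, 32768} {
                order, ok := orderForSize(size)
                fmt.Println(size, order, ok) // 2048 0 true ... 32768 0 false
        }
}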
   181  
   182  // Allocates a stack from the free pool. Must be called with
   183  // stackpool[order].item.mu held.
   184  func stackpoolalloc(order uint8) gclinkptr {
   185  	list := &stackpool[order].item.span
   186  	s := list.first
   187  	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
   188  	if s == nil {
   189  		// no free stacks. Allocate another span worth.
   190  		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
   191  		if s == nil {
   192  			throw("out of memory")
   193  		}
   194  		if s.allocCount != 0 {
   195  			throw("bad allocCount")
   196  		}
   197  		if s.manualFreeList.ptr() != nil {
   198  			throw("bad manualFreeList")
   199  		}
   200  		osStackAlloc(s)
   201  		s.elemsize = _FixedStack << order
   202  		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
   203  			x := gclinkptr(s.base() + i)
   204  			x.ptr().next = s.manualFreeList
   205  			s.manualFreeList = x
   206  		}
   207  		list.insert(s)
   208  	}
   209  	x := s.manualFreeList
   210  	if x.ptr() == nil {
   211  		throw("span has no free stacks")
   212  	}
   213  	s.manualFreeList = x.ptr().next
   214  	s.allocCount++
   215  	if s.manualFreeList.ptr() == nil {
   216  		// all stacks in s are allocated.
   217  		list.remove(s)
   218  	}
   219  	return x
   220  }
   221  
   222  // Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
   223  func stackpoolfree(x gclinkptr, order uint8) {
   224  	s := spanOfUnchecked(uintptr(x))
   225  	if s.state.get() != mSpanManual {
   226  		throw("freeing stack not in a stack span")
   227  	}
   228  	if s.manualFreeList.ptr() == nil {
   229  		// s will now have a free stack
   230  		stackpool[order].item.span.insert(s)
   231  	}
   232  	x.ptr().next = s.manualFreeList
   233  	s.manualFreeList = x
   234  	s.allocCount--
   235  	if gcphase == _GCoff && s.allocCount == 0 {
   236  		// Span is completely free. Return it to the heap
   237  		// immediately if we're sweeping.
   238  		//
   239  		// If GC is active, we delay the free until the end of
   240  		// GC to avoid the following type of situation:
   241  		//
   242  		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
   243  		// 2) The stack that pointer points to is copied
   244  		// 3) The old stack is freed
   245  		// 4) The containing span is marked free
   246  		// 5) GC attempts to mark the SudoG.elem pointer. The
   247  		//    marking fails because the pointer looks like a
   248  		//    pointer into a free span.
   249  		//
   250  		// By not freeing, we prevent step #4 until GC is done.
   251  		stackpool[order].item.span.remove(s)
   252  		s.manualFreeList = 0
   253  		osStackFree(s)
   254  		mheap_.freeManual(s, &memstats.stacks_inuse)
   255  	}
   256  }
   257  
   258  // stackcacherefill/stackcacherelease implement a global pool of stack segments.
   259  // The pool is required to prevent unlimited growth of per-thread caches.
   260  //
   261  //go:systemstack
   262  func stackcacherefill(c *mcache, order uint8) {
   263  	if stackDebug >= 1 {
   264  		print("stackcacherefill order=", order, "\n")
   265  	}
   266  
   267  	// Grab some stacks from the global cache.
   268  	// Grab half of the allowed capacity (to prevent thrashing).
   269  	var list gclinkptr
   270  	var size uintptr
   271  	lock(&stackpool[order].item.mu)
   272  	for size < _StackCacheSize/2 {
   273  		x := stackpoolalloc(order)
   274  		x.ptr().next = list
   275  		list = x
   276  		size += _FixedStack << order
   277  	}
   278  	unlock(&stackpool[order].item.mu)
   279  	c.stackcache[order].list = list
   280  	c.stackcache[order].size = size
   281  }
   282  
   283  //go:systemstack
   284  func stackcacherelease(c *mcache, order uint8) {
   285  	if stackDebug >= 1 {
   286  		print("stackcacherelease order=", order, "\n")
   287  	}
   288  	x := c.stackcache[order].list
   289  	size := c.stackcache[order].size
   290  	lock(&stackpool[order].item.mu)
   291  	for size > _StackCacheSize/2 {
   292  		y := x.ptr().next
   293  		stackpoolfree(x, order)
   294  		x = y
   295  		size -= _FixedStack << order
   296  	}
   297  	unlock(&stackpool[order].item.mu)
   298  	c.stackcache[order].list = x
   299  	c.stackcache[order].size = size
   300  }
   301  
   302  //go:systemstack
   303  func stackcache_clear(c *mcache) {
   304  	if stackDebug >= 1 {
   305  		print("stackcache clear\n")
   306  	}
   307  	for order := uint8(0); order < _NumStackOrders; order++ {
   308  		lock(&stackpool[order].item.mu)
   309  		x := c.stackcache[order].list
   310  		for x.ptr() != nil {
   311  			y := x.ptr().next
   312  			stackpoolfree(x, order)
   313  			x = y
   314  		}
   315  		c.stackcache[order].list = 0
   316  		c.stackcache[order].size = 0
   317  		unlock(&stackpool[order].item.mu)
   318  	}
   319  }
   320  
   321  // stackalloc allocates an n byte stack.
   322  //
   323  // stackalloc must run on the system stack because it uses per-P
   324  // resources and must not split the stack.
   325  //
   326  //go:systemstack
   327  func stackalloc(n uint32) stack {
   328  	// Stackalloc must be called on scheduler stack, so that we
   329  	// never try to grow the stack during the code that stackalloc runs.
   330  	// Doing so would cause a deadlock (issue 1547).
   331  	thisg := getg()
   332  	if thisg != thisg.m.g0 {
   333  		throw("stackalloc not on scheduler stack")
   334  	}
   335  	if n&(n-1) != 0 {
   336  		throw("stack size not a power of 2")
   337  	}
   338  	if stackDebug >= 1 {
   339  		print("stackalloc ", n, "\n")
   340  	}
   341  
   342  	if debug.efence != 0 || stackFromSystem != 0 {
   343  		n = uint32(alignUp(uintptr(n), physPageSize))
   344  		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
   345  		if v == nil {
   346  			throw("out of memory (stackalloc)")
   347  		}
   348  		return stack{uintptr(v), uintptr(v) + uintptr(n)}
   349  	}
   350  
   351  	// Small stacks are allocated with a fixed-size free-list allocator.
   352  	// If we need a stack of a bigger size, we fall back on allocating
   353  	// a dedicated span.
   354  	var v unsafe.Pointer
   355  	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
   356  		order := uint8(0)
   357  		n2 := n
   358  		for n2 > _FixedStack {
   359  			order++
   360  			n2 >>= 1
   361  		}
   362  		var x gclinkptr
   363  		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
   364  			// thisg.m.p == 0 can happen in the guts of exitsyscall
   365  			// or procresize. Just get a stack from the global pool.
   366  			// Also don't touch stackcache during gc
   367  			// as it's flushed concurrently.
   368  			lock(&stackpool[order].item.mu)
   369  			x = stackpoolalloc(order)
   370  			unlock(&stackpool[order].item.mu)
   371  		} else {
   372  			c := thisg.m.p.ptr().mcache
   373  			x = c.stackcache[order].list
   374  			if x.ptr() == nil {
   375  				stackcacherefill(c, order)
   376  				x = c.stackcache[order].list
   377  			}
   378  			c.stackcache[order].list = x.ptr().next
   379  			c.stackcache[order].size -= uintptr(n)
   380  		}
   381  		v = unsafe.Pointer(x)
   382  	} else {
   383  		var s *mspan
   384  		npage := uintptr(n) >> _PageShift
   385  		log2npage := stacklog2(npage)
   386  
   387  		// Try to get a stack from the large stack cache.
   388  		lock(&stackLarge.lock)
   389  		if !stackLarge.free[log2npage].isEmpty() {
   390  			s = stackLarge.free[log2npage].first
   391  			stackLarge.free[log2npage].remove(s)
   392  		}
   393  		unlock(&stackLarge.lock)
   394  
   395  		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
   396  
   397  		if s == nil {
   398  			// Allocate a new stack from the heap.
   399  			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
   400  			if s == nil {
   401  				throw("out of memory")
   402  			}
   403  			osStackAlloc(s)
   404  			s.elemsize = uintptr(n)
   405  		}
   406  		v = unsafe.Pointer(s.base())
   407  	}
   408  
   409  	if raceenabled {
   410  		racemalloc(v, uintptr(n))
   411  	}
   412  	if msanenabled {
   413  		msanmalloc(v, uintptr(n))
   414  	}
   415  	if stackDebug >= 1 {
   416  		print("  allocated ", v, "\n")
   417  	}
   418  	return stack{uintptr(v), uintptr(v) + uintptr(n)}
   419  }
   420  
   421  // stackfree frees an n byte stack allocation at stk.
   422  //
   423  // stackfree must run on the system stack because it uses per-P
   424  // resources and must not split the stack.
   425  //
   426  //go:systemstack
   427  func stackfree(stk stack) {
   428  	gp := getg()
   429  	v := unsafe.Pointer(stk.lo)
   430  	n := stk.hi - stk.lo
   431  	if n&(n-1) != 0 {
   432  		throw("stack not a power of 2")
   433  	}
   434  	if stk.lo+n < stk.hi {
   435  		throw("bad stack size")
   436  	}
   437  	if stackDebug >= 1 {
   438  		println("stackfree", v, n)
   439  		memclrNoHeapPointers(v, n) // for testing, clobber stack data
   440  	}
   441  	if debug.efence != 0 || stackFromSystem != 0 {
   442  		if debug.efence != 0 || stackFaultOnFree != 0 {
   443  			sysFault(v, n)
   444  		} else {
   445  			sysFree(v, n, &memstats.stacks_sys)
   446  		}
   447  		return
   448  	}
   449  	if msanenabled {
   450  		msanfree(v, n)
   451  	}
   452  	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
   453  		order := uint8(0)
   454  		n2 := n
   455  		for n2 > _FixedStack {
   456  			order++
   457  			n2 >>= 1
   458  		}
   459  		x := gclinkptr(v)
   460  		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
   461  			lock(&stackpool[order].item.mu)
   462  			stackpoolfree(x, order)
   463  			unlock(&stackpool[order].item.mu)
   464  		} else {
   465  			c := gp.m.p.ptr().mcache
   466  			if c.stackcache[order].size >= _StackCacheSize {
   467  				stackcacherelease(c, order)
   468  			}
   469  			x.ptr().next = c.stackcache[order].list
   470  			c.stackcache[order].list = x
   471  			c.stackcache[order].size += n
   472  		}
   473  	} else {
   474  		s := spanOfUnchecked(uintptr(v))
   475  		if s.state.get() != mSpanManual {
   476  			println(hex(s.base()), v)
   477  			throw("bad span state")
   478  		}
   479  		if gcphase == _GCoff {
   480  			// Free the stack immediately if we're
   481  			// sweeping.
   482  			osStackFree(s)
   483  			mheap_.freeManual(s, &memstats.stacks_inuse)
   484  		} else {
   485  			// If the GC is running, we can't return a
   486  			// stack span to the heap because it could be
   487  			// reused as a heap span, and this state
   488  			// change would race with GC. Add it to the
   489  			// large stack cache instead.
   490  			log2npage := stacklog2(s.npages)
   491  			lock(&stackLarge.lock)
   492  			stackLarge.free[log2npage].insert(s)
   493  			unlock(&stackLarge.lock)
   494  		}
   495  	}
   496  }
   497  
   498  var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
   499  
   500  var ptrnames = []string{
   501  	0: "scalar",
   502  	1: "ptr",
   503  }
   504  
   505  // Stack frame layout
   506  //
   507  // (x86)
   508  // +------------------+
   509  // | args from caller |
   510  // +------------------+ <- frame->argp
   511  // |  return address  |
   512  // +------------------+
   513  // |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
   514  // +------------------+ <- frame->varp
   515  // |     locals       |
   516  // +------------------+
   517  // |  args to callee  |
   518  // +------------------+ <- frame->sp
   519  //
   520  // (arm)
   521  // +------------------+
   522  // | args from caller |
   523  // +------------------+ <- frame->argp
   524  // | caller's retaddr |
   525  // +------------------+ <- frame->varp
   526  // |     locals       |
   527  // +------------------+
   528  // |  args to callee  |
   529  // +------------------+
   530  // |  return address  |
   531  // +------------------+ <- frame->sp
   532  
   533  type adjustinfo struct {
   534  	old   stack
   535  	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
   536  	cache pcvalueCache
   537  
   538  	// sghi is the highest sudog.elem on the stack.
   539  	sghi uintptr
   540  }
   541  
   542  // Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
   543  // If so, it rewrites *vpp to point into the new stack.
   544  func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
   545  	pp := (*uintptr)(vpp)
   546  	p := *pp
   547  	if stackDebug >= 4 {
   548  		print("        ", pp, ":", hex(p), "\n")
   549  	}
   550  	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
   551  		*pp = p + adjinfo.delta
   552  		if stackDebug >= 3 {
   553  			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
   554  		}
   555  	}
   556  }
   557  
   558  // Information from the compiler about the layout of stack frames.
   559  // Note: this type must agree with reflect.bitVector.
   560  type bitvector struct {
   561  	n        int32 // # of bits
   562  	bytedata *uint8
   563  }
   564  
   565  // ptrbit returns the i'th bit in bv.
   566  // ptrbit is less efficient than iterating directly over bitvector bits,
   567  // and should only be used in non-performance-critical code.
   568  // See adjustpointers for an example of a high-efficiency walk of a bitvector.
   569  func (bv *bitvector) ptrbit(i uintptr) uint8 {
   570  	b := *(addb(bv.bytedata, i/8))
   571  	return (b >> (i % 8)) & 1
   572  }
   573  
   574  // bv describes the memory starting at address scanp.
   575  // Adjust any pointers contained therein.
   576  func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
   577  	minp := adjinfo.old.lo
   578  	maxp := adjinfo.old.hi
   579  	delta := adjinfo.delta
   580  	num := uintptr(bv.n)
   581  	// If this frame might contain channel receive slots, use CAS
   582  	// to adjust pointers. If the slot hasn't been received into
   583  	// yet, it may contain stack pointers and a concurrent send
   584  	// could race with adjusting those pointers. (The sent value
   585  	// itself can never contain stack pointers.)
   586  	useCAS := uintptr(scanp) < adjinfo.sghi
   587  	for i := uintptr(0); i < num; i += 8 {
   588  		if stackDebug >= 4 {
   589  			for j := uintptr(0); j < 8; j++ {
   590  				print("        ", add(scanp, (i+j)*sys.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*sys.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
   591  			}
   592  		}
   593  		b := *(addb(bv.bytedata, i/8))
   594  		for b != 0 {
   595  			j := uintptr(sys.Ctz8(b))
   596  			b &= b - 1
   597  			pp := (*uintptr)(add(scanp, (i+j)*sys.PtrSize))
   598  		retry:
   599  			p := *pp
   600  			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
   601  				// Looks like a junk value in a pointer slot.
   602  				// Live analysis wrong?
   603  				getg().m.traceback = 2
   604  				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
   605  				throw("invalid pointer found on stack")
   606  			}
   607  			if minp <= p && p < maxp {
   608  				if stackDebug >= 3 {
   609  					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
   610  				}
   611  				if useCAS {
   612  					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
   613  					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
   614  						goto retry
   615  					}
   616  				} else {
   617  					*pp = p + delta
   618  				}
   619  			}
   620  		}
   621  	}
   622  }
   623  
   624  // Note: the argument/return area is adjusted by the callee.
   625  func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
   626  	adjinfo := (*adjustinfo)(arg)
   627  	if frame.continpc == 0 {
   628  		// Frame is dead.
   629  		return true
   630  	}
   631  	f := frame.fn
   632  	if stackDebug >= 2 {
   633  		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
   634  	}
   635  	if f.funcID == funcID_systemstack_switch {
   636  		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
   637  		// We will allow it to be copied even though we don't
   638  		// have full GC info for it (because it is written in asm).
   639  		return true
   640  	}
   641  
   642  	locals, args, objs := getStackMap(frame, &adjinfo.cache, true)
   643  
   644  	// Adjust local variables if stack frame has been allocated.
   645  	if locals.n > 0 {
   646  		size := uintptr(locals.n) * sys.PtrSize
   647  		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
   648  	}
   649  
   650  	// Adjust saved base pointer if there is one.
   651  	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
   652  		if !framepointer_enabled {
   653  			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
   654  			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
   655  			throw("bad frame layout")
   656  		}
   657  		if stackDebug >= 3 {
   658  			print("      saved bp\n")
   659  		}
   660  		if debugCheckBP {
   661  			// Frame pointers should always point to the next higher frame on
   662  			// the Go stack (or be nil, for the top frame on the stack).
   663  			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
   664  			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
   665  				println("runtime: found invalid frame pointer")
   666  				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
   667  				throw("bad frame pointer")
   668  			}
   669  		}
   670  		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
   671  	}
   672  
   673  	// Adjust arguments.
   674  	if args.n > 0 {
   675  		if stackDebug >= 3 {
   676  			print("      args\n")
   677  		}
   678  		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
   679  	}
   680  
   681  	// Adjust pointers in all stack objects (whether they are live or not).
   682  	// See comments in mgcmark.go:scanframeworker.
   683  	if frame.varp != 0 {
   684  		for _, obj := range objs {
   685  			off := obj.off
   686  			base := frame.varp // locals base pointer
   687  			if off >= 0 {
   688  				base = frame.argp // arguments and return values base pointer
   689  			}
   690  			p := base + uintptr(off)
   691  			if p < frame.sp {
   692  				// Object hasn't been allocated in the frame yet.
   693  				// (Happens when the stack bounds check fails and
   694  				// we call into morestack.)
   695  				continue
   696  			}
   697  			t := obj.typ
   698  			gcdata := t.gcdata
   699  			var s *mspan
   700  			if t.kind&kindGCProg != 0 {
   701  				// See comments in mgcmark.go:scanstack
   702  				s = materializeGCProg(t.ptrdata, gcdata)
   703  				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
   704  			}
   705  			for i := uintptr(0); i < t.ptrdata; i += sys.PtrSize {
   706  				if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 {
   707  					adjustpointer(adjinfo, unsafe.Pointer(p+i))
   708  				}
   709  			}
   710  			if s != nil {
   711  				dematerializeGCProg(s)
   712  			}
   713  		}
   714  	}
   715  
   716  	return true
   717  }
   718  
   719  func adjustctxt(gp *g, adjinfo *adjustinfo) {
   720  	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
   721  	if !framepointer_enabled {
   722  		return
   723  	}
   724  	if debugCheckBP {
   725  		bp := gp.sched.bp
   726  		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
   727  			println("runtime: found invalid top frame pointer")
   728  			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
   729  			throw("bad top frame pointer")
   730  		}
   731  	}
   732  	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
   733  }
   734  
   735  func adjustdefers(gp *g, adjinfo *adjustinfo) {
   736  	// Adjust pointers in the Defer structs.
   737  	// We need to do this first because we need to adjust the
   738  	// defer.link fields so we always work on the new stack.
   739  	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
   740  	for d := gp._defer; d != nil; d = d.link {
   741  		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
   742  		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
   743  		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
   744  		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
   745  		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
   746  		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
   747  	}
   748  
   749  	// Adjust defer argument blocks the same way we adjust active stack frames.
   750  	// Note: this code is after the loop above, so that if a defer record is
   751  	// stack allocated, we work on the copy in the new stack.
   752  	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
   753  }
   754  
   755  func adjustpanics(gp *g, adjinfo *adjustinfo) {
   756  	// Panics are on stack and already adjusted.
   757  	// Update pointer to head of list in G.
   758  	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
   759  }
   760  
   761  func adjustsudogs(gp *g, adjinfo *adjustinfo) {
   762  	// the data elements pointed to by a SudoG structure
   763  	// might be in the stack.
   764  	for s := gp.waiting; s != nil; s = s.waitlink {
   765  		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
   766  	}
   767  }
   768  
   769  func fillstack(stk stack, b byte) {
   770  	for p := stk.lo; p < stk.hi; p++ {
   771  		*(*byte)(unsafe.Pointer(p)) = b
   772  	}
   773  }
   774  
   775  func findsghi(gp *g, stk stack) uintptr {
   776  	var sghi uintptr
   777  	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   778  		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
   779  		if stk.lo <= p && p < stk.hi && p > sghi {
   780  			sghi = p
   781  		}
   782  	}
   783  	return sghi
   784  }
   785  
   786  // syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
   787  // stack they refer to while synchronizing with concurrent channel
   788  // operations. It returns the number of bytes of stack copied.
   789  func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
   790  	if gp.waiting == nil {
   791  		return 0
   792  	}
   793  
   794  	// Lock channels to prevent concurrent send/receive.
   795  	var lastc *hchan
   796  	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   797  		if sg.c != lastc {
   798  			// There is a ranking cycle here between gscan bit and
   799  			// hchan locks. Normally, we only allow acquiring hchan
   800  			// locks and then getting a gscan bit. In this case, we
   801  			// already have the gscan bit. We allow acquiring hchan
   802  			// locks here as a special case, since a deadlock can't
   803  			// happen because the G involved must already be
   804  			// suspended. So, we get a special hchan lock rank here
   805  			// that is lower than gscan, but doesn't allow acquiring
   806  			// any other locks other than hchan.
   807  			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
   808  		}
   809  		lastc = sg.c
   810  	}
   811  
   812  	// Adjust sudogs.
   813  	adjustsudogs(gp, adjinfo)
   814  
   815  	// Copy the part of the stack the sudogs point in to
   816  	// while holding the lock to prevent races on
   817  	// send/receive slots.
   818  	var sgsize uintptr
   819  	if adjinfo.sghi != 0 {
   820  		oldBot := adjinfo.old.hi - used
   821  		newBot := oldBot + adjinfo.delta
   822  		sgsize = adjinfo.sghi - oldBot
   823  		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
   824  	}
   825  
   826  	// Unlock channels.
   827  	lastc = nil
   828  	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   829  		if sg.c != lastc {
   830  			unlock(&sg.c.lock)
   831  		}
   832  		lastc = sg.c
   833  	}
   834  
   835  	return sgsize
   836  }
   837  
   838  // Copies gp's stack to a new stack of a different size.
   839  // Caller must have changed gp status to Gcopystack.
   840  func copystack(gp *g, newsize uintptr) {
   841  	if gp.syscallsp != 0 {
   842  		throw("stack growth not allowed in system call")
   843  	}
   844  	old := gp.stack
   845  	if old.lo == 0 {
   846  		throw("nil stackbase")
   847  	}
   848  	used := old.hi - gp.sched.sp
   849  
   850  	// allocate new stack
   851  	new := stackalloc(uint32(newsize))
   852  	if stackPoisonCopy != 0 {
   853  		fillstack(new, 0xfd)
   854  	}
   855  	if stackDebug >= 1 {
   856  		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
   857  	}
   858  
   859  	// Compute adjustment.
   860  	var adjinfo adjustinfo
   861  	adjinfo.old = old
   862  	adjinfo.delta = new.hi - old.hi
   863  
   864  	// Adjust sudogs, synchronizing with channel ops if necessary.
   865  	ncopy := used
   866  	if !gp.activeStackChans {
   867  		adjustsudogs(gp, &adjinfo)
   868  	} else {
   869  		// sudogs may be pointing in to the stack and gp has
   870  		// released channel locks, so other goroutines could
   871  		// be writing to gp's stack. Find the highest such
   872  		// pointer so we can handle everything there and below
   873  		// carefully. (This shouldn't be far from the bottom
   874  		// of the stack, so there's little cost in handling
   875  		// everything below it carefully.)
   876  		adjinfo.sghi = findsghi(gp, old)
   877  
   878  		// Synchronize with channel ops and copy the part of
   879  		// the stack they may interact with.
   880  		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
   881  	}
   882  
   883  	// Copy the stack (or the rest of it) to the new location
   884  	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)
   885  
   886  	// Adjust remaining structures that have pointers into stacks.
   887  	// We have to do most of these before we traceback the new
   888  	// stack because gentraceback uses them.
   889  	adjustctxt(gp, &adjinfo)
   890  	adjustdefers(gp, &adjinfo)
   891  	adjustpanics(gp, &adjinfo)
   892  	if adjinfo.sghi != 0 {
   893  		adjinfo.sghi += adjinfo.delta
   894  	}
   895  
   896  	// Swap out old stack for new one
   897  	gp.stack = new
   898  	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
   899  	gp.sched.sp = new.hi - used
   900  	gp.stktopsp += adjinfo.delta
   901  
   902  	// Adjust pointers in the new stack.
   903  	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)
   904  
   905  	// free old stack
   906  	if stackPoisonCopy != 0 {
   907  		fillstack(old, 0xfc)
   908  	}
   909  	stackfree(old)
   910  }
   911  
   912  // round x up to a power of 2.
   913  func round2(x int32) int32 {
   914  	s := uint(0)
   915  	for 1<<s < x {
   916  		s++
   917  	}
   918  	return 1 << s
   919  }
   920  
   921  // Called from runtime·morestack when more stack is needed.
   922  // Allocate larger stack and relocate to new stack.
   923  // Stack growth is multiplicative, for constant amortized cost.
   924  //
   925  // g->atomicstatus will be Grunning or Gscanrunning upon entry.
   926  // If the scheduler is trying to stop this g, then it will set preemptStop.
   927  //
   928  // This must be nowritebarrierrec because it can be called as part of
   929  // stack growth from other nowritebarrierrec functions, but the
   930  // compiler doesn't check this.
   931  //
   932  //go:nowritebarrierrec
   933  func newstack() {
   934  	thisg := getg()
   935  	// TODO: double check all gp. shouldn't be getg().
   936  	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
   937  		throw("stack growth after fork")
   938  	}
   939  	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
   940  		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
   941  		morebuf := thisg.m.morebuf
   942  		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
   943  		throw("runtime: wrong goroutine in newstack")
   944  	}
   945  
   946  	gp := thisg.m.curg
   947  
   948  	if thisg.m.curg.throwsplit {
   949  		// Update syscallsp, syscallpc in case traceback uses them.
   950  		morebuf := thisg.m.morebuf
   951  		gp.syscallsp = morebuf.sp
   952  		gp.syscallpc = morebuf.pc
   953  		pcname, pcoff := "(unknown)", uintptr(0)
   954  		f := findfunc(gp.sched.pc)
   955  		if f.valid() {
   956  			pcname = funcname(f)
   957  			pcoff = gp.sched.pc - f.entry
   958  		}
   959  		print("runtime: newstack at ", pcname, "+", hex(pcoff),
   960  			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
   961  			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
   962  			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
   963  
   964  		thisg.m.traceback = 2 // Include runtime frames
   965  		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
   966  		throw("runtime: stack split at bad time")
   967  	}
   968  
   969  	morebuf := thisg.m.morebuf
   970  	thisg.m.morebuf.pc = 0
   971  	thisg.m.morebuf.lr = 0
   972  	thisg.m.morebuf.sp = 0
   973  	thisg.m.morebuf.g = 0
   974  
   975  	// NOTE: stackguard0 may change underfoot, if another thread
   976  	// is about to try to preempt gp. Read it just once and use that same
   977  	// value now and below.
   978  	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt
   979  
   980  	// Be conservative about where we preempt.
   981  	// We are interested in preempting user Go code, not runtime code.
   982  	// If we're holding locks, mallocing, or preemption is disabled, don't
   983  	// preempt.
   984  	// This check is very early in newstack so that even the status change
   985  	// from Grunning to Gwaiting and back doesn't happen in this case.
   986  	// That status change by itself can be viewed as a small preemption,
   987  	// because the GC might change Gwaiting to Gscanwaiting, and then
   988  	// this goroutine has to wait for the GC to finish before continuing.
   989  	// If the GC is in some way dependent on this goroutine (for example,
   990  	// it needs a lock held by the goroutine), that small preemption turns
   991  	// into a real deadlock.
   992  	if preempt {
   993  		if !canPreemptM(thisg.m) {
   994  			// Let the goroutine keep running for now.
   995  			// gp->preempt is set, so it will be preempted next time.
   996  			gp.stackguard0 = gp.stack.lo + _StackGuard
   997  			gogo(&gp.sched) // never return
   998  		}
   999  	}
  1000  
  1001  	if gp.stack.lo == 0 {
  1002  		throw("missing stack in newstack")
  1003  	}
  1004  	sp := gp.sched.sp
  1005  	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 || sys.ArchFamily == sys.WASM {
  1006  		// The call to morestack cost a word.
  1007  		sp -= sys.PtrSize
  1008  	}
  1009  	if stackDebug >= 1 || sp < gp.stack.lo {
  1010  		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
  1011  			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
  1012  			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
  1013  	}
  1014  	if sp < gp.stack.lo {
  1015  		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
  1016  		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
  1017  		throw("runtime: split stack overflow")
  1018  	}
  1019  
  1020  	if preempt {
  1021  		if gp == thisg.m.g0 {
  1022  			throw("runtime: preempt g0")
  1023  		}
  1024  		if thisg.m.p == 0 && thisg.m.locks == 0 {
  1025  			throw("runtime: g is running but p is not")
  1026  		}
  1027  
  1028  		if gp.preemptShrink {
  1029  			// We're at a synchronous safe point now, so
  1030  			// do the pending stack shrink.
  1031  			gp.preemptShrink = false
  1032  			shrinkstack(gp)
  1033  		}
  1034  
  1035  		if gp.preemptStop {
  1036  			preemptPark(gp) // never returns
  1037  		}
  1038  
  1039  		// Act like goroutine called runtime.Gosched.
  1040  		gopreempt_m(gp) // never return
  1041  	}
  1042  
  1043  	// Allocate a bigger segment and move the stack.
  1044  	oldsize := gp.stack.hi - gp.stack.lo
  1045  	newsize := oldsize * 2
  1046  
  1047  	// Make sure we grow at least as much as needed to fit the new frame.
  1048  	// (This is just an optimization - the caller of morestack will
  1049  	// recheck the bounds on return.)
  1050  	if f := findfunc(gp.sched.pc); f.valid() {
  1051  		max := uintptr(funcMaxSPDelta(f))
  1052  		for newsize-oldsize < max+_StackGuard {
  1053  			newsize *= 2
  1054  		}
  1055  	}
  1056  
  1057  	if newsize > maxstacksize {
  1058  		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
  1059  		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
  1060  		throw("stack overflow")
  1061  	}
  1062  
  1063  	// The goroutine must be executing in order to call newstack,
  1064  	// so it must be Grunning (or Gscanrunning).
  1065  	casgstatus(gp, _Grunning, _Gcopystack)
  1066  
  1067  	// The concurrent GC will not scan the stack while we are doing the copy since
  1068  	// the gp is in a Gcopystack status.
  1069  	copystack(gp, newsize)
  1070  	if stackDebug >= 1 {
  1071  		print("stack grow done\n")
  1072  	}
  1073  	casgstatus(gp, _Gcopystack, _Grunning)
  1074  	gogo(&gp.sched)
  1075  }
  1076  
  1077  //go:nosplit
  1078  func nilfunc() {
  1079  	*(*uint8)(nil) = 0
  1080  }
  1081  
  1082  // adjust Gobuf as if it executed a call to fn
  1083  // and then did an immediate gosave.
  1084  func gostartcallfn(gobuf *gobuf, fv *funcval) {
  1085  	var fn unsafe.Pointer
  1086  	if fv != nil {
  1087  		fn = unsafe.Pointer(fv.fn)
  1088  	} else {
  1089  		fn = unsafe.Pointer(funcPC(nilfunc))
  1090  	}
  1091  	gostartcall(gobuf, fn, unsafe.Pointer(fv))
  1092  }
  1093  
  1094  // isShrinkStackSafe returns whether it's safe to attempt to shrink
  1095  // gp's stack. Shrinking the stack is only safe when we have precise
  1096  // pointer maps for all frames on the stack.
  1097  func isShrinkStackSafe(gp *g) bool {
  1098  	// We can't copy the stack if we're in a syscall.
  1099  	// The syscall might have pointers into the stack and
  1100  	// often we don't have precise pointer maps for the innermost
  1101  	// frames.
  1102  	//
  1103  	// We also can't copy the stack if we're at an asynchronous
  1104  	// safe-point because we don't have precise pointer maps for
  1105  	// all frames.
  1106  	return gp.syscallsp == 0 && !gp.asyncSafePoint
  1107  }
  1108  
  1109  // Maybe shrink the stack being used by gp.
  1110  //
  1111  // gp must be stopped and we must own its stack. It may be in
  1112  // _Grunning, but only if this is our own user G.
  1113  func shrinkstack(gp *g) {
  1114  	if gp.stack.lo == 0 {
  1115  		throw("missing stack in shrinkstack")
  1116  	}
  1117  	if s := readgstatus(gp); s&_Gscan == 0 {
  1118  		// We don't own the stack via _Gscan. We could still
  1119  		// own it if this is our own user G and we're on the
  1120  		// system stack.
  1121  		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
  1122  			// We don't own the stack.
  1123  			throw("bad status in shrinkstack")
  1124  		}
  1125  	}
  1126  	if !isShrinkStackSafe(gp) {
  1127  		throw("shrinkstack at bad time")
  1128  	}
  1129  	// Check for self-shrinks while in a libcall. These may have
  1130  	// pointers into the stack disguised as uintptrs, but these
  1131  	// code paths should all be nosplit.
  1132  	if gp == getg().m.curg && gp.m.libcallsp != 0 {
  1133  		throw("shrinking stack in libcall")
  1134  	}
  1135  
  1136  	if debug.gcshrinkstackoff > 0 {
  1137  		return
  1138  	}
  1139  	f := findfunc(gp.startpc)
  1140  	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
  1141  		// We're not allowed to shrink the gcBgMarkWorker
  1142  		// stack (see gcBgMarkWorker for explanation).
  1143  		return
  1144  	}
  1145  
  1146  	oldsize := gp.stack.hi - gp.stack.lo
  1147  	newsize := oldsize / 2
  1148  	// Don't shrink the allocation below the minimum-sized stack
  1149  	// allocation.
  1150  	if newsize < _FixedStack {
  1151  		return
  1152  	}
  1153  	// Compute how much of the stack is currently in use and only
  1154  	// shrink the stack if gp is using less than a quarter of its
  1155  	// current stack. The currently used stack includes everything
  1156  	// down to the SP plus the stack guard space that ensures
  1157  	// there's room for nosplit functions.
  1158  	avail := gp.stack.hi - gp.stack.lo
  1159  	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
  1160  		return
  1161  	}
  1162  
  1163  	if stackDebug > 0 {
  1164  		print("shrinking stack ", oldsize, "->", newsize, "\n")
  1165  	}
  1166  
  1167  	copystack(gp, newsize)
  1168  }
  1169  
  1170  // freeStackSpans frees unused stack spans at the end of GC.
  1171  func freeStackSpans() {
  1172  
  1173  	// Scan stack pools for empty stack spans.
  1174  	for order := range stackpool {
  1175  		lock(&stackpool[order].item.mu)
  1176  		list := &stackpool[order].item.span
  1177  		for s := list.first; s != nil; {
  1178  			next := s.next
  1179  			if s.allocCount == 0 {
  1180  				list.remove(s)
  1181  				s.manualFreeList = 0
  1182  				osStackFree(s)
  1183  				mheap_.freeManual(s, &memstats.stacks_inuse)
  1184  			}
  1185  			s = next
  1186  		}
  1187  		unlock(&stackpool[order].item.mu)
  1188  	}
  1189  
  1190  	// Free large stack spans.
  1191  	lock(&stackLarge.lock)
  1192  	for i := range stackLarge.free {
  1193  		for s := stackLarge.free[i].first; s != nil; {
  1194  			next := s.next
  1195  			stackLarge.free[i].remove(s)
  1196  			osStackFree(s)
  1197  			mheap_.freeManual(s, &memstats.stacks_inuse)
  1198  			s = next
  1199  		}
  1200  	}
  1201  	unlock(&stackLarge.lock)
  1202  }
  1203  
  1204  // getStackMap returns the locals and arguments live pointer maps, and
  1205  // stack object list for frame.
  1206  func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
  1207  	targetpc := frame.continpc
  1208  	if targetpc == 0 {
  1209  		// Frame is dead. Return empty bitvectors.
  1210  		return
  1211  	}
  1212  
  1213  	f := frame.fn
  1214  	pcdata := int32(-1)
  1215  	if targetpc != f.entry {
  1216  		// Back up to the CALL. If we're at the function entry
  1217  		// point, we want to use the entry map (-1), even if
  1218  		// the first instruction of the function changes the
  1219  		// stack map.
  1220  		targetpc--
  1221  		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
  1222  	}
  1223  	if pcdata == -1 {
  1224  		// We do not have a valid pcdata value but there might be a
  1225  		// stackmap for this function. It is likely that we are looking
  1226  		// at the function prologue, assume so and hope for the best.
  1227  		pcdata = 0
  1228  	}
  1229  
  1230  	// Local variables.
  1231  	size := frame.varp - frame.sp
  1232  	var minsize uintptr
  1233  	switch sys.ArchFamily {
  1234  	case sys.ARM64:
  1235  		minsize = sys.SpAlign
  1236  	default:
  1237  		minsize = sys.MinFrameSize
  1238  	}
  1239  	if size > minsize {
  1240  		stackid := pcdata
  1241  		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
  1242  		if stkmap == nil || stkmap.n <= 0 {
  1243  			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
  1244  			throw("missing stackmap")
  1245  		}
  1246  		// If nbit == 0, there's no work to do.
  1247  		if stkmap.nbit > 0 {
  1248  			if stackid < 0 || stackid >= stkmap.n {
  1249  				// don't know where we are
  1250  				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
  1251  				throw("bad symbol table")
  1252  			}
  1253  			locals = stackmapdata(stkmap, stackid)
  1254  			if stackDebug >= 3 && debug {
  1255  				print("      locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
  1256  			}
  1257  		} else if stackDebug >= 3 && debug {
  1258  			print("      no locals to adjust\n")
  1259  		}
  1260  	}
  1261  
  1262  	// Arguments.
  1263  	if frame.arglen > 0 {
  1264  		if frame.argmap != nil {
  1265  			// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
  1266  			// In this case, arglen specifies how much of the args section is actually live.
  1267  			// (It could be either all the args + results, or just the args.)
  1268  			args = *frame.argmap
  1269  			n := int32(frame.arglen / sys.PtrSize)
  1270  			if n < args.n {
  1271  				args.n = n // Don't use more of the arguments than arglen.
  1272  			}
  1273  		} else {
  1274  			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
  1275  			if stackmap == nil || stackmap.n <= 0 {
  1276  				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
  1277  				throw("missing stackmap")
  1278  			}
  1279  			if pcdata < 0 || pcdata >= stackmap.n {
  1280  				// don't know where we are
  1281  				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
  1282  				throw("bad symbol table")
  1283  			}
  1284  			if stackmap.nbit > 0 {
  1285  				args = stackmapdata(stackmap, pcdata)
  1286  			}
  1287  		}
  1288  	}
  1289  
  1290  	// stack objects.
  1291  	p := funcdata(f, _FUNCDATA_StackObjects)
  1292  	if p != nil {
  1293  		n := *(*uintptr)(p)
  1294  		p = add(p, sys.PtrSize)
  1295  		*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
  1296  		// Note: the noescape above is needed to keep
  1297  		// getStackMap from "leaking param content:
  1298  		// frame".  That leak propagates up to getgcmask, then
  1299  		// GCMask, then verifyGCInfo, which converts the stack
  1300  		// gcinfo tests into heap gcinfo tests :(
  1301  	}
  1302  
  1303  	return
  1304  }
  1305  
  1306  // A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
  1307  // This record must match the generator code in cmd/compile/internal/gc/ssa.go:emitStackObjects.
  1308  type stackObjectRecord struct {
  1309  	// offset in frame
  1310  	// if negative, offset from varp
  1311  	// if non-negative, offset from argp
  1312  	off int
  1313  	typ *_type
  1314  }
  1315  
  1316  // This is exported as ABI0 via linkname so obj can call it.
  1317  //
  1318  //go:nosplit
  1319  //go:linkname morestackc
  1320  func morestackc() {
  1321  	throw("attempt to execute system stack code on user stack")
  1322  }
  1323  
