Source file src/runtime/stack.go

     1  // Copyright 2013 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"runtime/internal/atomic"
     9  	"runtime/internal/sys"
    10  	"unsafe"
    11  )
    12  
    13  /*
    14  Stack layout parameters.
    15  Included both by runtime (compiled via 6c) and linkers (compiled via gcc).
    16  
    17  The per-goroutine g->stackguard is set to point StackGuard bytes
    18  above the bottom of the stack.  Each function compares its stack
    19  pointer against g->stackguard to check for overflow.  To cut one
    20  instruction from the check sequence for functions with tiny frames,
    21  the stack is allowed to protrude StackSmall bytes below the stack
    22  guard.  Functions with large frames don't bother with the check and
    23  always call morestack.  The sequences are (for amd64, others are
    24  similar):
    25  
    26  	guard = g->stackguard
    27  	frame = function's stack frame size
    28  	argsize = size of function arguments (call + return)
    29  
    30  	stack frame size <= StackSmall:
    31  		CMPQ guard, SP
    32  		JHI 3(PC)
    33  		MOVQ m->morearg, $(argsize << 32)
    34  		CALL morestack(SB)
    35  
    36  	stack frame size > StackSmall but < StackBig:
    37  		LEAQ (frame-StackSmall)(SP), R0
    38  		CMPQ guard, R0
    39  		JHI 3(PC)
    40  		MOVQ m->morearg, $(argsize << 32)
    41  		CALL morestack(SB)
    42  
    43  	stack frame size >= StackBig:
    44  		MOVQ m->morearg, $((argsize << 32) | frame)
    45  		CALL morestack(SB)
    46  
    47  The bottom StackGuard - StackSmall bytes are important: there has
    48  to be enough room to execute functions that refuse to check for
    49  stack overflow, either because they need to be adjacent to the
    50  actual caller's frame (deferproc) or because they handle the imminent
    51  stack overflow (morestack).
    52  
    53  For example, deferproc might call malloc, which does one of the
    54  above checks (without allocating a full frame), which might trigger
    55  a call to morestack.  This sequence needs to fit in the bottom
    56  section of the stack.  On amd64, morestack's frame is 40 bytes, and
    57  deferproc's frame is 56 bytes.  That fits well within the
    58  StackGuard - StackSmall bytes at the bottom.
    59  The linkers explore all possible call traces involving non-splitting
    60  functions to make sure that this limit cannot be violated.
    61  */
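// For illustration, the condition the first two sequences implement can be
// written as a Go-like sketch (the runtime does not contain this helper;
// the real check is emitted by the compiler in assembly and compares
// against g->stackguard0 so that preemption requests are noticed too):
//
//	func needsMorestack(sp, stackguard, framesize uintptr) bool {
//		if framesize <= _StackSmall {
//			// Tiny frames rely on the StackSmall slack below the guard.
//			return sp <= stackguard
//		}
//		if framesize < _StackBig {
//			// Medium frames account for the part of the frame beyond the slack.
//			return sp-(framesize-_StackSmall) <= stackguard
//		}
//		// Huge frames skip the check and always call morestack.
//		return true
//	}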
    62  
    63  const (
    64  	// StackSystem is a number of additional bytes to add
    65  	// to each stack below the usual guard area for OS-specific
    66  	// purposes like signal handling. Used on Windows, Plan 9,
    67  	// and iOS because they do not use a separate stack.
    68  	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024 + sys.GoosDarwin*sys.GoarchArm64*1024
    69  
    70  	// The minimum size of stack used by Go code
    71  	_StackMin = 2048
    72  
    73  	// The minimum stack size to allocate.
    74  	// The hackery here rounds FixedStack0 up to a power of 2.
    75  	_FixedStack0 = _StackMin + _StackSystem
    76  	_FixedStack1 = _FixedStack0 - 1
    77  	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
    78  	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
    79  	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
    80  	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
    81  	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
    82  	_FixedStack  = _FixedStack6 + 1
    83  
    84  	// Functions that need frames bigger than this use an extra
    85  	// instruction to do the stack split check, to avoid overflow
    86  	// in case SP - framesize wraps below zero.
    87  	// This value can be no bigger than the size of the unmapped
    88  	// space at zero.
    89  	_StackBig = 4096
    90  
    91  	// The stack guard is a pointer this many bytes above the
    92  	// bottom of the stack.
    93  	_StackGuard = 880*sys.StackGuardMultiplier + _StackSystem
    94  
    95  	// After a stack split check the SP is allowed to be this
    96  	// many bytes below the stack guard. This saves an instruction
    97  	// in the checking sequence for tiny frames.
    98  	_StackSmall = 128
    99  
   100  	// The maximum number of bytes that a chain of NOSPLIT
   101  	// functions can use.
   102  	_StackLimit = _StackGuard - _StackSystem - _StackSmall
   103  )
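// As a concrete example (the exact values depend on GOOS/GOARCH and build
// flags): on linux/amd64, _StackSystem is 0, so _FixedStack0 is 2048,
// already a power of two, and _FixedStack stays 2048; with
// sys.StackGuardMultiplier == 1, _StackGuard is 880 and _StackLimit is
// 880 - 0 - 128 = 752 bytes for chains of NOSPLIT functions. On
// windows/amd64, _StackSystem is 512*8 = 4096, so _FixedStack0 is 6144
// and the bit-smearing above rounds it up to _FixedStack = 8192.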
   104  
   105  const (
   106  	// stackDebug == 0: no logging
   107  	//            == 1: logging of per-stack operations
   108  	//            == 2: logging of per-frame operations
   109  	//            == 3: logging of per-word updates
   110  	//            == 4: logging of per-word reads
   111  	stackDebug       = 0
   112  	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
   113  	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
   114  	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
   115  	stackNoCache     = 0 // disable per-P small stack caches
   116  
   117  	// check the BP links during traceback.
   118  	debugCheckBP = false
   119  )
   120  
   121  const (
   122  	uintptrMask = 1<<(8*sys.PtrSize) - 1
   123  
   124  	// Goroutine preemption request.
   125  	// Stored into g->stackguard0 to cause split stack check failure.
   126  	// Must be greater than any real sp.
   127  	// 0xfffffade in hex.
   128  	stackPreempt = uintptrMask & -1314
   129  
   130  	// Thread is forking.
   131  	// Stored into g->stackguard0 to cause split stack check failure.
   132  	// Must be greater than any real sp.
   133  	stackFork = uintptrMask & -1234
   134  )
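// Worked out on a 64-bit system, stackPreempt is 0xfffffffffffffade and
// stackFork is 0xfffffffffffffb2e (on 32-bit, 0xfffffade and 0xfffffb2e).
// Both values are larger than any real stack pointer, so the goroutine's
// next stack split check is guaranteed to fail and divert into morestack.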
   135  
   136  // Global pool of spans that have free stacks.
   137  // Stacks are assigned an order according to size.
   138  //     order = log_2(size/FixedStack)
   139  // There is a free list for each order.
   140  // TODO: one lock per order?
   141  var stackpool [_NumStackOrders]mSpanList
   142  var stackpoolmu mutex
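// For example, with _FixedStack == 2048 the orders correspond to stack
// sizes of 2 KiB, 4 KiB, 8 KiB and 16 KiB for orders 0 through 3 (the
// number of orders, _NumStackOrders, is defined in malloc.go and varies
// by platform).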
   143  
   144  // Global pool of large stack spans.
   145  var stackLarge struct {
   146  	lock mutex
   147  	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
   148  }
   149  
   150  func stackinit() {
   151  	if _StackCacheSize&_PageMask != 0 {
   152  		throw("cache size must be a multiple of page size")
   153  	}
   154  	for i := range stackpool {
   155  		stackpool[i].init()
   156  	}
   157  	for i := range stackLarge.free {
   158  		stackLarge.free[i].init()
   159  	}
   160  }
   161  
   162  // stacklog2 returns ⌊log_2(n)⌋.
   163  func stacklog2(n uintptr) int {
   164  	log2 := 0
   165  	for n > 1 {
   166  		n >>= 1
   167  		log2++
   168  	}
   169  	return log2
   170  }
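// For example, stacklog2(1) == 0, stacklog2(8) == 3, and stacklog2(12) == 3,
// since the result is rounded down.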
   171  
   172  // Allocates a stack from the free pool. Must be called with
   173  // stackpoolmu held.
   174  func stackpoolalloc(order uint8) gclinkptr {
   175  	list := &stackpool[order]
   176  	s := list.first
   177  	if s == nil {
   178  		// no free stacks. Allocate another span worth.
   179  		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
   180  		if s == nil {
   181  			throw("out of memory")
   182  		}
   183  		if s.allocCount != 0 {
   184  			throw("bad allocCount")
   185  		}
   186  		if s.manualFreeList.ptr() != nil {
   187  			throw("bad manualFreeList")
   188  		}
   189  		osStackAlloc(s)
   190  		s.elemsize = _FixedStack << order
   191  		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
   192  			x := gclinkptr(s.base() + i)
   193  			x.ptr().next = s.manualFreeList
   194  			s.manualFreeList = x
   195  		}
   196  		list.insert(s)
   197  	}
   198  	x := s.manualFreeList
   199  	if x.ptr() == nil {
   200  		throw("span has no free stacks")
   201  	}
   202  	s.manualFreeList = x.ptr().next
   203  	s.allocCount++
   204  	if s.manualFreeList.ptr() == nil {
   205  		// all stacks in s are allocated.
   206  		list.remove(s)
   207  	}
   208  	return x
   209  }
   210  
   211  // Adds stack x to the free pool. Must be called with stackpoolmu held.
   212  func stackpoolfree(x gclinkptr, order uint8) {
   213  	s := spanOfUnchecked(uintptr(x))
   214  	if s.state != _MSpanManual {
   215  		throw("freeing stack not in a stack span")
   216  	}
   217  	if s.manualFreeList.ptr() == nil {
   218  		// s will now have a free stack
   219  		stackpool[order].insert(s)
   220  	}
   221  	x.ptr().next = s.manualFreeList
   222  	s.manualFreeList = x
   223  	s.allocCount--
   224  	if gcphase == _GCoff && s.allocCount == 0 {
   225  		// Span is completely free. Return it to the heap
   226  		// immediately if we're sweeping.
   227  		//
   228  		// If GC is active, we delay the free until the end of
   229  		// GC to avoid the following type of situation:
   230  		//
   231  		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
   232  		// 2) The stack that pointer points to is copied
   233  		// 3) The old stack is freed
   234  		// 4) The containing span is marked free
   235  		// 5) GC attempts to mark the SudoG.elem pointer. The
   236  		//    marking fails because the pointer looks like a
   237  		//    pointer into a free span.
   238  		//
   239  		// By not freeing, we prevent step #4 until GC is done.
   240  		stackpool[order].remove(s)
   241  		s.manualFreeList = 0
   242  		osStackFree(s)
   243  		mheap_.freeManual(s, &memstats.stacks_inuse)
   244  	}
   245  }
   246  
   247  // stackcacherefill/stackcacherelease implement a global pool of stack segments.
   248  // The pool is required to prevent unlimited growth of per-thread caches.
   249  //
   250  //go:systemstack
   251  func stackcacherefill(c *mcache, order uint8) {
   252  	if stackDebug >= 1 {
   253  		print("stackcacherefill order=", order, "\n")
   254  	}
   255  
   256  	// Grab some stacks from the global cache.
   257  	// Grab half of the allowed capacity (to prevent thrashing).
   258  	var list gclinkptr
   259  	var size uintptr
   260  	lock(&stackpoolmu)
   261  	for size < _StackCacheSize/2 {
   262  		x := stackpoolalloc(order)
   263  		x.ptr().next = list
   264  		list = x
   265  		size += _FixedStack << order
   266  	}
   267  	unlock(&stackpoolmu)
   268  	c.stackcache[order].list = list
   269  	c.stackcache[order].size = size
   270  }
   271  
   272  //go:systemstack
   273  func stackcacherelease(c *mcache, order uint8) {
   274  	if stackDebug >= 1 {
   275  		print("stackcacherelease order=", order, "\n")
   276  	}
   277  	x := c.stackcache[order].list
   278  	size := c.stackcache[order].size
   279  	lock(&stackpoolmu)
   280  	for size > _StackCacheSize/2 {
   281  		y := x.ptr().next
   282  		stackpoolfree(x, order)
   283  		x = y
   284  		size -= _FixedStack << order
   285  	}
   286  	unlock(&stackpoolmu)
   287  	c.stackcache[order].list = x
   288  	c.stackcache[order].size = size
   289  }
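// Together, refill and release bound each per-P cache at roughly
// _StackCacheSize bytes per order, moving _StackCacheSize/2 at a time to
// avoid thrashing. With the usual _StackCacheSize of 32 KiB (see
// malloc.go), a refill at order 0 grabs 16 KiB from the global pool,
// i.e. eight 2 KiB stacks, in a single acquisition of stackpoolmu.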
   290  
   291  //go:systemstack
   292  func stackcache_clear(c *mcache) {
   293  	if stackDebug >= 1 {
   294  		print("stackcache clear\n")
   295  	}
   296  	lock(&stackpoolmu)
   297  	for order := uint8(0); order < _NumStackOrders; order++ {
   298  		x := c.stackcache[order].list
   299  		for x.ptr() != nil {
   300  			y := x.ptr().next
   301  			stackpoolfree(x, order)
   302  			x = y
   303  		}
   304  		c.stackcache[order].list = 0
   305  		c.stackcache[order].size = 0
   306  	}
   307  	unlock(&stackpoolmu)
   308  }
   309  
   310  // stackalloc allocates an n byte stack.
   311  //
   312  // stackalloc must run on the system stack because it uses per-P
   313  // resources and must not split the stack.
   314  //
   315  //go:systemstack
   316  func stackalloc(n uint32) stack {
   317  	// Stackalloc must be called on scheduler stack, so that we
   318  	// never try to grow the stack during the code that stackalloc runs.
   319  	// Doing so would cause a deadlock (issue 1547).
   320  	thisg := getg()
   321  	if thisg != thisg.m.g0 {
   322  		throw("stackalloc not on scheduler stack")
   323  	}
   324  	if n&(n-1) != 0 {
   325  		throw("stack size not a power of 2")
   326  	}
   327  	if stackDebug >= 1 {
   328  		print("stackalloc ", n, "\n")
   329  	}
   330  
   331  	if debug.efence != 0 || stackFromSystem != 0 {
   332  		n = uint32(round(uintptr(n), physPageSize))
   333  		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
   334  		if v == nil {
   335  			throw("out of memory (stackalloc)")
   336  		}
   337  		return stack{uintptr(v), uintptr(v) + uintptr(n)}
   338  	}
   339  
   340  	// Small stacks are allocated with a fixed-size free-list allocator.
   341  	// If we need a stack of a bigger size, we fall back on allocating
   342  	// a dedicated span.
   343  	var v unsafe.Pointer
   344  	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
   345  		order := uint8(0)
   346  		n2 := n
   347  		for n2 > _FixedStack {
   348  			order++
   349  			n2 >>= 1
   350  		}
   351  		var x gclinkptr
   352  		c := thisg.m.mcache
   353  		if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
   354  			// c == nil can happen in the guts of exitsyscall or
   355  			// procresize. Just get a stack from the global pool.
   356  			// Also don't touch stackcache during gc
   357  			// as it's flushed concurrently.
   358  			lock(&stackpoolmu)
   359  			x = stackpoolalloc(order)
   360  			unlock(&stackpoolmu)
   361  		} else {
   362  			x = c.stackcache[order].list
   363  			if x.ptr() == nil {
   364  				stackcacherefill(c, order)
   365  				x = c.stackcache[order].list
   366  			}
   367  			c.stackcache[order].list = x.ptr().next
   368  			c.stackcache[order].size -= uintptr(n)
   369  		}
   370  		v = unsafe.Pointer(x)
   371  	} else {
   372  		var s *mspan
   373  		npage := uintptr(n) >> _PageShift
   374  		log2npage := stacklog2(npage)
   375  
   376  		// Try to get a stack from the large stack cache.
   377  		lock(&stackLarge.lock)
   378  		if !stackLarge.free[log2npage].isEmpty() {
   379  			s = stackLarge.free[log2npage].first
   380  			stackLarge.free[log2npage].remove(s)
   381  		}
   382  		unlock(&stackLarge.lock)
   383  
   384  		if s == nil {
   385  			// Allocate a new stack from the heap.
   386  			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
   387  			if s == nil {
   388  				throw("out of memory")
   389  			}
   390  			osStackAlloc(s)
   391  			s.elemsize = uintptr(n)
   392  		}
   393  		v = unsafe.Pointer(s.base())
   394  	}
   395  
   396  	if raceenabled {
   397  		racemalloc(v, uintptr(n))
   398  	}
   399  	if msanenabled {
   400  		msanmalloc(v, uintptr(n))
   401  	}
   402  	if stackDebug >= 1 {
   403  		print("  allocated ", v, "\n")
   404  	}
   405  	return stack{uintptr(v), uintptr(v) + uintptr(n)}
   406  }
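// As a worked example of the order computation above: for n = 8192 with
// _FixedStack = 2048, the loop shifts 8192 -> 4096 -> 2048, yielding
// order 2, and indeed _FixedStack<<2 == 8192. Requests at or above
// _FixedStack<<_NumStackOrders (or _StackCacheSize) take the dedicated
// span path instead.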
   407  
   408  // stackfree frees an n byte stack allocation at stk.
   409  //
   410  // stackfree must run on the system stack because it uses per-P
   411  // resources and must not split the stack.
   412  //
   413  //go:systemstack
   414  func stackfree(stk stack) {
   415  	gp := getg()
   416  	v := unsafe.Pointer(stk.lo)
   417  	n := stk.hi - stk.lo
   418  	if n&(n-1) != 0 {
   419  		throw("stack not a power of 2")
   420  	}
   421  	if stk.lo+n < stk.hi {
   422  		throw("bad stack size")
   423  	}
   424  	if stackDebug >= 1 {
   425  		println("stackfree", v, n)
   426  		memclrNoHeapPointers(v, n) // for testing, clobber stack data
   427  	}
   428  	if debug.efence != 0 || stackFromSystem != 0 {
   429  		if debug.efence != 0 || stackFaultOnFree != 0 {
   430  			sysFault(v, n)
   431  		} else {
   432  			sysFree(v, n, &memstats.stacks_sys)
   433  		}
   434  		return
   435  	}
   436  	if msanenabled {
   437  		msanfree(v, n)
   438  	}
   439  	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
   440  		order := uint8(0)
   441  		n2 := n
   442  		for n2 > _FixedStack {
   443  			order++
   444  			n2 >>= 1
   445  		}
   446  		x := gclinkptr(v)
   447  		c := gp.m.mcache
   448  		if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
   449  			lock(&stackpoolmu)
   450  			stackpoolfree(x, order)
   451  			unlock(&stackpoolmu)
   452  		} else {
   453  			if c.stackcache[order].size >= _StackCacheSize {
   454  				stackcacherelease(c, order)
   455  			}
   456  			x.ptr().next = c.stackcache[order].list
   457  			c.stackcache[order].list = x
   458  			c.stackcache[order].size += n
   459  		}
   460  	} else {
   461  		s := spanOfUnchecked(uintptr(v))
   462  		if s.state != _MSpanManual {
   463  			println(hex(s.base()), v)
   464  			throw("bad span state")
   465  		}
   466  		if gcphase == _GCoff {
   467  			// Free the stack immediately if we're
   468  			// sweeping.
   469  			osStackFree(s)
   470  			mheap_.freeManual(s, &memstats.stacks_inuse)
   471  		} else {
   472  			// If the GC is running, we can't return a
   473  			// stack span to the heap because it could be
   474  			// reused as a heap span, and this state
   475  			// change would race with GC. Add it to the
   476  			// large stack cache instead.
   477  			log2npage := stacklog2(s.npages)
   478  			lock(&stackLarge.lock)
   479  			stackLarge.free[log2npage].insert(s)
   480  			unlock(&stackLarge.lock)
   481  		}
   482  	}
   483  }
   484  
   485  var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
   486  
   487  var ptrnames = []string{
   488  	0: "scalar",
   489  	1: "ptr",
   490  }
   491  
   492  // Stack frame layout
   493  //
   494  // (x86)
   495  // +------------------+
   496  // | args from caller |
   497  // +------------------+ <- frame->argp
   498  // |  return address  |
   499  // +------------------+
   500  // |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
   501  // +------------------+ <- frame->varp
   502  // |     locals       |
   503  // +------------------+
   504  // |  args to callee  |
   505  // +------------------+ <- frame->sp
   506  //
   507  // (arm)
   508  // +------------------+
   509  // | args from caller |
   510  // +------------------+ <- frame->argp
   511  // | caller's retaddr |
   512  // +------------------+ <- frame->varp
   513  // |     locals       |
   514  // +------------------+
   515  // |  args to callee  |
   516  // +------------------+
   517  // |  return address  |
   518  // +------------------+ <- frame->sp
   519  
   520  type adjustinfo struct {
   521  	old   stack
   522  	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
   523  	cache pcvalueCache
   524  
   525  	// sghi is the highest sudog.elem on the stack.
   526  	sghi uintptr
   527  }
   528  
   529  // Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
   530  // If so, it rewrites *vpp to point into the new stack.
   531  func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
   532  	pp := (*uintptr)(vpp)
   533  	p := *pp
   534  	if stackDebug >= 4 {
   535  		print("        ", pp, ":", hex(p), "\n")
   536  	}
   537  	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
   538  		*pp = p + adjinfo.delta
   539  		if stackDebug >= 3 {
   540  			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
   541  		}
   542  	}
   543  }
   544  
   545  // Information from the compiler about the layout of stack frames.
   546  type bitvector struct {
   547  	n        int32 // # of bits
   548  	bytedata *uint8
   549  }
   550  
   551  // ptrbit returns the i'th bit in bv.
   552  // ptrbit is less efficient than iterating directly over bitvector bits,
   553  // and should only be used in non-performance-critical code.
   554  // See adjustpointers for an example of a high-efficiency walk of a bitvector.
   555  func (bv *bitvector) ptrbit(i uintptr) uint8 {
   556  	b := *(addb(bv.bytedata, i/8))
   557  	return (b >> (i % 8)) & 1
   558  }
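// For example, if bv.bytedata[0] == 0x25 (binary 00100101), then bits 0, 2,
// and 5 describe pointer slots:
//
//	bv.ptrbit(0) == 1
//	bv.ptrbit(1) == 0
//	bv.ptrbit(2) == 1
//	bv.ptrbit(5) == 1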
   559  
   560  // bv describes the memory starting at address scanp.
   561  // Adjust any pointers contained therein.
   562  func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
   563  	minp := adjinfo.old.lo
   564  	maxp := adjinfo.old.hi
   565  	delta := adjinfo.delta
   566  	num := uintptr(bv.n)
   567  	// If this frame might contain channel receive slots, use CAS
   568  	// to adjust pointers. If the slot hasn't been received into
   569  	// yet, it may contain stack pointers and a concurrent send
   570  	// could race with adjusting those pointers. (The sent value
   571  	// itself can never contain stack pointers.)
   572  	useCAS := uintptr(scanp) < adjinfo.sghi
   573  	for i := uintptr(0); i < num; i += 8 {
   574  		if stackDebug >= 4 {
   575  			for j := uintptr(0); j < 8; j++ {
   576  				print("        ", add(scanp, (i+j)*sys.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*sys.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
   577  			}
   578  		}
   579  		b := *(addb(bv.bytedata, i/8))
   580  		for b != 0 {
   581  			j := uintptr(sys.Ctz8(b))
   582  			b &= b - 1
   583  			pp := (*uintptr)(add(scanp, (i+j)*sys.PtrSize))
   584  		retry:
   585  			p := *pp
   586  			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
   587  				// Looks like a junk value in a pointer slot.
   588  				// Live analysis wrong?
   589  				getg().m.traceback = 2
   590  				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
   591  				throw("invalid pointer found on stack")
   592  			}
   593  			if minp <= p && p < maxp {
   594  				if stackDebug >= 3 {
   595  					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
   596  				}
   597  				if useCAS {
   598  					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
   599  					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
   600  						goto retry
   601  					}
   602  				} else {
   603  					*pp = p + delta
   604  				}
   605  			}
   606  		}
   607  	}
   608  }
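// The inner loop above visits only the set bits of each bitmap byte. As a
// worked example, for b = 0x24 (binary 00100100) the iterations are
//
//	sys.Ctz8(0x24) == 2, then b &= b-1 leaves 0x20
//	sys.Ctz8(0x20) == 5, then b &= b-1 leaves 0x00
//
// so only the words at bit indexes i+2 and i+5 are inspected and, if they
// point into the old stack, adjusted by delta.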
   609  
   610  // Note: the argument/return area is adjusted by the callee.
   611  func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
   612  	adjinfo := (*adjustinfo)(arg)
   613  	if frame.continpc == 0 {
   614  		// Frame is dead.
   615  		return true
   616  	}
   617  	f := frame.fn
   618  	if stackDebug >= 2 {
   619  		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
   620  	}
   621  	if f.funcID == funcID_systemstack_switch {
   622  	// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
   623  		// We will allow it to be copied even though we don't
   624  		// have full GC info for it (because it is written in asm).
   625  		return true
   626  	}
   627  
   628  	locals, args := getStackMap(frame, &adjinfo.cache, true)
   629  
   630  	// Adjust local variables if stack frame has been allocated.
   631  	if locals.n > 0 {
   632  		size := uintptr(locals.n) * sys.PtrSize
   633  		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
   634  	}
   635  
   636  	// Adjust saved base pointer if there is one.
   637  	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
   638  		if !framepointer_enabled {
   639  			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
   640  			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
   641  			throw("bad frame layout")
   642  		}
   643  		if stackDebug >= 3 {
   644  			print("      saved bp\n")
   645  		}
   646  		if debugCheckBP {
   647  			// Frame pointers should always point to the next higher frame on
   648  			// the Go stack (or be nil, for the top frame on the stack).
   649  			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
   650  			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
   651  				println("runtime: found invalid frame pointer")
   652  				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
   653  				throw("bad frame pointer")
   654  			}
   655  		}
   656  		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
   657  	}
   658  
   659  	// Adjust arguments.
   660  	if args.n > 0 {
   661  		if stackDebug >= 3 {
   662  			print("      args\n")
   663  		}
   664  		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
   665  	}
   666  	return true
   667  }
   668  
   669  func adjustctxt(gp *g, adjinfo *adjustinfo) {
   670  	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
   671  	if !framepointer_enabled {
   672  		return
   673  	}
   674  	if debugCheckBP {
   675  		bp := gp.sched.bp
   676  		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
   677  			println("runtime: found invalid top frame pointer")
   678  			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
   679  			throw("bad top frame pointer")
   680  		}
   681  	}
   682  	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
   683  }
   684  
   685  func adjustdefers(gp *g, adjinfo *adjustinfo) {
   686  	// Adjust defer argument blocks the same way we adjust active stack frames.
   687  	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
   688  
   689  	// Adjust pointers in the Defer structs.
   690  	// Defer structs themselves are never on the stack.
   691  	for d := gp._defer; d != nil; d = d.link {
   692  		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
   693  		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
   694  		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
   695  	}
   696  }
   697  
   698  func adjustpanics(gp *g, adjinfo *adjustinfo) {
   699  	// Panics are on stack and already adjusted.
   700  	// Update pointer to head of list in G.
   701  	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
   702  }
   703  
   704  func adjustsudogs(gp *g, adjinfo *adjustinfo) {
   705  	// the data elements pointed to by a SudoG structure
   706  	// might be in the stack.
   707  	for s := gp.waiting; s != nil; s = s.waitlink {
   708  		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
   709  	}
   710  }
   711  
   712  func fillstack(stk stack, b byte) {
   713  	for p := stk.lo; p < stk.hi; p++ {
   714  		*(*byte)(unsafe.Pointer(p)) = b
   715  	}
   716  }
   717  
   718  func findsghi(gp *g, stk stack) uintptr {
   719  	var sghi uintptr
   720  	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   721  		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
   722  		if stk.lo <= p && p < stk.hi && p > sghi {
   723  			sghi = p
   724  		}
   725  	}
   726  	return sghi
   727  }
   728  
   729  // syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
   730  // stack they refer to while synchronizing with concurrent channel
   731  // operations. It returns the number of bytes of stack copied.
   732  func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
   733  	if gp.waiting == nil {
   734  		return 0
   735  	}
   736  
   737  	// Lock channels to prevent concurrent send/receive.
   738  	// It's important that we *only* do this for async
   739  	// copystack; otherwise, gp may be in the middle of
   740  	// putting itself on wait queues and this would
   741  	// self-deadlock.
   742  	var lastc *hchan
   743  	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   744  		if sg.c != lastc {
   745  			lock(&sg.c.lock)
   746  		}
   747  		lastc = sg.c
   748  	}
   749  
   750  	// Adjust sudogs.
   751  	adjustsudogs(gp, adjinfo)
   752  
   753  	// Copy the part of the stack the sudogs point into
   754  	// while holding the lock to prevent races on
   755  	// send/receive slots.
   756  	var sgsize uintptr
   757  	if adjinfo.sghi != 0 {
   758  		oldBot := adjinfo.old.hi - used
   759  		newBot := oldBot + adjinfo.delta
   760  		sgsize = adjinfo.sghi - oldBot
   761  		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
   762  	}
   763  
   764  	// Unlock channels.
   765  	lastc = nil
   766  	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   767  		if sg.c != lastc {
   768  			unlock(&sg.c.lock)
   769  		}
   770  		lastc = sg.c
   771  	}
   772  
   773  	return sgsize
   774  }
   775  
   776  // Copies gp's stack to a new stack of a different size.
   777  // Caller must have changed gp status to Gcopystack.
   778  //
   779  // If sync is true, this is a self-triggered stack growth and, in
   780  // particular, no other G may be writing to gp's stack (e.g., via a
   781  // channel operation). If sync is false, copystack protects against
   782  // concurrent channel operations.
   783  func copystack(gp *g, newsize uintptr, sync bool) {
   784  	if gp.syscallsp != 0 {
   785  		throw("stack growth not allowed in system call")
   786  	}
   787  	old := gp.stack
   788  	if old.lo == 0 {
   789  		throw("nil stackbase")
   790  	}
   791  	used := old.hi - gp.sched.sp
   792  
   793  	// allocate new stack
   794  	new := stackalloc(uint32(newsize))
   795  	if stackPoisonCopy != 0 {
   796  		fillstack(new, 0xfd)
   797  	}
   798  	if stackDebug >= 1 {
   799  		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
   800  	}
   801  
   802  	// Compute adjustment.
   803  	var adjinfo adjustinfo
   804  	adjinfo.old = old
   805  	adjinfo.delta = new.hi - old.hi
   806  
   807  	// Adjust sudogs, synchronizing with channel ops if necessary.
   808  	ncopy := used
   809  	if sync {
   810  		adjustsudogs(gp, &adjinfo)
   811  	} else {
   812  		// sudogs can point into the stack. During concurrent
   813  		// shrinking, these areas may be written to. Find the
   814  		// highest such pointer so we can handle everything
   815  		// there and below carefully. (This shouldn't be far
   816  		// from the bottom of the stack, so there's little
   817  		// cost in handling everything below it carefully.)
   818  		adjinfo.sghi = findsghi(gp, old)
   819  
   820  		// Synchronize with channel ops and copy the part of
   821  		// the stack they may interact with.
   822  		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
   823  	}
   824  
   825  	// Copy the stack (or the rest of it) to the new location
   826  	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)
   827  
   828  	// Adjust remaining structures that have pointers into stacks.
   829  	// We have to do most of these before we traceback the new
   830  	// stack because gentraceback uses them.
   831  	adjustctxt(gp, &adjinfo)
   832  	adjustdefers(gp, &adjinfo)
   833  	adjustpanics(gp, &adjinfo)
   834  	if adjinfo.sghi != 0 {
   835  		adjinfo.sghi += adjinfo.delta
   836  	}
   837  
   838  	// Swap out old stack for new one
   839  	gp.stack = new
   840  	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
   841  	gp.sched.sp = new.hi - used
   842  	gp.stktopsp += adjinfo.delta
   843  
   844  	// Adjust pointers in the new stack.
   845  	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)
   846  
   847  	// free old stack
   848  	if stackPoisonCopy != 0 {
   849  		fillstack(old, 0xfc)
   850  	}
   851  	stackfree(old)
   852  }
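// A small worked example of the adjustment: if the old stack spans
// [0xc000040000, 0xc000042000) and the new one spans
// [0xc000080000, 0xc000084000) (addresses are illustrative), then
// adjinfo.delta is 0xc000084000 - 0xc000042000 = 0x42000. Every pointer
// that lands inside the old interval is moved by exactly that amount, so
// its offset from the top of the stack (hi) is preserved; that is also why
// the copy is done relative to hi (new.hi-ncopy, old.hi-ncopy).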
   853  
   854  // round x up to a power of 2.
   855  func round2(x int32) int32 {
   856  	s := uint(0)
   857  	for 1<<s < x {
   858  		s++
   859  	}
   860  	return 1 << s
   861  }
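// For example, round2(1) == 1, round2(3) == 4, and round2(8) == 8.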
   862  
   863  // Called from runtime·morestack when more stack is needed.
   864  // Allocate larger stack and relocate to new stack.
   865  // Stack growth is multiplicative, for constant amortized cost.
   866  //
   867  // g->atomicstatus will be Grunning or Gscanrunning upon entry.
   868  // If the GC is trying to stop this g then it will set preemptscan to true.
   869  //
   870  // This must be nowritebarrierrec because it can be called as part of
   871  // stack growth from other nowritebarrierrec functions, but the
   872  // compiler doesn't check this.
   873  //
   874  //go:nowritebarrierrec
   875  func newstack() {
   876  	thisg := getg()
   877  	// TODO: double check all gp. shouldn't be getg().
   878  	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
   879  		throw("stack growth after fork")
   880  	}
   881  	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
   882  		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
   883  		morebuf := thisg.m.morebuf
   884  		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
   885  		throw("runtime: wrong goroutine in newstack")
   886  	}
   887  
   888  	gp := thisg.m.curg
   889  
   890  	if thisg.m.curg.throwsplit {
   891  		// Update syscallsp, syscallpc in case traceback uses them.
   892  		morebuf := thisg.m.morebuf
   893  		gp.syscallsp = morebuf.sp
   894  		gp.syscallpc = morebuf.pc
   895  		pcname, pcoff := "(unknown)", uintptr(0)
   896  		f := findfunc(gp.sched.pc)
   897  		if f.valid() {
   898  			pcname = funcname(f)
   899  			pcoff = gp.sched.pc - f.entry
   900  		}
   901  		print("runtime: newstack at ", pcname, "+", hex(pcoff),
   902  			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
   903  			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
   904  			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
   905  
   906  		thisg.m.traceback = 2 // Include runtime frames
   907  		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
   908  		throw("runtime: stack split at bad time")
   909  	}
   910  
   911  	morebuf := thisg.m.morebuf
   912  	thisg.m.morebuf.pc = 0
   913  	thisg.m.morebuf.lr = 0
   914  	thisg.m.morebuf.sp = 0
   915  	thisg.m.morebuf.g = 0
   916  
   917  	// NOTE: stackguard0 may change underfoot, if another thread
   918  	// is about to try to preempt gp. Read it just once and use that same
   919  	// value now and below.
   920  	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt
   921  
   922  	// Be conservative about where we preempt.
   923  	// We are interested in preempting user Go code, not runtime code.
   924  	// If we're holding locks, mallocing, or preemption is disabled, don't
   925  	// preempt.
   926  	// This check is very early in newstack so that even the status change
   927  	// from Grunning to Gwaiting and back doesn't happen in this case.
   928  	// That status change by itself can be viewed as a small preemption,
   929  	// because the GC might change Gwaiting to Gscanwaiting, and then
   930  	// this goroutine has to wait for the GC to finish before continuing.
   931  	// If the GC is in some way dependent on this goroutine (for example,
   932  	// it needs a lock held by the goroutine), that small preemption turns
   933  	// into a real deadlock.
   934  	if preempt {
   935  		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
   936  			// Let the goroutine keep running for now.
   937  			// gp->preempt is set, so it will be preempted next time.
   938  			gp.stackguard0 = gp.stack.lo + _StackGuard
   939  			gogo(&gp.sched) // never return
   940  		}
   941  	}
   942  
   943  	if gp.stack.lo == 0 {
   944  		throw("missing stack in newstack")
   945  	}
   946  	sp := gp.sched.sp
   947  	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 || sys.ArchFamily == sys.WASM {
   948  		// The call to morestack cost a word.
   949  		sp -= sys.PtrSize
   950  	}
   951  	if stackDebug >= 1 || sp < gp.stack.lo {
   952  		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
   953  			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
   954  			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
   955  	}
   956  	if sp < gp.stack.lo {
   957  		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
   958  		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
   959  		throw("runtime: split stack overflow")
   960  	}
   961  
   962  	if preempt {
   963  		if gp == thisg.m.g0 {
   964  			throw("runtime: preempt g0")
   965  		}
   966  		if thisg.m.p == 0 && thisg.m.locks == 0 {
   967  			throw("runtime: g is running but p is not")
   968  		}
   969  		// Synchronize with scang.
   970  		casgstatus(gp, _Grunning, _Gwaiting)
   971  		if gp.preemptscan {
   972  			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
   973  				// Likely to be racing with the GC as
   974  				// it sees a _Gwaiting and does the
   975  				// stack scan. If so, gcworkdone will
   976  				// be set and gcphasework will simply
   977  				// return.
   978  			}
   979  			if !gp.gcscandone {
   980  				// gcw is safe because we're on the
   981  				// system stack.
   982  				gcw := &gp.m.p.ptr().gcw
   983  				scanstack(gp, gcw)
   984  				if gcBlackenPromptly {
   985  					gcw.dispose()
   986  				}
   987  				gp.gcscandone = true
   988  			}
   989  			gp.preemptscan = false
   990  			gp.preempt = false
   991  			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
   992  			// This clears gcscanvalid.
   993  			casgstatus(gp, _Gwaiting, _Grunning)
   994  			gp.stackguard0 = gp.stack.lo + _StackGuard
   995  			gogo(&gp.sched) // never return
   996  		}
   997  
   998  		// Act like goroutine called runtime.Gosched.
   999  		casgstatus(gp, _Gwaiting, _Grunning)
  1000  		gopreempt_m(gp) // never return
  1001  	}
  1002  
  1003  	// Allocate a bigger segment and move the stack.
  1004  	oldsize := gp.stack.hi - gp.stack.lo
  1005  	newsize := oldsize * 2
  1006  	if newsize > maxstacksize {
  1007  		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
  1008  		throw("stack overflow")
  1009  	}
  1010  
  1011  	// The goroutine must be executing in order to call newstack,
  1012  	// so it must be Grunning (or Gscanrunning).
  1013  	casgstatus(gp, _Grunning, _Gcopystack)
  1014  
  1015  	// The concurrent GC will not scan the stack while we are doing the copy since
  1016  	// the gp is in a Gcopystack status.
  1017  	copystack(gp, newsize, true)
  1018  	if stackDebug >= 1 {
  1019  		print("stack grow done\n")
  1020  	}
  1021  	casgstatus(gp, _Gcopystack, _Grunning)
  1022  	gogo(&gp.sched)
  1023  }
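// Because each growth doubles the stack, the total amount ever copied for a
// goroutine that ends up with an n-byte stack is at most
// 2K + 4K + ... + n/2 < n bytes, which is what "constant amortized cost"
// above refers to. For example, growing from the initial 2 KiB to 64 KiB
// copies at most 2+4+8+16+32 = 62 KiB over the five growth steps.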
  1024  
  1025  //go:nosplit
  1026  func nilfunc() {
  1027  	*(*uint8)(nil) = 0
  1028  }
  1029  
  1030  // adjust Gobuf as if it executed a call to fn
  1031  // and then did an immediate gosave.
  1032  func gostartcallfn(gobuf *gobuf, fv *funcval) {
  1033  	var fn unsafe.Pointer
  1034  	if fv != nil {
  1035  		fn = unsafe.Pointer(fv.fn)
  1036  	} else {
  1037  		fn = unsafe.Pointer(funcPC(nilfunc))
  1038  	}
  1039  	gostartcall(gobuf, fn, unsafe.Pointer(fv))
  1040  }
  1041  
  1042  // Maybe shrink the stack being used by gp.
  1043  // Called at garbage collection time.
  1044  // gp must be stopped, but the world need not be.
  1045  func shrinkstack(gp *g) {
  1046  	gstatus := readgstatus(gp)
  1047  	if gstatus&^_Gscan == _Gdead {
  1048  		if gp.stack.lo != 0 {
  1049  			// Free whole stack - it will get reallocated
  1050  			// if G is used again.
  1051  			stackfree(gp.stack)
  1052  			gp.stack.lo = 0
  1053  			gp.stack.hi = 0
  1054  		}
  1055  		return
  1056  	}
  1057  	if gp.stack.lo == 0 {
  1058  		throw("missing stack in shrinkstack")
  1059  	}
  1060  	if gstatus&_Gscan == 0 {
  1061  		throw("bad status in shrinkstack")
  1062  	}
  1063  
  1064  	if debug.gcshrinkstackoff > 0 {
  1065  		return
  1066  	}
  1067  	f := findfunc(gp.startpc)
  1068  	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
  1069  		// We're not allowed to shrink the gcBgMarkWorker
  1070  		// stack (see gcBgMarkWorker for explanation).
  1071  		return
  1072  	}
  1073  
  1074  	oldsize := gp.stack.hi - gp.stack.lo
  1075  	newsize := oldsize / 2
  1076  	// Don't shrink the allocation below the minimum-sized stack
  1077  	// allocation.
  1078  	if newsize < _FixedStack {
  1079  		return
  1080  	}
  1081  	// Compute how much of the stack is currently in use and only
  1082  	// shrink the stack if gp is using less than a quarter of its
  1083  	// current stack. The currently used stack includes everything
  1084  	// down to the SP plus the stack guard space that ensures
  1085  	// there's room for nosplit functions.
  1086  	avail := gp.stack.hi - gp.stack.lo
  1087  	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
  1088  		return
  1089  	}
  1090  
  1091  	// We can't copy the stack if we're in a syscall.
  1092  	// The syscall might have pointers into the stack.
  1093  	if gp.syscallsp != 0 {
  1094  		return
  1095  	}
  1096  	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
  1097  		return
  1098  	}
  1099  
  1100  	if stackDebug > 0 {
  1101  		print("shrinking stack ", oldsize, "->", newsize, "\n")
  1102  	}
  1103  
  1104  	copystack(gp, newsize, false)
  1105  }
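// As a concrete example of the usage check above (values for linux/amd64,
// where _StackLimit is 752): a goroutine with an 8 KiB stack and 1 KiB of
// frames below hi counts 1024 + 752 = 1776 bytes used, which is under
// avail/4 == 2048, so the stack is shrunk to 4 KiB (assuming the other
// conditions above also hold). With 2 KiB of frames in use,
// 2048 + 752 >= 2048 and the stack is left alone.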
  1106  
  1107  // freeStackSpans frees unused stack spans at the end of GC.
  1108  func freeStackSpans() {
  1109  	lock(&stackpoolmu)
  1110  
  1111  	// Scan stack pools for empty stack spans.
  1112  	for order := range stackpool {
  1113  		list := &stackpool[order]
  1114  		for s := list.first; s != nil; {
  1115  			next := s.next
  1116  			if s.allocCount == 0 {
  1117  				list.remove(s)
  1118  				s.manualFreeList = 0
  1119  				osStackFree(s)
  1120  				mheap_.freeManual(s, &memstats.stacks_inuse)
  1121  			}
  1122  			s = next
  1123  		}
  1124  	}
  1125  
  1126  	unlock(&stackpoolmu)
  1127  
  1128  	// Free large stack spans.
  1129  	lock(&stackLarge.lock)
  1130  	for i := range stackLarge.free {
  1131  		for s := stackLarge.free[i].first; s != nil; {
  1132  			next := s.next
  1133  			stackLarge.free[i].remove(s)
  1134  			osStackFree(s)
  1135  			mheap_.freeManual(s, &memstats.stacks_inuse)
  1136  			s = next
  1137  		}
  1138  	}
  1139  	unlock(&stackLarge.lock)
  1140  }
  1141  
  1142  // getStackMap returns the locals and arguments live pointer maps for
  1143  // frame.
  1144  func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector) {
  1145  	targetpc := frame.continpc
  1146  	if targetpc == 0 {
  1147  		// Frame is dead. Return empty bitvectors.
  1148  		return
  1149  	}
  1150  
  1151  	f := frame.fn
  1152  	pcdata := int32(-1)
  1153  	if targetpc != f.entry {
  1154  		// Back up to the CALL. If we're at the function entry
  1155  		// point, we want to use the entry map (-1), even if
  1156  		// the first instruction of the function changes the
  1157  		// stack map.
  1158  		targetpc--
  1159  		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
  1160  	}
  1161  	if pcdata == -1 {
  1162  		// We do not have a valid pcdata value but there might be a
  1163  		// stackmap for this function. It is likely that we are looking
  1164  		// at the function prologue, assume so and hope for the best.
  1165  		pcdata = 0
  1166  	}
  1167  
  1168  	// Local variables.
  1169  	size := frame.varp - frame.sp
  1170  	var minsize uintptr
  1171  	switch sys.ArchFamily {
  1172  	case sys.ARM64:
  1173  		minsize = sys.SpAlign
  1174  	default:
  1175  		minsize = sys.MinFrameSize
  1176  	}
  1177  	if size > minsize {
  1178  		var stkmap *stackmap
  1179  		stackid := pcdata
  1180  		if f.funcID != funcID_debugCallV1 {
  1181  			stkmap = (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
  1182  		} else {
  1183  			// debugCallV1's stack map is the register map
  1184  			// at its call site.
  1185  			callerPC := frame.lr
  1186  			caller := findfunc(callerPC)
  1187  			if !caller.valid() {
  1188  				println("runtime: debugCallV1 called by unknown caller", hex(callerPC))
  1189  				throw("bad debugCallV1")
  1190  			}
  1191  			stackid = int32(-1)
  1192  			if callerPC != caller.entry {
  1193  				callerPC--
  1194  				stackid = pcdatavalue(caller, _PCDATA_RegMapIndex, callerPC, cache)
  1195  			}
  1196  			if stackid == -1 {
  1197  				stackid = 0 // in prologue
  1198  			}
  1199  			stkmap = (*stackmap)(funcdata(caller, _FUNCDATA_RegPointerMaps))
  1200  		}
  1201  		if stkmap == nil || stkmap.n <= 0 {
  1202  			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
  1203  			throw("missing stackmap")
  1204  		}
  1205  		// If nbit == 0, there's no work to do.
  1206  		if stkmap.nbit > 0 {
  1207  			if stackid < 0 || stackid >= stkmap.n {
  1208  				// don't know where we are
  1209  				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
  1210  				throw("bad symbol table")
  1211  			}
  1212  			locals = stackmapdata(stkmap, stackid)
  1213  			if stackDebug >= 3 && debug {
  1214  				print("      locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
  1215  			}
  1216  		} else if stackDebug >= 3 && debug {
  1217  			print("      no locals to adjust\n")
  1218  		}
  1219  	}
  1220  
  1221  	// Arguments.
  1222  	if frame.arglen > 0 {
  1223  		if frame.argmap != nil {
  1224  			args = *frame.argmap
  1225  		} else {
  1226  			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
  1227  			if stackmap == nil || stackmap.n <= 0 {
  1228  				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
  1229  				throw("missing stackmap")
  1230  			}
  1231  			if pcdata < 0 || pcdata >= stackmap.n {
  1232  				// don't know where we are
  1233  				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
  1234  				throw("bad symbol table")
  1235  			}
  1236  			if stackmap.nbit > 0 {
  1237  				args = stackmapdata(stackmap, pcdata)
  1238  			}
  1239  		}
  1240  	}
  1241  	return
  1242  }
  1243  
  1244  //go:nosplit
  1245  func morestackc() {
  1246  	throw("attempt to execute system stack code on user stack")
  1247  }
  1248  
