Source file src/runtime/panic.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// throwType indicates the current type of ongoing throw, which affects the
// amount of detail printed to stderr. Higher values include more detail.
type throwType uint32

const (
	// throwTypeNone means that we are not throwing.
	throwTypeNone throwType = iota

	// throwTypeUser is a throw due to a problem with the application.
	//
	// These throws do not include runtime frames, system goroutines, or
	// frame metadata.
	throwTypeUser

	// throwTypeRuntime is a throw due to a problem with Go itself.
	//
	// These throws include as much information as possible to aid in
	// debugging the runtime, including runtime frames, system goroutines,
	// and frame metadata.
	throwTypeRuntime
)

// We have two different ways of doing defers. The older way involves creating a
// defer record at the time that a defer statement is executing and adding it to a
// defer chain. This chain is inspected by the deferreturn call at all function
// exits in order to run the appropriate defer calls. A cheaper way (which we call
// open-coded defers) is used for functions in which no defer statements occur in
// loops. In that case, we simply store the defer function/arg information into
// specific stack slots at the point of each defer statement, as well as setting a
// bit in a bitmask. At each function exit, we add inline code to directly make
// the appropriate defer calls based on the bitmask and fn/arg information stored
// on the stack. During panic/Goexit processing, the appropriate defer calls are
// made using extra funcdata info that indicates the exact stack slots that
// contain the bitmask and defer fn/args.
// Check to make sure we can really generate a panic. If the panic
// was generated from the runtime, or from inside malloc, then convert
// to a throw of msg.
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
	if goarch.IsWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") {
		// Note: wasm can't tail call, so we can't get the original caller's pc.
		throw(msg)
	}
	// TODO: is this redundant? How could we be in malloc
	// but not in the runtime? runtime/internal/*, maybe?
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(msg)
	}
}

// Same as above, but calling from the runtime is allowed.
//
// Using this function is necessary for any panic that may be
// generated by runtime.sigpanic, since those are always called by the
// runtime.
func panicCheck2(err string) {
	// panic allocates, so to avoid recursive malloc, turn panics
	// during malloc into throws.
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(err)
	}
}

// Many of the following panic entry-points turn into throws when they
// happen in various runtime contexts. These should never happen in
// the runtime, and if they do, they indicate a serious issue and
// should not be caught by user code.
//
// The panic{Index,Slice,divide,shift} functions are called by
// code generated by the compiler for out of bounds index expressions,
// out of bounds slice expressions, division by zero, and shift by a
// negative number, respectively.
// The panicdivide (again), panicoverflow, panicfloat, and panicmem
// functions are called by the signal handler when a signal occurs
// indicating the respective problem.
//
// Since panic{Index,Slice,shift} are never called directly, and
// since the runtime package should never have an out of bounds slice
// or array reference or negative shift, if we see those functions called from the
// runtime package we turn the panic into a throw. That will dump the
// entire runtime stack for easier debugging.
//
// The entry points called by the signal handler will be called from
// runtime.sigpanic, so we can't disallow calls from the runtime to
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
// The panic{Index,Slice} functions are implemented in assembly and tail call
// to the goPanic{Index,Slice} functions below. This is done so we can use
// a space-minimal register calling convention.

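// For example (a rough sketch of the lowering, not literal compiler
// output), an index expression s[i] on a slice s becomes:
//
//	if uint(i) >= uint(len(s)) {
//		// outlined; tail calls goPanicIndex with the failing
//		// index and the length
//		runtime.panicIndex(i, len(s))
//	}
//	// ... actual load of s[i] ...
//
// so the boundsError below can report both values in the message.
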
// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
//
//go:yeswritebarrierrec
func goPanicIndex(x int, y int) {
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
}

//go:yeswritebarrierrec
func goPanicIndexU(x uint, y int) {
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
}

// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
//
//go:yeswritebarrierrec
func goPanicSliceAlen(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAcap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
}

//go:yeswritebarrierrec
func goPanicSliceAcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
}

// failures in the comparisons for s[x:y], 0 <= x <= y
//
//go:yeswritebarrierrec
func goPanicSliceB(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
}

//go:yeswritebarrierrec
func goPanicSliceBU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
}

// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3AlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3Acap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
}
func goPanicSlice3AcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
}

// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
}
func goPanicSlice3BU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
}

// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
}
func goPanicSlice3CU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
}

// failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s)
func goPanicSliceConvert(x int, y int) {
	panicCheck1(getcallerpc(), "slice length too short to convert to array or pointer to array")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
}

// Implemented in assembly, as they take arguments in registers.
// Declared here to mark them as ABIInternal.
func panicIndex(x int, y int)
func panicIndexU(x uint, y int)
func panicSliceAlen(x int, y int)
func panicSliceAlenU(x uint, y int)
func panicSliceAcap(x int, y int)
func panicSliceAcapU(x uint, y int)
func panicSliceB(x int, y int)
func panicSliceBU(x uint, y int)
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU(x uint, y int)
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU(x uint, y int)
func panicSlice3B(x int, y int)
func panicSlice3BU(x uint, y int)
func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)
func panicSliceConvert(x int, y int)

var shiftError = error(errorString("negative shift amount"))

//go:yeswritebarrierrec
func panicshift() {
	panicCheck1(getcallerpc(), "negative shift amount")
	panic(shiftError)
}
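
// As a sketch, a shift v << k with a signed count k that the compiler
// cannot prove non-negative is guarded roughly like:
//
//	if k < 0 {
//		runtime.panicshift()
//	}
//	r := v << uint(k)
//
// (Illustrative only; the exact lowering is up to the compiler.)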

var divideError = error(errorString("integer divide by zero"))

//go:yeswritebarrierrec
func panicdivide() {
	panicCheck2("integer divide by zero")
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheck2("integer overflow")
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panicCheck2("floating point error")
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(memoryError)
}

func panicmemAddr(addr uintptr) {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}

// Create a new deferred function fn, which has no arguments and results.
// The compiler turns a defer statement into a call to this.
func deferproc(fn func()) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.fn = fn
	d.pc = getcallerpc()
	// We must not be preempted between calling getcallersp and
	// storing it to d.sp because getcallersp's result is a
	// uintptr stack pointer.
	d.sp = getcallersp()

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
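
// Conceptually (a sketch: the result actually travels through the ABI
// return register set by return0 above, not a declared Go result), the
// generated code for "defer f()" behaves like:
//
//	if deferproc(f) != 0 {
//		// A recovered panic made deferproc "return" 1 via
//		// recovery below: run this frame's epilogue
//		// (deferreturn) and return.
//	}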

var rangeExitError = error(errorString("range function continued iteration after exit"))

//go:noinline
func panicrangeexit() {
	panic(rangeExitError)
}

// deferrangefunc is called by functions that are about to
// execute a range-over-function loop in which the loop body
// may execute a defer statement. That defer needs to add to
// the chain for the current function, not the func literal synthesized
// to represent the loop body. To do that, the original function
// calls deferrangefunc to obtain an opaque token representing
// the current frame, and then the loop body uses deferprocat
// instead of deferproc to add to that frame's defer lists.
//
// The token is an 'any' with underlying type *atomic.Pointer[_defer].
// It is the atomically-updated head of a linked list of _defer structs
// representing deferred calls. At the same time, we create a _defer
// struct on the main g._defer list with d.head set to this head pointer.
//
// The g._defer list is now a linked list of deferred calls,
// but an atomic list hanging off:
//
//		g._defer => d4 -> d3 -> drangefunc -> d2 -> d1 -> nil
//	                             | .head
//	                             |
//	                             +--> dY -> dX -> nil
//
// with each -> indicating a d.link pointer, and where drangefunc
// has the d.rangefunc = true bit set.
// Note that the function being ranged over may have added
// its own defers (d4 and d3), so drangefunc need not be at the
// top of the list when deferprocat is used. This is why we pass
// the atomic head explicitly.
//
// To keep misbehaving programs from crashing the runtime,
// deferprocat pushes new defers onto the .head list atomically.
// The fact that it is a separate list from the main goroutine
// defer list means that the main goroutine's defers can still
// be handled non-atomically.
//
// In the diagram, dY and dX are meant to be processed when
// drangefunc would be processed, which is to say the defer order
// should be d4, d3, dY, dX, d2, d1. To make that happen,
// when defer processing reaches a d with rangefunc=true,
// it calls deferconvert to atomically take the extras
// away from d.head and then adds them to the main list.
//
// That is, deferconvert changes this list:
//
//		g._defer => drangefunc -> d2 -> d1 -> nil
//	                 | .head
//	                 |
//	                 +--> dY -> dX -> nil
//
// into this list:
//
//	g._defer => dY -> dX -> d2 -> d1 -> nil
//
// It also poisons *drangefunc.head so that any future
// deferprocat using that head will throw.
// (The atomic head is ordinary garbage collected memory so that
// it's not a problem if user code holds onto it beyond
// the lifetime of drangefunc.)
//
// TODO: We could arrange for the compiler to call into the
// runtime after the loop finishes normally, to do an eager
// deferconvert, which would catch calling the loop body
// and having it defer after the loop is done. If we have a
// more general catch of loop body misuse, though, this
// might not be worth worrying about in addition.
//
// See also ../cmd/compile/internal/rangefunc/rewrite.go.
func deferrangefunc() any {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.pc = getcallerpc()
	// We must not be preempted between calling getcallersp and
	// storing it to d.sp because getcallersp's result is a
	// uintptr stack pointer.
	d.sp = getcallersp()

	d.rangefunc = true
	d.head = new(atomic.Pointer[_defer])

	return d.head
}
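
// As a rough sketch of the rewrite (see rewrite.go for the real
// transformation; names here are illustrative):
//
//	for x := range seq { // seq is a func(yield func(T) bool)
//		defer g(x)
//	}
//
// becomes approximately
//
//	token := deferrangefunc()
//	seq(func(x T) bool {
//		deferprocat(func() { g(x) }, token)
//		return true
//	})
//
// so the defer of g(x) is attached to the enclosing frame even though
// it executes inside the synthesized loop-body closure.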

// badDefer returns a fixed bad defer pointer for poisoning an atomic defer list head.
func badDefer() *_defer {
	return (*_defer)(unsafe.Pointer(uintptr(1)))
}

// deferprocat is like deferproc but adds to the atomic list represented by frame.
// See the doc comment for deferrangefunc for details.
func deferprocat(fn func(), frame any) {
	head := frame.(*atomic.Pointer[_defer])
	if raceenabled {
		racewritepc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferprocat))
	}
	d1 := newdefer()
	d1.fn = fn
	for {
		d1.link = head.Load()
		if d1.link == badDefer() {
			throw("defer after range func returned")
		}
		if head.CompareAndSwap(d1.link, d1) {
			break
		}
	}

	// Must be last - see deferproc above.
	return0()
}

// deferconvert converts a rangefunc defer list into an ordinary list.
// See the doc comment for deferrangefunc for details.
func deferconvert(d *_defer) *_defer {
	head := d.head
	if raceenabled {
		racereadpc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferconvert))
	}
	tail := d.link
	d.rangefunc = false
	d0 := d

	for {
		d = head.Load()
		if head.CompareAndSwap(d, badDefer()) {
			break
		}
	}
	if d == nil {
		freedefer(d0)
		return tail
	}
	for d1 := d; ; d1 = d1.link {
		d1.sp = d0.sp
		d1.pc = d0.pc
		if d1.link == nil {
			d1.link = tail
			break
		}
	}
	freedefer(d0)
	return d
}

// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its fn field initialized.
// All other fields can contain junk.
// Nosplit because of the uninitialized pointer fields on the stack.
//
//go:nosplit
func deferprocStack(d *_defer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}
	// fn is already set.
	// The other fields are junk on entry to deferprocStack and
	// are initialized here.
	d.heap = false
	d.rangefunc = false
	d.sp = getcallersp()
	d.pc = getcallerpc()
	// The lines below implement:
	//   d.link = gp._defer
	//   d.head = nil
	//   gp._defer = d
	// But without write barriers. The first two are writes to
	// the stack so they don't need a write barrier, and furthermore
	// are to uninitialized memory, so they must not use a write barrier.
	// The third write does not require a write barrier because we
	// explicitly mark all the defer structures, so we don't need to
	// keep track of pointers to them with a write barrier.
	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
	*(*uintptr)(unsafe.Pointer(&d.head)) = 0
	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))

	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}

// Each P holds a pool for defers.

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer. The defer is not
// added to any defer chain yet.
func newdefer() *_defer {
	var d *_defer
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == 0 && sched.deferpool != nil {
		lock(&sched.deferlock)
		for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
			d := sched.deferpool
			sched.deferpool = d.link
			d.link = nil
			pp.deferpool = append(pp.deferpool, d)
		}
		unlock(&sched.deferlock)
	}
	if n := len(pp.deferpool); n > 0 {
		d = pp.deferpool[n-1]
		pp.deferpool[n-1] = nil
		pp.deferpool = pp.deferpool[:n-1]
	}
	releasem(mp)
	mp, pp = nil, nil

	if d == nil {
		// Allocate new defer.
		d = new(_defer)
	}
	d.heap = true
	return d
}

// Free the given defer.
// The defer cannot be used after this call.
//
// This is nosplit because the incoming defer is in a perilous state.
// It's not on any defer list, so stack copying won't adjust stack
// pointers in it (namely, d.link). Hence, if we were to copy the
// stack, d could then contain a stale pointer.
//
//go:nosplit
func freedefer(d *_defer) {
	d.link = nil
	// After this point we can copy the stack.

	if d.fn != nil {
		freedeferfn()
	}
	if !d.heap {
		return
	}

	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == cap(pp.deferpool) {
		// Transfer half of local cache to the central cache.
		var first, last *_defer
		for len(pp.deferpool) > cap(pp.deferpool)/2 {
			n := len(pp.deferpool)
			d := pp.deferpool[n-1]
			pp.deferpool[n-1] = nil
			pp.deferpool = pp.deferpool[:n-1]
			if first == nil {
				first = d
			} else {
				last.link = d
			}
			last = d
		}
		lock(&sched.deferlock)
		last.link = sched.deferpool
		sched.deferpool = first
		unlock(&sched.deferlock)
	}

	*d = _defer{}

	pp.deferpool = append(pp.deferpool, d)

	releasem(mp)
	mp, pp = nil, nil
}

// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}

// deferreturn runs deferred functions for the caller's frame.
// The compiler inserts a call to this at the end of any
// function which calls defer.
func deferreturn() {
	var p _panic
	p.deferreturn = true

	p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}
}
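
// For instance (illustrative), a function containing "defer f()" ends
// with an epilogue equivalent to:
//
//	CALL	runtime.deferreturn
//	RET
//
// (see also the TODO in recovery below, which relies on this shape).
// recovery resumes a recovered frame at this call via p.retpc, so the
// remaining defers in the frame still run.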

// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Create a panic object for Goexit, so we can recognize when it might be
	// bypassed by a recover().
	var p _panic
	p.goexit = true

	p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	goexit1()
}
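
// For example, a goroutine can terminate itself early while still
// running its deferred cleanups:
//
//	go func() {
//		defer cleanup() // runs before the goroutine exits
//		if done {
//			runtime.Goexit()
//		}
//		// ... not reached when done ...
//	}()
//
// A recover() inside cleanup would return nil, since Goexit is not a
// panic. (cleanup and done are placeholders for this example.)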

// Call all Error and String methods before freezing the world.
// Used when crashing with panicking.
func preprintpanics(p *_panic) {
	defer func() {
		text := "panic while printing panic value"
		switch r := recover().(type) {
		case nil:
			// nothing to do
		case string:
			throw(text + ": " + r)
		default:
			throw(text + ": type " + toRType(efaceOf(&r)._type).string())
		}
	}()
	for p != nil {
		switch v := p.arg.(type) {
		case error:
			p.arg = v.Error()
		case stringer:
			p.arg = v.String()
		}
		p = p.link
	}
}

// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		if !p.link.goexit {
			print("\t")
		}
	}
	if p.goexit {
		return
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
// uint32 and a pointer to the byte following the varint.
//
// The implementation is the same as runtime.readvarint, except that this function
// uses unsafe.Pointer for speed.
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
	var r uint32
	var shift int
	for {
		b := *(*uint8)(fd)
		fd = add(fd, unsafe.Sizeof(b))
		if b < 128 {
			return r + uint32(b)<<shift, fd
		}
		r += uint32(b&0x7F) << (shift & 31)
		shift += 7
		if shift > 28 {
			panic("Bad varint")
		}
	}
}
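
// The format is little-endian base-128: seven value bits per byte, with
// the high bit set on every byte except the last. For example, the
// value 300 is encoded as the bytes 0xAC 0x02 and decodes as:
//
//	b = 0xAC: r = 0xAC&0x7F = 44, shift = 7 // high bit set, continue
//	b = 0x02: return 44 + uint32(2)<<7      // = 300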

// A PanicNilError happens when code calls panic(nil).
//
// Before Go 1.21, programs that called panic(nil) observed recover returning nil.
// Starting in Go 1.21, programs that call panic(nil) observe recover returning a *PanicNilError.
// Programs can change back to the old behavior by setting GODEBUG=panicnil=1.
type PanicNilError struct {
	// This field makes PanicNilError structurally different from
	// any other struct in this package, and the _ makes it different
	// from any struct in other packages too.
	// This avoids any accidental conversions being possible
	// between this struct and some other struct sharing the same fields,
	// like happened in go.dev/issue/56603.
	_ [0]*PanicNilError
}

func (*PanicNilError) Error() string { return "panic called with nil argument" }
func (*PanicNilError) RuntimeError() {}
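
// For example (illustrative):
//
//	defer func() {
//		r := recover()
//		_, isNilPanic := r.(*PanicNilError)
//		// isNilPanic is true as of Go 1.21
//		// (false, with r == nil, under GODEBUG=panicnil=1)
//		_ = isNilPanic
//	}()
//	panic(nil)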

var panicnil = &godebugInc{name: "panicnil"}

// The implementation of the predeclared function panic.
func gopanic(e any) {
	if e == nil {
		if debug.panicnil.Load() != 1 {
			e = new(PanicNilError)
		} else {
			panicnil.IncNonDefault()
		}
	}

	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e

	runningPanicDefers.Add(1)

	p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(&p)

	fatalpanic(&p)   // should not return
	*(*int)(nil) = 0 // not reached
}

// start initializes a panic to start unwinding the stack.
//
// If p.goexit is true, then start may return multiple times.
func (p *_panic) start(pc uintptr, sp unsafe.Pointer) {
	gp := getg()

	// Record the caller's PC and SP, so recovery can identify panics
	// that have been recovered. Also, so that if p is from Goexit, we
	// can restart its defer processing loop if a recovered panic tries
	// to jump past it.
	p.startPC = getcallerpc()
	p.startSP = unsafe.Pointer(getcallersp())

	if p.deferreturn {
		p.sp = sp

		if s := (*savedOpenDeferState)(gp.param); s != nil {
			// recovery saved some state for us, so that we can resume
			// calling open-coded defers without unwinding the stack.

			gp.param = nil

			p.retpc = s.retpc
			p.deferBitsPtr = (*byte)(add(sp, s.deferBitsOffset))
			p.slotsPtr = add(sp, s.slotsOffset)
		}

		return
	}

	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(p)))

	// Initialize state machine, and find the first frame with a defer.
	//
	// Note: We could use startPC and startSP here, but callers will
	// never have defer statements themselves. By starting at their
	// caller instead, we avoid needing to unwind through an extra
	// frame. It also somewhat simplifies the terminating condition for
	// deferreturn.
	p.lr, p.fp = pc, sp
	p.nextFrame()
}

// nextDefer returns the next deferred function to invoke, if any.
//
// Note: The "ok bool" result is necessary to correctly handle when
// the deferred function itself was nil (e.g., "defer (func())(nil)").
func (p *_panic) nextDefer() (func(), bool) {
	gp := getg()

	if !p.deferreturn {
		if gp._panic != p {
			throw("bad panic stack")
		}

		if p.recovered {
			mcall(recovery) // does not return
			throw("recovery failed")
		}
	}

	// The assembler adjusts p.argp in wrapper functions that shouldn't
	// be visible to recover(), so we need to restore it each iteration.
	p.argp = add(p.startSP, sys.MinFrameSize)

	for {
		for p.deferBitsPtr != nil {
			bits := *p.deferBitsPtr

			// Check whether any open-coded defers are still pending.
			//
			// Note: We need to check this upfront (rather than after
			// clearing the top bit) because it's possible that Goexit
			// invokes a deferred call, and there were still more pending
			// open-coded defers in the frame; but then the deferred call
			// panicked and invoked the remaining defers in the frame, before
			// recovering and restarting the Goexit loop.
			if bits == 0 {
				p.deferBitsPtr = nil
				break
			}

			// Find index of top bit set.
			i := 7 - uintptr(sys.LeadingZeros8(bits))

			// Clear bit and store it back.
			bits &^= 1 << i
			*p.deferBitsPtr = bits

			return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true
		}

	Recheck:
		if d := gp._defer; d != nil && d.sp == uintptr(p.sp) {
			if d.rangefunc {
				gp._defer = deferconvert(d)
				goto Recheck
			}

			fn := d.fn
			d.fn = nil

			// TODO(mdempsky): Instead of having each deferproc call have
			// its own "deferreturn(); return" sequence, we should just make
			// them reuse the one we emit for open-coded defers.
			p.retpc = d.pc

			// Unlink and free.
			gp._defer = d.link
			freedefer(d)

			return fn, true
		}

		if !p.nextFrame() {
			return nil, false
		}
	}
}

// nextFrame finds the next frame that contains deferred calls, if any.
func (p *_panic) nextFrame() (ok bool) {
	if p.lr == 0 {
		return false
	}

	gp := getg()
	systemstack(func() {
		var limit uintptr
		if d := gp._defer; d != nil {
			limit = d.sp
		}

		var u unwinder
		u.initAt(p.lr, uintptr(p.fp), 0, gp, 0)
		for {
			if !u.valid() {
				p.lr = 0
				return // ok == false
			}

			// TODO(mdempsky): If we populate u.frame.fn.deferreturn for
			// every frame containing a defer (not just open-coded defers),
			// then we can simply loop until we find the next frame where
			// it's non-zero.

			if u.frame.sp == limit {
				break // found a frame with linked defers
			}

			if p.initOpenCodedDefers(u.frame.fn, unsafe.Pointer(u.frame.varp)) {
				break // found a frame with open-coded defers
			}

			u.next()
		}

		p.lr = u.frame.lr
		p.sp = unsafe.Pointer(u.frame.sp)
		p.fp = unsafe.Pointer(u.frame.fp)

		ok = true
	})

	return
}

func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool {
	fd := funcdata(fn, abi.FUNCDATA_OpenCodedDeferInfo)
	if fd == nil {
		return false
	}

	if fn.deferreturn == 0 {
		throw("missing deferreturn")
	}

	deferBitsOffset, fd := readvarintUnsafe(fd)
	deferBitsPtr := (*uint8)(add(varp, -uintptr(deferBitsOffset)))
	if *deferBitsPtr == 0 {
		return false // has open-coded defers, but none pending
	}

	slotsOffset, fd := readvarintUnsafe(fd)

	p.retpc = fn.entry() + uintptr(fn.deferreturn)
	p.deferBitsPtr = deferBitsPtr
	p.slotsPtr = add(varp, -uintptr(slotsOffset))

	return true
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//
//go:nosplit
func gorecover(argp uintptr) any {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}
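
// The argp comparison above is why recover must be called directly by a
// deferred function to be effective. For example (helper is
// illustrative):
//
//	func f() {
//		defer func() {
//			if r := recover(); r != nil { // stops the panic:
//				println("recovered")  // argp matches p.argp
//			}
//		}()
//		defer func() { helper() }() // helper's recover() sees a
//		panic("boom")               // different argp, returns nil
//	}
//
//	func helper() { recover() } // never effective here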

//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}

//go:linkname sync_fatal sync.fatal
func sync_fatal(s string) {
	fatal(s)
}

// throw triggers a fatal error that dumps a stack trace and exits.
//
// throw should be used for runtime-internal fatal errors where Go itself,
// rather than user code, may be at fault for the failure.
//
//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ", s, "\n")
	})

	fatalthrow(throwTypeRuntime)
}

// fatal triggers a fatal error that dumps a stack trace and exits.
//
// fatal is equivalent to throw, but is used when user code is expected to be
// at fault for the failure, such as racing map writes.
//
// fatal does not include runtime frames, system goroutines, or frame metadata
// (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher.
//
//go:nosplit
func fatal(s string) {
	// Everything fatal does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ", s, "\n")
	})

	fatalthrow(throwTypeUser)
}

// runningPanicDefers is non-zero while running deferred functions for panic.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers atomic.Uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
var panicking atomic.Uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
//
// However, if unwinding the stack would skip over a Goexit call, we
// return into the Goexit loop instead, so it can continue processing
// defers instead.
func recovery(gp *g) {
	p := gp._panic
	pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp)
	p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0

	// Unwind the panic stack.
	for ; p != nil && uintptr(p.startSP) < sp; p = p.link {
		// Don't allow jumping past a pending Goexit.
		// Instead, have its _panic.start() call return again.
		//
		// TODO(mdempsky): In this case, Goexit will resume walking the
		// stack where it left off, which means it will need to rewalk
		// frames that we've already processed.
		//
		// There's a similar issue with nested panics, when the inner
		// panic supersedes the outer panic. Again, we end up needing to
		// walk the same stack frames.
		//
		// These are probably pretty rare occurrences in practice, and
		// they don't seem any worse than the existing logic. But if we
		// move the unwinding state into _panic, we could detect when we
		// run into where the last panic started, and then just pick up
		// where it left off instead.
		//
		// With how subtle defer handling is, this might not actually be
		// worthwhile though.
		if p.goexit {
			pc, sp = p.startPC, uintptr(p.startSP)
			saveOpenDeferState = false // goexit is unwinding the stack anyway
			break
		}

		runningPanicDefers.Add(-1)
	}
	gp._panic = p

	if p == nil { // must be done with signal
		gp.sig = 0
	}

	if gp.param != nil {
		throw("unexpected gp.param")
	}
	if saveOpenDeferState {
		// If we're returning to deferreturn and there are more open-coded
		// defers for it to call, save enough state for it to be able to
		// pick up where p0 left off.
		gp.param = unsafe.Pointer(&savedOpenDeferState{
			retpc: p0.retpc,

			// We need to save deferBitsPtr and slotsPtr too, but those are
			// stack pointers. To avoid issues around heap objects pointing
			// to the stack, save them as offsets from SP.
			deferBitsOffset: uintptr(unsafe.Pointer(p0.deferBitsPtr)) - uintptr(p0.sp),
			slotsOffset:     uintptr(p0.slotsPtr) - uintptr(p0.sp),
		})
	}

	// TODO(mdempsky): Currently, we rely on frames containing "defer"
	// to end with "CALL deferreturn; RET". This allows deferreturn to
	// finish running any pending defers in the frame.
	//
	// But we should be able to tell whether there are still pending
	// defers here. If there aren't, we can just jump directly to the
	// "RET" instruction. And if there are, we don't need an actual
	// "CALL deferreturn" instruction; we can simulate it with something
	// like:
	//
	//	if usesLR {
	//		lr = pc
	//	} else {
	//		sp -= sizeof(pc)
	//		*(*uintptr)(sp) = pc
	//	}
	//	pc = funcPC(deferreturn)
	//
	// So that we effectively tail call into deferreturn, such that it
	// then returns to the simple "RET" epilogue. That would save the
	// overhead of the "deferreturn" call when there aren't actually any
	// pending defers left, and shrink the TEXT size of compiled
	// binaries. (Admittedly, both of these are modest savings.)

	// Ensure we're recovering within the appropriate stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	// Restore the bp on platforms that support frame pointers.
	// N.B. It's fine to not set anything for platforms that don't
	// support frame pointers, since nothing consumes them.
	switch {
	case goarch.IsAmd64 != 0:
		// on x86, fp actually points one word higher than the top of
		// the frame since the return address is saved on the stack by
		// the caller
		gp.sched.bp = fp - 2*goarch.PtrSize
	case goarch.IsArm64 != 0:
		// on arm64, the architectural bp points one word higher
		// than the sp. fp is totally useless to us here, because it
		// only gets us to the caller's fp.
		gp.sched.bp = sp - goarch.PtrSize
	}
	gp.sched.ret = 1
	gogo(&gp.sched)
}

// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow(t throwType) {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()

	if gp.m.throwing == throwTypeNone {
		gp.m.throwing = t
	}

	// Switch to the system stack to avoid any stack growth, which may make
	// things worse if the runtime is in a bad state.
	systemstack(func() {
		if isSecureMode() {
			exit(2)
		}

		startpanic_m()

		if dopanic_m(gp, pc, sp) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			runningPanicDefers.Add(-1)

			printpanics(msgs)
		}

		docrash = dopanic_m(gp, pc, sp)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	gp := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	gp.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if gp.m.locks < 0 {
		gp.m.locks = 1
	}

	switch gp.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		gp.m.dying = 1
		panicking.Add(1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		gp.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		gp.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}

var didothers bool
var deadlock mutex

// gp is the crashing g running on this M, but may be a user G, while getg() is
// always g0.
func dopanic_m(gp *g, pc, sp uintptr) bool {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || gp.m.throwing >= throwTypeRuntime {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if panicking.Add(-1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	return docrash
}

// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic() bool {
	gp := getg()
	mp := acquirem()

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp != mp.curg {
		releasem(mp)
		return false
	}
	// N.B. mp.locks != 1 instead of 0 to account for acquirem.
	if mp.locks != 1 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 {
		releasem(mp)
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		releasem(mp)
		return false
	}
	if GOOS == "windows" && mp.libcallsp != 0 {
		releasem(mp)
		return false
	}
	releasem(mp)
	return true
}

// shouldPushSigpanic reports whether pc should be used as sigpanic's
// return PC (pushing a frame for the call). Otherwise, it should be
// left alone so that LR is used as sigpanic's return PC, effectively
// replacing the top-most frame with sigpanic. This is used by
// preparePanic.
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
	if pc == 0 {
		// Probably a call to a nil func. The old LR is more
		// useful in the stack trace. Not pushing the frame
		// will make the trace look like a call to sigpanic
		// instead. (Otherwise the trace will end at sigpanic
		// and we won't get to see who faulted.)
		return false
	}
	// If we don't recognize the PC as code, but we do recognize
	// the link register as code, then this assumes the panic was
	// caused by a call to non-code. In this case, we want to
	// ignore this call to make unwinding show the context.
	//
	// If we're running C code, we're not going to recognize pc as a
	// Go function, so just assume it's good. Otherwise, traceback
	// may try to read a stale LR that looks like a Go code
	// pointer and wander into the woods.
	if gp.m.incgo || findfunc(pc).valid() {
		// This wasn't a bad call, so use PC as sigpanic's
		// return PC.
		return true
	}
	if findfunc(lr).valid() {
		// This was a bad call, but the LR is good, so use the
		// LR as sigpanic's return PC.
		return false
	}
	// Neither the PC nor the LR is good. Hopefully pushing a frame
	// will work.
	return true
}
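
// For example, calling a nil function value jumps to PC 0:
//
//	var f func()
//	f() // faults with pc == 0; keep LR so the trace shows f's caller
//
// whereas a nil pointer dereference inside compiled Go code faults at a
// valid PC, so pushing a sigpanic frame that "returns" to that PC keeps
// the faulting frame visible in the traceback.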

// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
	f := findfunc(pc)
	if !f.valid() {
		return false
	}
	return f.funcID == abi.FuncID_abort
}