...
Run Format

Source file src/runtime/panic.go

Documentation: runtime

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"runtime/internal/atomic"
     9  	"runtime/internal/sys"
    10  	"unsafe"
    11  )
    12  
    13  // Calling panic with one of the errors below will call errorString.Error
    14  // which will call mallocgc to concatenate strings. That will fail if
    15  // malloc is locked, causing a confusing error message. Throw a better
    16  // error message instead.
    17  func panicCheckMalloc(err error) {
    18  	gp := getg()
    19  	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
    20  		throw(string(err.(errorString)))
    21  	}
    22  }
    23  
    24  var indexError = error(errorString("index out of range"))
    25  
    26  // The panicindex, panicslice, and panicdivide functions are called by
    27  // code generated by the compiler for out of bounds index expressions,
    28  // out of bounds slice expressions, and division by zero. The
    29  // panicdivide (again), panicoverflow, panicfloat, and panicmem
    30  // functions are called by the signal handler when a signal occurs
    31  // indicating the respective problem.
    32  //
    33  // Since panicindex and panicslice are never called directly, and
    34  // since the runtime package should never have an out of bounds slice
    35  // or array reference, if we see those functions called from the
    36  // runtime package we turn the panic into a throw. That will dump the
    37  // entire runtime stack for easier debugging.
    38  
    39  func panicindex() {
    40  	if hasprefix(funcname(findfunc(getcallerpc())), "runtime.") {
    41  		throw(string(indexError.(errorString)))
    42  	}
    43  	panicCheckMalloc(indexError)
    44  	panic(indexError)
    45  }
    46  
    47  var sliceError = error(errorString("slice bounds out of range"))
    48  
    49  func panicslice() {
    50  	if hasprefix(funcname(findfunc(getcallerpc())), "runtime.") {
    51  		throw(string(sliceError.(errorString)))
    52  	}
    53  	panicCheckMalloc(sliceError)
    54  	panic(sliceError)
    55  }
    56  
// divideError is preallocated so that panicking on integer division
// by zero does not itself need to allocate.
var divideError = error(errorString("integer divide by zero"))

// panicdivide is called by compiler-generated code for a division by
// zero and by the signal handler for a divide fault.
func panicdivide() {
	panicCheckMalloc(divideError)
	panic(divideError)
}
    63  
// overflowError is preallocated so that panicking on integer overflow
// does not itself need to allocate.
var overflowError = error(errorString("integer overflow"))

// panicoverflow is called by the signal handler when a signal
// indicates integer overflow.
func panicoverflow() {
	panicCheckMalloc(overflowError)
	panic(overflowError)
}
    70  
// floatError is preallocated so that panicking on a floating point
// fault does not itself need to allocate.
var floatError = error(errorString("floating point error"))

// panicfloat is called by the signal handler when a signal indicates
// a floating point error.
func panicfloat() {
	panicCheckMalloc(floatError)
	panic(floatError)
}
    77  
// memoryError is preallocated so that panicking on a memory fault
// does not itself need to allocate.
var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

// panicmem is called by the signal handler when a signal indicates an
// invalid memory access or nil pointer dereference.
func panicmem() {
	panicCheckMalloc(memoryError)
	panic(memoryError)
}
    84  
// throwinit is called by compiler-generated code when an init routine
// is entered recursively, which can only happen due to linker skew.
func throwinit() {
	throw("recursive call during initialization - linker skew")
}
    88  
// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//
// deferproc must be nosplit: until fn's arguments have been copied out
// below, they are not described by any stack map, so neither stack
// growth nor GC may run.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	if getg().m.curg != getg() {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// the arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp()
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc()

	d := newdefer(siz)
	if d._panic != nil {
		throw("deferproc: d.panic != nil after newdefer")
	}
	d.fn = fn
	d.pc = callerpc
	d.sp = sp
	switch siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		// Common case of a single pointer-sized argument:
		// copy it directly rather than calling memmove.
		*(*uintptr)(deferArgs(d)) = *(*uintptr)(unsafe.Pointer(argp))
	default:
		memmove(deferArgs(d), unsafe.Pointer(argp), uintptr(siz))
	}

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
   133  
// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

// deferHeaderSize is the size of the _defer header; minDeferAlloc is
// that size rounded up to a multiple of 16; minDeferArgs is the
// largest argument size that still fits in the minimum allocation.
const (
	deferHeaderSize = unsafe.Sizeof(_defer{})
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
	minDeferArgs    = minDeferAlloc - deferHeaderSize
)
   143  
   144  // defer size class for arg size sz
   145  //go:nosplit
   146  func deferclass(siz uintptr) uintptr {
   147  	if siz <= minDeferArgs {
   148  		return 0
   149  	}
   150  	return (siz - minDeferArgs + 15) / 16
   151  }
   152  
   153  // total size of memory block for defer with arg size sz
   154  func totaldefersize(siz uintptr) uintptr {
   155  	if siz <= minDeferArgs {
   156  		return minDeferAlloc
   157  	}
   158  	return deferHeaderSize + siz
   159  }
   160  
   161  // Ensure that defer arg sizes that map to the same defer size class
   162  // also map to the same malloc size class.
   163  func testdefersizes() {
   164  	var m [len(p{}.deferpool)]int32
   165  
   166  	for i := range m {
   167  		m[i] = -1
   168  	}
   169  	for i := uintptr(0); ; i++ {
   170  		defersc := deferclass(i)
   171  		if defersc >= uintptr(len(m)) {
   172  			break
   173  		}
   174  		siz := roundupsize(totaldefersize(i))
   175  		if m[defersc] < 0 {
   176  			m[defersc] = int32(siz)
   177  			continue
   178  		}
   179  		if m[defersc] != int32(siz) {
   180  			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
   181  			throw("bad defer size class")
   182  		}
   183  	}
   184  }
   185  
// The arguments associated with a deferred call are stored
// immediately after the _defer header in memory.
//go:nosplit
func deferArgs(d *_defer) unsafe.Pointer {
	if d.siz == 0 {
		// Avoid pointer past the defer allocation.
		return nil
	}
	// The argument area begins right after the _defer header.
	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
}
   196  
var deferType *_type // type of _defer struct

func init() {
	var x interface{}
	x = (*_defer)(nil)
	// A non-empty interface is a (type descriptor, data) pair; the
	// descriptor here is for *_defer, and its elem field is the type
	// descriptor of _defer itself, which mallocgc needs in newdefer.
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}
   204  
// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
//
// This must not grow the stack because there may be a frame without
// stack map information when this is called.
//
//go:nosplit
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	gp := getg()
	if sc < uintptr(len(p{}.deferpool)) {
		pp := gp.m.p.ptr()
		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
			// Local pool is empty but the global pool has entries:
			// refill the local pool to half capacity.
			// Take the slow path on the system stack so
			// we don't grow newdefer's stack.
			systemstack(func() {
				lock(&sched.deferlock)
				for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
					d := sched.deferpool[sc]
					sched.deferpool[sc] = d.link
					d.link = nil
					pp.deferpool[sc] = append(pp.deferpool[sc], d)
				}
				unlock(&sched.deferlock)
			})
		}
		if n := len(pp.deferpool[sc]); n > 0 {
			// Pop a cached _defer off the per-P pool.
			d = pp.deferpool[sc][n-1]
			pp.deferpool[sc][n-1] = nil
			pp.deferpool[sc] = pp.deferpool[sc][:n-1]
		}
	}
	if d == nil {
		// Allocate new defer+args.
		systemstack(func() {
			total := roundupsize(totaldefersize(uintptr(siz)))
			d = (*_defer)(mallocgc(total, deferType, true))
		})
	}
	d.siz = siz
	// Push d onto this goroutine's defer list.
	d.link = gp._defer
	gp._defer = d
	return d
}
   250  
// Free the given defer.
// The defer cannot be used after this call.
//
// This must not grow the stack because there may be a frame without a
// stack map when this is called.
//
//go:nosplit
func freedefer(d *_defer) {
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	sc := deferclass(uintptr(d.siz))
	if sc >= uintptr(len(p{}.deferpool)) {
		// Large defers are not pooled; leave them to the GC.
		return
	}
	pp := getg().m.p.ptr()
	if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
		// Transfer half of local cache to the central cache.
		//
		// Take this slow path on the system stack so
		// we don't grow freedefer's stack.
		systemstack(func() {
			var first, last *_defer
			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
				n := len(pp.deferpool[sc])
				d := pp.deferpool[sc][n-1]
				pp.deferpool[sc][n-1] = nil
				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			lock(&sched.deferlock)
			last.link = sched.deferpool[sc]
			sched.deferpool[sc] = first
			unlock(&sched.deferlock)
		})
	}

	// These lines used to be simply `*d = _defer{}` but that
	// started causing a nosplit stack overflow via typedmemmove.
	d.siz = 0
	d.started = false
	d.sp = 0
	d.pc = 0
	// d._panic and d.fn must be nil already.
	// If not, we would have called freedeferpanic or freedeferfn above,
	// both of which throw.
	d.link = nil

	pp.deferpool[sc] = append(pp.deferpool[sc], d)
}
   309  
// freedeferpanic reports a freedefer call on a defer whose _panic was
// not cleared first. Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}
   316  
// freedeferfn reports a freedefer call on a defer whose fn was not
// cleared first (a separate function, presumably for the same
// stack-space reason as freedeferpanic).
func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}
   321  
// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.

// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	sp := getcallersp()
	if d.sp != sp {
		// The topmost defer was registered by a different frame,
		// not by our caller; nothing to run here.
		return
	}

	// Moving arguments around.
	//
	// Everything called after this point must be recursively
	// nosplit because the garbage collector won't know the form
	// of the arguments until the jmpdefer can flip the PC over to
	// fn.
	switch d.siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		// Single pointer-sized argument: copy directly.
		*(*uintptr)(unsafe.Pointer(&arg0)) = *(*uintptr)(deferArgs(d))
	default:
		memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	}
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	freedefer(d)
	// Jump to fn as if the caller of deferreturn had called it.
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}
   367  
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			// This defer was already started by an earlier panic
			// or Goexit; abandon it (and abort its panic, if any).
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}
		d.started = true
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		// Clear _panic and fn before freedefer, which requires both nil.
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}
   409  
   410  // Call all Error and String methods before freezing the world.
   411  // Used when crashing with panicking.
   412  func preprintpanics(p *_panic) {
   413  	defer func() {
   414  		if recover() != nil {
   415  			throw("panic while printing panic value")
   416  		}
   417  	}()
   418  	for p != nil {
   419  		switch v := p.arg.(type) {
   420  		case error:
   421  			p.arg = v.Error()
   422  		case stringer:
   423  			p.arg = v.String()
   424  		}
   425  		p = p.link
   426  	}
   427  }
   428  
   429  // Print all currently active panics. Used when crashing.
   430  // Should only be called after preprintpanics.
   431  func printpanics(p *_panic) {
   432  	if p.link != nil {
   433  		printpanics(p.link)
   434  		print("\t")
   435  	}
   436  	print("panic: ")
   437  	printany(p.arg)
   438  	if p.recovered {
   439  		print(" [recovered]")
   440  	}
   441  	print("\n")
   442  }
   443  
// The implementation of the predeclared function panic.
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		// Panics on the system stack are not recoverable.
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	// Link a new _panic record at the head of this goroutine's panic list.
	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	atomic.Xadd(&runningPanicDefers, 1)

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		p.argp = unsafe.Pointer(getargp(0))
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		// Save pc/sp before freeing d; recovery needs them to resume
		// at the deferproc call's frame.
		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		freedefer(d)
		if p.recovered {
			atomic.Xadd(&runningPanicDefers, -1)

			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(gp._panic)

	fatalpanic(gp._panic) // should not return
	*(*int)(nil) = 0      // not reached
}
   559  
// getargp returns the location where the caller
// writes outgoing function call arguments.
//go:nosplit
//go:noinline
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	// go:noinline keeps x in a real argument slot of the caller's frame.
	return uintptr(noescape(unsafe.Pointer(&x)))
}
   568  
// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.recovered && argp == uintptr(p.argp) {
		// Mark the panic recovered; gopanic notices this after the
		// deferred call returns and resumes the frame via recovery.
		p.recovered = true
		return p.arg
	}
	// No panic in progress, already recovered, or called from the
	// wrong frame: recover is a no-op.
	return nil
}
   591  
// sync_throw is linked into package sync via go:linkname so that sync
// can crash the process without importing runtime internals.
//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}
   596  
// throw crashes the program with the given fatal error message.
// It does not return.
//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ", s, "\n")
	})
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	fatalthrow()
	*(*int)(nil) = 0 // not reached
}
   611  
// runningPanicDefers is non-zero while running deferred functions for panic.
// runningPanicDefers is incremented and decremented atomically
// (see gopanic and fatalpanic).
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
// panicking is incremented and decremented atomically
// (see startpanic_m and dopanic_m).
var panicking uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex
   624  
// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
func recovery(gp *g) {
	// Info about defer passed in G struct.
	sp := gp.sigcode0
	pc := gp.sigcode1

	// d's arguments need to be in the stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1.  The calling function will
	// jump to the standard return epilogue.
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	gp.sched.ret = 1
	// Resume at pc/sp: the deferproc call appears to return 1, and
	// the compiled code jumps to the end of the function.
	gogo(&gp.sched)
}
   648  
// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process. It does not return.
//
//go:nosplit
func fatalthrow() {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		startpanic_m()

		if dopanic_m(gp, pc, sp) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}
   675  
// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting. It does not return.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			atomic.Xadd(&runningPanicDefers, -1)

			printpanics(msgs)
		}

		docrash = dopanic_m(gp, pc, sp)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	// If crash was not taken, terminate with status 2.
	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}
   717  
// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It can have write barriers because the write barrier explicitly
// ignores writes once dying > 0.
//
//go:yeswritebarrierrec
func startpanic_m() bool {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	_g_.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if _g_.m.locks < 0 {
		_g_.m.locks = 1
	}

	// m.dying tracks how far through the dying process we have
	// gotten, so a failure while panicking degrades gracefully.
	switch _g_.m.dying {
	case 0:
		_g_.m.dying = 1
		_g_.writebuf = nil
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}
   774  
// didothers records whether tracebackothers has already run, so that
// concurrent panicking ms don't all dump every goroutine.
var didothers bool

// deadlock is locked twice by a losing m in dopanic_m to block it forever.
var deadlock mutex

// dopanic_m prints signal information (if any) and stack traces for a
// dying program, then reports whether the caller should also call
// crash (per GOTRACEBACK).
func dopanic_m(gp *g, pc, sp uintptr) bool {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || _g_.m.throwing > 0 {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	return docrash
}
   821  
   822  // canpanic returns false if a signal should throw instead of
   823  // panicking.
   824  //
   825  //go:nosplit
   826  func canpanic(gp *g) bool {
   827  	// Note that g is m->gsignal, different from gp.
   828  	// Note also that g->m can change at preemption, so m can go stale
   829  	// if this function ever makes a function call.
   830  	_g_ := getg()
   831  	_m_ := _g_.m
   832  
   833  	// Is it okay for gp to panic instead of crashing the program?
   834  	// Yes, as long as it is running Go code, not runtime code,
   835  	// and not stuck in a system call.
   836  	if gp == nil || gp != _m_.curg {
   837  		return false
   838  	}
   839  	if _m_.locks != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 {
   840  		return false
   841  	}
   842  	status := readgstatus(gp)
   843  	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
   844  		return false
   845  	}
   846  	if GOOS == "windows" && _m_.libcallsp != 0 {
   847  		return false
   848  	}
   849  	return true
   850  }
   851  
   852  // shouldPushSigpanic returns true if pc should be used as sigpanic's
   853  // return PC (pushing a frame for the call). Otherwise, it should be
   854  // left alone so that LR is used as sigpanic's return PC, effectively
   855  // replacing the top-most frame with sigpanic. This is used by
   856  // preparePanic.
   857  func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
   858  	if pc == 0 {
   859  		// Probably a call to a nil func. The old LR is more
   860  		// useful in the stack trace. Not pushing the frame
   861  		// will make the trace look like a call to sigpanic
   862  		// instead. (Otherwise the trace will end at sigpanic
   863  		// and we won't get to see who faulted.)
   864  		return false
   865  	}
   866  	// If we don't recognize the PC as code, but we do recognize
   867  	// the link register as code, then this assumes the panic was
   868  	// caused by a call to non-code. In this case, we want to
   869  	// ignore this call to make unwinding show the context.
   870  	//
   871  	// If we running C code, we're not going to recognize pc as a
   872  	// Go function, so just assume it's good. Otherwise, traceback
   873  	// may try to read a stale LR that looks like a Go code
   874  	// pointer and wander into the woods.
   875  	if gp.m.incgo || findfunc(pc).valid() {
   876  		// This wasn't a bad call, so use PC as sigpanic's
   877  		// return PC.
   878  		return true
   879  	}
   880  	if findfunc(lr).valid() {
   881  		// This was a bad call, but the LR is good, so use the
   882  		// LR as sigpanic's return PC.
   883  		return false
   884  	}
   885  	// Neither the PC or LR is good. Hopefully pushing a frame
   886  	// will work.
   887  	return true
   888  }
   889  
   890  // isAbortPC returns true if pc is the program counter at which
   891  // runtime.abort raises a signal.
   892  //
   893  // It is nosplit because it's part of the isgoexception
   894  // implementation.
   895  //
   896  //go:nosplit
   897  func isAbortPC(pc uintptr) bool {
   898  	return pc == funcPC(abort) || ((GOARCH == "arm" || GOARCH == "arm64") && pc == funcPC(abort)+sys.PCQuantum)
   899  }
   900  

View as plain text