...
Run Format

Source file src/runtime/panic.go

     1	// Copyright 2014 The Go Authors. All rights reserved.
     2	// Use of this source code is governed by a BSD-style
     3	// license that can be found in the LICENSE file.
     4	
     5	package runtime
     6	
     7	import (
     8		"runtime/internal/atomic"
     9		"runtime/internal/sys"
    10		"unsafe"
    11	)
    12	
// Calling panic with one of the errors below will call errorString.Error
// which will call mallocgc to concatenate strings. That will fail if
// malloc is locked, causing a confusing error message. Throw a better
// error message instead.
//
// panicCheckMalloc throws (rather than returning so the caller can
// panic) when the current M is inside mallocgc.
func panicCheckMalloc(err error) {
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		// err is always one of the errorString values declared below,
		// so this conversion reads the string without calling Error
		// (which would allocate).
		throw(string(err.(errorString)))
	}
}
    23	
// indexError is preallocated so panicindex need not allocate at panic time.
var indexError = error(errorString("index out of range"))

// panicindex is called for an out-of-range index expression.
func panicindex() {
	panicCheckMalloc(indexError)
	panic(indexError)
}
    30	
// sliceError is preallocated so panicslice need not allocate at panic time.
var sliceError = error(errorString("slice bounds out of range"))

// panicslice is called for an out-of-range slice expression.
func panicslice() {
	panicCheckMalloc(sliceError)
	panic(sliceError)
}
    37	
// divideError is preallocated so panicdivide need not allocate at panic time.
var divideError = error(errorString("integer divide by zero"))

// panicdivide is called for an integer division by zero.
func panicdivide() {
	panicCheckMalloc(divideError)
	panic(divideError)
}
    44	
// overflowError is preallocated so panicoverflow need not allocate at panic time.
var overflowError = error(errorString("integer overflow"))

// panicoverflow is called for an integer overflow trap.
func panicoverflow() {
	panicCheckMalloc(overflowError)
	panic(overflowError)
}
    51	
// floatError is preallocated so panicfloat need not allocate at panic time.
var floatError = error(errorString("floating point error"))

// panicfloat is called for a floating point trap.
func panicfloat() {
	panicCheckMalloc(floatError)
	panic(floatError)
}
    58	
// memoryError is preallocated so panicmem need not allocate at panic time.
var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

// panicmem is called for an invalid memory access or nil pointer dereference.
func panicmem() {
	panicCheckMalloc(memoryError)
	panic(memoryError)
}
    65	
// throwinit reports a recursive call during package initialization.
// Init ordering is arranged at link time, so reaching this indicates
// linker skew.
func throwinit() {
	throw("recursive call during initialization - linker skew")
}
    69	
// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	if getg().m.curg != getg() {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// the arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp(unsafe.Pointer(&siz))
	// fn's arguments start immediately after fn itself in the caller's frame.
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc(unsafe.Pointer(&siz))

	d := newdefer(siz)
	if d._panic != nil {
		throw("deferproc: d.panic != nil after newdefer")
	}
	d.fn = fn
	d.pc = callerpc
	d.sp = sp
	// Copy the deferred call's arguments into the _defer record.
	// Pointer-sized arguments get a direct store; larger blocks use memmove.
	switch siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		*(*uintptr)(deferArgs(d)) = *(*uintptr)(unsafe.Pointer(argp))
	default:
		memmove(deferArgs(d), unsafe.Pointer(argp), uintptr(siz))
	}

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
   114	
// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	deferHeaderSize = unsafe.Sizeof(_defer{})         // bytes occupied by the _defer header alone
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15    // header rounded up to a multiple of 16
	minDeferArgs    = minDeferAlloc - deferHeaderSize // arg bytes that fit in the minimum allocation for free
)
   124	
   125	// defer size class for arg size sz
   126	//go:nosplit
   127	func deferclass(siz uintptr) uintptr {
   128		if siz <= minDeferArgs {
   129			return 0
   130		}
   131		return (siz - minDeferArgs + 15) / 16
   132	}
   133	
   134	// total size of memory block for defer with arg size sz
   135	func totaldefersize(siz uintptr) uintptr {
   136		if siz <= minDeferArgs {
   137			return minDeferAlloc
   138		}
   139		return deferHeaderSize + siz
   140	}
   141	
// Ensure that defer arg sizes that map to the same defer size class
// also map to the same malloc size class.
//
// This is a startup self-check: if the invariant is violated, pooled
// defers of one class could be backed by blocks of differing malloc
// sizes, so it throws.
func testdefersizes() {
	// m[c] records the malloc size observed for defer class c, -1 if unseen.
	var m [len(p{}.deferpool)]int32

	for i := range m {
		m[i] = -1
	}
	// Walk arg sizes upward until we exhaust the pooled size classes.
	for i := uintptr(0); ; i++ {
		defersc := deferclass(i)
		if defersc >= uintptr(len(m)) {
			break
		}
		siz := roundupsize(totaldefersize(i))
		if m[defersc] < 0 {
			m[defersc] = int32(siz)
			continue
		}
		if m[defersc] != int32(siz) {
			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
			throw("bad defer size class")
		}
	}
}
   166	
   167	// The arguments associated with a deferred call are stored
   168	// immediately after the _defer header in memory.
   169	//go:nosplit
   170	func deferArgs(d *_defer) unsafe.Pointer {
   171		if d.siz == 0 {
   172			// Avoid pointer past the defer allocation.
   173			return nil
   174		}
   175		return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
   176	}
   177	
var deferType *_type // type of _defer struct

func init() {
	// Recover the runtime type descriptor for _defer: box a nil *_defer
	// into an interface, reinterpret the interface's type word as a
	// *ptrtype, and take its element type. Needed so newdefer can pass
	// a typed allocation request to mallocgc.
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}
   185	
// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
//
// This must not grow the stack because there may be a frame without
// stack map information when this is called.
//
//go:nosplit
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	gp := getg()
	if sc < uintptr(len(p{}.deferpool)) {
		pp := gp.m.p.ptr()
		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
			// Local pool is empty but the central pool has defers:
			// refill the local pool to half capacity.
			// Take the slow path on the system stack so
			// we don't grow newdefer's stack.
			systemstack(func() {
				lock(&sched.deferlock)
				for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
					// Pop from the central linked list, push onto the local slice.
					d := sched.deferpool[sc]
					sched.deferpool[sc] = d.link
					d.link = nil
					pp.deferpool[sc] = append(pp.deferpool[sc], d)
				}
				unlock(&sched.deferlock)
			})
		}
		if n := len(pp.deferpool[sc]); n > 0 {
			d = pp.deferpool[sc][n-1]
			pp.deferpool[sc][n-1] = nil // clear the slot so the pool doesn't retain d
			pp.deferpool[sc] = pp.deferpool[sc][:n-1]
		}
	}
	if d == nil {
		// Pool miss (or arg size too large for any pool class).
		// Allocate new defer+args.
		systemstack(func() {
			total := roundupsize(totaldefersize(uintptr(siz)))
			d = (*_defer)(mallocgc(total, deferType, true))
		})
	}
	d.siz = siz
	// Push onto the front of this goroutine's defer list.
	d.link = gp._defer
	gp._defer = d
	return d
}
   231	
// Free the given defer.
// The defer cannot be used after this call.
//
// This must not grow the stack because there may be a frame without a
// stack map when this is called.
//
//go:nosplit
func freedefer(d *_defer) {
	// Callers must clear _panic and fn before freeing; throw otherwise.
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	sc := deferclass(uintptr(d.siz))
	// Only pooled size classes are recycled; larger defers are left to the GC.
	if sc < uintptr(len(p{}.deferpool)) {
		pp := getg().m.p.ptr()
		if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
			// Transfer half of local cache to the central cache.
			//
			// Take this slow path on the system stack so
			// we don't grow freedefer's stack.
			//
			// NOTE(review): this path assumes cap(pp.deferpool[sc]) > 0 so the
			// loop below runs at least once and last is non-nil — confirm the
			// pool slices are initialized with nonzero capacity.
			systemstack(func() {
				var first, last *_defer
				for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
					n := len(pp.deferpool[sc])
					d := pp.deferpool[sc][n-1]
					pp.deferpool[sc][n-1] = nil // clear the slot so the slice doesn't retain d
					pp.deferpool[sc] = pp.deferpool[sc][:n-1]
					// Chain the popped defers into a list: first..last.
					if first == nil {
						first = d
					} else {
						last.link = d
					}
					last = d
				}
				// Splice the whole chain onto the front of the central list.
				lock(&sched.deferlock)
				last.link = sched.deferpool[sc]
				sched.deferpool[sc] = first
				unlock(&sched.deferlock)
			})
		}
		// Zero the record before pooling so it carries no stale pointers.
		*d = _defer{}
		pp.deferpool[sc] = append(pp.deferpool[sc], d)
	}
}
   278	
// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}
   285	
// freedeferfn throws when freedefer is handed a defer whose fn was not
// cleared first (see freedeferpanic for the split-stack rationale).
func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}
   290	
// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.

// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	// Only run defers registered by this frame: d.sp was recorded by
	// deferproc and must match our caller's sp.
	sp := getcallersp(unsafe.Pointer(&arg0))
	if d.sp != sp {
		return
	}

	// Moving arguments around.
	//
	// Everything called after this point must be recursively
	// nosplit because the garbage collector won't know the form
	// of the arguments until the jmpdefer can flip the PC over to
	// fn.
	switch d.siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		*(*uintptr)(unsafe.Pointer(&arg0)) = *(*uintptr)(deferArgs(d))
	default:
		memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	}
	// Unlink and free the record before jumping; fn is cleared so
	// freedefer's d.fn check passes.
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	freedefer(d)
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}
   336	
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not panic, however, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			// This defer was already started by an earlier panic;
			// abort that panic and discard the defer.
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}
		d.started = true
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		// Clear fields before freeing (freedefer throws otherwise).
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}
   378	
   379	// Call all Error and String methods before freezing the world.
   380	// Used when crashing with panicking.
   381	// This must match types handled by printany.
   382	func preprintpanics(p *_panic) {
   383		defer func() {
   384			if recover() != nil {
   385				throw("panic while printing panic value")
   386			}
   387		}()
   388		for p != nil {
   389			switch v := p.arg.(type) {
   390			case error:
   391				p.arg = v.Error()
   392			case stringer:
   393				p.arg = v.String()
   394			}
   395			p = p.link
   396		}
   397	}
   398	
// Print all currently active panics. Used when crashing.
// Recurses to the oldest panic first so panics print in the order they
// occurred, each nested one prefixed by a tab.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		print("\t")
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}
   412	
// The implementation of the predeclared function panic.
//
// Runs the goroutine's deferred calls one by one, giving each a chance
// to recover. If none recovers, freezes the world and crashes with a
// report of all active panics.
func gopanic(e interface{}) {
	gp := getg()
	// Panics in contexts where user code cannot run (system stack,
	// softfloat, malloc, preemption disabled, holding locks) are fatal:
	// print the value and throw.
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	// m.softfloat is set during software floating point.
	// It increments m.locks to avoid preemption.
	// We moved the memory loads out, so there shouldn't be
	// any reason for it to panic anymore.
	if gp.m.softfloat != 0 {
		gp.m.locks--
		gp.m.softfloat = 0
		throw("panic during softfloat")
	}
	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	// Link a new _panic record (stack-allocated, hidden from escape
	// analysis via noescape) onto the front of the goroutine's panic list.
	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		// p.argp lets gorecover verify its caller is this deferred call.
		p.argp = unsafe.Pointer(getargp(0))
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		freedefer(d)
		if p.recovered {
			// The deferred call recovered this panic: unwind back to
			// the deferring frame via recovery.
			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(gp._panic)
	startpanic()
	printpanics(gp._panic)
	dopanic(0)       // should not return
	*(*int)(nil) = 0 // not reached
}
   534	
// getargp returns the location where the caller
// writes outgoing function call arguments.
//
// Must stay noinline so &x is a real stack slot in this frame;
// noescape keeps the address from forcing a heap allocation.
//go:nosplit
//go:noinline
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	return uintptr(noescape(unsafe.Pointer(&x)))
}
   543	
   544	// The implementation of the predeclared function recover.
   545	// Cannot split the stack because it needs to reliably
   546	// find the stack segment of its caller.
   547	//
   548	// TODO(rsc): Once we commit to CopyStackAlways,
   549	// this doesn't need to be nosplit.
   550	//go:nosplit
   551	func gorecover(argp uintptr) interface{} {
   552		// Must be in a function running as part of a deferred call during the panic.
   553		// Must be called from the topmost function of the call
   554		// (the function used in the defer statement).
   555		// p.argp is the argument pointer of that topmost deferred function call.
   556		// Compare against argp reported by caller.
   557		// If they match, the caller is the one who can recover.
   558		gp := getg()
   559		p := gp._panic
   560		if p != nil && !p.recovered && argp == uintptr(p.argp) {
   561			p.recovered = true
   562			return p.arg
   563		}
   564		return nil
   565	}
   566	
// startpanic switches to the system stack and begins the crash
// sequence (see startpanic_m).
//go:nosplit
func startpanic() {
	systemstack(startpanic_m)
}
   571	
// dopanic captures the caller's pc/sp, then crashes on the system
// stack via dopanic_m. It never returns.
//go:nosplit
func dopanic(unused int) {
	// unused exists only so its address yields this frame's caller info.
	pc := getcallerpc(unsafe.Pointer(&unused))
	sp := getcallersp(unsafe.Pointer(&unused))
	gp := getg()
	systemstack(func() {
		dopanic_m(gp, pc, sp) // should never return
	})
	*(*int)(nil) = 0 // not reached
}
   582	
// sync_throw is the runtime half of sync.throw, exposed to package
// sync via linkname.
//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}
   587	
// throw reports a fatal runtime error and crashes the program.
// Unlike panic, it cannot be recovered. It never returns.
//go:nosplit
func throw(s string) {
	print("fatal error: ", s, "\n")
	gp := getg()
	// Mark the M as throwing so tracebacks include runtime frames.
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	startpanic()
	dopanic(0)
	*(*int)(nil) = 0 // not reached
}
   599	
// paniclk serializes the crash output of Ms that panic concurrently
// (locked in startpanic_m, released in dopanic_m).
var paniclk mutex
   602	
// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
func recovery(gp *g) {
	// Info about defer passed in G struct.
	// (gopanic stored the recovering defer's sp/pc in sigcode0/sigcode1.)
	sp := gp.sigcode0
	pc := gp.sigcode1

	// d's arguments need to be in the stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1.  The calling function will
	// jump to the standard return epilogue.
	gcUnwindBarriers(gp, sp)
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	gp.sched.ret = 1 // deferproc's return value after recovery
	gogo(&gp.sched)
}
   627	
// startpanic_m prepares for a crash: it freezes the world on the first
// panic and escalates through m.dying states if panicking itself fails.
// Must run on the system stack (called via startpanic).
func startpanic_m() {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
		_g_.m.mallocing = 1 // tell rest of panic not to try to malloc
	} else if _g_.m.mcache == nil { // can happen if called from signal handler or throw
		_g_.m.mcache = allocmcache()
	}

	// m.dying tracks how far this M has gotten into crashing:
	// 0 = first panic, 1 = panic during panic, 2 = can't even traceback.
	switch _g_.m.dying {
	case 0:
		_g_.m.dying = 1
		_g_.writebuf = nil
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return
	case 1:
		// Something failed while panicking, probably the print of the
		// argument to panic().  Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		dopanic(0)
		exit(3)
		fallthrough
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print!  Just exit.
		exit(5)
	}
}
   668	
// didothers ensures tracebackothers runs at most once across panicking Ms.
var didothers bool

// deadlock is double-locked in dopanic_m to park an M forever while
// another M finishes printing its crash.
var deadlock mutex
   671	
// dopanic_m prints the crash report for gp (signal info and tracebacks,
// subject to GOTRACEBACK) and exits the process. pc/sp locate the frame
// to start the traceback from. Runs on the system stack; never returns.
func dopanic_m(gp *g, pc, sp uintptr) {
	// If the panic came from a signal, report the signal details first.
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || _g_.m.throwing > 0 {
			// Runtime (g0) frames are only shown at higher verbosity
			// or for throws.
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	if docrash {
		crash()
	}

	exit(2)
}
   719	
// canpanic reports whether gp may turn a fault into a Go panic instead
// of crashing the whole program.
//go:nosplit
func canpanic(gp *g) bool {
	// Note that g is m->gsignal, different from gp.
	// Note also that g->m can change at preemption, so m can go stale
	// if this function ever makes a function call.
	_g_ := getg()
	_m_ := _g_.m

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp == nil || gp != _m_.curg {
		return false
	}
	if _m_.locks-_m_.softfloat != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 {
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		return false
	}
	// Windows: panicking inside a runtime DLL call is not safe.
	if GOOS == "windows" && _m_.libcallsp != 0 {
		return false
	}
	return true
}
   746	

View as plain text